1/* Broadcom NetXtreme-C/E network driver.
2 *
3 * Copyright (c) 2014-2015 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 */
9
10#include <linux/module.h>
11
12#include <linux/stringify.h>
13#include <linux/kernel.h>
14#include <linux/timer.h>
15#include <linux/errno.h>
16#include <linux/ioport.h>
17#include <linux/slab.h>
18#include <linux/vmalloc.h>
19#include <linux/interrupt.h>
20#include <linux/pci.h>
21#include <linux/netdevice.h>
22#include <linux/etherdevice.h>
23#include <linux/skbuff.h>
24#include <linux/dma-mapping.h>
25#include <linux/bitops.h>
26#include <linux/io.h>
27#include <linux/irq.h>
28#include <linux/delay.h>
29#include <asm/byteorder.h>
30#include <asm/page.h>
31#include <linux/time.h>
32#include <linux/mii.h>
33#include <linux/if.h>
34#include <linux/if_vlan.h>
35#include <net/ip.h>
36#include <net/tcp.h>
37#include <net/udp.h>
38#include <net/checksum.h>
39#include <net/ip6_checksum.h>
40#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
41#include <net/vxlan.h>
42#endif
43#ifdef CONFIG_NET_RX_BUSY_POLL
44#include <net/busy_poll.h>
45#endif
46#include <linux/workqueue.h>
47#include <linux/prefetch.h>
48#include <linux/cache.h>
49#include <linux/log2.h>
50#include <linux/aer.h>
51#include <linux/bitmap.h>
52#include <linux/cpu_rmap.h>
53
54#include "bnxt_hsi.h"
55#include "bnxt.h"
56#include "bnxt_sriov.h"
57#include "bnxt_ethtool.h"
58
59#define BNXT_TX_TIMEOUT (5 * HZ)
60
61static const char version[] =
62 "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
63
64MODULE_LICENSE("GPL");
65MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
66MODULE_VERSION(DRV_MODULE_VERSION);
67
68#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
69#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
70#define BNXT_RX_COPY_THRESH 256
71
72#define BNXT_TX_PUSH_THRESH 92
73
74enum board_idx {
 75	BCM57301,
76 BCM57302,
77 BCM57304,
 78	BCM57402,
79 BCM57404,
80 BCM57406,
81 BCM57304_VF,
82 BCM57404_VF,
83};
84
85/* indexed by enum above */
86static const struct {
87 char *name;
88} board_info[] = {
89 { "Broadcom BCM57301 NetXtreme-C Single-port 10Gb Ethernet" },
90 { "Broadcom BCM57302 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
 91	{ "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
 92	{ "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" },
 93	{ "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
 94	{ "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" },
95 { "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
96 { "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
97};
98
99static const struct pci_device_id bnxt_pci_tbl[] = {
 100	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
101 { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
102 { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
 103	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
104 { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
105 { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
106#ifdef CONFIG_BNXT_SRIOV
107 { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
108 { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
109#endif
110 { 0 }
111};
112
113MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
114
115static const u16 bnxt_vf_req_snif[] = {
116 HWRM_FUNC_CFG,
117 HWRM_PORT_PHY_QCFG,
118 HWRM_CFA_L2_FILTER_ALLOC,
119};
120
121static bool bnxt_vf_pciid(enum board_idx idx)
122{
123 return (idx == BCM57304_VF || idx == BCM57404_VF);
124}
125
126#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
127#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
128#define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
129
130#define BNXT_CP_DB_REARM(db, raw_cons) \
131 writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)
132
133#define BNXT_CP_DB(db, raw_cons) \
134 writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)
135
136#define BNXT_CP_DB_IRQ_DIS(db) \
137 writel(DB_CP_IRQ_DIS_FLAGS, db)
138
139static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
140{
141 /* Tell compiler to fetch tx indices from memory. */
142 barrier();
143
144 return bp->tx_ring_size -
145 ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
146}
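/* Illustrative note: the masked subtraction above stays correct even when
 * tx_prod has wrapped past tx_cons.  Assuming a hypothetical 512-entry ring
 * (tx_ring_mask = 0x1ff), prod = 0x0002 and cons = 0xfffe give
 * (prod - cons) & 0x1ff = 4 descriptors in flight, so bnxt_tx_avail()
 * reports tx_ring_size - 4 free slots.
 */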
147
148static const u16 bnxt_lhint_arr[] = {
149 TX_BD_FLAGS_LHINT_512_AND_SMALLER,
150 TX_BD_FLAGS_LHINT_512_TO_1023,
151 TX_BD_FLAGS_LHINT_1024_TO_2047,
152 TX_BD_FLAGS_LHINT_1024_TO_2047,
153 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
154 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
155 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
156 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
157 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
158 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
159 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
160 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
161 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
162 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
163 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
164 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
165 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
166 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
167 TX_BD_FLAGS_LHINT_2048_AND_LARGER,
168};
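/* Illustrative note: the transmit path indexes this table with
 * (packet length >> 9), so a hypothetical 1400-byte frame selects entry 2
 * (TX_BD_FLAGS_LHINT_1024_TO_2047) and a 64-byte frame selects entry 0
 * (TX_BD_FLAGS_LHINT_512_AND_SMALLER).
 */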
169
170static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
171{
172 struct bnxt *bp = netdev_priv(dev);
173 struct tx_bd *txbd;
174 struct tx_bd_ext *txbd1;
175 struct netdev_queue *txq;
176 int i;
177 dma_addr_t mapping;
178 unsigned int length, pad = 0;
179 u32 len, free_size, vlan_tag_flags, cfa_action, flags;
180 u16 prod, last_frag;
181 struct pci_dev *pdev = bp->pdev;
182 struct bnxt_napi *bnapi;
183 struct bnxt_tx_ring_info *txr;
184 struct bnxt_sw_tx_bd *tx_buf;
185
186 i = skb_get_queue_mapping(skb);
187 if (unlikely(i >= bp->tx_nr_rings)) {
188 dev_kfree_skb_any(skb);
189 return NETDEV_TX_OK;
190 }
191
192 bnapi = bp->bnapi[i];
193 txr = &bnapi->tx_ring;
194 txq = netdev_get_tx_queue(dev, i);
195 prod = txr->tx_prod;
196
197 free_size = bnxt_tx_avail(bp, txr);
198 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
199 netif_tx_stop_queue(txq);
200 return NETDEV_TX_BUSY;
201 }
202
203 length = skb->len;
204 len = skb_headlen(skb);
205 last_frag = skb_shinfo(skb)->nr_frags;
206
207 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
208
209 txbd->tx_bd_opaque = prod;
210
211 tx_buf = &txr->tx_buf_ring[prod];
212 tx_buf->skb = skb;
213 tx_buf->nr_frags = last_frag;
214
215 vlan_tag_flags = 0;
216 cfa_action = 0;
217 if (skb_vlan_tag_present(skb)) {
218 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
219 skb_vlan_tag_get(skb);
220 /* Currently supports 8021Q, 8021AD vlan offloads
221 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
222 */
223 if (skb->vlan_proto == htons(ETH_P_8021Q))
224 vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
225 }
226
227 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
228 struct tx_push_bd *push = txr->tx_push;
229 struct tx_bd *tx_push = &push->txbd1;
230 struct tx_bd_ext *tx_push1 = &push->txbd2;
231 void *pdata = tx_push1 + 1;
232 int j;
233
234 /* Set COAL_NOW to be ready quickly for the next push */
235 tx_push->tx_bd_len_flags_type =
236 cpu_to_le32((length << TX_BD_LEN_SHIFT) |
237 TX_BD_TYPE_LONG_TX_BD |
238 TX_BD_FLAGS_LHINT_512_AND_SMALLER |
239 TX_BD_FLAGS_COAL_NOW |
240 TX_BD_FLAGS_PACKET_END |
241 (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
242
243 if (skb->ip_summed == CHECKSUM_PARTIAL)
244 tx_push1->tx_bd_hsize_lflags =
245 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
246 else
247 tx_push1->tx_bd_hsize_lflags = 0;
248
249 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
250 tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
251
252 skb_copy_from_linear_data(skb, pdata, len);
253 pdata += len;
254 for (j = 0; j < last_frag; j++) {
255 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
256 void *fptr;
257
258 fptr = skb_frag_address_safe(frag);
259 if (!fptr)
260 goto normal_tx;
261
262 memcpy(pdata, fptr, skb_frag_size(frag));
263 pdata += skb_frag_size(frag);
264 }
265
266 memcpy(txbd, tx_push, sizeof(*txbd));
267 prod = NEXT_TX(prod);
268 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
269 memcpy(txbd, tx_push1, sizeof(*txbd));
270 prod = NEXT_TX(prod);
271 push->doorbell =
272 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
273 txr->tx_prod = prod;
274
275 netdev_tx_sent_queue(txq, skb->len);
276
277 __iowrite64_copy(txr->tx_doorbell, push,
278 (length + sizeof(*push) + 8) / 8);
279
280 tx_buf->is_push = 1;
281
282 goto tx_done;
283 }
284
285normal_tx:
286 if (length < BNXT_MIN_PKT_SIZE) {
287 pad = BNXT_MIN_PKT_SIZE - length;
288 if (skb_pad(skb, pad)) {
289 /* SKB already freed. */
290 tx_buf->skb = NULL;
291 return NETDEV_TX_OK;
292 }
293 length = BNXT_MIN_PKT_SIZE;
294 }
295
296 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
297
298 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
299 dev_kfree_skb_any(skb);
300 tx_buf->skb = NULL;
301 return NETDEV_TX_OK;
302 }
303
304 dma_unmap_addr_set(tx_buf, mapping, mapping);
305 flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
306 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
307
308 txbd->tx_bd_haddr = cpu_to_le64(mapping);
309
310 prod = NEXT_TX(prod);
311 txbd1 = (struct tx_bd_ext *)
312 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
313
314 txbd1->tx_bd_hsize_lflags = 0;
315 if (skb_is_gso(skb)) {
316 u32 hdr_len;
317
318 if (skb->encapsulation)
319 hdr_len = skb_inner_network_offset(skb) +
320 skb_inner_network_header_len(skb) +
321 inner_tcp_hdrlen(skb);
322 else
323 hdr_len = skb_transport_offset(skb) +
324 tcp_hdrlen(skb);
325
326 txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
327 TX_BD_FLAGS_T_IPID |
328 (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
329 length = skb_shinfo(skb)->gso_size;
330 txbd1->tx_bd_mss = cpu_to_le32(length);
331 length += hdr_len;
332 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
333 txbd1->tx_bd_hsize_lflags =
334 cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
335 txbd1->tx_bd_mss = 0;
336 }
337
338 length >>= 9;
339 flags |= bnxt_lhint_arr[length];
340 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
341
342 txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
343 txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
344 for (i = 0; i < last_frag; i++) {
345 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
346
347 prod = NEXT_TX(prod);
348 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
349
350 len = skb_frag_size(frag);
351 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
352 DMA_TO_DEVICE);
353
354 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
355 goto tx_dma_error;
356
357 tx_buf = &txr->tx_buf_ring[prod];
358 dma_unmap_addr_set(tx_buf, mapping, mapping);
359
360 txbd->tx_bd_haddr = cpu_to_le64(mapping);
361
362 flags = len << TX_BD_LEN_SHIFT;
363 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
364 }
365
366 flags &= ~TX_BD_LEN;
367 txbd->tx_bd_len_flags_type =
368 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
369 TX_BD_FLAGS_PACKET_END);
370
371 netdev_tx_sent_queue(txq, skb->len);
372
373 /* Sync BD data before updating doorbell */
374 wmb();
375
376 prod = NEXT_TX(prod);
377 txr->tx_prod = prod;
378
379 writel(DB_KEY_TX | prod, txr->tx_doorbell);
380 writel(DB_KEY_TX | prod, txr->tx_doorbell);
381
382tx_done:
383
384 mmiowb();
385
386 if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
387 netif_tx_stop_queue(txq);
388
389 /* netif_tx_stop_queue() must be done before checking
390 * tx index in bnxt_tx_avail() below, because in
391 * bnxt_tx_int(), we update tx index before checking for
392 * netif_tx_queue_stopped().
393 */
394 smp_mb();
395 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
396 netif_tx_wake_queue(txq);
397 }
398 return NETDEV_TX_OK;
399
400tx_dma_error:
401 last_frag = i;
402
403 /* start back at beginning and unmap skb */
404 prod = txr->tx_prod;
405 tx_buf = &txr->tx_buf_ring[prod];
406 tx_buf->skb = NULL;
407 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
408 skb_headlen(skb), PCI_DMA_TODEVICE);
409 prod = NEXT_TX(prod);
410
411 /* unmap remaining mapped pages */
412 for (i = 0; i < last_frag; i++) {
413 prod = NEXT_TX(prod);
414 tx_buf = &txr->tx_buf_ring[prod];
415 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
416 skb_frag_size(&skb_shinfo(skb)->frags[i]),
417 PCI_DMA_TODEVICE);
418 }
419
420 dev_kfree_skb_any(skb);
421 return NETDEV_TX_OK;
422}
423
424static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
425{
426 struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
427 int index = bnapi->index;
428 struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
429 u16 cons = txr->tx_cons;
430 struct pci_dev *pdev = bp->pdev;
431 int i;
432 unsigned int tx_bytes = 0;
433
434 for (i = 0; i < nr_pkts; i++) {
435 struct bnxt_sw_tx_bd *tx_buf;
436 struct sk_buff *skb;
437 int j, last;
438
439 tx_buf = &txr->tx_buf_ring[cons];
440 cons = NEXT_TX(cons);
441 skb = tx_buf->skb;
442 tx_buf->skb = NULL;
443
444 if (tx_buf->is_push) {
445 tx_buf->is_push = 0;
446 goto next_tx_int;
447 }
448
449 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
450 skb_headlen(skb), PCI_DMA_TODEVICE);
451 last = tx_buf->nr_frags;
452
453 for (j = 0; j < last; j++) {
454 cons = NEXT_TX(cons);
455 tx_buf = &txr->tx_buf_ring[cons];
456 dma_unmap_page(
457 &pdev->dev,
458 dma_unmap_addr(tx_buf, mapping),
459 skb_frag_size(&skb_shinfo(skb)->frags[j]),
460 PCI_DMA_TODEVICE);
461 }
462
463next_tx_int:
464 cons = NEXT_TX(cons);
465
466 tx_bytes += skb->len;
467 dev_kfree_skb_any(skb);
468 }
469
470 netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
471 txr->tx_cons = cons;
472
473 /* Need to make the tx_cons update visible to bnxt_start_xmit()
474 * before checking for netif_tx_queue_stopped(). Without the
475 * memory barrier, there is a small possibility that bnxt_start_xmit()
476 * will miss it and cause the queue to be stopped forever.
477 */
478 smp_mb();
479
480 if (unlikely(netif_tx_queue_stopped(txq)) &&
481 (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
482 __netif_tx_lock(txq, smp_processor_id());
483 if (netif_tx_queue_stopped(txq) &&
484 bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
485 txr->dev_state != BNXT_DEV_STATE_CLOSING)
486 netif_tx_wake_queue(txq);
487 __netif_tx_unlock(txq);
488 }
489}
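/* Note on the stop/wake ordering used by bnxt_start_xmit() and
 * bnxt_tx_int() above: the xmit path stops the queue first and only then
 * re-reads the ring indices, while the completion path publishes tx_cons
 * first and only then tests netif_tx_queue_stopped().  The smp_mb() on
 * each side pairs these two orderings so that at least one side observes
 * the other's update, preventing a queue that stays stopped forever.
 */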
490
491static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
492 gfp_t gfp)
493{
494 u8 *data;
495 struct pci_dev *pdev = bp->pdev;
496
497 data = kmalloc(bp->rx_buf_size, gfp);
498 if (!data)
499 return NULL;
500
501 *mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
502 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
503
504 if (dma_mapping_error(&pdev->dev, *mapping)) {
505 kfree(data);
506 data = NULL;
507 }
508 return data;
509}
510
511static inline int bnxt_alloc_rx_data(struct bnxt *bp,
512 struct bnxt_rx_ring_info *rxr,
513 u16 prod, gfp_t gfp)
514{
515 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
516 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
517 u8 *data;
518 dma_addr_t mapping;
519
520 data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
521 if (!data)
522 return -ENOMEM;
523
524 rx_buf->data = data;
525 dma_unmap_addr_set(rx_buf, mapping, mapping);
526
527 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
528
529 return 0;
530}
531
532static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
533 u8 *data)
534{
535 u16 prod = rxr->rx_prod;
536 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
537 struct rx_bd *cons_bd, *prod_bd;
538
539 prod_rx_buf = &rxr->rx_buf_ring[prod];
540 cons_rx_buf = &rxr->rx_buf_ring[cons];
541
542 prod_rx_buf->data = data;
543
544 dma_unmap_addr_set(prod_rx_buf, mapping,
545 dma_unmap_addr(cons_rx_buf, mapping));
546
547 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
548 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
549
550 prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
551}
552
553static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
554{
555 u16 next, max = rxr->rx_agg_bmap_size;
556
557 next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
558 if (next >= max)
559 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
560 return next;
561}
562
563static inline int bnxt_alloc_rx_page(struct bnxt *bp,
564 struct bnxt_rx_ring_info *rxr,
565 u16 prod, gfp_t gfp)
566{
567 struct rx_bd *rxbd =
568 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
569 struct bnxt_sw_rx_agg_bd *rx_agg_buf;
570 struct pci_dev *pdev = bp->pdev;
571 struct page *page;
572 dma_addr_t mapping;
573 u16 sw_prod = rxr->rx_sw_agg_prod;
574
575 page = alloc_page(gfp);
576 if (!page)
577 return -ENOMEM;
578
579 mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
580 PCI_DMA_FROMDEVICE);
581 if (dma_mapping_error(&pdev->dev, mapping)) {
582 __free_page(page);
583 return -EIO;
584 }
585
586 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
587 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
588
589 __set_bit(sw_prod, rxr->rx_agg_bmap);
590 rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
591 rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
592
593 rx_agg_buf->page = page;
594 rx_agg_buf->mapping = mapping;
595 rxbd->rx_bd_haddr = cpu_to_le64(mapping);
596 rxbd->rx_bd_opaque = sw_prod;
597 return 0;
598}
599
600static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
601 u32 agg_bufs)
602{
603 struct bnxt *bp = bnapi->bp;
604 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
605 struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
606 u16 prod = rxr->rx_agg_prod;
607 u16 sw_prod = rxr->rx_sw_agg_prod;
608 u32 i;
609
610 for (i = 0; i < agg_bufs; i++) {
611 u16 cons;
612 struct rx_agg_cmp *agg;
613 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
614 struct rx_bd *prod_bd;
615 struct page *page;
616
617 agg = (struct rx_agg_cmp *)
618 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
619 cons = agg->rx_agg_cmp_opaque;
620 __clear_bit(cons, rxr->rx_agg_bmap);
621
622 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
623 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
624
625 __set_bit(sw_prod, rxr->rx_agg_bmap);
626 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
627 cons_rx_buf = &rxr->rx_agg_ring[cons];
628
629 /* It is possible for sw_prod to be equal to cons, so
630 * set cons_rx_buf->page to NULL first.
631 */
632 page = cons_rx_buf->page;
633 cons_rx_buf->page = NULL;
634 prod_rx_buf->page = page;
635
636 prod_rx_buf->mapping = cons_rx_buf->mapping;
637
638 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
639
640 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
641 prod_bd->rx_bd_opaque = sw_prod;
642
643 prod = NEXT_RX_AGG(prod);
644 sw_prod = NEXT_RX_AGG(sw_prod);
645 cp_cons = NEXT_CMP(cp_cons);
646 }
647 rxr->rx_agg_prod = prod;
648 rxr->rx_sw_agg_prod = sw_prod;
649}
650
651static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
652 struct bnxt_rx_ring_info *rxr, u16 cons,
653 u16 prod, u8 *data, dma_addr_t dma_addr,
654 unsigned int len)
655{
656 int err;
657 struct sk_buff *skb;
658
659 err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
660 if (unlikely(err)) {
661 bnxt_reuse_rx_data(rxr, cons, data);
662 return NULL;
663 }
664
665 skb = build_skb(data, 0);
666 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
667 PCI_DMA_FROMDEVICE);
668 if (!skb) {
669 kfree(data);
670 return NULL;
671 }
672
673 skb_reserve(skb, BNXT_RX_OFFSET);
674 skb_put(skb, len);
675 return skb;
676}
677
678static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
679 struct sk_buff *skb, u16 cp_cons,
680 u32 agg_bufs)
681{
682 struct pci_dev *pdev = bp->pdev;
683 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
684 struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
685 u16 prod = rxr->rx_agg_prod;
686 u32 i;
687
688 for (i = 0; i < agg_bufs; i++) {
689 u16 cons, frag_len;
690 struct rx_agg_cmp *agg;
691 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
692 struct page *page;
693 dma_addr_t mapping;
694
695 agg = (struct rx_agg_cmp *)
696 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
697 cons = agg->rx_agg_cmp_opaque;
698 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
699 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
700
701 cons_rx_buf = &rxr->rx_agg_ring[cons];
702 skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len);
703 __clear_bit(cons, rxr->rx_agg_bmap);
704
705 /* It is possible for bnxt_alloc_rx_page() to allocate
706 * a sw_prod index that equals the cons index, so we
707 * need to clear the cons entry now.
708 */
709 mapping = dma_unmap_addr(cons_rx_buf, mapping);
710 page = cons_rx_buf->page;
711 cons_rx_buf->page = NULL;
712
713 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
714 struct skb_shared_info *shinfo;
715 unsigned int nr_frags;
716
717 shinfo = skb_shinfo(skb);
718 nr_frags = --shinfo->nr_frags;
719 __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
720
721 dev_kfree_skb(skb);
722
723 cons_rx_buf->page = page;
724
725 /* Update prod since possibly some pages have been
726 * allocated already.
727 */
728 rxr->rx_agg_prod = prod;
729 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
730 return NULL;
731 }
732
733 dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE,
734 PCI_DMA_FROMDEVICE);
735
736 skb->data_len += frag_len;
737 skb->len += frag_len;
738 skb->truesize += PAGE_SIZE;
739
740 prod = NEXT_RX_AGG(prod);
741 cp_cons = NEXT_CMP(cp_cons);
742 }
743 rxr->rx_agg_prod = prod;
744 return skb;
745}
746
747static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
748 u8 agg_bufs, u32 *raw_cons)
749{
750 u16 last;
751 struct rx_agg_cmp *agg;
752
753 *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
754 last = RING_CMP(*raw_cons);
755 agg = (struct rx_agg_cmp *)
756 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
757 return RX_AGG_CMP_VALID(agg, *raw_cons);
758}
759
760static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
761 unsigned int len,
762 dma_addr_t mapping)
763{
764 struct bnxt *bp = bnapi->bp;
765 struct pci_dev *pdev = bp->pdev;
766 struct sk_buff *skb;
767
768 skb = napi_alloc_skb(&bnapi->napi, len);
769 if (!skb)
770 return NULL;
771
772 dma_sync_single_for_cpu(&pdev->dev, mapping,
773 bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);
774
775 memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);
776
777 dma_sync_single_for_device(&pdev->dev, mapping,
778 bp->rx_copy_thresh,
779 PCI_DMA_FROMDEVICE);
780
781 skb_put(skb, len);
782 return skb;
783}
784
785static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
786 struct rx_tpa_start_cmp *tpa_start,
787 struct rx_tpa_start_cmp_ext *tpa_start1)
788{
789 u8 agg_id = TPA_START_AGG_ID(tpa_start);
790 u16 cons, prod;
791 struct bnxt_tpa_info *tpa_info;
792 struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
793 struct rx_bd *prod_bd;
794 dma_addr_t mapping;
795
796 cons = tpa_start->rx_tpa_start_cmp_opaque;
797 prod = rxr->rx_prod;
798 cons_rx_buf = &rxr->rx_buf_ring[cons];
799 prod_rx_buf = &rxr->rx_buf_ring[prod];
800 tpa_info = &rxr->rx_tpa[agg_id];
801
802 prod_rx_buf->data = tpa_info->data;
803
804 mapping = tpa_info->mapping;
805 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
806
807 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
808
809 prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
810
811 tpa_info->data = cons_rx_buf->data;
812 cons_rx_buf->data = NULL;
813 tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);
814
815 tpa_info->len =
816 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
817 RX_TPA_START_CMP_LEN_SHIFT;
818 if (likely(TPA_START_HASH_VALID(tpa_start))) {
819 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
820
821 tpa_info->hash_type = PKT_HASH_TYPE_L4;
822 tpa_info->gso_type = SKB_GSO_TCPV4;
823 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
824 if (hash_type == 3)
825 tpa_info->gso_type = SKB_GSO_TCPV6;
826 tpa_info->rss_hash =
827 le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
828 } else {
829 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
830 tpa_info->gso_type = 0;
831 if (netif_msg_rx_err(bp))
832 netdev_warn(bp->dev, "TPA packet without valid hash\n");
833 }
834 tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
835 tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
836
837 rxr->rx_prod = NEXT_RX(prod);
838 cons = NEXT_RX(cons);
839 cons_rx_buf = &rxr->rx_buf_ring[cons];
840
841 bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
842 rxr->rx_prod = NEXT_RX(rxr->rx_prod);
843 cons_rx_buf->data = NULL;
844}
845
846static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
847 u16 cp_cons, u32 agg_bufs)
848{
849 if (agg_bufs)
850 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
851}
852
853#define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
854#define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
855
856static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
857 struct rx_tpa_end_cmp *tpa_end,
858 struct rx_tpa_end_cmp_ext *tpa_end1,
859 struct sk_buff *skb)
860{
 861#ifdef CONFIG_INET
862 struct tcphdr *th;
863 int payload_off, tcp_opt_len = 0;
864 int len, nw_off;
 865	u16 segs;
 866
867 segs = TPA_END_TPA_SEGS(tpa_end);
868 if (segs == 1)
869 return skb;
870
871 NAPI_GRO_CB(skb)->count = segs;
872 skb_shinfo(skb)->gso_size =
873 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
874 skb_shinfo(skb)->gso_type = tpa_info->gso_type;
875 payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
876 RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
877 RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
878 if (TPA_END_GRO_TS(tpa_end))
879 tcp_opt_len = 12;
880
881 if (tpa_info->gso_type == SKB_GSO_TCPV4) {
882 struct iphdr *iph;
883
884 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
885 ETH_HLEN;
886 skb_set_network_header(skb, nw_off);
887 iph = ip_hdr(skb);
888 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
889 len = skb->len - skb_transport_offset(skb);
890 th = tcp_hdr(skb);
891 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
892 } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
893 struct ipv6hdr *iph;
894
895 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
896 ETH_HLEN;
897 skb_set_network_header(skb, nw_off);
898 iph = ipv6_hdr(skb);
899 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
900 len = skb->len - skb_transport_offset(skb);
901 th = tcp_hdr(skb);
902 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
903 } else {
904 dev_kfree_skb_any(skb);
905 return NULL;
906 }
907 tcp_gro_complete(skb);
908
909 if (nw_off) { /* tunnel */
910 struct udphdr *uh = NULL;
911
912 if (skb->protocol == htons(ETH_P_IP)) {
913 struct iphdr *iph = (struct iphdr *)skb->data;
914
915 if (iph->protocol == IPPROTO_UDP)
916 uh = (struct udphdr *)(iph + 1);
917 } else {
918 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
919
920 if (iph->nexthdr == IPPROTO_UDP)
921 uh = (struct udphdr *)(iph + 1);
922 }
923 if (uh) {
924 if (uh->check)
925 skb_shinfo(skb)->gso_type |=
926 SKB_GSO_UDP_TUNNEL_CSUM;
927 else
928 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
929 }
930 }
931#endif
932 return skb;
933}
934
935static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
936 struct bnxt_napi *bnapi,
937 u32 *raw_cons,
938 struct rx_tpa_end_cmp *tpa_end,
939 struct rx_tpa_end_cmp_ext *tpa_end1,
940 bool *agg_event)
941{
942 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
943 struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
944 u8 agg_id = TPA_END_AGG_ID(tpa_end);
945 u8 *data, agg_bufs;
946 u16 cp_cons = RING_CMP(*raw_cons);
947 unsigned int len;
948 struct bnxt_tpa_info *tpa_info;
949 dma_addr_t mapping;
950 struct sk_buff *skb;
951
952 tpa_info = &rxr->rx_tpa[agg_id];
953 data = tpa_info->data;
954 prefetch(data);
955 len = tpa_info->len;
956 mapping = tpa_info->mapping;
957
958 agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
959 RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
960
961 if (agg_bufs) {
962 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
963 return ERR_PTR(-EBUSY);
964
965 *agg_event = true;
966 cp_cons = NEXT_CMP(cp_cons);
967 }
968
969 if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
970 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
971 netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
972 agg_bufs, (int)MAX_SKB_FRAGS);
973 return NULL;
974 }
975
976 if (len <= bp->rx_copy_thresh) {
977 skb = bnxt_copy_skb(bnapi, data, len, mapping);
978 if (!skb) {
979 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
980 return NULL;
981 }
982 } else {
983 u8 *new_data;
984 dma_addr_t new_mapping;
985
986 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
987 if (!new_data) {
988 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
989 return NULL;
990 }
991
992 tpa_info->data = new_data;
993 tpa_info->mapping = new_mapping;
994
995 skb = build_skb(data, 0);
996 dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
997 PCI_DMA_FROMDEVICE);
998
999 if (!skb) {
1000 kfree(data);
1001 bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
1002 return NULL;
1003 }
1004 skb_reserve(skb, BNXT_RX_OFFSET);
1005 skb_put(skb, len);
1006 }
1007
1008 if (agg_bufs) {
1009 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
1010 if (!skb) {
1011 /* Page reuse already handled by bnxt_rx_pages(). */
1012 return NULL;
1013 }
1014 }
1015 skb->protocol = eth_type_trans(skb, bp->dev);
1016
1017 if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1018 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1019
1020 if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
1021 netdev_features_t features = skb->dev->features;
1022 u16 vlan_proto = tpa_info->metadata >>
1023 RX_CMP_FLAGS2_METADATA_TPID_SFT;
1024
1025 if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
1026 vlan_proto == ETH_P_8021Q) ||
1027 ((features & NETIF_F_HW_VLAN_STAG_RX) &&
1028 vlan_proto == ETH_P_8021AD)) {
1029 __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
1030 tpa_info->metadata &
1031 RX_CMP_FLAGS2_METADATA_VID_MASK);
1032 }
1033 }
1034
1035 skb_checksum_none_assert(skb);
1036 if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1037 skb->ip_summed = CHECKSUM_UNNECESSARY;
1038 skb->csum_level =
1039 (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1040 }
1041
1042 if (TPA_END_GRO(tpa_end))
1043 skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb);
1044
1045 return skb;
1046}
1047
1048/* returns the following:
1049 * 1 - 1 packet successfully received
1050 * 0 - successful TPA_START, packet not completed yet
1051 * -EBUSY - completion ring does not have all the agg buffers yet
1052 * -ENOMEM - packet aborted due to out of memory
1053 * -EIO - packet aborted due to hw error indicated in BD
1054 */
1055static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
1056 bool *agg_event)
1057{
1058 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1059 struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
1060 struct net_device *dev = bp->dev;
1061 struct rx_cmp *rxcmp;
1062 struct rx_cmp_ext *rxcmp1;
1063 u32 tmp_raw_cons = *raw_cons;
1064 u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1065 struct bnxt_sw_rx_bd *rx_buf;
1066 unsigned int len;
1067 u8 *data, agg_bufs, cmp_type;
1068 dma_addr_t dma_addr;
1069 struct sk_buff *skb;
1070 int rc = 0;
1071
1072 rxcmp = (struct rx_cmp *)
1073 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1074
1075 tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1076 cp_cons = RING_CMP(tmp_raw_cons);
1077 rxcmp1 = (struct rx_cmp_ext *)
1078 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1079
1080 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1081 return -EBUSY;
1082
1083 cmp_type = RX_CMP_TYPE(rxcmp);
1084
1085 prod = rxr->rx_prod;
1086
1087 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1088 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1089 (struct rx_tpa_start_cmp_ext *)rxcmp1);
1090
1091 goto next_rx_no_prod;
1092
1093 } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1094 skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
1095 (struct rx_tpa_end_cmp *)rxcmp,
1096 (struct rx_tpa_end_cmp_ext *)rxcmp1,
1097 agg_event);
1098
1099 if (unlikely(IS_ERR(skb)))
1100 return -EBUSY;
1101
1102 rc = -ENOMEM;
1103 if (likely(skb)) {
1104 skb_record_rx_queue(skb, bnapi->index);
1105 skb_mark_napi_id(skb, &bnapi->napi);
1106 if (bnxt_busy_polling(bnapi))
1107 netif_receive_skb(skb);
1108 else
1109 napi_gro_receive(&bnapi->napi, skb);
1110 rc = 1;
1111 }
1112 goto next_rx_no_prod;
1113 }
1114
1115 cons = rxcmp->rx_cmp_opaque;
1116 rx_buf = &rxr->rx_buf_ring[cons];
1117 data = rx_buf->data;
1118 prefetch(data);
1119
1120 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
1121 RX_CMP_AGG_BUFS_SHIFT;
1122
1123 if (agg_bufs) {
1124 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1125 return -EBUSY;
1126
1127 cp_cons = NEXT_CMP(cp_cons);
1128 *agg_event = true;
1129 }
1130
1131 rx_buf->data = NULL;
1132 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1133 bnxt_reuse_rx_data(rxr, cons, data);
1134 if (agg_bufs)
1135 bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
1136
1137 rc = -EIO;
1138 goto next_rx;
1139 }
1140
1141 len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1142 dma_addr = dma_unmap_addr(rx_buf, mapping);
1143
1144 if (len <= bp->rx_copy_thresh) {
1145 skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
1146 bnxt_reuse_rx_data(rxr, cons, data);
1147 if (!skb) {
1148 rc = -ENOMEM;
1149 goto next_rx;
1150 }
1151 } else {
1152 skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
1153 if (!skb) {
1154 rc = -ENOMEM;
1155 goto next_rx;
1156 }
1157 }
1158
1159 if (agg_bufs) {
1160 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
1161 if (!skb) {
1162 rc = -ENOMEM;
1163 goto next_rx;
1164 }
1165 }
1166
1167 if (RX_CMP_HASH_VALID(rxcmp)) {
1168 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1169 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1170
1171 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1172 if (hash_type != 1 && hash_type != 3)
1173 type = PKT_HASH_TYPE_L3;
1174 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1175 }
1176
1177 skb->protocol = eth_type_trans(skb, dev);
1178
1179 if (rxcmp1->rx_cmp_flags2 &
1180 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
1181 netdev_features_t features = skb->dev->features;
1182 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1183 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1184
1185 if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
1186 vlan_proto == ETH_P_8021Q) ||
1187 ((features & NETIF_F_HW_VLAN_STAG_RX) &&
1188 vlan_proto == ETH_P_8021AD))
1189 __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
1190 meta_data &
1191 RX_CMP_FLAGS2_METADATA_VID_MASK);
1192 }
1193
1194 skb_checksum_none_assert(skb);
1195 if (RX_CMP_L4_CS_OK(rxcmp1)) {
1196 if (dev->features & NETIF_F_RXCSUM) {
1197 skb->ip_summed = CHECKSUM_UNNECESSARY;
1198 skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1199 }
1200 } else {
1201 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1202 if (dev->features & NETIF_F_RXCSUM)
1203 cpr->rx_l4_csum_errors++;
1204 }
1205 }
1206
1207 skb_record_rx_queue(skb, bnapi->index);
1208 skb_mark_napi_id(skb, &bnapi->napi);
1209 if (bnxt_busy_polling(bnapi))
1210 netif_receive_skb(skb);
1211 else
1212 napi_gro_receive(&bnapi->napi, skb);
1213 rc = 1;
1214
1215next_rx:
1216 rxr->rx_prod = NEXT_RX(prod);
1217
1218next_rx_no_prod:
1219 *raw_cons = tmp_raw_cons;
1220
1221 return rc;
1222}
1223
1224static int bnxt_async_event_process(struct bnxt *bp,
1225 struct hwrm_async_event_cmpl *cmpl)
1226{
1227 u16 event_id = le16_to_cpu(cmpl->event_id);
1228
1229 /* TODO CHIMP_FW: Define event id's for link change, error etc */
1230 switch (event_id) {
1231 case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1232 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
1233 schedule_work(&bp->sp_task);
1234 break;
1235 default:
1236 netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
1237 event_id);
1238 break;
1239 }
1240 return 0;
1241}
1242
1243static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1244{
1245 u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
1246 struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
1247 struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
1248 (struct hwrm_fwd_req_cmpl *)txcmp;
1249
1250 switch (cmpl_type) {
1251 case CMPL_BASE_TYPE_HWRM_DONE:
1252 seq_id = le16_to_cpu(h_cmpl->sequence_id);
1253 if (seq_id == bp->hwrm_intr_seq_id)
1254 bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
1255 else
1256 netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
1257 break;
1258
1259 case CMPL_BASE_TYPE_HWRM_FWD_REQ:
1260 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
1261
1262 if ((vf_id < bp->pf.first_vf_id) ||
1263 (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
1264 netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
1265 vf_id);
1266 return -EINVAL;
1267 }
1268
1269 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1270 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
1271 schedule_work(&bp->sp_task);
1272 break;
1273
1274 case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
1275 bnxt_async_event_process(bp,
1276 (struct hwrm_async_event_cmpl *)txcmp);
1277
1278 default:
1279 break;
1280 }
1281
1282 return 0;
1283}
1284
1285static irqreturn_t bnxt_msix(int irq, void *dev_instance)
1286{
1287 struct bnxt_napi *bnapi = dev_instance;
1288 struct bnxt *bp = bnapi->bp;
1289 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1290 u32 cons = RING_CMP(cpr->cp_raw_cons);
1291
1292 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1293 napi_schedule(&bnapi->napi);
1294 return IRQ_HANDLED;
1295}
1296
1297static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1298{
1299 u32 raw_cons = cpr->cp_raw_cons;
1300 u16 cons = RING_CMP(raw_cons);
1301 struct tx_cmp *txcmp;
1302
1303 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1304
1305 return TX_CMP_VALID(txcmp, raw_cons);
1306}
1307
1308static irqreturn_t bnxt_inta(int irq, void *dev_instance)
1309{
1310 struct bnxt_napi *bnapi = dev_instance;
1311 struct bnxt *bp = bnapi->bp;
1312 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1313 u32 cons = RING_CMP(cpr->cp_raw_cons);
1314 u32 int_status;
1315
1316 prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1317
1318 if (!bnxt_has_work(bp, cpr)) {
 1319		int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
1320 /* return if erroneous interrupt */
1321 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
1322 return IRQ_NONE;
1323 }
1324
1325 /* disable ring IRQ */
1326 BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
1327
1328 /* Return here if interrupt is shared and is disabled. */
1329 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1330 return IRQ_HANDLED;
1331
1332 napi_schedule(&bnapi->napi);
1333 return IRQ_HANDLED;
1334}
1335
1336static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
1337{
1338 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1339 u32 raw_cons = cpr->cp_raw_cons;
1340 u32 cons;
1341 int tx_pkts = 0;
1342 int rx_pkts = 0;
1343 bool rx_event = false;
1344 bool agg_event = false;
1345 struct tx_cmp *txcmp;
1346
1347 while (1) {
1348 int rc;
1349
1350 cons = RING_CMP(raw_cons);
1351 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1352
1353 if (!TX_CMP_VALID(txcmp, raw_cons))
1354 break;
1355
1356 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1357 tx_pkts++;
1358 /* return full budget so NAPI will complete. */
1359 if (unlikely(tx_pkts > bp->tx_wake_thresh))
1360 rx_pkts = budget;
1361 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1362 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
1363 if (likely(rc >= 0))
1364 rx_pkts += rc;
1365 else if (rc == -EBUSY) /* partial completion */
1366 break;
1367 rx_event = true;
1368 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
1369 CMPL_BASE_TYPE_HWRM_DONE) ||
1370 (TX_CMP_TYPE(txcmp) ==
1371 CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
1372 (TX_CMP_TYPE(txcmp) ==
1373 CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
1374 bnxt_hwrm_handler(bp, txcmp);
1375 }
1376 raw_cons = NEXT_RAW_CMP(raw_cons);
1377
1378 if (rx_pkts == budget)
1379 break;
1380 }
1381
1382 cpr->cp_raw_cons = raw_cons;
1383 /* ACK completion ring before freeing tx ring and producing new
1384 * buffers in rx/agg rings to prevent overflowing the completion
1385 * ring.
1386 */
1387 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
1388
1389 if (tx_pkts)
1390 bnxt_tx_int(bp, bnapi, tx_pkts);
1391
1392 if (rx_event) {
1393 struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
1394
1395 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1396 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1397 if (agg_event) {
1398 writel(DB_KEY_RX | rxr->rx_agg_prod,
1399 rxr->rx_agg_doorbell);
1400 writel(DB_KEY_RX | rxr->rx_agg_prod,
1401 rxr->rx_agg_doorbell);
1402 }
1403 }
1404 return rx_pkts;
1405}
1406
1407static int bnxt_poll(struct napi_struct *napi, int budget)
1408{
1409 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1410 struct bnxt *bp = bnapi->bp;
1411 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1412 int work_done = 0;
1413
1414 if (!bnxt_lock_napi(bnapi))
1415 return budget;
1416
1417 while (1) {
1418 work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
1419
1420 if (work_done >= budget)
1421 break;
1422
1423 if (!bnxt_has_work(bp, cpr)) {
1424 napi_complete(napi);
1425 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
1426 break;
1427 }
1428 }
1429 mmiowb();
1430 bnxt_unlock_napi(bnapi);
1431 return work_done;
1432}
1433
1434#ifdef CONFIG_NET_RX_BUSY_POLL
1435static int bnxt_busy_poll(struct napi_struct *napi)
1436{
1437 struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1438 struct bnxt *bp = bnapi->bp;
1439 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1440 int rx_work, budget = 4;
1441
1442 if (atomic_read(&bp->intr_sem) != 0)
1443 return LL_FLUSH_FAILED;
1444
1445 if (!bnxt_lock_poll(bnapi))
1446 return LL_FLUSH_BUSY;
1447
1448 rx_work = bnxt_poll_work(bp, bnapi, budget);
1449
1450 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
1451
1452 bnxt_unlock_poll(bnapi);
1453 return rx_work;
1454}
1455#endif
1456
1457static void bnxt_free_tx_skbs(struct bnxt *bp)
1458{
1459 int i, max_idx;
1460 struct pci_dev *pdev = bp->pdev;
1461
1462 if (!bp->bnapi)
1463 return;
1464
1465 max_idx = bp->tx_nr_pages * TX_DESC_CNT;
1466 for (i = 0; i < bp->tx_nr_rings; i++) {
1467 struct bnxt_napi *bnapi = bp->bnapi[i];
1468 struct bnxt_tx_ring_info *txr;
1469 int j;
1470
1471 if (!bnapi)
1472 continue;
1473
1474 txr = &bnapi->tx_ring;
1475 for (j = 0; j < max_idx;) {
1476 struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
1477 struct sk_buff *skb = tx_buf->skb;
1478 int k, last;
1479
1480 if (!skb) {
1481 j++;
1482 continue;
1483 }
1484
1485 tx_buf->skb = NULL;
1486
1487 if (tx_buf->is_push) {
1488 dev_kfree_skb(skb);
1489 j += 2;
1490 continue;
1491 }
1492
1493 dma_unmap_single(&pdev->dev,
1494 dma_unmap_addr(tx_buf, mapping),
1495 skb_headlen(skb),
1496 PCI_DMA_TODEVICE);
1497
1498 last = tx_buf->nr_frags;
1499 j += 2;
1500 for (k = 0; k < last; k++, j = NEXT_TX(j)) {
1501 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
1502
1503 tx_buf = &txr->tx_buf_ring[j];
1504 dma_unmap_page(
1505 &pdev->dev,
1506 dma_unmap_addr(tx_buf, mapping),
1507 skb_frag_size(frag), PCI_DMA_TODEVICE);
1508 }
1509 dev_kfree_skb(skb);
1510 }
1511 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
1512 }
1513}
1514
1515static void bnxt_free_rx_skbs(struct bnxt *bp)
1516{
1517 int i, max_idx, max_agg_idx;
1518 struct pci_dev *pdev = bp->pdev;
1519
1520 if (!bp->bnapi)
1521 return;
1522
1523 max_idx = bp->rx_nr_pages * RX_DESC_CNT;
1524 max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
1525 for (i = 0; i < bp->rx_nr_rings; i++) {
1526 struct bnxt_napi *bnapi = bp->bnapi[i];
1527 struct bnxt_rx_ring_info *rxr;
1528 int j;
1529
1530 if (!bnapi)
1531 continue;
1532
1533 rxr = &bnapi->rx_ring;
1534
1535 if (rxr->rx_tpa) {
1536 for (j = 0; j < MAX_TPA; j++) {
1537 struct bnxt_tpa_info *tpa_info =
1538 &rxr->rx_tpa[j];
1539 u8 *data = tpa_info->data;
1540
1541 if (!data)
1542 continue;
1543
1544 dma_unmap_single(
1545 &pdev->dev,
1546 dma_unmap_addr(tpa_info, mapping),
1547 bp->rx_buf_use_size,
1548 PCI_DMA_FROMDEVICE);
1549
1550 tpa_info->data = NULL;
1551
1552 kfree(data);
1553 }
1554 }
1555
1556 for (j = 0; j < max_idx; j++) {
1557 struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
1558 u8 *data = rx_buf->data;
1559
1560 if (!data)
1561 continue;
1562
1563 dma_unmap_single(&pdev->dev,
1564 dma_unmap_addr(rx_buf, mapping),
1565 bp->rx_buf_use_size,
1566 PCI_DMA_FROMDEVICE);
1567
1568 rx_buf->data = NULL;
1569
1570 kfree(data);
1571 }
1572
1573 for (j = 0; j < max_agg_idx; j++) {
1574 struct bnxt_sw_rx_agg_bd *rx_agg_buf =
1575 &rxr->rx_agg_ring[j];
1576 struct page *page = rx_agg_buf->page;
1577
1578 if (!page)
1579 continue;
1580
1581 dma_unmap_page(&pdev->dev,
1582 dma_unmap_addr(rx_agg_buf, mapping),
1583 PAGE_SIZE, PCI_DMA_FROMDEVICE);
1584
1585 rx_agg_buf->page = NULL;
1586 __clear_bit(j, rxr->rx_agg_bmap);
1587
1588 __free_page(page);
1589 }
1590 }
1591}
1592
1593static void bnxt_free_skbs(struct bnxt *bp)
1594{
1595 bnxt_free_tx_skbs(bp);
1596 bnxt_free_rx_skbs(bp);
1597}
1598
1599static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
1600{
1601 struct pci_dev *pdev = bp->pdev;
1602 int i;
1603
1604 for (i = 0; i < ring->nr_pages; i++) {
1605 if (!ring->pg_arr[i])
1606 continue;
1607
1608 dma_free_coherent(&pdev->dev, ring->page_size,
1609 ring->pg_arr[i], ring->dma_arr[i]);
1610
1611 ring->pg_arr[i] = NULL;
1612 }
1613 if (ring->pg_tbl) {
1614 dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
1615 ring->pg_tbl, ring->pg_tbl_map);
1616 ring->pg_tbl = NULL;
1617 }
1618 if (ring->vmem_size && *ring->vmem) {
1619 vfree(*ring->vmem);
1620 *ring->vmem = NULL;
1621 }
1622}
1623
1624static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
1625{
1626 int i;
1627 struct pci_dev *pdev = bp->pdev;
1628
1629 if (ring->nr_pages > 1) {
1630 ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
1631 ring->nr_pages * 8,
1632 &ring->pg_tbl_map,
1633 GFP_KERNEL);
1634 if (!ring->pg_tbl)
1635 return -ENOMEM;
1636 }
1637
1638 for (i = 0; i < ring->nr_pages; i++) {
1639 ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
1640 ring->page_size,
1641 &ring->dma_arr[i],
1642 GFP_KERNEL);
1643 if (!ring->pg_arr[i])
1644 return -ENOMEM;
1645
1646 if (ring->nr_pages > 1)
1647 ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
1648 }
1649
1650 if (ring->vmem_size) {
1651 *ring->vmem = vzalloc(ring->vmem_size);
1652 if (!(*ring->vmem))
1653 return -ENOMEM;
1654 }
1655 return 0;
1656}
1657
1658static void bnxt_free_rx_rings(struct bnxt *bp)
1659{
1660 int i;
1661
1662 if (!bp->bnapi)
1663 return;
1664
1665 for (i = 0; i < bp->rx_nr_rings; i++) {
1666 struct bnxt_napi *bnapi = bp->bnapi[i];
1667 struct bnxt_rx_ring_info *rxr;
1668 struct bnxt_ring_struct *ring;
1669
1670 if (!bnapi)
1671 continue;
1672
1673 rxr = &bnapi->rx_ring;
1674
1675 kfree(rxr->rx_tpa);
1676 rxr->rx_tpa = NULL;
1677
1678 kfree(rxr->rx_agg_bmap);
1679 rxr->rx_agg_bmap = NULL;
1680
1681 ring = &rxr->rx_ring_struct;
1682 bnxt_free_ring(bp, ring);
1683
1684 ring = &rxr->rx_agg_ring_struct;
1685 bnxt_free_ring(bp, ring);
1686 }
1687}
1688
1689static int bnxt_alloc_rx_rings(struct bnxt *bp)
1690{
1691 int i, rc, agg_rings = 0, tpa_rings = 0;
1692
1693 if (bp->flags & BNXT_FLAG_AGG_RINGS)
1694 agg_rings = 1;
1695
1696 if (bp->flags & BNXT_FLAG_TPA)
1697 tpa_rings = 1;
1698
1699 for (i = 0; i < bp->rx_nr_rings; i++) {
1700 struct bnxt_napi *bnapi = bp->bnapi[i];
1701 struct bnxt_rx_ring_info *rxr;
1702 struct bnxt_ring_struct *ring;
1703
1704 if (!bnapi)
1705 continue;
1706
1707 rxr = &bnapi->rx_ring;
1708 ring = &rxr->rx_ring_struct;
1709
1710 rc = bnxt_alloc_ring(bp, ring);
1711 if (rc)
1712 return rc;
1713
1714 if (agg_rings) {
1715 u16 mem_size;
1716
1717 ring = &rxr->rx_agg_ring_struct;
1718 rc = bnxt_alloc_ring(bp, ring);
1719 if (rc)
1720 return rc;
1721
1722 rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
1723 mem_size = rxr->rx_agg_bmap_size / 8;
1724 rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
1725 if (!rxr->rx_agg_bmap)
1726 return -ENOMEM;
1727
1728 if (tpa_rings) {
1729 rxr->rx_tpa = kcalloc(MAX_TPA,
1730 sizeof(struct bnxt_tpa_info),
1731 GFP_KERNEL);
1732 if (!rxr->rx_tpa)
1733 return -ENOMEM;
1734 }
1735 }
1736 }
1737 return 0;
1738}
1739
1740static void bnxt_free_tx_rings(struct bnxt *bp)
1741{
1742 int i;
1743 struct pci_dev *pdev = bp->pdev;
1744
1745 if (!bp->bnapi)
1746 return;
1747
1748 for (i = 0; i < bp->tx_nr_rings; i++) {
1749 struct bnxt_napi *bnapi = bp->bnapi[i];
1750 struct bnxt_tx_ring_info *txr;
1751 struct bnxt_ring_struct *ring;
1752
1753 if (!bnapi)
1754 continue;
1755
1756 txr = &bnapi->tx_ring;
1757
1758 if (txr->tx_push) {
1759 dma_free_coherent(&pdev->dev, bp->tx_push_size,
1760 txr->tx_push, txr->tx_push_mapping);
1761 txr->tx_push = NULL;
1762 }
1763
1764 ring = &txr->tx_ring_struct;
1765
1766 bnxt_free_ring(bp, ring);
1767 }
1768}
1769
1770static int bnxt_alloc_tx_rings(struct bnxt *bp)
1771{
1772 int i, j, rc;
1773 struct pci_dev *pdev = bp->pdev;
1774
1775 bp->tx_push_size = 0;
1776 if (bp->tx_push_thresh) {
1777 int push_size;
1778
1779 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
1780 bp->tx_push_thresh);
1781
1782 if (push_size > 128) {
1783 push_size = 0;
1784 bp->tx_push_thresh = 0;
1785 }
1786
1787 bp->tx_push_size = push_size;
1788 }
1789
1790 for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
1791 struct bnxt_napi *bnapi = bp->bnapi[i];
1792 struct bnxt_tx_ring_info *txr;
1793 struct bnxt_ring_struct *ring;
1794
1795 if (!bnapi)
1796 continue;
1797
1798 txr = &bnapi->tx_ring;
1799 ring = &txr->tx_ring_struct;
1800
1801 rc = bnxt_alloc_ring(bp, ring);
1802 if (rc)
1803 return rc;
1804
1805 if (bp->tx_push_size) {
1806 struct tx_bd *txbd;
1807 dma_addr_t mapping;
1808
1809 /* One pre-allocated DMA buffer to backup
1810 * TX push operation
1811 */
1812 txr->tx_push = dma_alloc_coherent(&pdev->dev,
1813 bp->tx_push_size,
1814 &txr->tx_push_mapping,
1815 GFP_KERNEL);
1816
1817 if (!txr->tx_push)
1818 return -ENOMEM;
1819
1820 txbd = &txr->tx_push->txbd1;
1821
1822 mapping = txr->tx_push_mapping +
1823 sizeof(struct tx_push_bd);
1824 txbd->tx_bd_haddr = cpu_to_le64(mapping);
1825
1826 memset(txbd + 1, 0, sizeof(struct tx_bd_ext));
1827 }
1828 ring->queue_id = bp->q_info[j].queue_id;
1829 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
1830 j++;
1831 }
1832 return 0;
1833}
1834
1835static void bnxt_free_cp_rings(struct bnxt *bp)
1836{
1837 int i;
1838
1839 if (!bp->bnapi)
1840 return;
1841
1842 for (i = 0; i < bp->cp_nr_rings; i++) {
1843 struct bnxt_napi *bnapi = bp->bnapi[i];
1844 struct bnxt_cp_ring_info *cpr;
1845 struct bnxt_ring_struct *ring;
1846
1847 if (!bnapi)
1848 continue;
1849
1850 cpr = &bnapi->cp_ring;
1851 ring = &cpr->cp_ring_struct;
1852
1853 bnxt_free_ring(bp, ring);
1854 }
1855}
1856
1857static int bnxt_alloc_cp_rings(struct bnxt *bp)
1858{
1859 int i, rc;
1860
1861 for (i = 0; i < bp->cp_nr_rings; i++) {
1862 struct bnxt_napi *bnapi = bp->bnapi[i];
1863 struct bnxt_cp_ring_info *cpr;
1864 struct bnxt_ring_struct *ring;
1865
1866 if (!bnapi)
1867 continue;
1868
1869 cpr = &bnapi->cp_ring;
1870 ring = &cpr->cp_ring_struct;
1871
1872 rc = bnxt_alloc_ring(bp, ring);
1873 if (rc)
1874 return rc;
1875 }
1876 return 0;
1877}
1878
1879static void bnxt_init_ring_struct(struct bnxt *bp)
1880{
1881 int i;
1882
1883 for (i = 0; i < bp->cp_nr_rings; i++) {
1884 struct bnxt_napi *bnapi = bp->bnapi[i];
1885 struct bnxt_cp_ring_info *cpr;
1886 struct bnxt_rx_ring_info *rxr;
1887 struct bnxt_tx_ring_info *txr;
1888 struct bnxt_ring_struct *ring;
1889
1890 if (!bnapi)
1891 continue;
1892
1893 cpr = &bnapi->cp_ring;
1894 ring = &cpr->cp_ring_struct;
1895 ring->nr_pages = bp->cp_nr_pages;
1896 ring->page_size = HW_CMPD_RING_SIZE;
1897 ring->pg_arr = (void **)cpr->cp_desc_ring;
1898 ring->dma_arr = cpr->cp_desc_mapping;
1899 ring->vmem_size = 0;
1900
1901 rxr = &bnapi->rx_ring;
1902 ring = &rxr->rx_ring_struct;
1903 ring->nr_pages = bp->rx_nr_pages;
1904 ring->page_size = HW_RXBD_RING_SIZE;
1905 ring->pg_arr = (void **)rxr->rx_desc_ring;
1906 ring->dma_arr = rxr->rx_desc_mapping;
1907 ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
1908 ring->vmem = (void **)&rxr->rx_buf_ring;
1909
1910 ring = &rxr->rx_agg_ring_struct;
1911 ring->nr_pages = bp->rx_agg_nr_pages;
1912 ring->page_size = HW_RXBD_RING_SIZE;
1913 ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
1914 ring->dma_arr = rxr->rx_agg_desc_mapping;
1915 ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
1916 ring->vmem = (void **)&rxr->rx_agg_ring;
1917
1918 txr = &bnapi->tx_ring;
1919 ring = &txr->tx_ring_struct;
1920 ring->nr_pages = bp->tx_nr_pages;
1921 ring->page_size = HW_RXBD_RING_SIZE;
1922 ring->pg_arr = (void **)txr->tx_desc_ring;
1923 ring->dma_arr = txr->tx_desc_mapping;
1924 ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
1925 ring->vmem = (void **)&txr->tx_buf_ring;
1926 }
1927}
1928
1929static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
1930{
1931 int i;
1932 u32 prod;
1933 struct rx_bd **rx_buf_ring;
1934
1935 rx_buf_ring = (struct rx_bd **)ring->pg_arr;
1936 for (i = 0, prod = 0; i < ring->nr_pages; i++) {
1937 int j;
1938 struct rx_bd *rxbd;
1939
1940 rxbd = rx_buf_ring[i];
1941 if (!rxbd)
1942 continue;
1943
1944 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
1945 rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
1946 rxbd->rx_bd_opaque = prod;
1947 }
1948 }
1949}
1950
1951static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
1952{
1953 struct net_device *dev = bp->dev;
1954 struct bnxt_napi *bnapi = bp->bnapi[ring_nr];
1955 struct bnxt_rx_ring_info *rxr;
1956 struct bnxt_ring_struct *ring;
1957 u32 prod, type;
1958 int i;
1959
1960 if (!bnapi)
1961 return -EINVAL;
1962
1963 type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
1964 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
1965
1966 if (NET_IP_ALIGN == 2)
1967 type |= RX_BD_FLAGS_SOP;
1968
1969 rxr = &bnapi->rx_ring;
1970 ring = &rxr->rx_ring_struct;
1971 bnxt_init_rxbd_pages(ring, type);
1972
1973 prod = rxr->rx_prod;
1974 for (i = 0; i < bp->rx_ring_size; i++) {
1975 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
1976 netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
1977 ring_nr, i, bp->rx_ring_size);
1978 break;
1979 }
1980 prod = NEXT_RX(prod);
1981 }
1982 rxr->rx_prod = prod;
1983 ring->fw_ring_id = INVALID_HW_RING_ID;
1984
1985 ring = &rxr->rx_agg_ring_struct;
1986 ring->fw_ring_id = INVALID_HW_RING_ID;
1987
1988 if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
1989 return 0;
1990
1991 type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
1992 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
1993
1994 bnxt_init_rxbd_pages(ring, type);
1995
1996 prod = rxr->rx_agg_prod;
1997 for (i = 0; i < bp->rx_agg_ring_size; i++) {
1998 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
1999 netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
2000 ring_nr, i, bp->rx_ring_size);
2001 break;
2002 }
2003 prod = NEXT_RX_AGG(prod);
2004 }
2005 rxr->rx_agg_prod = prod;
2006
2007 if (bp->flags & BNXT_FLAG_TPA) {
2008 if (rxr->rx_tpa) {
2009 u8 *data;
2010 dma_addr_t mapping;
2011
2012 for (i = 0; i < MAX_TPA; i++) {
2013 data = __bnxt_alloc_rx_data(bp, &mapping,
2014 GFP_KERNEL);
2015 if (!data)
2016 return -ENOMEM;
2017
2018 rxr->rx_tpa[i].data = data;
2019 rxr->rx_tpa[i].mapping = mapping;
2020 }
2021 } else {
2022 netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
2023 return -ENOMEM;
2024 }
2025 }
2026
2027 return 0;
2028}
2029
2030static int bnxt_init_rx_rings(struct bnxt *bp)
2031{
2032 int i, rc = 0;
2033
2034 for (i = 0; i < bp->rx_nr_rings; i++) {
2035 rc = bnxt_init_one_rx_ring(bp, i);
2036 if (rc)
2037 break;
2038 }
2039
2040 return rc;
2041}
2042
2043static int bnxt_init_tx_rings(struct bnxt *bp)
2044{
2045 u16 i;
2046
2047 bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
2048 MAX_SKB_FRAGS + 1);
2049
2050 for (i = 0; i < bp->tx_nr_rings; i++) {
2051 struct bnxt_napi *bnapi = bp->bnapi[i];
2052 struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
2053 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2054
2055 ring->fw_ring_id = INVALID_HW_RING_ID;
2056 }
2057
2058 return 0;
2059}
2060
2061static void bnxt_free_ring_grps(struct bnxt *bp)
2062{
2063 kfree(bp->grp_info);
2064 bp->grp_info = NULL;
2065}
2066
2067static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
2068{
2069 int i;
2070
2071 if (irq_re_init) {
2072 bp->grp_info = kcalloc(bp->cp_nr_rings,
2073 sizeof(struct bnxt_ring_grp_info),
2074 GFP_KERNEL);
2075 if (!bp->grp_info)
2076 return -ENOMEM;
2077 }
2078 for (i = 0; i < bp->cp_nr_rings; i++) {
2079 if (irq_re_init)
2080 bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
2081 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
2082 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
2083 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
2084 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
2085 }
2086 return 0;
2087}
2088
2089static void bnxt_free_vnics(struct bnxt *bp)
2090{
2091 kfree(bp->vnic_info);
2092 bp->vnic_info = NULL;
2093 bp->nr_vnics = 0;
2094}
2095
2096static int bnxt_alloc_vnics(struct bnxt *bp)
2097{
2098 int num_vnics = 1;
2099
2100#ifdef CONFIG_RFS_ACCEL
2101 if (bp->flags & BNXT_FLAG_RFS)
2102 num_vnics += bp->rx_nr_rings;
2103#endif
2104
2105 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
2106 GFP_KERNEL);
2107 if (!bp->vnic_info)
2108 return -ENOMEM;
2109
2110 bp->nr_vnics = num_vnics;
2111 return 0;
2112}
2113
2114static void bnxt_init_vnics(struct bnxt *bp)
2115{
2116 int i;
2117
2118 for (i = 0; i < bp->nr_vnics; i++) {
2119 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2120
2121 vnic->fw_vnic_id = INVALID_HW_RING_ID;
2122 vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
2123 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
2124
2125 if (bp->vnic_info[i].rss_hash_key) {
2126 if (i == 0)
2127 prandom_bytes(vnic->rss_hash_key,
2128 HW_HASH_KEY_SIZE);
2129 else
2130 memcpy(vnic->rss_hash_key,
2131 bp->vnic_info[0].rss_hash_key,
2132 HW_HASH_KEY_SIZE);
2133 }
2134 }
2135}
2136
2137static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
2138{
2139 int pages;
2140
2141 pages = ring_size / desc_per_pg;
2142
2143 if (!pages)
2144 return 1;
2145
2146 pages++;
2147
2148 while (pages & (pages - 1))
2149 pages++;
2150
2151 return pages;
2152}
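
/* Illustrative sketch (standalone, not part of the driver): the same
 * rounding as bnxt_calc_nr_ring_pages() above, compiled as a userspace
 * program. With 128 descriptors per page (an assumed RX_DESC_CNT-style
 * value), a 511-entry ring needs 4 pages and a 2047-entry ring needs 16.
 */
#include <stdio.h>

static unsigned int calc_nr_ring_pages(unsigned int ring_size,
				       unsigned int desc_per_pg)
{
	unsigned int pages = ring_size / desc_per_pg;

	if (!pages)
		return 1;
	pages++;			/* cover any partial page */
	while (pages & (pages - 1))	/* walk up to a power of two */
		pages++;
	return pages;
}

int main(void)
{
	printf("%u\n", calc_nr_ring_pages(511, 128));	/* 4 */
	printf("%u\n", calc_nr_ring_pages(2047, 128));	/* 16 */
	return 0;
}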
2153
2154static void bnxt_set_tpa_flags(struct bnxt *bp)
2155{
2156 bp->flags &= ~BNXT_FLAG_TPA;
2157 if (bp->dev->features & NETIF_F_LRO)
2158 bp->flags |= BNXT_FLAG_LRO;
2159 if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0))
2160 bp->flags |= BNXT_FLAG_GRO;
2161}
2162
2163/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
2164 * be set on entry.
2165 */
2166void bnxt_set_ring_params(struct bnxt *bp)
2167{
2168 u32 ring_size, rx_size, rx_space;
2169 u32 agg_factor = 0, agg_ring_size = 0;
2170
2171 /* 8 for CRC and VLAN */
2172 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
2173
2174 rx_space = rx_size + NET_SKB_PAD +
2175 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2176
2177 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
2178 ring_size = bp->rx_ring_size;
2179 bp->rx_agg_ring_size = 0;
2180 bp->rx_agg_nr_pages = 0;
2181
2182 if (bp->flags & BNXT_FLAG_TPA)
2183 agg_factor = 4;
2184
2185 bp->flags &= ~BNXT_FLAG_JUMBO;
2186 if (rx_space > PAGE_SIZE) {
2187 u32 jumbo_factor;
2188
2189 bp->flags |= BNXT_FLAG_JUMBO;
2190 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
2191 if (jumbo_factor > agg_factor)
2192 agg_factor = jumbo_factor;
2193 }
2194 agg_ring_size = ring_size * agg_factor;
2195
2196 if (agg_ring_size) {
2197 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
2198 RX_DESC_CNT);
2199 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
2200 u32 tmp = agg_ring_size;
2201
2202 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
2203 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
2204 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
2205 tmp, agg_ring_size);
2206 }
2207 bp->rx_agg_ring_size = agg_ring_size;
2208 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
2209 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
2210 rx_space = rx_size + NET_SKB_PAD +
2211 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2212 }
2213
2214 bp->rx_buf_use_size = rx_size;
2215 bp->rx_buf_size = rx_space;
2216
2217 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
2218 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
2219
2220 ring_size = bp->tx_ring_size;
2221 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
2222 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
2223
2224 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
2225 bp->cp_ring_size = ring_size;
2226
2227 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
2228 if (bp->cp_nr_pages > MAX_CP_PAGES) {
2229 bp->cp_nr_pages = MAX_CP_PAGES;
2230 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
2231 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
2232 ring_size, bp->cp_ring_size);
2233 }
2234 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
2235 bp->cp_ring_mask = bp->cp_bit - 1;
2236}
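
/* Illustrative sketch (standalone, not part of the driver): the ring-size
 * arithmetic above with assumed example values (rx_ring_size = 511,
 * tx_ring_size = 511, TPA on so agg_factor = 4). It only shows how the
 * aggregation and completion ring entry counts are derived, not the SKB
 * buffer-size math, which depends on kernel configuration.
 */
#include <stdio.h>

int main(void)
{
	unsigned int rx_ring_size = 511, tx_ring_size = 511, agg_factor = 4;
	unsigned int agg_ring_size = rx_ring_size * agg_factor;
	unsigned int cp_ring_size = rx_ring_size * (2 + agg_factor) +
				    tx_ring_size;

	printf("agg ring entries:  %u\n", agg_ring_size);	/* 2044 */
	printf("cmpl ring entries: %u\n", cp_ring_size);	/* 3577 */
	return 0;
}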
2237
2238static void bnxt_free_vnic_attributes(struct bnxt *bp)
2239{
2240 int i;
2241 struct bnxt_vnic_info *vnic;
2242 struct pci_dev *pdev = bp->pdev;
2243
2244 if (!bp->vnic_info)
2245 return;
2246
2247 for (i = 0; i < bp->nr_vnics; i++) {
2248 vnic = &bp->vnic_info[i];
2249
2250 kfree(vnic->fw_grp_ids);
2251 vnic->fw_grp_ids = NULL;
2252
2253 kfree(vnic->uc_list);
2254 vnic->uc_list = NULL;
2255
2256 if (vnic->mc_list) {
2257 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
2258 vnic->mc_list, vnic->mc_list_mapping);
2259 vnic->mc_list = NULL;
2260 }
2261
2262 if (vnic->rss_table) {
2263 dma_free_coherent(&pdev->dev, PAGE_SIZE,
2264 vnic->rss_table,
2265 vnic->rss_table_dma_addr);
2266 vnic->rss_table = NULL;
2267 }
2268
2269 vnic->rss_hash_key = NULL;
2270 vnic->flags = 0;
2271 }
2272}
2273
2274static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
2275{
2276 int i, rc = 0, size;
2277 struct bnxt_vnic_info *vnic;
2278 struct pci_dev *pdev = bp->pdev;
2279 int max_rings;
2280
2281 for (i = 0; i < bp->nr_vnics; i++) {
2282 vnic = &bp->vnic_info[i];
2283
2284 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
2285 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
2286
2287 if (mem_size > 0) {
2288 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
2289 if (!vnic->uc_list) {
2290 rc = -ENOMEM;
2291 goto out;
2292 }
2293 }
2294 }
2295
2296 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
2297 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
2298 vnic->mc_list =
2299 dma_alloc_coherent(&pdev->dev,
2300 vnic->mc_list_size,
2301 &vnic->mc_list_mapping,
2302 GFP_KERNEL);
2303 if (!vnic->mc_list) {
2304 rc = -ENOMEM;
2305 goto out;
2306 }
2307 }
2308
2309 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
2310 max_rings = bp->rx_nr_rings;
2311 else
2312 max_rings = 1;
2313
2314 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
2315 if (!vnic->fw_grp_ids) {
2316 rc = -ENOMEM;
2317 goto out;
2318 }
2319
2320 /* Allocate rss table and hash key */
2321 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2322 &vnic->rss_table_dma_addr,
2323 GFP_KERNEL);
2324 if (!vnic->rss_table) {
2325 rc = -ENOMEM;
2326 goto out;
2327 }
2328
2329 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
2330
2331 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
2332 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
2333 }
2334 return 0;
2335
2336out:
2337 return rc;
2338}
2339
2340static void bnxt_free_hwrm_resources(struct bnxt *bp)
2341{
2342 struct pci_dev *pdev = bp->pdev;
2343
2344 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
2345 bp->hwrm_cmd_resp_dma_addr);
2346
2347 bp->hwrm_cmd_resp_addr = NULL;
2348 if (bp->hwrm_dbg_resp_addr) {
2349 dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
2350 bp->hwrm_dbg_resp_addr,
2351 bp->hwrm_dbg_resp_dma_addr);
2352
2353 bp->hwrm_dbg_resp_addr = NULL;
2354 }
2355}
2356
2357static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2358{
2359 struct pci_dev *pdev = bp->pdev;
2360
2361 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2362 &bp->hwrm_cmd_resp_dma_addr,
2363 GFP_KERNEL);
2364 if (!bp->hwrm_cmd_resp_addr)
2365 return -ENOMEM;
2366 bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
2367 HWRM_DBG_REG_BUF_SIZE,
2368 &bp->hwrm_dbg_resp_dma_addr,
2369 GFP_KERNEL);
2370 if (!bp->hwrm_dbg_resp_addr)
2371 netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");
2372
2373 return 0;
2374}
2375
2376static void bnxt_free_stats(struct bnxt *bp)
2377{
2378 u32 size, i;
2379 struct pci_dev *pdev = bp->pdev;
2380
2381 if (!bp->bnapi)
2382 return;
2383
2384 size = sizeof(struct ctx_hw_stats);
2385
2386 for (i = 0; i < bp->cp_nr_rings; i++) {
2387 struct bnxt_napi *bnapi = bp->bnapi[i];
2388 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2389
2390 if (cpr->hw_stats) {
2391 dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
2392 cpr->hw_stats_map);
2393 cpr->hw_stats = NULL;
2394 }
2395 }
2396}
2397
2398static int bnxt_alloc_stats(struct bnxt *bp)
2399{
2400 u32 size, i;
2401 struct pci_dev *pdev = bp->pdev;
2402
2403 size = sizeof(struct ctx_hw_stats);
2404
2405 for (i = 0; i < bp->cp_nr_rings; i++) {
2406 struct bnxt_napi *bnapi = bp->bnapi[i];
2407 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2408
2409 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
2410 &cpr->hw_stats_map,
2411 GFP_KERNEL);
2412 if (!cpr->hw_stats)
2413 return -ENOMEM;
2414
2415 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
2416 }
2417 return 0;
2418}
2419
2420static void bnxt_clear_ring_indices(struct bnxt *bp)
2421{
2422 int i;
2423
2424 if (!bp->bnapi)
2425 return;
2426
2427 for (i = 0; i < bp->cp_nr_rings; i++) {
2428 struct bnxt_napi *bnapi = bp->bnapi[i];
2429 struct bnxt_cp_ring_info *cpr;
2430 struct bnxt_rx_ring_info *rxr;
2431 struct bnxt_tx_ring_info *txr;
2432
2433 if (!bnapi)
2434 continue;
2435
2436 cpr = &bnapi->cp_ring;
2437 cpr->cp_raw_cons = 0;
2438
2439 txr = &bnapi->tx_ring;
2440 txr->tx_prod = 0;
2441 txr->tx_cons = 0;
2442
2443 rxr = &bnapi->rx_ring;
2444 rxr->rx_prod = 0;
2445 rxr->rx_agg_prod = 0;
2446 rxr->rx_sw_agg_prod = 0;
2447 }
2448}
2449
2450static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
2451{
2452#ifdef CONFIG_RFS_ACCEL
2453 int i;
2454
2455 /* We are under rtnl_lock and all our NAPIs have been disabled, so it
2456 * is safe to delete the hash table.
2457 */
2458 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
2459 struct hlist_head *head;
2460 struct hlist_node *tmp;
2461 struct bnxt_ntuple_filter *fltr;
2462
2463 head = &bp->ntp_fltr_hash_tbl[i];
2464 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
2465 hlist_del(&fltr->hash);
2466 kfree(fltr);
2467 }
2468 }
2469 if (irq_reinit) {
2470 kfree(bp->ntp_fltr_bmap);
2471 bp->ntp_fltr_bmap = NULL;
2472 }
2473 bp->ntp_fltr_count = 0;
2474#endif
2475}
2476
2477static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
2478{
2479#ifdef CONFIG_RFS_ACCEL
2480 int i, rc = 0;
2481
2482 if (!(bp->flags & BNXT_FLAG_RFS))
2483 return 0;
2484
2485 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
2486 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
2487
2488 bp->ntp_fltr_count = 0;
2489 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
2490 sizeof(long), GFP_KERNEL);
2491
2492 if (!bp->ntp_fltr_bmap)
2493 rc = -ENOMEM;
2494
2495 return rc;
2496#else
2497 return 0;
2498#endif
2499}
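
/* Illustrative check (standalone, not part of the driver) of the bitmap
 * sizing fixed above, assuming BNXT_NTP_FLTR_MAX_FLTR is 4096 and 64-bit
 * longs: the bitmap needs BITS_TO_LONGS(4096) = 64 longs, i.e. 512 bytes,
 * whereas the bare BITS_TO_LONGS() count would have allocated only 64.
 */
#include <stdio.h>

#define NTP_FLTR_MAX_FLTR	4096	/* assumed example value */
#define BITS_PER_LONG_SZ	(8 * sizeof(long))
#define BITS_TO_LONGS_SZ(n)	(((n) + BITS_PER_LONG_SZ - 1) / BITS_PER_LONG_SZ)

int main(void)
{
	printf("longs = %zu, bytes = %zu\n",
	       BITS_TO_LONGS_SZ(NTP_FLTR_MAX_FLTR),
	       BITS_TO_LONGS_SZ(NTP_FLTR_MAX_FLTR) * sizeof(long));
	return 0;
}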
2500
2501static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
2502{
2503 bnxt_free_vnic_attributes(bp);
2504 bnxt_free_tx_rings(bp);
2505 bnxt_free_rx_rings(bp);
2506 bnxt_free_cp_rings(bp);
2507 bnxt_free_ntp_fltrs(bp, irq_re_init);
2508 if (irq_re_init) {
2509 bnxt_free_stats(bp);
2510 bnxt_free_ring_grps(bp);
2511 bnxt_free_vnics(bp);
2512 kfree(bp->bnapi);
2513 bp->bnapi = NULL;
2514 } else {
2515 bnxt_clear_ring_indices(bp);
2516 }
2517}
2518
2519static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
2520{
2521 int i, rc, size, arr_size;
2522 void *bnapi;
2523
2524 if (irq_re_init) {
2525 /* Allocate bnapi mem pointer array and mem block for
2526 * all queues
2527 */
2528 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
2529 bp->cp_nr_rings);
2530 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
2531 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
2532 if (!bnapi)
2533 return -ENOMEM;
2534
2535 bp->bnapi = bnapi;
2536 bnapi += arr_size;
2537 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
2538 bp->bnapi[i] = bnapi;
2539 bp->bnapi[i]->index = i;
2540 bp->bnapi[i]->bp = bp;
2541 }
2542
2543 rc = bnxt_alloc_stats(bp);
2544 if (rc)
2545 goto alloc_mem_err;
2546
2547 rc = bnxt_alloc_ntp_fltrs(bp);
2548 if (rc)
2549 goto alloc_mem_err;
2550
2551 rc = bnxt_alloc_vnics(bp);
2552 if (rc)
2553 goto alloc_mem_err;
2554 }
2555
2556 bnxt_init_ring_struct(bp);
2557
2558 rc = bnxt_alloc_rx_rings(bp);
2559 if (rc)
2560 goto alloc_mem_err;
2561
2562 rc = bnxt_alloc_tx_rings(bp);
2563 if (rc)
2564 goto alloc_mem_err;
2565
2566 rc = bnxt_alloc_cp_rings(bp);
2567 if (rc)
2568 goto alloc_mem_err;
2569
2570 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
2571 BNXT_VNIC_UCAST_FLAG;
2572 rc = bnxt_alloc_vnic_attributes(bp);
2573 if (rc)
2574 goto alloc_mem_err;
2575 return 0;
2576
2577alloc_mem_err:
2578 bnxt_free_mem(bp, true);
2579 return rc;
2580}
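
/* Illustrative sketch (standalone, not part of the driver): the
 * one-allocation layout used for bp->bnapi above, i.e. a pointer array at
 * the front of the block followed by the per-ring objects, ignoring the
 * L1_CACHE_ALIGN rounding the driver applies. struct obj is made up.
 */
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int index;
};

int main(void)
{
	int i, n = 4;
	size_t arr_size = sizeof(struct obj *) * n;
	size_t size = sizeof(struct obj);
	void *mem = calloc(1, arr_size + size * n);
	struct obj **tbl;
	char *p;

	if (!mem)
		return 1;
	tbl = mem;			/* pointer array at the front */
	p = (char *)mem + arr_size;	/* objects follow the array */
	for (i = 0; i < n; i++, p += size) {
		tbl[i] = (struct obj *)p;
		tbl[i]->index = i;
	}
	printf("obj[2].index = %d\n", tbl[2]->index);
	free(mem);
	return 0;
}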
2581
2582void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
2583 u16 cmpl_ring, u16 target_id)
2584{
2585 struct hwrm_cmd_req_hdr *req = request;
2586
2587 req->cmpl_ring_req_type =
2588 cpu_to_le32(req_type | (cmpl_ring << HWRM_CMPL_RING_SFT));
2589 req->target_id_seq_id = cpu_to_le32(target_id << HWRM_TARGET_FID_SFT);
2590 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
2591}
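
/* Illustrative sketch (standalone, not part of the driver) of the header
 * packing above. The shift values and the example request type are
 * assumptions, not taken from bnxt_hsi.h; the point is only that request
 * type/completion ring and target FID/sequence ID each share one 32-bit
 * word, with the sequence ID ORed in later by _hwrm_send_message().
 */
#include <stdio.h>
#include <stdint.h>

#define CMPL_RING_SFT	16	/* assumed value of HWRM_CMPL_RING_SFT */
#define TARGET_FID_SFT	16	/* assumed value of HWRM_TARGET_FID_SFT */

int main(void)
{
	uint16_t req_type = 0x1d;	/* example request type */
	uint16_t cmpl_ring = 0xffff;	/* -1: no completion ring */
	uint16_t target = 0xffff;	/* -1: target the local function */
	uint16_t seq_id = 0x0005;	/* example sequence number */
	uint32_t cmpl_ring_req_type =
		req_type | ((uint32_t)cmpl_ring << CMPL_RING_SFT);
	uint32_t target_id_seq_id =
		((uint32_t)target << TARGET_FID_SFT) | seq_id;

	printf("cmpl_ring_req_type = 0x%08x\n",
	       (unsigned int)cmpl_ring_req_type);
	printf("target_id_seq_id   = 0x%08x\n",
	       (unsigned int)target_id_seq_id);
	return 0;
}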
2592
2593int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
2594{
2595 int i, intr_process, rc;
2596 struct hwrm_cmd_req_hdr *req = msg;
2597 u32 *data = msg;
2598 __le32 *resp_len, *valid;
2599 u16 cp_ring_id, len = 0;
2600 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
2601
2602 req->target_id_seq_id |= cpu_to_le32(bp->hwrm_cmd_seq++);
2603 memset(resp, 0, PAGE_SIZE);
2604 cp_ring_id = (le32_to_cpu(req->cmpl_ring_req_type) &
2605 HWRM_CMPL_RING_MASK) >>
2606 HWRM_CMPL_RING_SFT;
2607 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
2608
2609 /* Write request msg to hwrm channel */
2610 __iowrite32_copy(bp->bar0, data, msg_len / 4);
2611
2612 /* currently supports only one outstanding message */
2613 if (intr_process)
2614 bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) &
2615 HWRM_SEQ_ID_MASK;
2616
2617 /* Ring channel doorbell */
2618 writel(1, bp->bar0 + 0x100);
2619
2620 i = 0;
2621 if (intr_process) {
2622 /* Wait until hwrm response cmpl interrupt is processed */
2623 while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
2624 i++ < timeout) {
2625 usleep_range(600, 800);
2626 }
2627
2628 if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
2629 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
2630 req->cmpl_ring_req_type);
2631 return -1;
2632 }
2633 } else {
2634 /* Check if response len is updated */
2635 resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
2636 for (i = 0; i < timeout; i++) {
2637 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
2638 HWRM_RESP_LEN_SFT;
2639 if (len)
2640 break;
2641 usleep_range(600, 800);
2642 }
2643
2644 if (i >= timeout) {
2645 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
2646 timeout, req->cmpl_ring_req_type,
2647 req->target_id_seq_id, *resp_len);
2648 return -1;
2649 }
2650
2651 /* Last word of resp contains valid bit */
2652 valid = bp->hwrm_cmd_resp_addr + len - 4;
2653 for (i = 0; i < timeout; i++) {
2654 if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
2655 break;
2656 usleep_range(600, 800);
2657 }
2658
2659 if (i >= timeout) {
2660 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
2661 timeout, req->cmpl_ring_req_type,
2662 req->target_id_seq_id, len, *valid);
2663 return -1;
2664 }
2665 }
2666
2667 rc = le16_to_cpu(resp->error_code);
2668 if (rc) {
2669 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
2670 le16_to_cpu(resp->req_type),
2671 le16_to_cpu(resp->seq_id), rc);
2672 return rc;
2673 }
2674 return 0;
2675}
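
/* Illustrative sketch (standalone, not part of the driver) of the
 * two-stage completion poll used above when no interrupt is armed: wait
 * for the response length to become non-zero, then for the valid flag in
 * the last word. The "response" is plain memory pre-filled by the test
 * itself, standing in for a firmware DMA write; the field layout is
 * invented for the example.
 */
#include <stdio.h>
#include <stdint.h>

#define RESP_WORDS	8
#define VALID_FLAG	0x80u

static int poll_response(volatile uint32_t *resp, int timeout)
{
	uint32_t len = 0;
	int i;

	for (i = 0; i < timeout; i++) {		/* stage 1: length updated? */
		len = resp[0] & 0xffff;
		if (len)
			break;
	}
	if (!len)
		return -1;

	for (i = 0; i < timeout; i++) {		/* stage 2: valid bit set? */
		if (resp[len / 4 - 1] & VALID_FLAG)
			return 0;
	}
	return -1;
}

int main(void)
{
	volatile uint32_t resp[RESP_WORDS] = { 0 };

	resp[RESP_WORDS - 1] = VALID_FLAG;	/* "firmware" wrote the payload */
	resp[0] = RESP_WORDS * 4;		/* ...then published the length */
	printf("poll rc = %d\n", poll_response(resp, 1000));
	return 0;
}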
2676
2677int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
2678{
2679 int rc;
2680
2681 mutex_lock(&bp->hwrm_cmd_lock);
2682 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
2683 mutex_unlock(&bp->hwrm_cmd_lock);
2684 return rc;
2685}
2686
2687static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
2688{
2689 struct hwrm_func_drv_rgtr_input req = {0};
2690 int i;
2691
2692 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
2693
2694 req.enables =
2695 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
2696 FUNC_DRV_RGTR_REQ_ENABLES_VER |
2697 FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
2698
2699 /* TODO: the current async event fwd bits are not defined, and the firmware
2700 * only checks whether the field is non-zero to enable async event forwarding.
2701 */
2702 req.async_event_fwd[0] |= cpu_to_le32(1);
2703 req.os_type = cpu_to_le16(1);
2704 req.ver_maj = DRV_VER_MAJ;
2705 req.ver_min = DRV_VER_MIN;
2706 req.ver_upd = DRV_VER_UPD;
2707
2708 if (BNXT_PF(bp)) {
de68f5de 2709 DECLARE_BITMAP(vf_req_snif_bmap, 256);
c0c050c5
MC
2710 u32 *data = (u32 *)vf_req_snif_bmap;
2711
de68f5de 2712 memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
c0c050c5
MC
2713 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
2714 __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
2715
de68f5de
MC
2716 for (i = 0; i < 8; i++)
2717 req.vf_req_fwd[i] = cpu_to_le32(data[i]);
2718
c0c050c5
MC
2719 req.enables |=
2720 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
2721 }
2722
2723 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2724}
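
/* Illustrative sketch (standalone, not part of the driver) of the
 * bitmap-to-32-bit-word packing used for vf_req_fwd above. The request
 * IDs are made-up examples, not the real bnxt_vf_req_snif[] contents.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define NBITS	256
#define NWORDS	(NBITS / 32)

static void set_bit32(uint32_t *words, unsigned int bit)
{
	words[bit / 32] |= 1u << (bit % 32);
}

int main(void)
{
	static const unsigned int ids[] = { 21, 28, 42 };	/* example IDs */
	uint32_t words[NWORDS];
	unsigned int i;

	memset(words, 0, sizeof(words));
	for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		set_bit32(words, ids[i]);
	for (i = 0; i < NWORDS; i++)
		printf("word[%u] = 0x%08x\n", i, (unsigned int)words[i]);
	return 0;
}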
2725
be58a0da
JH
2726static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
2727{
2728 struct hwrm_func_drv_unrgtr_input req = {0};
2729
2730 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
2731 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2732}
2733
c0c050c5
MC
2734static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
2735{
2736 u32 rc = 0;
2737 struct hwrm_tunnel_dst_port_free_input req = {0};
2738
2739 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
2740 req.tunnel_type = tunnel_type;
2741
2742 switch (tunnel_type) {
2743 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
2744 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
2745 break;
2746 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
2747 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
2748 break;
2749 default:
2750 break;
2751 }
2752
2753 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2754 if (rc)
2755 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
2756 rc);
2757 return rc;
2758}
2759
2760static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
2761 u8 tunnel_type)
2762{
2763 u32 rc = 0;
2764 struct hwrm_tunnel_dst_port_alloc_input req = {0};
2765 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2766
2767 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
2768
2769 req.tunnel_type = tunnel_type;
2770 req.tunnel_dst_port_val = port;
2771
2772 mutex_lock(&bp->hwrm_cmd_lock);
2773 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2774 if (rc) {
2775 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
2776 rc);
2777 goto err_out;
2778 }
2779
2780 if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN)
2781 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2782
2783 else if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE)
2784 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
2785err_out:
2786 mutex_unlock(&bp->hwrm_cmd_lock);
2787 return rc;
2788}
2789
2790static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
2791{
2792 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
2793 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2794
2795 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
c193554e 2796 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
c0c050c5
MC
2797
2798 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
2799 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
2800 req.mask = cpu_to_le32(vnic->rx_mask);
2801 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2802}
2803
2804#ifdef CONFIG_RFS_ACCEL
2805static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
2806 struct bnxt_ntuple_filter *fltr)
2807{
2808 struct hwrm_cfa_ntuple_filter_free_input req = {0};
2809
2810 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
2811 req.ntuple_filter_id = fltr->filter_id;
2812 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2813}
2814
2815#define BNXT_NTP_FLTR_FLAGS \
2816 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
2817 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
2818 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
2819 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
2820 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
2821 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
2822 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
2823 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
2824 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
2825 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
2826 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
2827 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
2828 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
c193554e 2829 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
c0c050c5
MC
2830
2831static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
2832 struct bnxt_ntuple_filter *fltr)
2833{
2834 int rc = 0;
2835 struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
2836 struct hwrm_cfa_ntuple_filter_alloc_output *resp =
2837 bp->hwrm_cmd_resp_addr;
2838 struct flow_keys *keys = &fltr->fkeys;
2839 struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
2840
2841 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
2842 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[0];
2843
2844 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
2845
2846 req.ethertype = htons(ETH_P_IP);
2847 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
c193554e 2848 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
c0c050c5
MC
2849 req.ip_protocol = keys->basic.ip_proto;
2850
2851 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
2852 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
2853 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
2854 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
2855
2856 req.src_port = keys->ports.src;
2857 req.src_port_mask = cpu_to_be16(0xffff);
2858 req.dst_port = keys->ports.dst;
2859 req.dst_port_mask = cpu_to_be16(0xffff);
2860
c193554e 2861 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
c0c050c5
MC
2862 mutex_lock(&bp->hwrm_cmd_lock);
2863 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2864 if (!rc)
2865 fltr->filter_id = resp->ntuple_filter_id;
2866 mutex_unlock(&bp->hwrm_cmd_lock);
2867 return rc;
2868}
2869#endif
2870
2871static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
2872 u8 *mac_addr)
2873{
2874 u32 rc = 0;
2875 struct hwrm_cfa_l2_filter_alloc_input req = {0};
2876 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2877
2878 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
2879 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX |
2880 CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
c193554e 2881 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
c0c050c5
MC
2882 req.enables =
2883 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
c193554e 2884 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
c0c050c5
MC
2885 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
2886 memcpy(req.l2_addr, mac_addr, ETH_ALEN);
2887 req.l2_addr_mask[0] = 0xff;
2888 req.l2_addr_mask[1] = 0xff;
2889 req.l2_addr_mask[2] = 0xff;
2890 req.l2_addr_mask[3] = 0xff;
2891 req.l2_addr_mask[4] = 0xff;
2892 req.l2_addr_mask[5] = 0xff;
2893
2894 mutex_lock(&bp->hwrm_cmd_lock);
2895 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2896 if (!rc)
2897 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
2898 resp->l2_filter_id;
2899 mutex_unlock(&bp->hwrm_cmd_lock);
2900 return rc;
2901}
2902
2903static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
2904{
2905 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
2906 int rc = 0;
2907
2908 /* Any associated ntuple filters will also be cleared by firmware. */
2909 mutex_lock(&bp->hwrm_cmd_lock);
2910 for (i = 0; i < num_of_vnics; i++) {
2911 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2912
2913 for (j = 0; j < vnic->uc_filter_count; j++) {
2914 struct hwrm_cfa_l2_filter_free_input req = {0};
2915
2916 bnxt_hwrm_cmd_hdr_init(bp, &req,
2917 HWRM_CFA_L2_FILTER_FREE, -1, -1);
2918
2919 req.l2_filter_id = vnic->fw_l2_filter_id[j];
2920
2921 rc = _hwrm_send_message(bp, &req, sizeof(req),
2922 HWRM_CMD_TIMEOUT);
2923 }
2924 vnic->uc_filter_count = 0;
2925 }
2926 mutex_unlock(&bp->hwrm_cmd_lock);
2927
2928 return rc;
2929}
2930
2931static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
2932{
2933 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2934 struct hwrm_vnic_tpa_cfg_input req = {0};
2935
2936 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
2937
2938 if (tpa_flags) {
2939 u16 mss = bp->dev->mtu - 40;
2940 u32 nsegs, n, segs = 0, flags;
2941
2942 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
2943 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
2944 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
2945 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
2946 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
2947 if (tpa_flags & BNXT_FLAG_GRO)
2948 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
2949
2950 req.flags = cpu_to_le32(flags);
2951
2952 req.enables =
2953 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
c193554e
MC
2954 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
2955 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
c0c050c5
MC
2956
2957 /* The number of aggregation segments is in log2 units, and the first
2958 * packet is not counted as part of these units.
2959 */
2960 if (mss <= PAGE_SIZE) {
2961 n = PAGE_SIZE / mss;
2962 nsegs = (MAX_SKB_FRAGS - 1) * n;
2963 } else {
2964 n = mss / PAGE_SIZE;
2965 if (mss & (PAGE_SIZE - 1))
2966 n++;
2967 nsegs = (MAX_SKB_FRAGS - n) / n;
2968 }
2969
2970 segs = ilog2(nsegs);
2971 req.max_agg_segs = cpu_to_le16(segs);
2972 req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
c193554e
MC
2973
2974 req.min_agg_len = cpu_to_le32(512);
c0c050c5
MC
2975 }
2976 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
2977
2978 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2979}
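
/* Worked example (standalone, not part of the driver) of the max_agg_segs
 * computation above, with assumed values MAX_SKB_FRAGS = 17, PAGE_SIZE =
 * 4096 and an MSS of 1460: n = 4096 / 1460 = 2, nsegs = (17 - 1) * 2 = 32,
 * so max_agg_segs = ilog2(32) = 5.
 */
#include <stdio.h>

static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int max_skb_frags = 17, page_size = 4096, mss = 1460;
	unsigned int n, nsegs;

	if (mss <= page_size) {
		n = page_size / mss;
		nsegs = (max_skb_frags - 1) * n;
	} else {
		n = mss / page_size;
		if (mss % page_size)
			n++;
		nsegs = (max_skb_frags - n) / n;
	}
	printf("nsegs = %u, max_agg_segs = %u\n", nsegs, ilog2_u32(nsegs));
	return 0;
}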
2980
2981static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
2982{
2983 u32 i, j, max_rings;
2984 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2985 struct hwrm_vnic_rss_cfg_input req = {0};
2986
2987 if (vnic->fw_rss_cos_lb_ctx == INVALID_HW_RING_ID)
2988 return 0;
2989
2990 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
2991 if (set_rss) {
2992 vnic->hash_type = BNXT_RSS_HASH_TYPE_FLAG_IPV4 |
2993 BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 |
2994 BNXT_RSS_HASH_TYPE_FLAG_IPV6 |
2995 BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6;
2996
2997 req.hash_type = cpu_to_le32(vnic->hash_type);
2998
2999 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3000 max_rings = bp->rx_nr_rings;
3001 else
3002 max_rings = 1;
3003
3004 /* Fill the RSS indirection table with ring group ids */
3005 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
3006 if (j == max_rings)
3007 j = 0;
3008 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
3009 }
3010
3011 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
3012 req.hash_key_tbl_addr =
3013 cpu_to_le64(vnic->rss_hash_key_dma_addr);
3014 }
3015 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
3016 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3017}
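
/* Illustrative sketch (standalone, not part of the driver) of how the RSS
 * indirection table above is filled, assuming HW_HASH_INDEX_SIZE is 128
 * and four RX ring groups: entries repeat the group IDs round-robin
 * (0 1 2 3 0 1 2 3 ...).
 */
#include <stdio.h>
#include <stdint.h>

#define HASH_INDEX_SIZE	128

int main(void)
{
	uint16_t grp_ids[] = { 0, 1, 2, 3 };	/* stand-ins for fw_grp_ids[] */
	uint16_t tbl[HASH_INDEX_SIZE];
	unsigned int i, j, max_rings = 4;

	for (i = 0, j = 0; i < HASH_INDEX_SIZE; i++, j++) {
		if (j == max_rings)
			j = 0;
		tbl[i] = grp_ids[j];
	}
	printf("tbl[0..5] = %d %d %d %d %d %d\n",
	       tbl[0], tbl[1], tbl[2], tbl[3], tbl[4], tbl[5]);
	return 0;
}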
3018
3019static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
3020{
3021 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3022 struct hwrm_vnic_plcmodes_cfg_input req = {0};
3023
3024 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
3025 req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
3026 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
3027 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
3028 req.enables =
3029 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
3030 VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
3031 /* thresholds not implemented in firmware yet */
3032 req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
3033 req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
3034 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
3035 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3036}
3037
3038static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id)
3039{
3040 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
3041
3042 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
3043 req.rss_cos_lb_ctx_id =
3044 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx);
3045
3046 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3047 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
3048}
3049
3050static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
3051{
3052 int i;
3053
3054 for (i = 0; i < bp->nr_vnics; i++) {
3055 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3056
3057 if (vnic->fw_rss_cos_lb_ctx != INVALID_HW_RING_ID)
3058 bnxt_hwrm_vnic_ctx_free_one(bp, i);
3059 }
3060 bp->rsscos_nr_ctxs = 0;
3061}
3062
3063static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id)
3064{
3065 int rc;
3066 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
3067 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
3068 bp->hwrm_cmd_resp_addr;
3069
3070 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
3071 -1);
3072
3073 mutex_lock(&bp->hwrm_cmd_lock);
3074 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3075 if (!rc)
3076 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx =
3077 le16_to_cpu(resp->rss_cos_lb_ctx_id);
3078 mutex_unlock(&bp->hwrm_cmd_lock);
3079
3080 return rc;
3081}
3082
3083static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
3084{
3085 int grp_idx = 0;
3086 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3087 struct hwrm_vnic_cfg_input req = {0};
3088
3089 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
3090 /* Only RSS is supported for now; TBD: COS & LB */
3091 req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP |
3092 VNIC_CFG_REQ_ENABLES_RSS_RULE);
3093 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
3094 req.cos_rule = cpu_to_le16(0xffff);
3095 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3096 grp_idx = 0;
3097 else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
3098 grp_idx = vnic_id - 1;
3099
3100 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
3101 req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
3102
3103 req.lb_rule = cpu_to_le16(0xffff);
3104 req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
3105 VLAN_HLEN);
3106
3107 if (bp->flags & BNXT_FLAG_STRIP_VLAN)
3108 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
3109
3110 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3111}
3112
3113static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
3114{
3115 u32 rc = 0;
3116
3117 if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
3118 struct hwrm_vnic_free_input req = {0};
3119
3120 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
3121 req.vnic_id =
3122 cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
3123
3124 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3125 if (rc)
3126 return rc;
3127 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
3128 }
3129 return rc;
3130}
3131
3132static void bnxt_hwrm_vnic_free(struct bnxt *bp)
3133{
3134 u16 i;
3135
3136 for (i = 0; i < bp->nr_vnics; i++)
3137 bnxt_hwrm_vnic_free_one(bp, i);
3138}
3139
3140static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, u16 start_grp_id,
3141 u16 end_grp_id)
3142{
3143 u32 rc = 0, i, j;
3144 struct hwrm_vnic_alloc_input req = {0};
3145 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3146
3147 /* map ring groups to this vnic */
3148 for (i = start_grp_id, j = 0; i < end_grp_id; i++, j++) {
3149 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) {
3150 netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
3151 j, (end_grp_id - start_grp_id));
3152 break;
3153 }
3154 bp->vnic_info[vnic_id].fw_grp_ids[j] =
3155 bp->grp_info[i].fw_grp_id;
3156 }
3157
3158 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
3159 if (vnic_id == 0)
3160 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
3161
3162 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
3163
3164 mutex_lock(&bp->hwrm_cmd_lock);
3165 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3166 if (!rc)
3167 bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
3168 mutex_unlock(&bp->hwrm_cmd_lock);
3169 return rc;
3170}
3171
3172static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
3173{
3174 u16 i;
3175 u32 rc = 0;
3176
3177 mutex_lock(&bp->hwrm_cmd_lock);
3178 for (i = 0; i < bp->rx_nr_rings; i++) {
3179 struct hwrm_ring_grp_alloc_input req = {0};
3180 struct hwrm_ring_grp_alloc_output *resp =
3181 bp->hwrm_cmd_resp_addr;
3182
3183 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
3184
3185 req.cr = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
3186 req.rr = cpu_to_le16(bp->grp_info[i].rx_fw_ring_id);
3187 req.ar = cpu_to_le16(bp->grp_info[i].agg_fw_ring_id);
3188 req.sc = cpu_to_le16(bp->grp_info[i].fw_stats_ctx);
3189
3190 rc = _hwrm_send_message(bp, &req, sizeof(req),
3191 HWRM_CMD_TIMEOUT);
3192 if (rc)
3193 break;
3194
3195 bp->grp_info[i].fw_grp_id = le32_to_cpu(resp->ring_group_id);
3196 }
3197 mutex_unlock(&bp->hwrm_cmd_lock);
3198 return rc;
3199}
3200
3201static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
3202{
3203 u16 i;
3204 u32 rc = 0;
3205 struct hwrm_ring_grp_free_input req = {0};
3206
3207 if (!bp->grp_info)
3208 return 0;
3209
3210 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
3211
3212 mutex_lock(&bp->hwrm_cmd_lock);
3213 for (i = 0; i < bp->cp_nr_rings; i++) {
3214 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
3215 continue;
3216 req.ring_group_id =
3217 cpu_to_le32(bp->grp_info[i].fw_grp_id);
3218
3219 rc = _hwrm_send_message(bp, &req, sizeof(req),
3220 HWRM_CMD_TIMEOUT);
3221 if (rc)
3222 break;
3223 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3224 }
3225 mutex_unlock(&bp->hwrm_cmd_lock);
3226 return rc;
3227}
3228
3229static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
3230 struct bnxt_ring_struct *ring,
3231 u32 ring_type, u32 map_index,
3232 u32 stats_ctx_id)
3233{
3234 int rc = 0, err = 0;
3235 struct hwrm_ring_alloc_input req = {0};
3236 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3237 u16 ring_id;
3238
3239 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
3240
3241 req.enables = 0;
3242 if (ring->nr_pages > 1) {
3243 req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
3244 /* Page size is in log2 units */
3245 req.page_size = BNXT_PAGE_SHIFT;
3246 req.page_tbl_depth = 1;
3247 } else {
3248 req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
3249 }
3250 req.fbo = 0;
3251 /* Association of ring index with doorbell index and MSIX number */
3252 req.logical_id = cpu_to_le16(map_index);
3253
3254 switch (ring_type) {
3255 case HWRM_RING_ALLOC_TX:
3256 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
3257 /* Association of transmit ring with completion ring */
3258 req.cmpl_ring_id =
3259 cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
3260 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
3261 req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
3262 req.queue_id = cpu_to_le16(ring->queue_id);
3263 break;
3264 case HWRM_RING_ALLOC_RX:
3265 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
3266 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
3267 break;
3268 case HWRM_RING_ALLOC_AGG:
3269 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
3270 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
3271 break;
3272 case HWRM_RING_ALLOC_CMPL:
3273 req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
3274 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
3275 if (bp->flags & BNXT_FLAG_USING_MSIX)
3276 req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
3277 break;
3278 default:
3279 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
3280 ring_type);
3281 return -1;
3282 }
3283
3284 mutex_lock(&bp->hwrm_cmd_lock);
3285 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3286 err = le16_to_cpu(resp->error_code);
3287 ring_id = le16_to_cpu(resp->ring_id);
3288 mutex_unlock(&bp->hwrm_cmd_lock);
3289
3290 if (rc || err) {
3291 switch (ring_type) {
3292 case RING_FREE_REQ_RING_TYPE_CMPL:
3293 netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
3294 rc, err);
3295 return -1;
3296
3297 case RING_FREE_REQ_RING_TYPE_RX:
3298 netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
3299 rc, err);
3300 return -1;
3301
3302 case RING_FREE_REQ_RING_TYPE_TX:
3303 netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
3304 rc, err);
3305 return -1;
3306
3307 default:
3308 netdev_err(bp->dev, "Invalid ring\n");
3309 return -1;
3310 }
3311 }
3312 ring->fw_ring_id = ring_id;
3313 return rc;
3314}
3315
3316static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
3317{
3318 int i, rc = 0;
3319
edd0c2cc
MC
3320 for (i = 0; i < bp->cp_nr_rings; i++) {
3321 struct bnxt_napi *bnapi = bp->bnapi[i];
3322 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3323 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
c0c050c5 3324
edd0c2cc
MC
3325 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
3326 INVALID_STATS_CTX_ID);
3327 if (rc)
3328 goto err_out;
3329 cpr->cp_doorbell = bp->bar1 + i * 0x80;
3330 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
3331 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
c0c050c5
MC
3332 }
3333
edd0c2cc
MC
3334 for (i = 0; i < bp->tx_nr_rings; i++) {
3335 struct bnxt_napi *bnapi = bp->bnapi[i];
3336 struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
3337 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3338 u16 fw_stats_ctx = bp->grp_info[i].fw_stats_ctx;
c0c050c5 3339
edd0c2cc
MC
3340 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX, i,
3341 fw_stats_ctx);
3342 if (rc)
3343 goto err_out;
3344 txr->tx_doorbell = bp->bar1 + i * 0x80;
c0c050c5
MC
3345 }
3346
edd0c2cc
MC
3347 for (i = 0; i < bp->rx_nr_rings; i++) {
3348 struct bnxt_napi *bnapi = bp->bnapi[i];
3349 struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
3350 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
c0c050c5 3351
edd0c2cc
MC
3352 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX, i,
3353 INVALID_STATS_CTX_ID);
3354 if (rc)
3355 goto err_out;
3356 rxr->rx_doorbell = bp->bar1 + i * 0x80;
3357 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
3358 bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id;
c0c050c5
MC
3359 }
3360
3361 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
3362 for (i = 0; i < bp->rx_nr_rings; i++) {
3363 struct bnxt_napi *bnapi = bp->bnapi[i];
3364 struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
3365 struct bnxt_ring_struct *ring =
3366 &rxr->rx_agg_ring_struct;
3367
3368 rc = hwrm_ring_alloc_send_msg(bp, ring,
3369 HWRM_RING_ALLOC_AGG,
3370 bp->rx_nr_rings + i,
3371 INVALID_STATS_CTX_ID);
3372 if (rc)
3373 goto err_out;
3374
3375 rxr->rx_agg_doorbell =
3376 bp->bar1 + (bp->rx_nr_rings + i) * 0x80;
3377 writel(DB_KEY_RX | rxr->rx_agg_prod,
3378 rxr->rx_agg_doorbell);
3379 bp->grp_info[i].agg_fw_ring_id = ring->fw_ring_id;
3380 }
3381 }
3382err_out:
3383 return rc;
3384}
3385
3386static int hwrm_ring_free_send_msg(struct bnxt *bp,
3387 struct bnxt_ring_struct *ring,
3388 u32 ring_type, int cmpl_ring_id)
3389{
3390 int rc;
3391 struct hwrm_ring_free_input req = {0};
3392 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
3393 u16 error_code;
3394
3395 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, -1, -1);
3396 req.ring_type = ring_type;
3397 req.ring_id = cpu_to_le16(ring->fw_ring_id);
3398
3399 mutex_lock(&bp->hwrm_cmd_lock);
3400 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3401 error_code = le16_to_cpu(resp->error_code);
3402 mutex_unlock(&bp->hwrm_cmd_lock);
3403
3404 if (rc || error_code) {
3405 switch (ring_type) {
3406 case RING_FREE_REQ_RING_TYPE_CMPL:
3407 netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
3408 rc);
3409 return rc;
3410 case RING_FREE_REQ_RING_TYPE_RX:
3411 netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
3412 rc);
3413 return rc;
3414 case RING_FREE_REQ_RING_TYPE_TX:
3415 netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
3416 rc);
3417 return rc;
3418 default:
3419 netdev_err(bp->dev, "Invalid ring\n");
3420 return -1;
3421 }
3422 }
3423 return 0;
3424}
3425
edd0c2cc 3426static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
c0c050c5 3427{
edd0c2cc 3428 int i;
c0c050c5
MC
3429
3430 if (!bp->bnapi)
edd0c2cc 3431 return;
c0c050c5 3432
edd0c2cc
MC
3433 for (i = 0; i < bp->tx_nr_rings; i++) {
3434 struct bnxt_napi *bnapi = bp->bnapi[i];
3435 struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
3436 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3437 u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
3438
3439 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3440 hwrm_ring_free_send_msg(bp, ring,
3441 RING_FREE_REQ_RING_TYPE_TX,
3442 close_path ? cmpl_ring_id :
3443 INVALID_HW_RING_ID);
3444 ring->fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
3445 }
3446 }
3447
edd0c2cc
MC
3448 for (i = 0; i < bp->rx_nr_rings; i++) {
3449 struct bnxt_napi *bnapi = bp->bnapi[i];
3450 struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
3451 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
3452 u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
3453
3454 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3455 hwrm_ring_free_send_msg(bp, ring,
3456 RING_FREE_REQ_RING_TYPE_RX,
3457 close_path ? cmpl_ring_id :
3458 INVALID_HW_RING_ID);
3459 ring->fw_ring_id = INVALID_HW_RING_ID;
3460 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
3461 }
3462 }
3463
edd0c2cc
MC
3464 for (i = 0; i < bp->rx_nr_rings; i++) {
3465 struct bnxt_napi *bnapi = bp->bnapi[i];
3466 struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
3467 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
3468 u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
3469
3470 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3471 hwrm_ring_free_send_msg(bp, ring,
3472 RING_FREE_REQ_RING_TYPE_RX,
3473 close_path ? cmpl_ring_id :
3474 INVALID_HW_RING_ID);
3475 ring->fw_ring_id = INVALID_HW_RING_ID;
3476 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
3477 }
3478 }
3479
edd0c2cc
MC
3480 for (i = 0; i < bp->cp_nr_rings; i++) {
3481 struct bnxt_napi *bnapi = bp->bnapi[i];
3482 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3483 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3484
3485 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3486 hwrm_ring_free_send_msg(bp, ring,
3487 RING_FREE_REQ_RING_TYPE_CMPL,
3488 INVALID_HW_RING_ID);
3489 ring->fw_ring_id = INVALID_HW_RING_ID;
3490 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
c0c050c5
MC
3491 }
3492 }
c0c050c5
MC
3493}
3494
3495int bnxt_hwrm_set_coal(struct bnxt *bp)
3496{
3497 int i, rc = 0;
3498 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
3499 u16 max_buf, max_buf_irq;
3500 u16 buf_tmr, buf_tmr_irq;
3501 u32 flags;
3502
3503 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
3504 -1, -1);
3505
3506 /* Each rx completion (2 records) should be DMAed immediately */
3507 max_buf = min_t(u16, bp->coal_bufs / 4, 2);
3508 /* max_buf must not be zero */
3509 max_buf = clamp_t(u16, max_buf, 1, 63);
3510 max_buf_irq = clamp_t(u16, bp->coal_bufs_irq, 1, 63);
3511 buf_tmr = max_t(u16, bp->coal_ticks / 4, 1);
3512 buf_tmr_irq = max_t(u16, bp->coal_ticks_irq, 1);
3513
3514 flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
3515
3516 /* RING_IDLE generates more IRQs for lower latency. Enable it only
3517 * if coal_ticks is less than 25 us.
3518 */
3519 if (BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks) < 25)
3520 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
3521
3522 req.flags = cpu_to_le16(flags);
3523 req.num_cmpl_dma_aggr = cpu_to_le16(max_buf);
3524 req.num_cmpl_dma_aggr_during_int = cpu_to_le16(max_buf_irq);
3525 req.cmpl_aggr_dma_tmr = cpu_to_le16(buf_tmr);
3526 req.cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmr_irq);
3527 req.int_lat_tmr_min = cpu_to_le16(buf_tmr);
3528 req.int_lat_tmr_max = cpu_to_le16(bp->coal_ticks);
3529 req.num_cmpl_aggr_int = cpu_to_le16(bp->coal_bufs);
3530
3531 mutex_lock(&bp->hwrm_cmd_lock);
3532 for (i = 0; i < bp->cp_nr_rings; i++) {
3533 req.ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
3534
3535 rc = _hwrm_send_message(bp, &req, sizeof(req),
3536 HWRM_CMD_TIMEOUT);
3537 if (rc)
3538 break;
3539 }
3540 mutex_unlock(&bp->hwrm_cmd_lock);
3541 return rc;
3542}
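
/* Illustrative sketch (standalone, not part of the driver) of the
 * coalescing parameter clamping above, with assumed example settings
 * (coal_bufs = 20, coal_bufs_irq = 2, coal_ticks = 12, coal_ticks_irq = 1).
 * Only the min/clamp arithmetic is reproduced, not the HWRM message.
 */
#include <stdio.h>

static unsigned int clamp_u(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned int coal_bufs = 20, coal_bufs_irq = 2;
	unsigned int coal_ticks = 12, coal_ticks_irq = 1;
	unsigned int max_buf, max_buf_irq, buf_tmr, buf_tmr_irq;

	max_buf = min_u(coal_bufs / 4, 2);	/* DMA each rx completion */
	max_buf = clamp_u(max_buf, 1, 63);	/* must not be zero */
	max_buf_irq = clamp_u(coal_bufs_irq, 1, 63);
	buf_tmr = max_u(coal_ticks / 4, 1);
	buf_tmr_irq = max_u(coal_ticks_irq, 1);

	printf("max_buf=%u max_buf_irq=%u buf_tmr=%u buf_tmr_irq=%u\n",
	       max_buf, max_buf_irq, buf_tmr, buf_tmr_irq);
	return 0;
}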
3543
3544static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
3545{
3546 int rc = 0, i;
3547 struct hwrm_stat_ctx_free_input req = {0};
3548
3549 if (!bp->bnapi)
3550 return 0;
3551
3552 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
3553
3554 mutex_lock(&bp->hwrm_cmd_lock);
3555 for (i = 0; i < bp->cp_nr_rings; i++) {
3556 struct bnxt_napi *bnapi = bp->bnapi[i];
3557 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3558
3559 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
3560 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
3561
3562 rc = _hwrm_send_message(bp, &req, sizeof(req),
3563 HWRM_CMD_TIMEOUT);
3564 if (rc)
3565 break;
3566
3567 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3568 }
3569 }
3570 mutex_unlock(&bp->hwrm_cmd_lock);
3571 return rc;
3572}
3573
3574static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
3575{
3576 int rc = 0, i;
3577 struct hwrm_stat_ctx_alloc_input req = {0};
3578 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3579
3580 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
3581
3582 req.update_period_ms = cpu_to_le32(1000);
3583
3584 mutex_lock(&bp->hwrm_cmd_lock);
3585 for (i = 0; i < bp->cp_nr_rings; i++) {
3586 struct bnxt_napi *bnapi = bp->bnapi[i];
3587 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3588
3589 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
3590
3591 rc = _hwrm_send_message(bp, &req, sizeof(req),
3592 HWRM_CMD_TIMEOUT);
3593 if (rc)
3594 break;
3595
3596 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
3597
3598 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
3599 }
3600 mutex_unlock(&bp->hwrm_cmd_lock);
3601 return rc;
3602}
3603
4a21b49b 3604int bnxt_hwrm_func_qcaps(struct bnxt *bp)
c0c050c5
MC
3605{
3606 int rc = 0;
3607 struct hwrm_func_qcaps_input req = {0};
3608 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3609
3610 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
3611 req.fid = cpu_to_le16(0xffff);
3612
3613 mutex_lock(&bp->hwrm_cmd_lock);
3614 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3615 if (rc)
3616 goto hwrm_func_qcaps_exit;
3617
3618 if (BNXT_PF(bp)) {
3619 struct bnxt_pf_info *pf = &bp->pf;
3620
3621 pf->fw_fid = le16_to_cpu(resp->fid);
3622 pf->port_id = le16_to_cpu(resp->port_id);
3623 memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN);
bdd4347b 3624 memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
c0c050c5
MC
3625 pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
3626 pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
3627 pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
c0c050c5 3628 pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
b72d4a68
MC
3629 pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
3630 if (!pf->max_hw_ring_grps)
3631 pf->max_hw_ring_grps = pf->max_tx_rings;
c0c050c5
MC
3632 pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
3633 pf->max_vnics = le16_to_cpu(resp->max_vnics);
3634 pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
3635 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
3636 pf->max_vfs = le16_to_cpu(resp->max_vfs);
3637 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
3638 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
3639 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
3640 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
3641 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
3642 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
3643 } else {
379a80a1 3644#ifdef CONFIG_BNXT_SRIOV
c0c050c5
MC
3645 struct bnxt_vf_info *vf = &bp->vf;
3646
3647 vf->fw_fid = le16_to_cpu(resp->fid);
3648 memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN);
bdd4347b
JH
3649 if (is_valid_ether_addr(vf->mac_addr))
3650 /* overwrite netdev dev_addr with admin VF MAC */
3651 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
3652 else
3653 random_ether_addr(bp->dev->dev_addr);
c0c050c5
MC
3654
3655 vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
3656 vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
3657 vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
3658 vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
b72d4a68
MC
3659 vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
3660 if (!vf->max_hw_ring_grps)
3661 vf->max_hw_ring_grps = vf->max_tx_rings;
c0c050c5
MC
3662 vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
3663 vf->max_vnics = le16_to_cpu(resp->max_vnics);
3664 vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
379a80a1 3665#endif
c0c050c5
MC
3666 }
3667
3668 bp->tx_push_thresh = 0;
3669 if (resp->flags &
3670 cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
3671 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
3672
3673hwrm_func_qcaps_exit:
3674 mutex_unlock(&bp->hwrm_cmd_lock);
3675 return rc;
3676}
3677
3678static int bnxt_hwrm_func_reset(struct bnxt *bp)
3679{
3680 struct hwrm_func_reset_input req = {0};
3681
3682 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
3683 req.enables = 0;
3684
3685 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
3686}
3687
3688static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
3689{
3690 int rc = 0;
3691 struct hwrm_queue_qportcfg_input req = {0};
3692 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
3693 u8 i, *qptr;
3694
3695 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
3696
3697 mutex_lock(&bp->hwrm_cmd_lock);
3698 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3699 if (rc)
3700 goto qportcfg_exit;
3701
3702 if (!resp->max_configurable_queues) {
3703 rc = -EINVAL;
3704 goto qportcfg_exit;
3705 }
3706 bp->max_tc = resp->max_configurable_queues;
3707 if (bp->max_tc > BNXT_MAX_QUEUE)
3708 bp->max_tc = BNXT_MAX_QUEUE;
3709
3710 qptr = &resp->queue_id0;
3711 for (i = 0; i < bp->max_tc; i++) {
3712 bp->q_info[i].queue_id = *qptr++;
3713 bp->q_info[i].queue_profile = *qptr++;
3714 }
3715
3716qportcfg_exit:
3717 mutex_unlock(&bp->hwrm_cmd_lock);
3718 return rc;
3719}
3720
3721static int bnxt_hwrm_ver_get(struct bnxt *bp)
3722{
3723 int rc;
3724 struct hwrm_ver_get_input req = {0};
3725 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
3726
3727 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
3728 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
3729 req.hwrm_intf_min = HWRM_VERSION_MINOR;
3730 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
3731 mutex_lock(&bp->hwrm_cmd_lock);
3732 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3733 if (rc)
3734 goto hwrm_ver_get_exit;
3735
3736 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
3737
c193554e
MC
3738 if (resp->hwrm_intf_maj < 1) {
3739 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
c0c050c5 3740 resp->hwrm_intf_maj, resp->hwrm_intf_min,
c193554e
MC
3741 resp->hwrm_intf_upd);
3742 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
c0c050c5
MC
3743 }
3744 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d",
3745 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
3746 resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
3747
3748hwrm_ver_get_exit:
3749 mutex_unlock(&bp->hwrm_cmd_lock);
3750 return rc;
3751}
3752
3753static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
3754{
3755 if (bp->vxlan_port_cnt) {
3756 bnxt_hwrm_tunnel_dst_port_free(
3757 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
3758 }
3759 bp->vxlan_port_cnt = 0;
3760 if (bp->nge_port_cnt) {
3761 bnxt_hwrm_tunnel_dst_port_free(
3762 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
3763 }
3764 bp->nge_port_cnt = 0;
3765}
3766
3767static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
3768{
3769 int rc, i;
3770 u32 tpa_flags = 0;
3771
3772 if (set_tpa)
3773 tpa_flags = bp->flags & BNXT_FLAG_TPA;
3774 for (i = 0; i < bp->nr_vnics; i++) {
3775 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
3776 if (rc) {
3777 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
3778 i, rc);
3779 return rc;
3780 }
3781 }
3782 return 0;
3783}
3784
3785static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
3786{
3787 int i;
3788
3789 for (i = 0; i < bp->nr_vnics; i++)
3790 bnxt_hwrm_vnic_set_rss(bp, i, false);
3791}
3792
3793static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
3794 bool irq_re_init)
3795{
3796 if (bp->vnic_info) {
3797 bnxt_hwrm_clear_vnic_filter(bp);
3798 /* clear all RSS settings before freeing the vnic ctx */
3799 bnxt_hwrm_clear_vnic_rss(bp);
3800 bnxt_hwrm_vnic_ctx_free(bp);
3801 /* before freeing the vnic, undo the vnic tpa settings */
3802 if (bp->flags & BNXT_FLAG_TPA)
3803 bnxt_set_tpa(bp, false);
3804 bnxt_hwrm_vnic_free(bp);
3805 }
3806 bnxt_hwrm_ring_free(bp, close_path);
3807 bnxt_hwrm_ring_grp_free(bp);
3808 if (irq_re_init) {
3809 bnxt_hwrm_stat_ctx_free(bp);
3810 bnxt_hwrm_free_tunnel_ports(bp);
3811 }
3812}
3813
3814static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
3815{
3816 int rc;
3817
3818 /* allocate context for vnic */
3819 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id);
3820 if (rc) {
3821 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
3822 vnic_id, rc);
3823 goto vnic_setup_err;
3824 }
3825 bp->rsscos_nr_ctxs++;
3826
3827 /* configure default vnic, ring grp */
3828 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
3829 if (rc) {
3830 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
3831 vnic_id, rc);
3832 goto vnic_setup_err;
3833 }
3834
3835 /* Enable RSS hashing on vnic */
3836 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
3837 if (rc) {
3838 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
3839 vnic_id, rc);
3840 goto vnic_setup_err;
3841 }
3842
3843 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
3844 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
3845 if (rc) {
3846 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
3847 vnic_id, rc);
3848 }
3849 }
3850
3851vnic_setup_err:
3852 return rc;
3853}
3854
3855static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
3856{
3857#ifdef CONFIG_RFS_ACCEL
3858 int i, rc = 0;
3859
3860 for (i = 0; i < bp->rx_nr_rings; i++) {
3861 u16 vnic_id = i + 1;
3862 u16 ring_id = i;
3863
3864 if (vnic_id >= bp->nr_vnics)
3865 break;
3866
3867 bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
3868 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, ring_id + 1);
3869 if (rc) {
3870 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
3871 vnic_id, rc);
3872 break;
3873 }
3874 rc = bnxt_setup_vnic(bp, vnic_id);
3875 if (rc)
3876 break;
3877 }
3878 return rc;
3879#else
3880 return 0;
3881#endif
3882}
3883
b664f008
MC
3884static int bnxt_cfg_rx_mode(struct bnxt *);
3885
c0c050c5
MC
3886static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
3887{
3888 int rc = 0;
3889
3890 if (irq_re_init) {
3891 rc = bnxt_hwrm_stat_ctx_alloc(bp);
3892 if (rc) {
3893 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
3894 rc);
3895 goto err_out;
3896 }
3897 }
3898
3899 rc = bnxt_hwrm_ring_alloc(bp);
3900 if (rc) {
3901 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
3902 goto err_out;
3903 }
3904
3905 rc = bnxt_hwrm_ring_grp_alloc(bp);
3906 if (rc) {
3907 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
3908 goto err_out;
3909 }
3910
3911 /* default vnic 0 */
3912 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, bp->rx_nr_rings);
3913 if (rc) {
3914 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
3915 goto err_out;
3916 }
3917
3918 rc = bnxt_setup_vnic(bp, 0);
3919 if (rc)
3920 goto err_out;
3921
3922 if (bp->flags & BNXT_FLAG_RFS) {
3923 rc = bnxt_alloc_rfs_vnics(bp);
3924 if (rc)
3925 goto err_out;
3926 }
3927
3928 if (bp->flags & BNXT_FLAG_TPA) {
3929 rc = bnxt_set_tpa(bp, true);
3930 if (rc)
3931 goto err_out;
3932 }
3933
3934 if (BNXT_VF(bp))
3935 bnxt_update_vf_mac(bp);
3936
3937 /* Filter for default vnic 0 */
3938 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
3939 if (rc) {
3940 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
3941 goto err_out;
3942 }
3943 bp->vnic_info[0].uc_filter_count = 1;
3944
c193554e 3945 bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
c0c050c5
MC
3946
3947 if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
3948 bp->vnic_info[0].rx_mask |=
3949 CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
3950
b664f008
MC
3951 rc = bnxt_cfg_rx_mode(bp);
3952 if (rc)
c0c050c5 3953 goto err_out;
c0c050c5
MC
3954
3955 rc = bnxt_hwrm_set_coal(bp);
3956 if (rc)
3957 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
3958 rc);
3959
3960 return 0;
3961
3962err_out:
3963 bnxt_hwrm_resource_free(bp, 0, true);
3964
3965 return rc;
3966}
3967
3968static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
3969{
3970 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
3971 return 0;
3972}
3973
3974static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
3975{
3976 bnxt_init_rx_rings(bp);
3977 bnxt_init_tx_rings(bp);
3978 bnxt_init_ring_grps(bp, irq_re_init);
3979 bnxt_init_vnics(bp);
3980
3981 return bnxt_init_chip(bp, irq_re_init);
3982}
3983
3984static void bnxt_disable_int(struct bnxt *bp)
3985{
3986 int i;
3987
3988 if (!bp->bnapi)
3989 return;
3990
3991 for (i = 0; i < bp->cp_nr_rings; i++) {
3992 struct bnxt_napi *bnapi = bp->bnapi[i];
3993 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3994
3995 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
3996 }
3997}
3998
3999static void bnxt_enable_int(struct bnxt *bp)
4000{
4001 int i;
4002
4003 atomic_set(&bp->intr_sem, 0);
4004 for (i = 0; i < bp->cp_nr_rings; i++) {
4005 struct bnxt_napi *bnapi = bp->bnapi[i];
4006 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4007
4008 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
4009 }
4010}
4011
4012static int bnxt_set_real_num_queues(struct bnxt *bp)
4013{
4014 int rc;
4015 struct net_device *dev = bp->dev;
4016
4017 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
4018 if (rc)
4019 return rc;
4020
4021 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
4022 if (rc)
4023 return rc;
4024
4025#ifdef CONFIG_RFS_ACCEL
45019a18 4026 if (bp->flags & BNXT_FLAG_RFS)
c0c050c5 4027 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
c0c050c5
MC
4028#endif
4029
4030 return rc;
4031}
4032
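/* Enable MSI-X, trimming the RX/TX/completion ring counts to the number of
 * vectors actually granted, and build the per-vector IRQ table.
 */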
4033static int bnxt_setup_msix(struct bnxt *bp)
4034{
4035 struct msix_entry *msix_ent;
4036 struct net_device *dev = bp->dev;
4037 int i, total_vecs, rc = 0;
4038 const int len = sizeof(bp->irq_tbl[0].name);
4039
4040 bp->flags &= ~BNXT_FLAG_USING_MSIX;
4041 total_vecs = bp->cp_nr_rings;
4042
4043 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
4044 if (!msix_ent)
4045 return -ENOMEM;
4046
4047 for (i = 0; i < total_vecs; i++) {
4048 msix_ent[i].entry = i;
4049 msix_ent[i].vector = 0;
4050 }
4051
4052 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, 1, total_vecs);
4053 if (total_vecs < 0) {
4054 rc = -ENODEV;
4055 goto msix_setup_exit;
4056 }
4057
4058 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
4059 if (bp->irq_tbl) {
4060 int tcs;
4061
4062 /* Trim rings based upon num of vectors allocated */
4063 bp->rx_nr_rings = min_t(int, total_vecs, bp->rx_nr_rings);
4064 bp->tx_nr_rings = min_t(int, total_vecs, bp->tx_nr_rings);
4065 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4066 tcs = netdev_get_num_tc(dev);
4067 if (tcs > 1) {
4068 bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
4069 if (bp->tx_nr_rings_per_tc == 0) {
4070 netdev_reset_tc(dev);
4071 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4072 } else {
4073 int i, off, count;
4074
4075 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
4076 for (i = 0; i < tcs; i++) {
4077 count = bp->tx_nr_rings_per_tc;
4078 off = i * count;
4079 netdev_set_tc_queue(dev, i, count, off);
4080 }
4081 }
4082 }
4083 bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
4084
4085 for (i = 0; i < bp->cp_nr_rings; i++) {
4086 bp->irq_tbl[i].vector = msix_ent[i].vector;
4087 snprintf(bp->irq_tbl[i].name, len,
4088 "%s-%s-%d", dev->name, "TxRx", i);
4089 bp->irq_tbl[i].handler = bnxt_msix;
4090 }
4091 rc = bnxt_set_real_num_queues(bp);
4092 if (rc)
4093 goto msix_setup_exit;
4094 } else {
4095 rc = -ENOMEM;
4096 goto msix_setup_exit;
4097 }
4098 bp->flags |= BNXT_FLAG_USING_MSIX;
4099 kfree(msix_ent);
4100 return 0;
4101
4102msix_setup_exit:
4103 netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc);
4104 pci_disable_msix(bp->pdev);
4105 kfree(msix_ent);
4106 return rc;
4107}
4108
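/* Legacy interrupt fallback: a single shared INTx vector with one RX, TX
 * and completion ring.
 */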
4109static int bnxt_setup_inta(struct bnxt *bp)
4110{
4111 int rc;
4112 const int len = sizeof(bp->irq_tbl[0].name);
4113
4114 if (netdev_get_num_tc(bp->dev))
4115 netdev_reset_tc(bp->dev);
4116
4117 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
4118 if (!bp->irq_tbl) {
4119 rc = -ENOMEM;
4120 return rc;
4121 }
4122 bp->rx_nr_rings = 1;
4123 bp->tx_nr_rings = 1;
4124 bp->cp_nr_rings = 1;
4125 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4126 bp->irq_tbl[0].vector = bp->pdev->irq;
4127 snprintf(bp->irq_tbl[0].name, len,
4128 "%s-%s-%d", bp->dev->name, "TxRx", 0);
4129 bp->irq_tbl[0].handler = bnxt_inta;
4130 rc = bnxt_set_real_num_queues(bp);
4131 return rc;
4132}
4133
4134static int bnxt_setup_int_mode(struct bnxt *bp)
4135{
4136 int rc = 0;
4137
4138 if (bp->flags & BNXT_FLAG_MSIX_CAP)
4139 rc = bnxt_setup_msix(bp);
4140
4141 if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
4142 /* fallback to INTA */
4143 rc = bnxt_setup_inta(bp);
4144 }
4145 return rc;
4146}
4147
4148static void bnxt_free_irq(struct bnxt *bp)
4149{
4150 struct bnxt_irq *irq;
4151 int i;
4152
4153#ifdef CONFIG_RFS_ACCEL
4154 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
4155 bp->dev->rx_cpu_rmap = NULL;
4156#endif
4157 if (!bp->irq_tbl)
4158 return;
4159
4160 for (i = 0; i < bp->cp_nr_rings; i++) {
4161 irq = &bp->irq_tbl[i];
4162 if (irq->requested)
4163 free_irq(irq->vector, bp->bnapi[i]);
4164 irq->requested = 0;
4165 }
4166 if (bp->flags & BNXT_FLAG_USING_MSIX)
4167 pci_disable_msix(bp->pdev);
4168 kfree(bp->irq_tbl);
4169 bp->irq_tbl = NULL;
4170}
4171
4172static int bnxt_request_irq(struct bnxt *bp)
4173{
4174 int i, rc = 0;
4175 unsigned long flags = 0;
4176#ifdef CONFIG_RFS_ACCEL
4177 struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
4178#endif
4179
4180 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
4181 flags = IRQF_SHARED;
4182
4183 for (i = 0; i < bp->cp_nr_rings; i++) {
4184 struct bnxt_irq *irq = &bp->irq_tbl[i];
4185#ifdef CONFIG_RFS_ACCEL
4186 if (rmap && (i < bp->rx_nr_rings)) {
4187 rc = irq_cpu_rmap_add(rmap, irq->vector);
4188 if (rc)
4189 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
4190 i);
4191 }
4192#endif
4193 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
4194 bp->bnapi[i]);
4195 if (rc)
4196 break;
4197
4198 irq->requested = 1;
4199 }
4200 return rc;
4201}
4202
4203static void bnxt_del_napi(struct bnxt *bp)
4204{
4205 int i;
4206
4207 if (!bp->bnapi)
4208 return;
4209
4210 for (i = 0; i < bp->cp_nr_rings; i++) {
4211 struct bnxt_napi *bnapi = bp->bnapi[i];
4212
4213 napi_hash_del(&bnapi->napi);
4214 netif_napi_del(&bnapi->napi);
4215 }
4216}
4217
4218static void bnxt_init_napi(struct bnxt *bp)
4219{
4220 int i;
4221 struct bnxt_napi *bnapi;
4222
4223 if (bp->flags & BNXT_FLAG_USING_MSIX) {
4224 for (i = 0; i < bp->cp_nr_rings; i++) {
4225 bnapi = bp->bnapi[i];
4226 netif_napi_add(bp->dev, &bnapi->napi,
4227 bnxt_poll, 64);
c0c050c5
MC
4228 }
4229 } else {
4230 bnapi = bp->bnapi[0];
4231 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
c0c050c5
MC
4232 }
4233}
4234
4235static void bnxt_disable_napi(struct bnxt *bp)
4236{
4237 int i;
4238
4239 if (!bp->bnapi)
4240 return;
4241
4242 for (i = 0; i < bp->cp_nr_rings; i++) {
4243 napi_disable(&bp->bnapi[i]->napi);
4244 bnxt_disable_poll(bp->bnapi[i]);
4245 }
4246}
4247
4248static void bnxt_enable_napi(struct bnxt *bp)
4249{
4250 int i;
4251
4252 for (i = 0; i < bp->cp_nr_rings; i++) {
4253 bnxt_enable_poll(bp->bnapi[i]);
4254 napi_enable(&bp->bnapi[i]->napi);
4255 }
4256}
4257
4258static void bnxt_tx_disable(struct bnxt *bp)
4259{
4260 int i;
4261 struct bnxt_napi *bnapi;
4262 struct bnxt_tx_ring_info *txr;
4263 struct netdev_queue *txq;
4264
4265 if (bp->bnapi) {
4266 for (i = 0; i < bp->tx_nr_rings; i++) {
4267 bnapi = bp->bnapi[i];
4268 txr = &bnapi->tx_ring;
4269 txq = netdev_get_tx_queue(bp->dev, i);
4270 __netif_tx_lock(txq, smp_processor_id());
4271 txr->dev_state = BNXT_DEV_STATE_CLOSING;
4272 __netif_tx_unlock(txq);
4273 }
4274 }
4275 /* Stop all TX queues */
4276 netif_tx_disable(bp->dev);
4277 netif_carrier_off(bp->dev);
4278}
4279
4280static void bnxt_tx_enable(struct bnxt *bp)
4281{
4282 int i;
4283 struct bnxt_napi *bnapi;
4284 struct bnxt_tx_ring_info *txr;
4285 struct netdev_queue *txq;
4286
4287 for (i = 0; i < bp->tx_nr_rings; i++) {
4288 bnapi = bp->bnapi[i];
4289 txr = &bnapi->tx_ring;
4290 txq = netdev_get_tx_queue(bp->dev, i);
4291 txr->dev_state = 0;
4292 }
4293 netif_tx_wake_all_queues(bp->dev);
4294 if (bp->link_info.link_up)
4295 netif_carrier_on(bp->dev);
4296}
4297
4298static void bnxt_report_link(struct bnxt *bp)
4299{
4300 if (bp->link_info.link_up) {
4301 const char *duplex;
4302 const char *flow_ctrl;
4303 u16 speed;
4304
4305 netif_carrier_on(bp->dev);
4306 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
4307 duplex = "full";
4308 else
4309 duplex = "half";
4310 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
4311 flow_ctrl = "ON - receive & transmit";
4312 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
4313 flow_ctrl = "ON - transmit";
4314 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
4315 flow_ctrl = "ON - receive";
4316 else
4317 flow_ctrl = "none";
4318 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
4319 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
4320 speed, duplex, flow_ctrl);
4321 } else {
4322 netif_carrier_off(bp->dev);
4323 netdev_err(bp->dev, "NIC Link is Down\n");
4324 }
4325}
4326
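/* Query the current PHY state with HWRM_PORT_PHY_QCFG and cache it in
 * bp->link_info.  When chng_link_state is set, also update link_up and
 * report any link change.
 */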
4327static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
4328{
4329 int rc = 0;
4330 struct bnxt_link_info *link_info = &bp->link_info;
4331 struct hwrm_port_phy_qcfg_input req = {0};
4332 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4333 u8 link_up = link_info->link_up;
4334
4335 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
4336
4337 mutex_lock(&bp->hwrm_cmd_lock);
4338 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4339 if (rc) {
4340 mutex_unlock(&bp->hwrm_cmd_lock);
4341 return rc;
4342 }
4343
4344 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
4345 link_info->phy_link_status = resp->link;
4346 link_info->duplex = resp->duplex;
4347 link_info->pause = resp->pause;
4348 link_info->auto_mode = resp->auto_mode;
4349 link_info->auto_pause_setting = resp->auto_pause;
4350 link_info->force_pause_setting = resp->force_pause;
c193554e 4351 link_info->duplex_setting = resp->duplex;
c0c050c5
MC
4352 if (link_info->phy_link_status == BNXT_LINK_LINK)
4353 link_info->link_speed = le16_to_cpu(resp->link_speed);
4354 else
4355 link_info->link_speed = 0;
4356 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
4357 link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed);
4358 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
4359 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
4360 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
4361 link_info->phy_ver[0] = resp->phy_maj;
4362 link_info->phy_ver[1] = resp->phy_min;
4363 link_info->phy_ver[2] = resp->phy_bld;
4364 link_info->media_type = resp->media_type;
4365 link_info->transceiver = resp->transceiver_type;
4366 link_info->phy_addr = resp->phy_addr;
4367
4368 /* TODO: need to add more logic to report VF link */
4369 if (chng_link_state) {
4370 if (link_info->phy_link_status == BNXT_LINK_LINK)
4371 link_info->link_up = 1;
4372 else
4373 link_info->link_up = 0;
4374 if (link_up != link_info->link_up)
4375 bnxt_report_link(bp);
4376 } else {
 4377 /* always link down if not required to update link state */
4378 link_info->link_up = 0;
4379 }
4380 mutex_unlock(&bp->hwrm_cmd_lock);
4381 return 0;
4382}
4383
4384static void
4385bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
4386{
4387 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
4388 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
4389 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
4390 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
 4391 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
4392 req->enables |=
4393 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
4394 } else {
4395 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
4396 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
4397 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
4398 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
4399 req->enables |=
4400 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
4401 }
4402}
4403
4404static void bnxt_hwrm_set_link_common(struct bnxt *bp,
4405 struct hwrm_port_phy_cfg_input *req)
4406{
4407 u8 autoneg = bp->link_info.autoneg;
4408 u16 fw_link_speed = bp->link_info.req_link_speed;
4409 u32 advertising = bp->link_info.advertising;
4410
4411 if (autoneg & BNXT_AUTONEG_SPEED) {
4412 req->auto_mode |=
4413 PORT_PHY_CFG_REQ_AUTO_MODE_MASK;
4414
4415 req->enables |= cpu_to_le32(
4416 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
4417 req->auto_link_speed_mask = cpu_to_le16(advertising);
4418
4419 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
4420 req->flags |=
4421 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
4422 } else {
4423 req->force_link_speed = cpu_to_le16(fw_link_speed);
4424 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
4425 }
4426
4427 /* currently don't support half duplex */
4428 req->auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL;
4429 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX);
4430 /* tell chimp that the setting takes effect immediately */
4431 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
4432}
4433
4434int bnxt_hwrm_set_pause(struct bnxt *bp)
4435{
4436 struct hwrm_port_phy_cfg_input req = {0};
4437 int rc;
4438
4439 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
4440 bnxt_hwrm_set_pause_common(bp, &req);
4441
4442 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
4443 bp->link_info.force_link_chng)
4444 bnxt_hwrm_set_link_common(bp, &req);
4445
4446 mutex_lock(&bp->hwrm_cmd_lock);
4447 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4448 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
 4449 /* Since changing the pause setting doesn't trigger any link
 4450 * change event, the driver needs to update the current pause
 4451 * result upon successful return of the phy_cfg command.
 4452 */
4453 bp->link_info.pause =
4454 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
4455 bp->link_info.auto_pause_setting = 0;
4456 if (!bp->link_info.force_link_chng)
4457 bnxt_report_link(bp);
4458 }
4459 bp->link_info.force_link_chng = false;
4460 mutex_unlock(&bp->hwrm_cmd_lock);
4461 return rc;
4462}
4463
4464int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause)
4465{
4466 struct hwrm_port_phy_cfg_input req = {0};
4467
4468 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
4469 if (set_pause)
4470 bnxt_hwrm_set_pause_common(bp, &req);
4471
4472 bnxt_hwrm_set_link_common(bp, &req);
4473 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4474}
4475
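/* Compare the requested pause, speed and duplex settings against what the
 * PHY currently reports and issue HWRM_PORT_PHY_CFG only when something
 * actually needs to change.
 */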
4476static int bnxt_update_phy_setting(struct bnxt *bp)
4477{
4478 int rc;
4479 bool update_link = false;
4480 bool update_pause = false;
4481 struct bnxt_link_info *link_info = &bp->link_info;
4482
4483 rc = bnxt_update_link(bp, true);
4484 if (rc) {
4485 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
4486 rc);
4487 return rc;
4488 }
4489 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
4490 link_info->auto_pause_setting != link_info->req_flow_ctrl)
4491 update_pause = true;
4492 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
4493 link_info->force_pause_setting != link_info->req_flow_ctrl)
4494 update_pause = true;
4495 if (link_info->req_duplex != link_info->duplex_setting)
4496 update_link = true;
4497 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
4498 if (BNXT_AUTO_MODE(link_info->auto_mode))
4499 update_link = true;
4500 if (link_info->req_link_speed != link_info->force_link_speed)
4501 update_link = true;
4502 } else {
4503 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
4504 update_link = true;
4505 if (link_info->advertising != link_info->auto_link_speeds)
4506 update_link = true;
4507 if (link_info->req_link_speed != link_info->auto_link_speed)
4508 update_link = true;
4509 }
4510
4511 if (update_link)
4512 rc = bnxt_hwrm_set_link_setting(bp, update_pause);
4513 else if (update_pause)
4514 rc = bnxt_hwrm_set_pause(bp);
4515 if (rc) {
4516 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
4517 rc);
4518 return rc;
4519 }
4520
4521 return rc;
4522}
4523
11809490
JH
4524/* Common routine to pre-map certain register blocks to different GRC windows.
 4525 * A PF has 16 4K windows and a VF has 4 4K windows.  However, only 15 windows
 4526 * in the PF and 3 windows in the VF can be customized to map in different
 4527 * register blocks.
 4528 */
4529static void bnxt_preset_reg_win(struct bnxt *bp)
4530{
4531 if (BNXT_PF(bp)) {
4532 /* CAG registers map to GRC window #4 */
4533 writel(BNXT_CAG_REG_BASE,
4534 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
4535 }
4536}
4537
c0c050c5
MC
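/* Core open path: set up the interrupt mode, allocate memory, NAPI and
 * IRQs, program the chip, bring the PHY settings up to date, register
 * tunnel ports, then enable interrupts and the TX queues and start the
 * periodic timer.
 */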
4538static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4539{
4540 int rc = 0;
4541
11809490 4542 bnxt_preset_reg_win(bp);
c0c050c5
MC
4543 netif_carrier_off(bp->dev);
4544 if (irq_re_init) {
4545 rc = bnxt_setup_int_mode(bp);
4546 if (rc) {
4547 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
4548 rc);
4549 return rc;
4550 }
4551 }
4552 if ((bp->flags & BNXT_FLAG_RFS) &&
4553 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
4554 /* disable RFS if falling back to INTA */
4555 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
4556 bp->flags &= ~BNXT_FLAG_RFS;
4557 }
4558
4559 rc = bnxt_alloc_mem(bp, irq_re_init);
4560 if (rc) {
4561 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
4562 goto open_err_free_mem;
4563 }
4564
4565 if (irq_re_init) {
4566 bnxt_init_napi(bp);
4567 rc = bnxt_request_irq(bp);
4568 if (rc) {
4569 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
4570 goto open_err;
4571 }
4572 }
4573
4574 bnxt_enable_napi(bp);
4575
4576 rc = bnxt_init_nic(bp, irq_re_init);
4577 if (rc) {
4578 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
4579 goto open_err;
4580 }
4581
4582 if (link_re_init) {
4583 rc = bnxt_update_phy_setting(bp);
4584 if (rc)
4585 goto open_err;
4586 }
4587
4588 if (irq_re_init) {
4589#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
4590 vxlan_get_rx_port(bp->dev);
4591#endif
4592 if (!bnxt_hwrm_tunnel_dst_port_alloc(
4593 bp, htons(0x17c1),
4594 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE))
4595 bp->nge_port_cnt = 1;
4596 }
4597
caefe526 4598 set_bit(BNXT_STATE_OPEN, &bp->state);
c0c050c5
MC
4599 bnxt_enable_int(bp);
4600 /* Enable TX queues */
4601 bnxt_tx_enable(bp);
4602 mod_timer(&bp->timer, jiffies + bp->current_interval);
4603
4604 return 0;
4605
4606open_err:
4607 bnxt_disable_napi(bp);
4608 bnxt_del_napi(bp);
4609
4610open_err_free_mem:
4611 bnxt_free_skbs(bp);
4612 bnxt_free_irq(bp);
4613 bnxt_free_mem(bp, true);
4614 return rc;
4615}
4616
4617/* rtnl_lock held */
4618int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4619{
4620 int rc = 0;
4621
4622 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
4623 if (rc) {
4624 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
4625 dev_close(bp->dev);
4626 }
4627 return rc;
4628}
4629
4630static int bnxt_open(struct net_device *dev)
4631{
4632 struct bnxt *bp = netdev_priv(dev);
4633 int rc = 0;
4634
4635 rc = bnxt_hwrm_func_reset(bp);
4636 if (rc) {
4637 netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
4638 rc);
4639 rc = -1;
4640 return rc;
4641 }
4642 return __bnxt_open_nic(bp, true, true);
4643}
4644
4645static void bnxt_disable_int_sync(struct bnxt *bp)
4646{
4647 int i;
4648
4649 atomic_inc(&bp->intr_sem);
4650 if (!netif_running(bp->dev))
4651 return;
4652
4653 bnxt_disable_int(bp);
4654 for (i = 0; i < bp->cp_nr_rings; i++)
4655 synchronize_irq(bp->irq_tbl[i].vector);
4656}
4657
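/* Core close path: wait for any pending SRIOV configuration, stop TX,
 * clear BNXT_STATE_OPEN and wait for the slow-path task to finish, then
 * flush the rings, quiesce interrupts and NAPI, and free SKBs, IRQs and
 * memory.
 */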
4658int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4659{
4660 int rc = 0;
4661
4662#ifdef CONFIG_BNXT_SRIOV
4663 if (bp->sriov_cfg) {
4664 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
4665 !bp->sriov_cfg,
4666 BNXT_SRIOV_CFG_WAIT_TMO);
4667 if (rc)
4668 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
4669 }
4670#endif
 4671 /* Change device state to avoid TX queue wake ups */
4672 bnxt_tx_disable(bp);
4673
caefe526 4674 clear_bit(BNXT_STATE_OPEN, &bp->state);
4cebdcec
MC
4675 smp_mb__after_atomic();
4676 while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
4677 msleep(20);
c0c050c5
MC
4678
4679 /* Flush rings before disabling interrupts */
4680 bnxt_shutdown_nic(bp, irq_re_init);
4681
4682 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
4683
4684 bnxt_disable_napi(bp);
4685 bnxt_disable_int_sync(bp);
4686 del_timer_sync(&bp->timer);
4687 bnxt_free_skbs(bp);
4688
4689 if (irq_re_init) {
4690 bnxt_free_irq(bp);
4691 bnxt_del_napi(bp);
4692 }
4693 bnxt_free_mem(bp, irq_re_init);
4694 return rc;
4695}
4696
4697static int bnxt_close(struct net_device *dev)
4698{
4699 struct bnxt *bp = netdev_priv(dev);
4700
4701 bnxt_close_nic(bp, true, true);
4702 return 0;
4703}
4704
4705/* rtnl_lock held */
4706static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4707{
4708 switch (cmd) {
4709 case SIOCGMIIPHY:
4710 /* fallthru */
4711 case SIOCGMIIREG: {
4712 if (!netif_running(dev))
4713 return -EAGAIN;
4714
4715 return 0;
4716 }
4717
4718 case SIOCSMIIREG:
4719 if (!netif_running(dev))
4720 return -EAGAIN;
4721
4722 return 0;
4723
4724 default:
4725 /* do nothing */
4726 break;
4727 }
4728 return -EOPNOTSUPP;
4729}
4730
4731static struct rtnl_link_stats64 *
4732bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
4733{
4734 u32 i;
4735 struct bnxt *bp = netdev_priv(dev);
4736
4737 memset(stats, 0, sizeof(struct rtnl_link_stats64));
4738
4739 if (!bp->bnapi)
4740 return stats;
4741
4742 /* TODO check if we need to synchronize with bnxt_close path */
4743 for (i = 0; i < bp->cp_nr_rings; i++) {
4744 struct bnxt_napi *bnapi = bp->bnapi[i];
4745 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4746 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
4747
4748 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
4749 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
4750 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
4751
4752 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
4753 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
4754 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
4755
4756 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
4757 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
4758 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
4759
4760 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
4761 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
4762 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
4763
4764 stats->rx_missed_errors +=
4765 le64_to_cpu(hw_stats->rx_discard_pkts);
4766
4767 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
4768
4769 stats->rx_dropped += le64_to_cpu(hw_stats->rx_drop_pkts);
4770
4771 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
4772 }
4773
4774 return stats;
4775}
4776
4777static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
4778{
4779 struct net_device *dev = bp->dev;
4780 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
4781 struct netdev_hw_addr *ha;
4782 u8 *haddr;
4783 int mc_count = 0;
4784 bool update = false;
4785 int off = 0;
4786
4787 netdev_for_each_mc_addr(ha, dev) {
4788 if (mc_count >= BNXT_MAX_MC_ADDRS) {
4789 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
4790 vnic->mc_list_count = 0;
4791 return false;
4792 }
4793 haddr = ha->addr;
4794 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
4795 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
4796 update = true;
4797 }
4798 off += ETH_ALEN;
4799 mc_count++;
4800 }
4801 if (mc_count)
4802 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
4803
4804 if (mc_count != vnic->mc_list_count) {
4805 vnic->mc_list_count = mc_count;
4806 update = true;
4807 }
4808 return update;
4809}
4810
4811static bool bnxt_uc_list_updated(struct bnxt *bp)
4812{
4813 struct net_device *dev = bp->dev;
4814 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
4815 struct netdev_hw_addr *ha;
4816 int off = 0;
4817
4818 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
4819 return true;
4820
4821 netdev_for_each_uc_addr(ha, dev) {
4822 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
4823 return true;
4824
4825 off += ETH_ALEN;
4826 }
4827 return false;
4828}
4829
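/* ndo_set_rx_mode handler: recompute the RX mask from the device flags and
 * the unicast/multicast lists, then defer the actual HWRM update to
 * bnxt_sp_task() since firmware calls may sleep.
 */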
4830static void bnxt_set_rx_mode(struct net_device *dev)
4831{
4832 struct bnxt *bp = netdev_priv(dev);
4833 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
4834 u32 mask = vnic->rx_mask;
4835 bool mc_update = false;
4836 bool uc_update;
4837
4838 if (!netif_running(dev))
4839 return;
4840
4841 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
4842 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
4843 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
4844
4845 /* Only allow PF to be in promiscuous mode */
4846 if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp))
4847 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
4848
4849 uc_update = bnxt_uc_list_updated(bp);
4850
4851 if (dev->flags & IFF_ALLMULTI) {
4852 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
4853 vnic->mc_list_count = 0;
4854 } else {
4855 mc_update = bnxt_mc_list_updated(bp, &mask);
4856 }
4857
4858 if (mask != vnic->rx_mask || uc_update || mc_update) {
4859 vnic->rx_mask = mask;
4860
4861 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
4862 schedule_work(&bp->sp_task);
4863 }
4864}
4865
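/* Apply the RX mode computed in bnxt_set_rx_mode(): free the old unicast
 * L2 filters, re-add filters for the current unicast list (falling back to
 * promiscuous mode if there are too many addresses), then program the RX
 * mask for VNIC 0.
 */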
b664f008 4866static int bnxt_cfg_rx_mode(struct bnxt *bp)
c0c050c5
MC
4867{
4868 struct net_device *dev = bp->dev;
4869 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
4870 struct netdev_hw_addr *ha;
4871 int i, off = 0, rc;
4872 bool uc_update;
4873
4874 netif_addr_lock_bh(dev);
4875 uc_update = bnxt_uc_list_updated(bp);
4876 netif_addr_unlock_bh(dev);
4877
4878 if (!uc_update)
4879 goto skip_uc;
4880
4881 mutex_lock(&bp->hwrm_cmd_lock);
4882 for (i = 1; i < vnic->uc_filter_count; i++) {
4883 struct hwrm_cfa_l2_filter_free_input req = {0};
4884
4885 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
4886 -1);
4887
4888 req.l2_filter_id = vnic->fw_l2_filter_id[i];
4889
4890 rc = _hwrm_send_message(bp, &req, sizeof(req),
4891 HWRM_CMD_TIMEOUT);
4892 }
4893 mutex_unlock(&bp->hwrm_cmd_lock);
4894
4895 vnic->uc_filter_count = 1;
4896
4897 netif_addr_lock_bh(dev);
4898 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
4899 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
4900 } else {
4901 netdev_for_each_uc_addr(ha, dev) {
4902 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
4903 off += ETH_ALEN;
4904 vnic->uc_filter_count++;
4905 }
4906 }
4907 netif_addr_unlock_bh(dev);
4908
4909 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
4910 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
4911 if (rc) {
4912 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
4913 rc);
4914 vnic->uc_filter_count = i;
b664f008 4915 return rc;
c0c050c5
MC
4916 }
4917 }
4918
4919skip_uc:
4920 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
4921 if (rc)
4922 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
4923 rc);
b664f008
MC
4924
4925 return rc;
c0c050c5
MC
4926}
4927
2bcfa6f6
MC
4928static bool bnxt_rfs_capable(struct bnxt *bp)
4929{
4930#ifdef CONFIG_RFS_ACCEL
4931 struct bnxt_pf_info *pf = &bp->pf;
4932 int vnics;
4933
4934 if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP))
4935 return false;
4936
4937 vnics = 1 + bp->rx_nr_rings;
4938 if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics)
4939 return false;
4940
4941 return true;
4942#else
4943 return false;
4944#endif
4945}
4946
c0c050c5
MC
4947static netdev_features_t bnxt_fix_features(struct net_device *dev,
4948 netdev_features_t features)
4949{
2bcfa6f6
MC
4950 struct bnxt *bp = netdev_priv(dev);
4951
4952 if (!bnxt_rfs_capable(bp))
4953 features &= ~NETIF_F_NTUPLE;
c0c050c5
MC
4954 return features;
4955}
4956
4957static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
4958{
4959 struct bnxt *bp = netdev_priv(dev);
4960 u32 flags = bp->flags;
4961 u32 changes;
4962 int rc = 0;
4963 bool re_init = false;
4964 bool update_tpa = false;
4965
4966 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
4967 if ((features & NETIF_F_GRO) && (bp->pdev->revision > 0))
4968 flags |= BNXT_FLAG_GRO;
4969 if (features & NETIF_F_LRO)
4970 flags |= BNXT_FLAG_LRO;
4971
4972 if (features & NETIF_F_HW_VLAN_CTAG_RX)
4973 flags |= BNXT_FLAG_STRIP_VLAN;
4974
4975 if (features & NETIF_F_NTUPLE)
4976 flags |= BNXT_FLAG_RFS;
4977
4978 changes = flags ^ bp->flags;
4979 if (changes & BNXT_FLAG_TPA) {
4980 update_tpa = true;
4981 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
4982 (flags & BNXT_FLAG_TPA) == 0)
4983 re_init = true;
4984 }
4985
4986 if (changes & ~BNXT_FLAG_TPA)
4987 re_init = true;
4988
4989 if (flags != bp->flags) {
4990 u32 old_flags = bp->flags;
4991
4992 bp->flags = flags;
4993
2bcfa6f6 4994 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
c0c050c5
MC
4995 if (update_tpa)
4996 bnxt_set_ring_params(bp);
4997 return rc;
4998 }
4999
5000 if (re_init) {
5001 bnxt_close_nic(bp, false, false);
5002 if (update_tpa)
5003 bnxt_set_ring_params(bp);
5004
5005 return bnxt_open_nic(bp, false, false);
5006 }
5007 if (update_tpa) {
5008 rc = bnxt_set_tpa(bp,
5009 (flags & BNXT_FLAG_TPA) ?
5010 true : false);
5011 if (rc)
5012 bp->flags = old_flags;
5013 }
5014 }
5015 return rc;
5016}
5017
9f554590
MC
5018static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
5019{
5020 struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
5021 int i = bnapi->index;
5022
5023 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
5024 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
5025 txr->tx_cons);
5026}
5027
5028static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
5029{
5030 struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
5031 int i = bnapi->index;
5032
5033 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
5034 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
5035 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
5036 rxr->rx_sw_agg_prod);
5037}
5038
5039static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
5040{
5041 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5042 int i = bnapi->index;
5043
5044 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
5045 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
5046}
5047
c0c050c5
MC
5048static void bnxt_dbg_dump_states(struct bnxt *bp)
5049{
5050 int i;
5051 struct bnxt_napi *bnapi;
c0c050c5
MC
5052
5053 for (i = 0; i < bp->cp_nr_rings; i++) {
5054 bnapi = bp->bnapi[i];
c0c050c5 5055 if (netif_msg_drv(bp)) {
9f554590
MC
5056 bnxt_dump_tx_sw_state(bnapi);
5057 bnxt_dump_rx_sw_state(bnapi);
5058 bnxt_dump_cp_sw_state(bnapi);
c0c050c5
MC
5059 }
5060 }
5061}
5062
5063static void bnxt_reset_task(struct bnxt *bp)
5064{
5065 bnxt_dbg_dump_states(bp);
028de140
MC
5066 if (netif_running(bp->dev)) {
5067 bnxt_close_nic(bp, false, false);
5068 bnxt_open_nic(bp, false, false);
5069 }
c0c050c5
MC
5070}
5071
5072static void bnxt_tx_timeout(struct net_device *dev)
5073{
5074 struct bnxt *bp = netdev_priv(dev);
5075
5076 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
5077 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
5078 schedule_work(&bp->sp_task);
5079}
5080
5081#ifdef CONFIG_NET_POLL_CONTROLLER
5082static void bnxt_poll_controller(struct net_device *dev)
5083{
5084 struct bnxt *bp = netdev_priv(dev);
5085 int i;
5086
5087 for (i = 0; i < bp->cp_nr_rings; i++) {
5088 struct bnxt_irq *irq = &bp->irq_tbl[i];
5089
5090 disable_irq(irq->vector);
5091 irq->handler(irq->vector, bp->bnapi[i]);
5092 enable_irq(irq->vector);
5093 }
5094}
5095#endif
5096
5097static void bnxt_timer(unsigned long data)
5098{
5099 struct bnxt *bp = (struct bnxt *)data;
5100 struct net_device *dev = bp->dev;
5101
5102 if (!netif_running(dev))
5103 return;
5104
5105 if (atomic_read(&bp->intr_sem) != 0)
5106 goto bnxt_restart_timer;
5107
5108bnxt_restart_timer:
5109 mod_timer(&bp->timer, jiffies + bp->current_interval);
5110}
5111
5112static void bnxt_cfg_ntp_filters(struct bnxt *);
5113
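/* Slow-path workqueue handler.  BNXT_STATE_IN_SP_TASK is set for the
 * duration of the handler so that bnxt_close_nic() can wait for it;
 * individual events (RX mask, ntuple filters, link change, VXLAN ports,
 * reset) are consumed with test_and_clear_bit().
 */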
5114static void bnxt_sp_task(struct work_struct *work)
5115{
5116 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
5117 int rc;
5118
4cebdcec
MC
5119 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5120 smp_mb__after_atomic();
5121 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
5122 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
c0c050c5 5123 return;
4cebdcec 5124 }
c0c050c5
MC
5125
5126 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
5127 bnxt_cfg_rx_mode(bp);
5128
5129 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
5130 bnxt_cfg_ntp_filters(bp);
5131 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
5132 rc = bnxt_update_link(bp, true);
5133 if (rc)
5134 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
5135 rc);
5136 }
5137 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
5138 bnxt_hwrm_exec_fwd_req(bp);
5139 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
5140 bnxt_hwrm_tunnel_dst_port_alloc(
5141 bp, bp->vxlan_port,
5142 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
5143 }
5144 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
5145 bnxt_hwrm_tunnel_dst_port_free(
5146 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
5147 }
028de140
MC
5148 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) {
5149 /* bnxt_reset_task() calls bnxt_close_nic() which waits
5150 * for BNXT_STATE_IN_SP_TASK to clear.
5151 */
5152 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5153 rtnl_lock();
c0c050c5 5154 bnxt_reset_task(bp);
028de140
MC
5155 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5156 rtnl_unlock();
5157 }
4cebdcec
MC
5158
5159 smp_mb__before_atomic();
5160 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
c0c050c5
MC
5161}
5162
5163static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
5164{
5165 int rc;
5166 struct bnxt *bp = netdev_priv(dev);
5167
5168 SET_NETDEV_DEV(dev, &pdev->dev);
5169
5170 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5171 rc = pci_enable_device(pdev);
5172 if (rc) {
5173 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
5174 goto init_err;
5175 }
5176
5177 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5178 dev_err(&pdev->dev,
5179 "Cannot find PCI device base address, aborting\n");
5180 rc = -ENODEV;
5181 goto init_err_disable;
5182 }
5183
5184 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5185 if (rc) {
5186 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
5187 goto init_err_disable;
5188 }
5189
5190 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
5191 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
 5192 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
 5193 rc = -EIO;
 goto init_err_release;
5194 }
5195
5196 pci_set_master(pdev);
5197
5198 bp->dev = dev;
5199 bp->pdev = pdev;
5200
5201 bp->bar0 = pci_ioremap_bar(pdev, 0);
5202 if (!bp->bar0) {
5203 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
5204 rc = -ENOMEM;
5205 goto init_err_release;
5206 }
5207
5208 bp->bar1 = pci_ioremap_bar(pdev, 2);
5209 if (!bp->bar1) {
5210 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
5211 rc = -ENOMEM;
5212 goto init_err_release;
5213 }
5214
5215 bp->bar2 = pci_ioremap_bar(pdev, 4);
5216 if (!bp->bar2) {
5217 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
5218 rc = -ENOMEM;
5219 goto init_err_release;
5220 }
5221
5222 INIT_WORK(&bp->sp_task, bnxt_sp_task);
5223
5224 spin_lock_init(&bp->ntp_fltr_lock);
5225
5226 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
5227 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
5228
5229 bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(4);
5230 bp->coal_bufs = 20;
5231 bp->coal_ticks_irq = BNXT_USEC_TO_COAL_TIMER(1);
5232 bp->coal_bufs_irq = 2;
5233
5234 init_timer(&bp->timer);
5235 bp->timer.data = (unsigned long)bp;
5236 bp->timer.function = bnxt_timer;
5237 bp->current_interval = BNXT_TIMER_INTERVAL;
5238
caefe526 5239 clear_bit(BNXT_STATE_OPEN, &bp->state);
c0c050c5
MC
5240
5241 return 0;
5242
5243init_err_release:
5244 if (bp->bar2) {
5245 pci_iounmap(pdev, bp->bar2);
5246 bp->bar2 = NULL;
5247 }
5248
5249 if (bp->bar1) {
5250 pci_iounmap(pdev, bp->bar1);
5251 bp->bar1 = NULL;
5252 }
5253
5254 if (bp->bar0) {
5255 pci_iounmap(pdev, bp->bar0);
5256 bp->bar0 = NULL;
5257 }
5258
5259 pci_release_regions(pdev);
5260
5261init_err_disable:
5262 pci_disable_device(pdev);
5263
5264init_err:
5265 return rc;
5266}
5267
5268/* rtnl_lock held */
5269static int bnxt_change_mac_addr(struct net_device *dev, void *p)
5270{
5271 struct sockaddr *addr = p;
1fc2cfd0
JH
5272 struct bnxt *bp = netdev_priv(dev);
5273 int rc = 0;
c0c050c5
MC
5274
5275 if (!is_valid_ether_addr(addr->sa_data))
5276 return -EADDRNOTAVAIL;
5277
bdd4347b
JH
5278#ifdef CONFIG_BNXT_SRIOV
5279 if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr))
5280 return -EADDRNOTAVAIL;
5281#endif
5282
1fc2cfd0
JH
5283 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
5284 return 0;
5285
c0c050c5 5286 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1fc2cfd0
JH
5287 if (netif_running(dev)) {
5288 bnxt_close_nic(bp, false, false);
5289 rc = bnxt_open_nic(bp, false, false);
5290 }
c0c050c5 5291
1fc2cfd0 5292 return rc;
c0c050c5
MC
5293}
5294
5295/* rtnl_lock held */
5296static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
5297{
5298 struct bnxt *bp = netdev_priv(dev);
5299
5300 if (new_mtu < 60 || new_mtu > 9000)
5301 return -EINVAL;
5302
5303 if (netif_running(dev))
5304 bnxt_close_nic(bp, false, false);
5305
5306 dev->mtu = new_mtu;
5307 bnxt_set_ring_params(bp);
5308
5309 if (netif_running(dev))
5310 return bnxt_open_nic(bp, false, false);
5311
5312 return 0;
5313}
5314
5315static int bnxt_setup_tc(struct net_device *dev, u8 tc)
5316{
5317 struct bnxt *bp = netdev_priv(dev);
5318
5319 if (tc > bp->max_tc) {
5320 netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n",
5321 tc, bp->max_tc);
5322 return -EINVAL;
5323 }
5324
5325 if (netdev_get_num_tc(dev) == tc)
5326 return 0;
5327
5328 if (tc) {
5329 int max_rx_rings, max_tx_rings;
5330
5331 bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
5332 if (bp->tx_nr_rings_per_tc * tc > max_tx_rings)
5333 return -ENOMEM;
5334 }
5335
5336 /* Needs to close the device and do hw resource re-allocations */
5337 if (netif_running(bp->dev))
5338 bnxt_close_nic(bp, true, false);
5339
5340 if (tc) {
5341 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
5342 netdev_set_num_tc(dev, tc);
5343 } else {
5344 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
5345 netdev_reset_tc(dev);
5346 }
5347 bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
5348 bp->num_stat_ctxs = bp->cp_nr_rings;
5349
5350 if (netif_running(bp->dev))
5351 return bnxt_open_nic(bp, true, false);
5352
5353 return 0;
5354}
5355
5356#ifdef CONFIG_RFS_ACCEL
5357static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
5358 struct bnxt_ntuple_filter *f2)
5359{
5360 struct flow_keys *keys1 = &f1->fkeys;
5361 struct flow_keys *keys2 = &f2->fkeys;
5362
5363 if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
5364 keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
5365 keys1->ports.ports == keys2->ports.ports &&
5366 keys1->basic.ip_proto == keys2->basic.ip_proto &&
5367 keys1->basic.n_proto == keys2->basic.n_proto &&
5368 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr))
5369 return true;
5370
5371 return false;
5372}
5373
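/* aRFS ndo_rx_flow_steer handler: dissect the flow, drop duplicates that
 * already have a filter, reserve a software filter ID and queue the actual
 * HWRM ntuple filter allocation to bnxt_sp_task().
 */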
5374static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
5375 u16 rxq_index, u32 flow_id)
5376{
5377 struct bnxt *bp = netdev_priv(dev);
5378 struct bnxt_ntuple_filter *fltr, *new_fltr;
5379 struct flow_keys *fkeys;
5380 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
84e86b98 5381 int rc = 0, idx, bit_id;
c0c050c5
MC
5382 struct hlist_head *head;
5383
5384 if (skb->encapsulation)
5385 return -EPROTONOSUPPORT;
5386
5387 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
5388 if (!new_fltr)
5389 return -ENOMEM;
5390
5391 fkeys = &new_fltr->fkeys;
5392 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
5393 rc = -EPROTONOSUPPORT;
5394 goto err_free;
5395 }
5396
5397 if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
5398 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
5399 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
5400 rc = -EPROTONOSUPPORT;
5401 goto err_free;
5402 }
5403
5404 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
5405
5406 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
5407 head = &bp->ntp_fltr_hash_tbl[idx];
5408 rcu_read_lock();
5409 hlist_for_each_entry_rcu(fltr, head, hash) {
5410 if (bnxt_fltr_match(fltr, new_fltr)) {
5411 rcu_read_unlock();
5412 rc = 0;
5413 goto err_free;
5414 }
5415 }
5416 rcu_read_unlock();
5417
5418 spin_lock_bh(&bp->ntp_fltr_lock);
84e86b98
MC
5419 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
5420 BNXT_NTP_FLTR_MAX_FLTR, 0);
5421 if (bit_id < 0) {
c0c050c5
MC
5422 spin_unlock_bh(&bp->ntp_fltr_lock);
5423 rc = -ENOMEM;
5424 goto err_free;
5425 }
5426
84e86b98 5427 new_fltr->sw_id = (u16)bit_id;
c0c050c5
MC
5428 new_fltr->flow_id = flow_id;
5429 new_fltr->rxq = rxq_index;
5430 hlist_add_head_rcu(&new_fltr->hash, head);
5431 bp->ntp_fltr_count++;
5432 spin_unlock_bh(&bp->ntp_fltr_lock);
5433
5434 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
5435 schedule_work(&bp->sp_task);
5436
5437 return new_fltr->sw_id;
5438
5439err_free:
5440 kfree(new_fltr);
5441 return rc;
5442}
5443
5444static void bnxt_cfg_ntp_filters(struct bnxt *bp)
5445{
5446 int i;
5447
5448 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
5449 struct hlist_head *head;
5450 struct hlist_node *tmp;
5451 struct bnxt_ntuple_filter *fltr;
5452 int rc;
5453
5454 head = &bp->ntp_fltr_hash_tbl[i];
5455 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
5456 bool del = false;
5457
5458 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
5459 if (rps_may_expire_flow(bp->dev, fltr->rxq,
5460 fltr->flow_id,
5461 fltr->sw_id)) {
5462 bnxt_hwrm_cfa_ntuple_filter_free(bp,
5463 fltr);
5464 del = true;
5465 }
5466 } else {
5467 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
5468 fltr);
5469 if (rc)
5470 del = true;
5471 else
5472 set_bit(BNXT_FLTR_VALID, &fltr->state);
5473 }
5474
5475 if (del) {
5476 spin_lock_bh(&bp->ntp_fltr_lock);
5477 hlist_del_rcu(&fltr->hash);
5478 bp->ntp_fltr_count--;
5479 spin_unlock_bh(&bp->ntp_fltr_lock);
5480 synchronize_rcu();
5481 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
5482 kfree(fltr);
5483 }
5484 }
5485 }
5486}
5487
5488#else
5489
5490static void bnxt_cfg_ntp_filters(struct bnxt *bp)
5491{
5492}
5493
5494#endif /* CONFIG_RFS_ACCEL */
5495
5496static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
5497 __be16 port)
5498{
5499 struct bnxt *bp = netdev_priv(dev);
5500
5501 if (!netif_running(dev))
5502 return;
5503
5504 if (sa_family != AF_INET6 && sa_family != AF_INET)
5505 return;
5506
5507 if (bp->vxlan_port_cnt && bp->vxlan_port != port)
5508 return;
5509
5510 bp->vxlan_port_cnt++;
5511 if (bp->vxlan_port_cnt == 1) {
5512 bp->vxlan_port = port;
5513 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
5514 schedule_work(&bp->sp_task);
5515 }
5516}
5517
5518static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
5519 __be16 port)
5520{
5521 struct bnxt *bp = netdev_priv(dev);
5522
5523 if (!netif_running(dev))
5524 return;
5525
5526 if (sa_family != AF_INET6 && sa_family != AF_INET)
5527 return;
5528
5529 if (bp->vxlan_port_cnt && bp->vxlan_port == port) {
5530 bp->vxlan_port_cnt--;
5531
5532 if (bp->vxlan_port_cnt == 0) {
5533 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
5534 schedule_work(&bp->sp_task);
5535 }
5536 }
5537}
5538
5539static const struct net_device_ops bnxt_netdev_ops = {
5540 .ndo_open = bnxt_open,
5541 .ndo_start_xmit = bnxt_start_xmit,
5542 .ndo_stop = bnxt_close,
5543 .ndo_get_stats64 = bnxt_get_stats64,
5544 .ndo_set_rx_mode = bnxt_set_rx_mode,
5545 .ndo_do_ioctl = bnxt_ioctl,
5546 .ndo_validate_addr = eth_validate_addr,
5547 .ndo_set_mac_address = bnxt_change_mac_addr,
5548 .ndo_change_mtu = bnxt_change_mtu,
5549 .ndo_fix_features = bnxt_fix_features,
5550 .ndo_set_features = bnxt_set_features,
5551 .ndo_tx_timeout = bnxt_tx_timeout,
5552#ifdef CONFIG_BNXT_SRIOV
5553 .ndo_get_vf_config = bnxt_get_vf_config,
5554 .ndo_set_vf_mac = bnxt_set_vf_mac,
5555 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
5556 .ndo_set_vf_rate = bnxt_set_vf_bw,
5557 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
5558 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
5559#endif
5560#ifdef CONFIG_NET_POLL_CONTROLLER
5561 .ndo_poll_controller = bnxt_poll_controller,
5562#endif
5563 .ndo_setup_tc = bnxt_setup_tc,
5564#ifdef CONFIG_RFS_ACCEL
5565 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
5566#endif
5567 .ndo_add_vxlan_port = bnxt_add_vxlan_port,
5568 .ndo_del_vxlan_port = bnxt_del_vxlan_port,
5569#ifdef CONFIG_NET_RX_BUSY_POLL
5570 .ndo_busy_poll = bnxt_busy_poll,
5571#endif
5572};
5573
5574static void bnxt_remove_one(struct pci_dev *pdev)
5575{
5576 struct net_device *dev = pci_get_drvdata(pdev);
5577 struct bnxt *bp = netdev_priv(dev);
5578
5579 if (BNXT_PF(bp))
5580 bnxt_sriov_disable(bp);
5581
5582 unregister_netdev(dev);
5583 cancel_work_sync(&bp->sp_task);
5584 bp->sp_event = 0;
5585
be58a0da 5586 bnxt_hwrm_func_drv_unrgtr(bp);
c0c050c5
MC
5587 bnxt_free_hwrm_resources(bp);
5588 pci_iounmap(pdev, bp->bar2);
5589 pci_iounmap(pdev, bp->bar1);
5590 pci_iounmap(pdev, bp->bar0);
5591 free_netdev(dev);
5592
5593 pci_release_regions(pdev);
5594 pci_disable_device(pdev);
5595}
5596
5597static int bnxt_probe_phy(struct bnxt *bp)
5598{
5599 int rc = 0;
5600 struct bnxt_link_info *link_info = &bp->link_info;
5601 char phy_ver[PHY_VER_STR_LEN];
5602
5603 rc = bnxt_update_link(bp, false);
5604 if (rc) {
5605 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
5606 rc);
5607 return rc;
5608 }
5609
 5610 /* initialize the ethtool setting copy with NVM settings */
5611 if (BNXT_AUTO_MODE(link_info->auto_mode))
5612 link_info->autoneg |= BNXT_AUTONEG_SPEED;
5613
5614 if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) {
5615 if (link_info->auto_pause_setting == BNXT_LINK_PAUSE_BOTH)
5616 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
5617 link_info->req_flow_ctrl = link_info->auto_pause_setting;
5618 } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
5619 link_info->req_flow_ctrl = link_info->force_pause_setting;
5620 }
5621 link_info->req_duplex = link_info->duplex_setting;
5622 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
5623 link_info->req_link_speed = link_info->auto_link_speed;
5624 else
5625 link_info->req_link_speed = link_info->force_link_speed;
5626 link_info->advertising = link_info->auto_link_speeds;
5627 snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d",
5628 link_info->phy_ver[0],
5629 link_info->phy_ver[1],
5630 link_info->phy_ver[2]);
5631 strcat(bp->fw_ver_str, phy_ver);
5632 return rc;
5633}
5634
5635static int bnxt_get_max_irq(struct pci_dev *pdev)
5636{
5637 u16 ctrl;
5638
5639 if (!pdev->msix_cap)
5640 return 1;
5641
5642 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
5643 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
5644}
5645
5646void bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx)
5647{
b72d4a68 5648 int max_rings = 0, max_ring_grps = 0;
c0c050c5
MC
5649
5650 if (BNXT_PF(bp)) {
4a21b49b
MC
5651 *max_tx = bp->pf.max_tx_rings;
5652 *max_rx = bp->pf.max_rx_rings;
c0c050c5
MC
5653 max_rings = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
5654 max_rings = min_t(int, max_rings, bp->pf.max_stat_ctxs);
b72d4a68 5655 max_ring_grps = bp->pf.max_hw_ring_grps;
c0c050c5 5656 } else {
379a80a1 5657#ifdef CONFIG_BNXT_SRIOV
c0c050c5
MC
5658 *max_tx = bp->vf.max_tx_rings;
5659 *max_rx = bp->vf.max_rx_rings;
5660 max_rings = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
5661 max_rings = min_t(int, max_rings, bp->vf.max_stat_ctxs);
b72d4a68 5662 max_ring_grps = bp->vf.max_hw_ring_grps;
379a80a1 5663#endif
c0c050c5
MC
5664 }
5665 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5666 *max_rx >>= 1;
5667
5668 *max_rx = min_t(int, *max_rx, max_rings);
b72d4a68 5669 *max_rx = min_t(int, *max_rx, max_ring_grps);
c0c050c5
MC
5670 *max_tx = min_t(int, *max_tx, max_rings);
5671}
5672
5673static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5674{
5675 static int version_printed;
5676 struct net_device *dev;
5677 struct bnxt *bp;
5678 int rc, max_rx_rings, max_tx_rings, max_irqs, dflt_rings;
5679
5680 if (version_printed++ == 0)
5681 pr_info("%s", version);
5682
5683 max_irqs = bnxt_get_max_irq(pdev);
5684 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
5685 if (!dev)
5686 return -ENOMEM;
5687
5688 bp = netdev_priv(dev);
5689
5690 if (bnxt_vf_pciid(ent->driver_data))
5691 bp->flags |= BNXT_FLAG_VF;
5692
2bcfa6f6 5693 if (pdev->msix_cap)
c0c050c5 5694 bp->flags |= BNXT_FLAG_MSIX_CAP;
c0c050c5
MC
5695
5696 rc = bnxt_init_board(pdev, dev);
5697 if (rc < 0)
5698 goto init_err_free;
5699
5700 dev->netdev_ops = &bnxt_netdev_ops;
5701 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
5702 dev->ethtool_ops = &bnxt_ethtool_ops;
5703
5704 pci_set_drvdata(pdev, dev);
5705
5706 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
5707 NETIF_F_TSO | NETIF_F_TSO6 |
5708 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
5709 NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
5710 NETIF_F_RXHASH |
5711 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;
5712
c0c050c5
MC
5713 dev->hw_enc_features =
5714 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
5715 NETIF_F_TSO | NETIF_F_TSO6 |
5716 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
5717 NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
5718 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
5719 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
5720 NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
5721 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
5722 dev->priv_flags |= IFF_UNICAST_FLT;
5723
5724#ifdef CONFIG_BNXT_SRIOV
5725 init_waitqueue_head(&bp->sriov_cfg_wait);
5726#endif
5727 rc = bnxt_alloc_hwrm_resources(bp);
5728 if (rc)
5729 goto init_err;
5730
5731 mutex_init(&bp->hwrm_cmd_lock);
5732 bnxt_hwrm_ver_get(bp);
5733
5734 rc = bnxt_hwrm_func_drv_rgtr(bp);
5735 if (rc)
5736 goto init_err;
5737
5738 /* Get the MAX capabilities for this function */
5739 rc = bnxt_hwrm_func_qcaps(bp);
5740 if (rc) {
5741 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
5742 rc);
5743 rc = -1;
5744 goto init_err;
5745 }
5746
5747 rc = bnxt_hwrm_queue_qportcfg(bp);
5748 if (rc) {
5749 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
5750 rc);
5751 rc = -1;
5752 goto init_err;
5753 }
5754
5755 bnxt_set_tpa_flags(bp);
5756 bnxt_set_ring_params(bp);
5757 dflt_rings = netif_get_num_default_rss_queues();
bdd4347b 5758 if (BNXT_PF(bp))
c0c050c5 5759 bp->pf.max_irqs = max_irqs;
379a80a1 5760#if defined(CONFIG_BNXT_SRIOV)
bdd4347b 5761 else
c0c050c5 5762 bp->vf.max_irqs = max_irqs;
379a80a1 5763#endif
c0c050c5
MC
5764 bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
5765 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
5766 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
5767 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
5768 bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
5769 bp->num_stat_ctxs = bp->cp_nr_rings;
5770
2bcfa6f6
MC
5771 if (BNXT_PF(bp)) {
5772 dev->hw_features |= NETIF_F_NTUPLE;
5773 if (bnxt_rfs_capable(bp)) {
5774 bp->flags |= BNXT_FLAG_RFS;
5775 dev->features |= NETIF_F_NTUPLE;
5776 }
5777 }
5778
c0c050c5
MC
5779 if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
5780 bp->flags |= BNXT_FLAG_STRIP_VLAN;
5781
5782 rc = bnxt_probe_phy(bp);
5783 if (rc)
5784 goto init_err;
5785
5786 rc = register_netdev(dev);
5787 if (rc)
5788 goto init_err;
5789
5790 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
5791 board_info[ent->driver_data].name,
5792 (long)pci_resource_start(pdev, 0), dev->dev_addr);
5793
5794 return 0;
5795
5796init_err:
5797 pci_iounmap(pdev, bp->bar0);
5798 pci_release_regions(pdev);
5799 pci_disable_device(pdev);
5800
5801init_err_free:
5802 free_netdev(dev);
5803 return rc;
5804}
5805
5806static struct pci_driver bnxt_pci_driver = {
5807 .name = DRV_MODULE_NAME,
5808 .id_table = bnxt_pci_tbl,
5809 .probe = bnxt_init_one,
5810 .remove = bnxt_remove_one,
5811#if defined(CONFIG_BNXT_SRIOV)
5812 .sriov_configure = bnxt_sriov_configure,
5813#endif
5814};
5815
5816module_pci_driver(bnxt_pci_driver);