/*
 * Copyright (C) 2015-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * Netronome network device driver: Common functions between PF and VF
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 *          Brad Petrus <brad.petrus@netronome.com>
 *          Chris Telfer <chris.telfer@netronome.com>
 */
#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/page_ref.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/msi.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/ktime.h>

#include <net/pkt_cls.h>
#include <net/vxlan.h>

#include "nfpcore/nfp_nsp.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
/**
 * nfp_net_get_fw_version() - Read and parse the FW version
 * @fw_ver:   Output fw_version structure to read to
 * @ctrl_bar: Mapped address of the control BAR
 */
void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
			    void __iomem *ctrl_bar)
{
	u32 reg;

	reg = readl(ctrl_bar + NFP_NET_CFG_VERSION);
	put_unaligned_le32(reg, fw_ver);
}
static dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
{
	return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
				    dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
				    dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
}

static void
nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
{
	dma_sync_single_for_device(dp->dev, dma_addr,
				   dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
				   dp->rx_dma_dir);
}

static void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr)
{
	dma_unmap_single_attrs(dp->dev, dma_addr,
			       dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
			       dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
}

static void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp, dma_addr_t dma_addr,
				     unsigned int len)
{
	dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
				len, dp->rx_dma_dir);
}
/* Firmware reconfig
 *
 * Firmware reconfig may take a while so we have two versions of it -
 * synchronous and asynchronous (posted).  All synchronous callers are holding
 * RTNL so we don't have to worry about serializing them.
 */
static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
{
	nn_writel(nn, NFP_NET_CFG_UPDATE, update);
	/* ensure update is written before pinging HW */
	nn_pci_flush(nn);
	nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
}

/* Pass 0 as update to run posted reconfigs. */
static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
{
	update |= nn->reconfig_posted;
	nn->reconfig_posted = 0;

	nfp_net_reconfig_start(nn, update);

	nn->reconfig_timer_active = true;
	mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);
}
static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
{
	u32 reg;

	reg = nn_readl(nn, NFP_NET_CFG_UPDATE);
	if (reg == 0)
		return true;
	if (reg & NFP_NET_CFG_UPDATE_ERR) {
		nn_err(nn, "Reconfig error: 0x%08x\n", reg);
		return true;
	} else if (last_check) {
		nn_err(nn, "Reconfig timeout: 0x%08x\n", reg);
		return true;
	}

	return false;
}

static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
{
	bool timed_out = false;

	/* Poll update field, waiting for NFP to ack the config */
	while (!nfp_net_reconfig_check_done(nn, timed_out))
		timed_out = time_is_before_eq_jiffies(deadline);

	if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
		return -EIO;

	return timed_out ? -EIO : 0;
}
static void nfp_net_reconfig_timer(unsigned long data)
{
	struct nfp_net *nn = (void *)data;

	spin_lock_bh(&nn->reconfig_lock);

	nn->reconfig_timer_active = false;

	/* If sync caller is present it will take over from us */
	if (nn->reconfig_sync_present)
		goto done;

	/* Read reconfig status and report errors */
	nfp_net_reconfig_check_done(nn, true);

	if (nn->reconfig_posted)
		nfp_net_reconfig_start_async(nn, 0);
done:
	spin_unlock_bh(&nn->reconfig_lock);
}
/**
 * nfp_net_reconfig_post() - Post async reconfig request
 * @nn:      NFP Net device to reconfigure
 * @update:  The value for the update field in the BAR config
 *
 * Record FW reconfiguration request.  Reconfiguration will be kicked off
 * whenever reconfiguration machinery is idle.  Multiple requests can be
 * merged together.
 */
static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
{
	spin_lock_bh(&nn->reconfig_lock);

	/* Sync caller will kick off async reconf when it's done, just post */
	if (nn->reconfig_sync_present) {
		nn->reconfig_posted |= update;
		goto done;
	}

	/* Opportunistically check if the previous command is done */
	if (!nn->reconfig_timer_active ||
	    nfp_net_reconfig_check_done(nn, false))
		nfp_net_reconfig_start_async(nn, update);
	else
		nn->reconfig_posted |= update;
done:
	spin_unlock_bh(&nn->reconfig_lock);
}
/**
 * nfp_net_reconfig() - Reconfigure the firmware
 * @nn:      NFP Net device to reconfigure
 * @update:  The value for the update field in the BAR config
 *
 * Write the update word to the BAR and ping the reconfig queue.  Then poll
 * until the firmware has acknowledged the update by zeroing the update word.
 *
 * Return: Negative errno on error, 0 on success
 */
int nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
	bool cancelled_timer = false;
	u32 pre_posted_requests;
	int ret;

	spin_lock_bh(&nn->reconfig_lock);

	nn->reconfig_sync_present = true;

	if (nn->reconfig_timer_active) {
		del_timer(&nn->reconfig_timer);
		nn->reconfig_timer_active = false;
		cancelled_timer = true;
	}
	pre_posted_requests = nn->reconfig_posted;
	nn->reconfig_posted = 0;

	spin_unlock_bh(&nn->reconfig_lock);

	if (cancelled_timer)
		nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);

	/* Run the posted reconfigs which were issued before we started */
	if (pre_posted_requests) {
		nfp_net_reconfig_start(nn, pre_posted_requests);
		nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
	}

	nfp_net_reconfig_start(nn, update);
	ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);

	spin_lock_bh(&nn->reconfig_lock);

	if (nn->reconfig_posted)
		nfp_net_reconfig_start_async(nn, 0);

	nn->reconfig_sync_present = false;

	spin_unlock_bh(&nn->reconfig_lock);

	return ret;
}
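
/* Illustrative sketch (not part of the driver): a typical synchronous caller
 * ORs together the NFP_NET_CFG_UPDATE_* bits it needs and lets
 * nfp_net_reconfig() handle posting and polling, e.g.:
 *
 *	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
 *	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN |
 *				   NFP_NET_CFG_UPDATE_RING);
 *	if (err)
 *		nn_err(nn, "reconfig failed: %d\n", err);
 *
 * Asynchronous users call nfp_net_reconfig_post() instead and never block.
 */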
/* Interrupt configuration and handling
 */

/**
 * nfp_net_irq_unmask() - Unmask automasked interrupt
 * @nn:       NFP Network structure
 * @entry_nr: MSI-X table entry
 *
 * Clear the ICR for the IRQ entry.
 */
static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
{
	nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
	nn_pci_flush(nn);
}
/**
 * nfp_net_irqs_alloc() - allocates MSI-X irqs
 * @pdev:        PCI device structure
 * @irq_entries: Array to be initialized and used to hold the irq entries
 * @min_irqs:    Minimal acceptable number of interrupts
 * @wanted_irqs: Target number of interrupts to allocate
 *
 * Return: Number of irqs obtained or 0 on error.
 */
unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
		   unsigned int min_irqs, unsigned int wanted_irqs)
{
	unsigned int i;
	int got_irqs;

	for (i = 0; i < wanted_irqs; i++)
		irq_entries[i].entry = i;

	got_irqs = pci_enable_msix_range(pdev, irq_entries,
					 min_irqs, wanted_irqs);
	if (got_irqs < 0) {
		dev_err(&pdev->dev, "Failed to enable %d-%d MSI-X (err=%d)\n",
			min_irqs, wanted_irqs, got_irqs);
		return 0;
	}

	if (got_irqs < wanted_irqs)
		dev_warn(&pdev->dev, "Unable to allocate %d IRQs got only %d\n",
			 wanted_irqs, got_irqs);

	return got_irqs;
}
/**
 * nfp_net_irqs_assign() - Assign interrupts allocated externally to netdev
 * @nn:		 NFP Network structure
 * @irq_entries: Table of allocated interrupts
 * @n:		 Size of @irq_entries (number of entries to grab)
 *
 * After interrupts are allocated with nfp_net_irqs_alloc() this function
 * should be called to assign them to a specific netdev (port).
 */
void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
		    unsigned int n)
{
	struct nfp_net_dp *dp = &nn->dp;

	nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
	dp->num_r_vecs = nn->max_r_vecs;

	memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n);

	if (dp->num_rx_rings > dp->num_r_vecs ||
	    dp->num_tx_rings > dp->num_r_vecs)
		dev_warn(nn->dp.dev, "More rings (%d,%d) than vectors (%d).\n",
			 dp->num_rx_rings, dp->num_tx_rings,
			 dp->num_r_vecs);

	dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings);
	dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings);
	dp->num_stack_tx_rings = dp->num_tx_rings;
}
/**
 * nfp_net_irqs_disable() - Disable interrupts
 * @pdev:        PCI device structure
 *
 * Undoes what @nfp_net_irqs_alloc() does.
 */
void nfp_net_irqs_disable(struct pci_dev *pdev)
{
	pci_disable_msix(pdev);
}
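
/* Illustrative sketch (not part of the driver): the expected call order for
 * the three helpers above, assuming a caller that wants one vector per ring
 * plus the non-queue (LSC/EXN) vectors.  The example_* name is hypothetical.
 */
static int __maybe_unused example_irq_setup(struct nfp_net *nn,
					    struct pci_dev *pdev,
					    struct msix_entry *entries,
					    unsigned int num_rings)
{
	unsigned int wanted = NFP_NET_NON_Q_VECTORS + num_rings;
	unsigned int got;

	/* 1) allocate MSI-X vectors, 2) hand them to the netdev */
	got = nfp_net_irqs_alloc(pdev, entries, NFP_NET_NON_Q_VECTORS + 1,
				 wanted);
	if (!got)
		return -ENOMEM;
	nfp_net_irqs_assign(nn, entries, got);
	/* ... open the device and run traffic; on teardown call
	 * nfp_net_irqs_disable(pdev);
	 */
	return 0;
}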
/**
 * nfp_net_irq_rxtx() - Interrupt service routine for RX/TX rings.
 * @irq:      Interrupt
 * @data:     Opaque data structure
 *
 * Return: Indicate if the interrupt has been handled.
 */
static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
{
	struct nfp_net_r_vector *r_vec = data;

	napi_schedule_irqoff(&r_vec->napi);

	/* The FW auto-masks any interrupt, either via the MASK bit in
	 * the MSI-X table or via the per entry ICR field.  So there
	 * is no need to disable interrupts here.
	 */
	return IRQ_HANDLED;
}
bool nfp_net_link_changed_read_clear(struct nfp_net *nn)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&nn->link_status_lock, flags);
	ret = nn->link_changed;
	nn->link_changed = false;
	spin_unlock_irqrestore(&nn->link_status_lock, flags);

	return ret;
}
/**
 * nfp_net_read_link_status() - Reread link status from control BAR
 * @nn:       NFP Network structure
 */
static void nfp_net_read_link_status(struct nfp_net *nn)
{
	unsigned long flags;
	bool link_up;
	u32 sts;

	spin_lock_irqsave(&nn->link_status_lock, flags);

	sts = nn_readl(nn, NFP_NET_CFG_STS);
	link_up = !!(sts & NFP_NET_CFG_STS_LINK);

	if (nn->link_up == link_up)
		goto out;

	nn->link_up = link_up;
	nn->link_changed = true;

	if (nn->link_up) {
		netif_carrier_on(nn->dp.netdev);
		netdev_info(nn->dp.netdev, "NIC Link is Up\n");
	} else {
		netif_carrier_off(nn->dp.netdev);
		netdev_info(nn->dp.netdev, "NIC Link is Down\n");
	}
out:
	spin_unlock_irqrestore(&nn->link_status_lock, flags);
}
/**
 * nfp_net_irq_lsc() - Interrupt service routine for link state changes
 * @irq:      Interrupt
 * @data:     Opaque data structure
 *
 * Return: Indicate if the interrupt has been handled.
 */
static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
{
	struct nfp_net *nn = data;
	struct msix_entry *entry;

	entry = &nn->irq_entries[NFP_NET_IRQ_LSC_IDX];

	nfp_net_read_link_status(nn);

	nfp_net_irq_unmask(nn, entry->entry);

	return IRQ_HANDLED;
}

/**
 * nfp_net_irq_exn() - Interrupt service routine for exceptions
 * @irq:      Interrupt
 * @data:     Opaque data structure
 *
 * Return: Indicate if the interrupt has been handled.
 */
static irqreturn_t nfp_net_irq_exn(int irq, void *data)
{
	struct nfp_net *nn = data;

	nn_err(nn, "%s: UNIMPLEMENTED.\n", __func__);
	/* XXX TO BE IMPLEMENTED */
	return IRQ_HANDLED;
}
/**
 * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
 * @tx_ring:  TX ring structure
 * @r_vec:    IRQ vector servicing this ring
 * @idx:      Ring index
 */
static void
nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
		     struct nfp_net_r_vector *r_vec, unsigned int idx)
{
	struct nfp_net *nn = r_vec->nfp_net;

	tx_ring->idx = idx;
	tx_ring->r_vec = r_vec;

	tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
	tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
}

/**
 * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
 * @rx_ring:  RX ring structure
 * @r_vec:    IRQ vector servicing this ring
 * @idx:      Ring index
 */
static void
nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
		     struct nfp_net_r_vector *r_vec, unsigned int idx)
{
	struct nfp_net *nn = r_vec->nfp_net;

	rx_ring->idx = idx;
	rx_ring->r_vec = r_vec;

	rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
	rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
}
/**
 * nfp_net_vecs_init() - Assign IRQs and setup rvecs.
 * @netdev:   netdev structure
 */
static void nfp_net_vecs_init(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_net_r_vector *r_vec;
	int r;

	nn->lsc_handler = nfp_net_irq_lsc;
	nn->exn_handler = nfp_net_irq_exn;

	for (r = 0; r < nn->max_r_vecs; r++) {
		struct msix_entry *entry;

		entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];

		r_vec = &nn->r_vecs[r];
		r_vec->nfp_net = nn;
		r_vec->handler = nfp_net_irq_rxtx;
		r_vec->irq_entry = entry->entry;
		r_vec->irq_vector = entry->vector;

		cpumask_set_cpu(r, &r_vec->affinity_mask);
	}
}
/**
 * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
 * @nn:		NFP Network structure
 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
 * @format:	printf-style format to construct the interrupt name
 * @name:	Pointer to allocated space for interrupt name
 * @name_sz:	Size of space for interrupt name
 * @vector_idx:	Index of MSI-X vector used for this interrupt
 * @handler:	IRQ handler to register for this interrupt
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int
nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
			const char *format, char *name, size_t name_sz,
			unsigned int vector_idx, irq_handler_t handler)
{
	struct msix_entry *entry;
	int err;

	entry = &nn->irq_entries[vector_idx];

	snprintf(name, name_sz, format, netdev_name(nn->dp.netdev));
	err = request_irq(entry->vector, handler, 0, name, nn);
	if (err) {
		nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
		       entry->vector, err);
		return err;
	}
	nn_writeb(nn, ctrl_offset, entry->entry);

	return 0;
}

/**
 * nfp_net_aux_irq_free() - Free an auxiliary interrupt (LSC or EXN)
 * @nn:		NFP Network structure
 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
 * @vector_idx:	Index of MSI-X vector used for this interrupt
 */
static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
				 unsigned int vector_idx)
{
	nn_writeb(nn, ctrl_offset, 0xff);
	free_irq(nn->irq_entries[vector_idx].vector, nn);
}
/* Transmit
 *
 * One queue controller peripheral queue is used for transmit.  The
 * driver en-queues packets for transmit by advancing the write
 * pointer.  The device indicates that packets have been transmitted by
 * advancing the read pointer.  The driver maintains a local copy of
 * the read and write pointer in @struct nfp_net_tx_ring.  The driver
 * keeps @wr_p in sync with the queue controller write pointer and can
 * determine how many packets have been transmitted by comparing its
 * copy of the read pointer @rd_p with the read pointer maintained by
 * the queue controller peripheral.
 */
/**
 * nfp_net_tx_full() - Check if the TX ring is full
 * @tx_ring: TX ring to check
 * @dcnt:    Number of descriptors that need to be enqueued (must be >= 1)
 *
 * This function checks, based on the *host copy* of read/write
 * pointer if a given TX ring is full.  The real TX queue may have
 * some newly made available slots.
 *
 * Return: True if the ring is full.
 */
static int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
{
	return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
}
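
/* Illustrative sketch (not part of the driver): @wr_p and @rd_p are
 * free-running counters, so unsigned subtraction stays correct even after
 * they wrap.  The helper below is hypothetical and only restates the
 * occupancy arithmetic that nfp_net_tx_full() relies on.
 */
static inline u32 __maybe_unused
example_tx_ring_used(const struct nfp_net_tx_ring *tx_ring)
{
	/* e.g. wr_p == 10, rd_p == 4, cnt == 4096 -> 6 descriptors in use */
	return tx_ring->wr_p - tx_ring->rd_p;
}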
/* Wrappers for deciding when to stop and restart TX queues */
static int nfp_net_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
{
	return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
}

static int nfp_net_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
{
	return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
}

/**
 * nfp_net_tx_ring_stop() - stop tx ring
 * @nd_q:    netdev queue
 * @tx_ring: driver tx queue structure
 *
 * Safely stop TX ring.  Remember that while we are running .start_xmit()
 * someone else may be cleaning the TX ring completions so we need to be
 * extra careful here.
 */
static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
				 struct nfp_net_tx_ring *tx_ring)
{
	netif_tx_stop_queue(nd_q);

	/* We can race with the TX completion out of NAPI so recheck */
	smp_mb();
	if (unlikely(nfp_net_tx_ring_should_wake(tx_ring)))
		netif_tx_start_queue(nd_q);
}
/**
 * nfp_net_tx_tso() - Set up Tx descriptor for LSO
 * @r_vec: per-ring structure
 * @txbuf: Pointer to driver soft TX descriptor
 * @txd: Pointer to HW TX descriptor
 * @skb: Pointer to SKB
 *
 * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
 */
static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec,
			   struct nfp_net_tx_buf *txbuf,
			   struct nfp_net_tx_desc *txd, struct sk_buff *skb)
{
	u32 hdrlen;
	u16 mss;

	if (!skb_is_gso(skb))
		return;

	if (!skb->encapsulation)
		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
	else
		hdrlen = skb_inner_transport_header(skb) - skb->data +
			inner_tcp_hdrlen(skb);

	txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
	txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);

	mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
	txd->l4_offset = hdrlen;
	txd->mss = cpu_to_le16(mss);
	txd->flags |= PCIE_DESC_TX_LSO;

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_lso++;
	u64_stats_update_end(&r_vec->tx_sync);
}
/**
 * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor
 * @dp:  NFP Net data path struct
 * @r_vec: per-ring structure
 * @txbuf: Pointer to driver soft TX descriptor
 * @txd: Pointer to TX descriptor
 * @skb: Pointer to SKB
 *
 * This function sets the TX checksum flags in the TX descriptor based
 * on the configuration and the protocol of the packet to be transmitted.
 */
static void nfp_net_tx_csum(struct nfp_net_dp *dp,
			    struct nfp_net_r_vector *r_vec,
			    struct nfp_net_tx_buf *txbuf,
			    struct nfp_net_tx_desc *txd, struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct iphdr *iph;
	u8 l4_hdr;

	if (!(dp->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
		return;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return;

	txd->flags |= PCIE_DESC_TX_CSUM;
	if (skb->encapsulation)
		txd->flags |= PCIE_DESC_TX_ENCAP;

	iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
	ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);

	if (iph->version == 4) {
		txd->flags |= PCIE_DESC_TX_IP4_CSUM;
		l4_hdr = iph->protocol;
	} else if (ipv6h->version == 6) {
		l4_hdr = ipv6h->nexthdr;
	} else {
		nn_dp_warn(dp, "partial checksum but ipv=%x!\n", iph->version);
		return;
	}

	switch (l4_hdr) {
	case IPPROTO_TCP:
		txd->flags |= PCIE_DESC_TX_TCP_CSUM;
		break;
	case IPPROTO_UDP:
		txd->flags |= PCIE_DESC_TX_UDP_CSUM;
		break;
	default:
		nn_dp_warn(dp, "partial checksum but l4 proto=%x!\n", l4_hdr);
		return;
	}

	u64_stats_update_begin(&r_vec->tx_sync);
	if (skb->encapsulation)
		r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
	else
		r_vec->hw_csum_tx += txbuf->pkt_cnt;
	u64_stats_update_end(&r_vec->tx_sync);
}

static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
{
	/* force memory write before we let HW know */
	wmb();
	nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
	tx_ring->wr_ptr_add = 0;
}
/**
 * nfp_net_tx() - Main transmit entry point
 * @skb:    SKB to transmit
 * @netdev: netdev structure
 *
 * Return: NETDEV_TX_OK on success.
 */
static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	const struct skb_frag_struct *frag;
	struct nfp_net_tx_desc *txd, txdg;
	struct nfp_net_tx_ring *tx_ring;
	struct nfp_net_r_vector *r_vec;
	struct nfp_net_tx_buf *txbuf;
	struct netdev_queue *nd_q;
	struct nfp_net_dp *dp;
	dma_addr_t dma_addr;
	unsigned int fsize;
	int f, nr_frags;
	int wr_idx;
	u16 qidx;

	dp = &nn->dp;
	qidx = skb_get_queue_mapping(skb);
	tx_ring = &dp->tx_rings[qidx];
	r_vec = tx_ring->r_vec;
	nd_q = netdev_get_tx_queue(dp->netdev, qidx);

	nr_frags = skb_shinfo(skb)->nr_frags;

	if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
		nn_dp_warn(dp, "TX ring %d busy. wrp=%u rdp=%u\n",
			   qidx, tx_ring->wr_p, tx_ring->rd_p);
		netif_tx_stop_queue(nd_q);
		nfp_net_tx_xmit_more_flush(tx_ring);
		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tx_busy++;
		u64_stats_update_end(&r_vec->tx_sync);
		return NETDEV_TX_BUSY;
	}

	/* Start with the head skbuf */
	dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dp->dev, dma_addr))
		goto err_free;

	wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);

	/* Stash the soft descriptor of the head then initialize it */
	txbuf = &tx_ring->txbufs[wr_idx];
	txbuf->skb = skb;
	txbuf->dma_addr = dma_addr;
	txbuf->fidx = -1;
	txbuf->pkt_cnt = 1;
	txbuf->real_len = skb->len;

	/* Build TX descriptor */
	txd = &tx_ring->txds[wr_idx];
	txd->offset_eop = (nr_frags == 0) ? PCIE_DESC_TX_EOP : 0;
	txd->dma_len = cpu_to_le16(skb_headlen(skb));
	nfp_desc_set_dma_addr(txd, dma_addr);
	txd->data_len = cpu_to_le16(skb->len);

	txd->flags = 0;
	txd->mss = 0;
	txd->l4_offset = 0;

	nfp_net_tx_tso(r_vec, txbuf, txd, skb);

	nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb);

	if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
		txd->flags |= PCIE_DESC_TX_VLAN;
		txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
	}

	/* Gather DMA */
	if (nr_frags > 0) {
		/* all descs must match except for in addr, length and eop */
		txdg = *txd;

		for (f = 0; f < nr_frags; f++) {
			frag = &skb_shinfo(skb)->frags[f];
			fsize = skb_frag_size(frag);

			dma_addr = skb_frag_dma_map(dp->dev, frag, 0,
						    fsize, DMA_TO_DEVICE);
			if (dma_mapping_error(dp->dev, dma_addr))
				goto err_unmap;

			wr_idx = (wr_idx + 1) & (tx_ring->cnt - 1);
			tx_ring->txbufs[wr_idx].skb = skb;
			tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
			tx_ring->txbufs[wr_idx].fidx = f;

			txd = &tx_ring->txds[wr_idx];
			*txd = txdg;
			txd->dma_len = cpu_to_le16(fsize);
			nfp_desc_set_dma_addr(txd, dma_addr);
			txd->offset_eop =
				(f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0;
		}

		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tx_gather++;
		u64_stats_update_end(&r_vec->tx_sync);
	}

	netdev_tx_sent_queue(nd_q, txbuf->real_len);

	tx_ring->wr_p += nr_frags + 1;
	if (nfp_net_tx_ring_should_stop(tx_ring))
		nfp_net_tx_ring_stop(nd_q, tx_ring);

	tx_ring->wr_ptr_add += nr_frags + 1;
	if (!skb->xmit_more || netif_xmit_stopped(nd_q))
		nfp_net_tx_xmit_more_flush(tx_ring);

	skb_tx_timestamp(skb);

	return NETDEV_TX_OK;

err_unmap:
	while (--f >= 0) {
		frag = &skb_shinfo(skb)->frags[f];
		dma_unmap_page(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
			       skb_frag_size(frag), DMA_TO_DEVICE);
		tx_ring->txbufs[wr_idx].skb = NULL;
		tx_ring->txbufs[wr_idx].dma_addr = 0;
		tx_ring->txbufs[wr_idx].fidx = -2;
		wr_idx = wr_idx - 1;
		if (wr_idx < 0)
			wr_idx += tx_ring->cnt;
	}
	dma_unmap_single(dp->dev, tx_ring->txbufs[wr_idx].dma_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);
	tx_ring->txbufs[wr_idx].skb = NULL;
	tx_ring->txbufs[wr_idx].dma_addr = 0;
	tx_ring->txbufs[wr_idx].fidx = -2;
err_free:
	nn_dp_warn(dp, "Failed to map DMA TX buffer\n");
	nfp_net_tx_xmit_more_flush(tx_ring);
	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_errors++;
	u64_stats_update_end(&r_vec->tx_sync);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
/**
 * nfp_net_tx_complete() - Handle completed TX packets
 * @tx_ring:   TX ring structure
 */
static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	const struct skb_frag_struct *frag;
	struct netdev_queue *nd_q;
	u32 done_pkts = 0, done_bytes = 0;
	struct sk_buff *skb;
	int todo, nr_frags;
	u32 qcp_rd_p;
	int fidx;
	int idx;

	/* Work out how many descriptors have been transmitted */
	qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);

	if (qcp_rd_p == tx_ring->qcp_rd_p)
		return;

	if (qcp_rd_p > tx_ring->qcp_rd_p)
		todo = qcp_rd_p - tx_ring->qcp_rd_p;
	else
		todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p;

	while (todo--) {
		idx = tx_ring->rd_p & (tx_ring->cnt - 1);
		tx_ring->rd_p++;

		skb = tx_ring->txbufs[idx].skb;
		if (!skb)
			continue;

		nr_frags = skb_shinfo(skb)->nr_frags;
		fidx = tx_ring->txbufs[idx].fidx;

		if (fidx == -1) {
			/* unmap head */
			dma_unmap_single(dp->dev, tx_ring->txbufs[idx].dma_addr,
					 skb_headlen(skb), DMA_TO_DEVICE);

			done_pkts += tx_ring->txbufs[idx].pkt_cnt;
			done_bytes += tx_ring->txbufs[idx].real_len;
		} else {
			/* unmap fragment */
			frag = &skb_shinfo(skb)->frags[fidx];
			dma_unmap_page(dp->dev, tx_ring->txbufs[idx].dma_addr,
				       skb_frag_size(frag), DMA_TO_DEVICE);
		}

		/* check for last gather fragment */
		if (fidx == nr_frags - 1)
			dev_kfree_skb_any(skb);

		tx_ring->txbufs[idx].dma_addr = 0;
		tx_ring->txbufs[idx].skb = NULL;
		tx_ring->txbufs[idx].fidx = -2;
	}

	tx_ring->qcp_rd_p = qcp_rd_p;

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_bytes += done_bytes;
	r_vec->tx_pkts += done_pkts;
	u64_stats_update_end(&r_vec->tx_sync);

	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
	netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
	if (nfp_net_tx_ring_should_wake(tx_ring)) {
		/* Make sure TX thread will see updated tx_ring->rd_p */
		smp_mb();

		if (unlikely(netif_tx_queue_stopped(nd_q)))
			netif_tx_wake_queue(nd_q);
	}

	WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
		  "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
		  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
}
static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	u32 done_pkts = 0, done_bytes = 0;
	int idx, todo;
	u32 qcp_rd_p;

	/* Work out how many descriptors have been transmitted */
	qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);

	if (qcp_rd_p == tx_ring->qcp_rd_p)
		return;

	if (qcp_rd_p > tx_ring->qcp_rd_p)
		todo = qcp_rd_p - tx_ring->qcp_rd_p;
	else
		todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p;

	while (todo--) {
		idx = tx_ring->rd_p & (tx_ring->cnt - 1);
		tx_ring->rd_p++;

		if (!tx_ring->txbufs[idx].frag)
			continue;

		nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[idx].dma_addr);
		__free_page(virt_to_page(tx_ring->txbufs[idx].frag));

		done_pkts++;
		done_bytes += tx_ring->txbufs[idx].real_len;

		tx_ring->txbufs[idx].dma_addr = 0;
		tx_ring->txbufs[idx].frag = NULL;
		tx_ring->txbufs[idx].fidx = -2;
	}

	tx_ring->qcp_rd_p = qcp_rd_p;

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_bytes += done_bytes;
	r_vec->tx_pkts += done_pkts;
	u64_stats_update_end(&r_vec->tx_sync);

	WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
		  "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
		  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
}
/**
 * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
 * @dp:		NFP Net data path struct
 * @tx_ring:	TX ring structure
 *
 * Assumes that the device is stopped
 */
static void
nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	const struct skb_frag_struct *frag;
	struct netdev_queue *nd_q;

	while (tx_ring->rd_p != tx_ring->wr_p) {
		struct nfp_net_tx_buf *tx_buf;
		int idx;

		idx = tx_ring->rd_p & (tx_ring->cnt - 1);
		tx_buf = &tx_ring->txbufs[idx];

		if (tx_ring == r_vec->xdp_ring) {
			nfp_net_dma_unmap_rx(dp, tx_buf->dma_addr);
			__free_page(virt_to_page(tx_ring->txbufs[idx].frag));
		} else {
			struct sk_buff *skb = tx_ring->txbufs[idx].skb;
			int nr_frags = skb_shinfo(skb)->nr_frags;

			if (tx_buf->fidx == -1) {
				/* unmap head */
				dma_unmap_single(dp->dev, tx_buf->dma_addr,
						 skb_headlen(skb),
						 DMA_TO_DEVICE);
			} else {
				/* unmap fragment */
				frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
				dma_unmap_page(dp->dev, tx_buf->dma_addr,
					       skb_frag_size(frag),
					       DMA_TO_DEVICE);
			}

			/* check for last gather fragment */
			if (tx_buf->fidx == nr_frags - 1)
				dev_kfree_skb_any(skb);
		}

		tx_buf->dma_addr = 0;
		tx_buf->skb = NULL;
		tx_buf->fidx = -2;

		tx_ring->qcp_rd_p++;
		tx_ring->rd_p++;
	}

	memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt);
	tx_ring->wr_p = 0;
	tx_ring->rd_p = 0;
	tx_ring->qcp_rd_p = 0;
	tx_ring->wr_ptr_add = 0;

	if (tx_ring == r_vec->xdp_ring)
		return;

	nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
	netdev_tx_reset_queue(nd_q);
}
static void nfp_net_tx_timeout(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int i;

	for (i = 0; i < nn->dp.netdev->real_num_tx_queues; i++) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
			continue;
		nn_warn(nn, "TX timeout on ring: %d\n", i);
	}
	nn_warn(nn, "TX watchdog timeout\n");
}
/* Receive processing
 */
static unsigned int
nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
{
	unsigned int fl_bufsz;

	fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
	fl_bufsz += dp->rx_dma_off;
	if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
		fl_bufsz += NFP_NET_MAX_PREPEND;
	else
		fl_bufsz += dp->rx_offset;
	fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;

	fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
	fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	return fl_bufsz;
}
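
/* Worked example (illustrative, not part of the driver): with a 1500 byte
 * MTU, no extra DMA offset and a fixed rx_offset, the freelist buffer ends
 * up roughly as
 *
 *	fl_bufsz = NFP_NET_RX_BUF_HEADROOM + rx_offset
 *		   + ETH_HLEN + 2 * VLAN_HLEN + 1500;
 *	fl_bufsz = SKB_DATA_ALIGN(fl_bufsz)
 *		   + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *
 * i.e. headroom + prepend + L2 overhead + MTU, rounded up for the
 * skb_shared_info that build_skb() places at the end of the buffer.
 */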
static void
nfp_net_free_frag(void *frag, bool xdp)
{
	if (!xdp)
		skb_free_frag(frag);
	else
		__free_page(virt_to_page(frag));
}

/**
 * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
 * @dp:		NFP Net data path struct
 * @rx_ring:	RX ring structure of the skb
 * @dma_addr:	Pointer to storage for DMA address (output param)
 *
 * This function will allocate a new page frag and map it for DMA.
 *
 * Return: allocated page frag or NULL on failure.
 */
static void *
nfp_net_rx_alloc_one(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
		     dma_addr_t *dma_addr)
{
	void *frag;

	if (!dp->xdp_prog)
		frag = netdev_alloc_frag(dp->fl_bufsz);
	else
		frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD));
	if (!frag) {
		nn_dp_warn(dp, "Failed to alloc receive page frag\n");
		return NULL;
	}

	*dma_addr = nfp_net_dma_map_rx(dp, frag);
	if (dma_mapping_error(dp->dev, *dma_addr)) {
		nfp_net_free_frag(frag, dp->xdp_prog);
		nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
		return NULL;
	}

	return frag;
}
static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
	void *frag;

	if (!dp->xdp_prog)
		frag = napi_alloc_frag(dp->fl_bufsz);
	else
		frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD));
	if (!frag) {
		nn_dp_warn(dp, "Failed to alloc receive page frag\n");
		return NULL;
	}

	*dma_addr = nfp_net_dma_map_rx(dp, frag);
	if (dma_mapping_error(dp->dev, *dma_addr)) {
		nfp_net_free_frag(frag, dp->xdp_prog);
		nn_dp_warn(dp, "Failed to map DMA RX buffer\n");
		return NULL;
	}

	return frag;
}
/**
 * nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings
 * @dp:		NFP Net data path struct
 * @rx_ring:	RX ring structure
 * @frag:	page fragment buffer
 * @dma_addr:	DMA address of skb mapping
 */
static void nfp_net_rx_give_one(const struct nfp_net_dp *dp,
				struct nfp_net_rx_ring *rx_ring,
				void *frag, dma_addr_t dma_addr)
{
	unsigned int wr_idx;

	wr_idx = rx_ring->wr_p & (rx_ring->cnt - 1);

	nfp_net_dma_sync_dev_rx(dp, dma_addr);

	/* Stash SKB and DMA address away */
	rx_ring->rxbufs[wr_idx].frag = frag;
	rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;

	/* Fill freelist descriptor */
	rx_ring->rxds[wr_idx].fld.reserved = 0;
	rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
	nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld,
			      dma_addr + dp->rx_dma_off);

	rx_ring->wr_p++;
	rx_ring->wr_ptr_add++;
	if (rx_ring->wr_ptr_add >= NFP_NET_FL_BATCH) {
		/* Update write pointer of the freelist queue. Make
		 * sure all writes are flushed before telling the hardware.
		 */
		wmb();
		nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, rx_ring->wr_ptr_add);
		rx_ring->wr_ptr_add = 0;
	}
}
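
/* Illustrative note (not part of the driver): freelist doorbell writes are
 * batched, so the device only sees new buffers once NFP_NET_FL_BATCH of them
 * have been queued.  A refill loop therefore just calls the helper repeatedly
 * and lets it decide when to ring the doorbell, e.g.:
 *
 *	for (i = 0; i < budget; i++)
 *		nfp_net_rx_give_one(dp, rx_ring, frags[i], dma_addrs[i]);
 */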
/**
 * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable
 * @rx_ring:	RX ring structure
 *
 * Warning: Do *not* call if ring buffers were never put on the FW freelist
 *	    (i.e. device was not enabled)!
 */
static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
{
	unsigned int wr_idx, last_idx;

	/* Move the empty entry to the end of the list */
	wr_idx = rx_ring->wr_p & (rx_ring->cnt - 1);
	last_idx = rx_ring->cnt - 1;
	rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
	rx_ring->rxbufs[wr_idx].frag = rx_ring->rxbufs[last_idx].frag;
	rx_ring->rxbufs[last_idx].dma_addr = 0;
	rx_ring->rxbufs[last_idx].frag = NULL;

	memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt);
	rx_ring->wr_p = 0;
	rx_ring->rd_p = 0;
	rx_ring->wr_ptr_add = 0;
}
/**
 * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
 * @dp:		NFP Net data path struct
 * @rx_ring:	RX ring to remove buffers from
 *
 * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
 * entries.  After device is disabled nfp_net_rx_ring_reset() must be called
 * to restore required ring geometry.
 */
static void
nfp_net_rx_ring_bufs_free(struct nfp_net_dp *dp,
			  struct nfp_net_rx_ring *rx_ring)
{
	unsigned int i;

	for (i = 0; i < rx_ring->cnt - 1; i++) {
		/* NULL skb can only happen when initial filling of the ring
		 * fails to allocate enough buffers and calls here to free
		 * already allocated ones.
		 */
		if (!rx_ring->rxbufs[i].frag)
			continue;

		nfp_net_dma_unmap_rx(dp, rx_ring->rxbufs[i].dma_addr);
		nfp_net_free_frag(rx_ring->rxbufs[i].frag, dp->xdp_prog);
		rx_ring->rxbufs[i].dma_addr = 0;
		rx_ring->rxbufs[i].frag = NULL;
	}
}

/**
 * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
 * @dp:		NFP Net data path struct
 * @rx_ring:	RX ring to fill with buffers
 */
static int
nfp_net_rx_ring_bufs_alloc(struct nfp_net_dp *dp,
			   struct nfp_net_rx_ring *rx_ring)
{
	struct nfp_net_rx_buf *rxbufs;
	unsigned int i;

	rxbufs = rx_ring->rxbufs;

	for (i = 0; i < rx_ring->cnt - 1; i++) {
		rxbufs[i].frag =
			nfp_net_rx_alloc_one(dp, rx_ring, &rxbufs[i].dma_addr);
		if (!rxbufs[i].frag) {
			nfp_net_rx_ring_bufs_free(dp, rx_ring);
			return -ENOMEM;
		}
	}

	return 0;
}
/**
 * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
 * @dp:	     NFP Net data path struct
 * @rx_ring: RX ring to fill
 */
static void
nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
			      struct nfp_net_rx_ring *rx_ring)
{
	unsigned int i;

	for (i = 0; i < rx_ring->cnt - 1; i++)
		nfp_net_rx_give_one(dp, rx_ring, rx_ring->rxbufs[i].frag,
				    rx_ring->rxbufs[i].dma_addr);
}

/**
 * nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors
 * @flags: RX descriptor flags field in CPU byte order
 */
static int nfp_net_rx_csum_has_errors(u16 flags)
{
	u16 csum_all_checked, csum_all_ok;

	csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
	csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;

	return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
}
/**
 * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags
 * @dp:    NFP Net data path struct
 * @r_vec: per-ring structure
 * @rxd:   Pointer to RX descriptor
 * @skb:   Pointer to SKB
 */
static void nfp_net_rx_csum(struct nfp_net_dp *dp,
			    struct nfp_net_r_vector *r_vec,
			    struct nfp_net_rx_desc *rxd, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	if (!(dp->netdev->features & NETIF_F_RXCSUM))
		return;

	if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_error++;
		u64_stats_update_end(&r_vec->rx_sync);
		return;
	}

	/* Assume that the firmware will never report inner CSUM_OK unless outer
	 * L4 headers were successfully parsed. FW will always report zero UDP
	 * checksum as CSUM_OK.
	 */
	if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
	    rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
		__skb_incr_checksum_unnecessary(skb);
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_ok++;
		u64_stats_update_end(&r_vec->rx_sync);
	}

	if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
	    rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
		__skb_incr_checksum_unnecessary(skb);
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_inner_ok++;
		u64_stats_update_end(&r_vec->rx_sync);
	}
}
static void
nfp_net_set_hash(struct net_device *netdev, struct nfp_meta_parsed *meta,
		 unsigned int type, __be32 *hash)
{
	if (!(netdev->features & NETIF_F_RXHASH))
		return;

	switch (type) {
	case NFP_NET_RSS_IPV4:
	case NFP_NET_RSS_IPV6:
	case NFP_NET_RSS_IPV6_EX:
		meta->hash_type = PKT_HASH_TYPE_L3;
		break;
	default:
		meta->hash_type = PKT_HASH_TYPE_L4;
		break;
	}

	meta->hash = get_unaligned_be32(hash);
}

static void
nfp_net_set_hash_desc(struct net_device *netdev, struct nfp_meta_parsed *meta,
		      void *data, struct nfp_net_rx_desc *rxd)
{
	struct nfp_net_rx_hash *rx_hash = data;

	if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
		return;

	nfp_net_set_hash(netdev, meta, get_unaligned_be32(&rx_hash->hash_type),
			 &rx_hash->hash);
}

static void *
nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
		   void *data, int meta_len)
{
	u32 meta_info;

	meta_info = get_unaligned_be32(data);
	data += 4;

	while (meta_info) {
		switch (meta_info & NFP_NET_META_FIELD_MASK) {
		case NFP_NET_META_HASH:
			meta_info >>= NFP_NET_META_FIELD_SIZE;
			nfp_net_set_hash(netdev, meta,
					 meta_info & NFP_NET_META_FIELD_MASK,
					 (__be32 *)data);
			data += 4;
			break;
		case NFP_NET_META_MARK:
			meta->mark = get_unaligned_be32(data);
			data += 4;
			break;
		default:
			return NULL;
		}

		meta_info >>= NFP_NET_META_FIELD_SIZE;
	}

	return data;
}
static void
nfp_net_rx_drop(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
		struct nfp_net_rx_ring *rx_ring, struct nfp_net_rx_buf *rxbuf,
		struct sk_buff *skb)
{
	u64_stats_update_begin(&r_vec->rx_sync);
	r_vec->rx_drops++;
	u64_stats_update_end(&r_vec->rx_sync);

	/* The skb is built around the frag, so freeing the skb would free the
	 * frag; to be able to reuse it we need an extra ref.
	 */
	if (skb && rxbuf && skb->head == rxbuf->frag)
		page_ref_inc(virt_to_head_page(rxbuf->frag));
	if (rxbuf)
		nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag, rxbuf->dma_addr);
	if (skb)
		dev_kfree_skb_any(skb);
}
static bool
nfp_net_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,
		   struct nfp_net_tx_ring *tx_ring,
		   struct nfp_net_rx_buf *rxbuf, unsigned int dma_off,
		   unsigned int pkt_len)
{
	struct nfp_net_tx_buf *txbuf;
	struct nfp_net_tx_desc *txd;
	dma_addr_t new_dma_addr;
	void *new_frag;
	int wr_idx;

	if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
		nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf, NULL);
		return false;
	}

	new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
	if (unlikely(!new_frag)) {
		nfp_net_rx_drop(dp, rx_ring->r_vec, rx_ring, rxbuf, NULL);
		return false;
	}
	nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);

	wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);

	/* Stash the soft descriptor of the head then initialize it */
	txbuf = &tx_ring->txbufs[wr_idx];
	txbuf->frag = rxbuf->frag;
	txbuf->dma_addr = rxbuf->dma_addr;
	txbuf->fidx = -1;
	txbuf->pkt_cnt = 1;
	txbuf->real_len = pkt_len;

	dma_sync_single_for_device(dp->dev, rxbuf->dma_addr + dma_off,
				   pkt_len, DMA_BIDIRECTIONAL);

	/* Build TX descriptor */
	txd = &tx_ring->txds[wr_idx];
	txd->offset_eop = PCIE_DESC_TX_EOP;
	txd->dma_len = cpu_to_le16(pkt_len);
	nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + dma_off);
	txd->data_len = cpu_to_le16(pkt_len);

	txd->flags = 0;
	txd->mss = 0;
	txd->l4_offset = 0;

	tx_ring->wr_p++;
	tx_ring->wr_ptr_add++;
	return true;
}
static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, void *hard_start,
			   unsigned int *off, unsigned int *len)
{
	struct xdp_buff xdp;
	void *orig_data;
	int ret;

	xdp.data_hard_start = hard_start;
	xdp.data = data + *off;
	xdp.data_end = data + *off + *len;

	orig_data = xdp.data;
	ret = bpf_prog_run_xdp(prog, &xdp);

	*len -= xdp.data - orig_data;
	*off += xdp.data - orig_data;

	return ret;
}
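
/* Illustrative note (not part of the driver): if an XDP program calls
 * bpf_xdp_adjust_head() to strip, say, a 14 byte Ethernet header, xdp.data
 * moves 14 bytes forward, so the helper above shrinks *len by 14 and grows
 * *off by 14.  The caller keeps using the same buffer at the new offset:
 *
 *	act = nfp_net_run_xdp(xdp_prog, rxbuf->frag, hard_start,
 *			      &pkt_off, &pkt_len);
 */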
/**
 * nfp_net_rx() - receive up to @budget packets on @rx_ring
 * @rx_ring:   RX ring to receive from
 * @budget:    NAPI budget
 *
 * Note, this function is separated out from the napi poll function to
 * more cleanly separate packet receive code from other bookkeeping
 * functions performed in the napi poll function.
 *
 * Return: Number of packets received.
 */
static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
	struct nfp_net_tx_ring *tx_ring;
	struct bpf_prog *xdp_prog;
	unsigned int true_bufsz;
	struct sk_buff *skb;
	int pkts_polled = 0;
	int idx;

	xdp_prog = READ_ONCE(dp->xdp_prog);
	true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
	tx_ring = r_vec->xdp_ring;

	while (pkts_polled < budget) {
		unsigned int meta_len, data_len, meta_off, pkt_len, pkt_off;
		struct nfp_net_rx_buf *rxbuf;
		struct nfp_net_rx_desc *rxd;
		struct nfp_meta_parsed meta;
		dma_addr_t new_dma_addr;
		void *new_frag;

		idx = rx_ring->rd_p & (rx_ring->cnt - 1);

		rxd = &rx_ring->rxds[idx];
		if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD))
			break;

		/* Memory barrier to ensure that we won't do other reads
		 * before the DD bit.
		 */
		dma_rmb();

		memset(&meta, 0, sizeof(meta));

		rx_ring->rd_p++;
		pkts_polled++;

		rxbuf =	&rx_ring->rxbufs[idx];
		/*  <-- [rx_offset] -->
		 *  ---------------------------------------------------------
		 * | [XX] |  metadata  |             packet           | XXXX |
		 *  ---------------------------------------------------------
		 *         <---------------- data_len --------------->
		 *
		 * The rx_offset is fixed for all packets, the meta_len can vary
		 * on a packet by packet basis. If rx_offset is set to zero
		 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
		 * buffer and is immediately followed by the packet (no [XX]).
		 */
		meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
		data_len = le16_to_cpu(rxd->rxd.data_len);
		pkt_len = data_len - meta_len;

		pkt_off = NFP_NET_RX_BUF_HEADROOM + dp->rx_dma_off;
		if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
			pkt_off += meta_len;
		else
			pkt_off += dp->rx_offset;
		meta_off = pkt_off - meta_len;

		/* Stats update */
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->rx_pkts++;
		r_vec->rx_bytes += pkt_len;
		u64_stats_update_end(&r_vec->rx_sync);

		if (unlikely(meta_len > NFP_NET_MAX_PREPEND ||
			     (dp->rx_offset && meta_len > dp->rx_offset))) {
			nn_dp_warn(dp, "oversized RX packet metadata %u\n",
				   meta_len);
			nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
			continue;
		}

		nfp_net_dma_sync_cpu_rx(dp, rxbuf->dma_addr + meta_off,
					data_len);

		if (!dp->chained_metadata_format) {
			nfp_net_set_hash_desc(dp->netdev, &meta,
					      rxbuf->frag + meta_off, rxd);
		} else if (meta_len) {
			void *end;

			end = nfp_net_parse_meta(dp->netdev, &meta,
						 rxbuf->frag + meta_off,
						 meta_len);
			if (unlikely(end != rxbuf->frag + pkt_off)) {
				nn_dp_warn(dp, "invalid RX packet metadata\n");
				nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
						NULL);
				continue;
			}
		}

		if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
				  dp->bpf_offload_xdp)) {
			unsigned int dma_off;
			void *hard_start;
			int act;

			hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;

			act = nfp_net_run_xdp(xdp_prog, rxbuf->frag, hard_start,
					      &pkt_off, &pkt_len);
			switch (act) {
			case XDP_PASS:
				break;
			case XDP_TX:
				dma_off = pkt_off - NFP_NET_RX_BUF_HEADROOM;
				if (unlikely(!nfp_net_tx_xdp_buf(dp, rx_ring,
								 tx_ring, rxbuf,
								 dma_off,
								 pkt_len)))
					trace_xdp_exception(dp->netdev,
							    xdp_prog, act);
				continue;
			default:
				bpf_warn_invalid_xdp_action(act);
			case XDP_ABORTED:
				trace_xdp_exception(dp->netdev, xdp_prog, act);
			case XDP_DROP:
				nfp_net_rx_give_one(dp, rx_ring, rxbuf->frag,
						    rxbuf->dma_addr);
				continue;
			}
		}

		skb = build_skb(rxbuf->frag, true_bufsz);
		if (unlikely(!skb)) {
			nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
			continue;
		}
		new_frag = nfp_net_napi_alloc_one(dp, &new_dma_addr);
		if (unlikely(!new_frag)) {
			nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
			continue;
		}

		nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);

		nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);

		skb_reserve(skb, pkt_off);
		skb_put(skb, pkt_len);

		skb->mark = meta.mark;
		skb_set_hash(skb, meta.hash, meta.hash_type);

		skb_record_rx_queue(skb, rx_ring->idx);
		skb->protocol = eth_type_trans(skb, dp->netdev);

		nfp_net_rx_csum(dp, r_vec, rxd, skb);

		if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(rxd->rxd.vlan));

		napi_gro_receive(&rx_ring->r_vec->napi, skb);
	}

	if (xdp_prog && tx_ring->wr_ptr_add)
		nfp_net_tx_xmit_more_flush(tx_ring);

	return pkts_polled;
}
/**
 * nfp_net_poll() - napi poll function
 * @napi:    NAPI structure
 * @budget:  NAPI budget
 *
 * Return: number of packets polled.
 */
static int nfp_net_poll(struct napi_struct *napi, int budget)
{
	struct nfp_net_r_vector *r_vec =
		container_of(napi, struct nfp_net_r_vector, napi);
	unsigned int pkts_polled = 0;

	if (r_vec->tx_ring)
		nfp_net_tx_complete(r_vec->tx_ring);
	if (r_vec->rx_ring) {
		pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
		if (r_vec->xdp_ring)
			nfp_net_xdp_complete(r_vec->xdp_ring);
	}

	if (pkts_polled < budget)
		if (napi_complete_done(napi, pkts_polled))
			nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);

	return pkts_polled;
}
/* Setup and Configuration
 */

/**
 * nfp_net_tx_ring_free() - Free resources allocated to a TX ring
 * @tx_ring:   TX ring to free
 */
static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;

	kfree(tx_ring->txbufs);

	if (tx_ring->txds)
		dma_free_coherent(dp->dev, tx_ring->size,
				  tx_ring->txds, tx_ring->dma);

	tx_ring->cnt = 0;
	tx_ring->txbufs = NULL;
	tx_ring->txds = NULL;
	tx_ring->dma = 0;
	tx_ring->size = 0;
}
/**
 * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
 * @dp:        NFP Net data path struct
 * @tx_ring:   TX Ring structure to allocate
 * @is_xdp:    True if ring will be used for XDP
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int
nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring,
		      bool is_xdp)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	int sz;

	tx_ring->cnt = dp->txd_cnt;

	tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
	tx_ring->txds = dma_zalloc_coherent(dp->dev, tx_ring->size,
					    &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->txds)
		goto err_alloc;

	sz = sizeof(*tx_ring->txbufs) * tx_ring->cnt;
	tx_ring->txbufs = kzalloc(sz, GFP_KERNEL);
	if (!tx_ring->txbufs)
		goto err_alloc;

	if (!is_xdp)
		netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
				    tx_ring->idx);

	return 0;

err_alloc:
	nfp_net_tx_ring_free(tx_ring);
	return -ENOMEM;
}
static int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
{
	unsigned int r;

	dp->tx_rings = kcalloc(dp->num_tx_rings, sizeof(*dp->tx_rings),
			       GFP_KERNEL);
	if (!dp->tx_rings)
		return -ENOMEM;

	for (r = 0; r < dp->num_tx_rings; r++) {
		int bias = 0;

		if (r >= dp->num_stack_tx_rings)
			bias = dp->num_stack_tx_rings;

		nfp_net_tx_ring_init(&dp->tx_rings[r], &nn->r_vecs[r - bias],
				     r);

		if (nfp_net_tx_ring_alloc(dp, &dp->tx_rings[r], bias))
			goto err_free_prev;
	}

	return 0;

err_free_prev:
	while (r--)
		nfp_net_tx_ring_free(&dp->tx_rings[r]);
	kfree(dp->tx_rings);
	return -ENOMEM;
}

static void nfp_net_tx_rings_free(struct nfp_net_dp *dp)
{
	unsigned int r;

	for (r = 0; r < dp->num_tx_rings; r++)
		nfp_net_tx_ring_free(&dp->tx_rings[r]);

	kfree(dp->tx_rings);
}
/**
 * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
 * @rx_ring:  RX ring to free
 */
static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net_dp *dp = &r_vec->nfp_net->dp;

	kfree(rx_ring->rxbufs);

	if (rx_ring->rxds)
		dma_free_coherent(dp->dev, rx_ring->size,
				  rx_ring->rxds, rx_ring->dma);

	rx_ring->cnt = 0;
	rx_ring->rxbufs = NULL;
	rx_ring->rxds = NULL;
	rx_ring->dma = 0;
	rx_ring->size = 0;
}
/**
 * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
 * @dp:	      NFP Net data path struct
 * @rx_ring:  RX ring to allocate
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int
nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
{
	int sz;

	rx_ring->cnt = dp->rxd_cnt;
	rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
	rx_ring->rxds = dma_zalloc_coherent(dp->dev, rx_ring->size,
					    &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->rxds)
		goto err_alloc;

	sz = sizeof(*rx_ring->rxbufs) * rx_ring->cnt;
	rx_ring->rxbufs = kzalloc(sz, GFP_KERNEL);
	if (!rx_ring->rxbufs)
		goto err_alloc;

	return 0;

err_alloc:
	nfp_net_rx_ring_free(rx_ring);
	return -ENOMEM;
}
static int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp)
{
	unsigned int r;

	dp->rx_rings = kcalloc(dp->num_rx_rings, sizeof(*dp->rx_rings),
			       GFP_KERNEL);
	if (!dp->rx_rings)
		return -ENOMEM;

	for (r = 0; r < dp->num_rx_rings; r++) {
		nfp_net_rx_ring_init(&dp->rx_rings[r], &nn->r_vecs[r], r);

		if (nfp_net_rx_ring_alloc(dp, &dp->rx_rings[r]))
			goto err_free_prev;

		if (nfp_net_rx_ring_bufs_alloc(dp, &dp->rx_rings[r]))
			goto err_free_ring;
	}

	return 0;

err_free_prev:
	while (r--) {
		nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
err_free_ring:
		nfp_net_rx_ring_free(&dp->rx_rings[r]);
	}
	kfree(dp->rx_rings);
	return -ENOMEM;
}

static void nfp_net_rx_rings_free(struct nfp_net_dp *dp)
{
	unsigned int r;

	for (r = 0; r < dp->num_rx_rings; r++) {
		nfp_net_rx_ring_bufs_free(dp, &dp->rx_rings[r]);
		nfp_net_rx_ring_free(&dp->rx_rings[r]);
	}

	kfree(dp->rx_rings);
}
static void
nfp_net_vector_assign_rings(struct nfp_net_dp *dp,
			    struct nfp_net_r_vector *r_vec, int idx)
{
	r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL;
	r_vec->tx_ring =
		idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL;

	r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ?
		&dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL;
}
static int
nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
		       int idx)
{
	int err;

	/* Setup NAPI */
	netif_napi_add(nn->dp.netdev, &r_vec->napi,
		       nfp_net_poll, NAPI_POLL_WEIGHT);

	snprintf(r_vec->name, sizeof(r_vec->name),
		 "%s-rxtx-%d", nn->dp.netdev->name, idx);
	err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
			  r_vec);
	if (err) {
		netif_napi_del(&r_vec->napi);
		nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
		return err;
	}
	disable_irq(r_vec->irq_vector);

	irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);

	nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, r_vec->irq_vector,
	       r_vec->irq_entry);

	return 0;
}

static void
nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
{
	irq_set_affinity_hint(r_vec->irq_vector, NULL);
	netif_napi_del(&r_vec->napi);
	free_irq(r_vec->irq_vector, r_vec);
}
/**
 * nfp_net_rss_write_itbl() - Write RSS indirection table to device
 * @nn:      NFP Net device to reconfigure
 */
void nfp_net_rss_write_itbl(struct nfp_net *nn)
{
	int i;

	for (i = 0; i < NFP_NET_CFG_RSS_ITBL_SZ; i += 4)
		nn_writel(nn, NFP_NET_CFG_RSS_ITBL + i,
			  get_unaligned_le32(nn->rss_itbl + i));
}

/**
 * nfp_net_rss_write_key() - Write RSS hash key to device
 * @nn:      NFP Net device to reconfigure
 */
void nfp_net_rss_write_key(struct nfp_net *nn)
{
	int i;

	for (i = 0; i < nfp_net_rss_key_sz(nn); i += 4)
		nn_writel(nn, NFP_NET_CFG_RSS_KEY + i,
			  get_unaligned_le32(nn->rss_key + i));
}
/**
 * nfp_net_coalesce_write_cfg() - Write irq coalescence configuration to HW
 * @nn:      NFP Net device to reconfigure
 */
void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
{
	u8 i;
	u32 factor;
	u32 value;

	/* Compute factor used to convert coalesce '_usecs' parameters to
	 * ME timestamp ticks.  There are 16 ME clock cycles for each timestamp
	 * count.
	 */
	factor = nn->me_freq_mhz / 16;

	/* copy RX interrupt coalesce parameters */
	value = (nn->rx_coalesce_max_frames << 16) |
		(factor * nn->rx_coalesce_usecs);
	for (i = 0; i < nn->dp.num_rx_rings; i++)
		nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);

	/* copy TX interrupt coalesce parameters */
	value = (nn->tx_coalesce_max_frames << 16) |
		(factor * nn->tx_coalesce_usecs);
	for (i = 0; i < nn->dp.num_tx_rings; i++)
		nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
}
/**
 * nfp_net_write_mac_addr() - Write mac address to the device control BAR
 * @nn:      NFP Net device to reconfigure
 *
 * Writes the MAC address from the netdev to the device control BAR.  Does not
 * perform the required reconfig.  We do a bit of byte swapping dance because
 * firmware is LE.
 */
static void nfp_net_write_mac_addr(struct nfp_net *nn)
{
	nn_writel(nn, NFP_NET_CFG_MACADDR + 0,
		  get_unaligned_be32(nn->dp.netdev->dev_addr));
	nn_writew(nn, NFP_NET_CFG_MACADDR + 6,
		  get_unaligned_be16(nn->dp.netdev->dev_addr + 4));
}
static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
{
	nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);

	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
}
/**
 * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
 * @nn:      NFP Net device to reconfigure
 */
static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
{
	u32 new_ctrl, update;
	unsigned int r;
	int err;

	new_ctrl = nn->dp.ctrl;
	new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE;
	update = NFP_NET_CFG_UPDATE_GEN;
	update |= NFP_NET_CFG_UPDATE_MSIX;
	update |= NFP_NET_CFG_UPDATE_RING;

	if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;

	nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);

	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	err = nfp_net_reconfig(nn, update);
	if (err)
		nn_err(nn, "Could not disable device: %d\n", err);

	for (r = 0; r < nn->dp.num_rx_rings; r++)
		nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]);
	for (r = 0; r < nn->dp.num_tx_rings; r++)
		nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]);
	for (r = 0; r < nn->dp.num_r_vecs; r++)
		nfp_net_vec_clear_ring_data(nn, r);

	nn->dp.ctrl = new_ctrl;
}
static void
nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
			     struct nfp_net_rx_ring *rx_ring, unsigned int idx)
{
	/* Write the DMA address, size and MSI-X info to the device */
	nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), rx_ring->dma);
	nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(rx_ring->cnt));
	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), rx_ring->r_vec->irq_entry);
}

static void
nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
			     struct nfp_net_tx_ring *tx_ring, unsigned int idx)
{
	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), tx_ring->dma);
	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(tx_ring->cnt));
	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), tx_ring->r_vec->irq_entry);
}
/**
 * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
 * @nn:      NFP Net device to reconfigure
 */
static int nfp_net_set_config_and_enable(struct nfp_net *nn)
{
	u32 new_ctrl, update = 0;
	unsigned int r;
	int err;

	new_ctrl = nn->dp.ctrl;

	if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
		nfp_net_rss_write_key(nn);
		nfp_net_rss_write_itbl(nn);
		nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg);
		update |= NFP_NET_CFG_UPDATE_RSS;
	}

	if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
		nfp_net_coalesce_write_cfg(nn);

		new_ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
		update |= NFP_NET_CFG_UPDATE_IRQMOD;
	}

	for (r = 0; r < nn->dp.num_tx_rings; r++)
		nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r);
	for (r = 0; r < nn->dp.num_rx_rings; r++)
		nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r);

	nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->dp.num_tx_rings == 64 ?
		  0xffffffffffffffffULL : ((u64)1 << nn->dp.num_tx_rings) - 1);

	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->dp.num_rx_rings == 64 ?
		  0xffffffffffffffffULL : ((u64)1 << nn->dp.num_rx_rings) - 1);

	nfp_net_write_mac_addr(nn);

	nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.netdev->mtu);
	nn_writel(nn, NFP_NET_CFG_FLBUFSZ,
		  nn->dp.fl_bufsz - NFP_NET_RX_BUF_NON_DATA);

	/* Enable device */
	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
	update |= NFP_NET_CFG_UPDATE_GEN;
	update |= NFP_NET_CFG_UPDATE_MSIX;
	update |= NFP_NET_CFG_UPDATE_RING;
	if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	err = nfp_net_reconfig(nn, update);
	if (err) {
		nfp_net_clear_config_and_disable(nn);
		return err;
	}

	nn->dp.ctrl = new_ctrl;

	for (r = 0; r < nn->dp.num_rx_rings; r++)
		nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]);

	/* Since reconfiguration requests while NFP is down are ignored we
	 * have to wipe the entire VXLAN configuration and reinitialize it.
	 */
	if (nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN) {
		memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
		memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
		udp_tunnel_get_rx_info(nn->dp.netdev);
	}

	return 0;
}

/**
 * nfp_net_open_stack() - Start the device from stack's perspective
 * @nn:      NFP Net device to reconfigure
 */
static void nfp_net_open_stack(struct nfp_net *nn)
{
	unsigned int r;

	for (r = 0; r < nn->dp.num_r_vecs; r++) {
		napi_enable(&nn->r_vecs[r].napi);
		enable_irq(nn->r_vecs[r].irq_vector);
	}

	netif_tx_wake_all_queues(nn->dp.netdev);

	enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
	nfp_net_read_link_status(nn);
}

static int nfp_net_netdev_open(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int err, r;

	/* Step 1: Allocate resources for rings and the like
	 * - Request interrupts
	 * - Allocate RX and TX ring resources
	 * - Setup initial RSS table
	 */
	err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
				      nn->exn_name, sizeof(nn->exn_name),
				      NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
	if (err)
		return err;
	err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
				      nn->lsc_name, sizeof(nn->lsc_name),
				      NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
	if (err)
		goto err_free_exn;
	disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);

	for (r = 0; r < nn->dp.num_r_vecs; r++) {
		err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
		if (err)
			goto err_cleanup_vec_p;
	}

	err = nfp_net_rx_rings_prepare(nn, &nn->dp);
	if (err)
		goto err_cleanup_vec;

	err = nfp_net_tx_rings_prepare(nn, &nn->dp);
	if (err)
		goto err_free_rx_rings;

	for (r = 0; r < nn->max_r_vecs; r++)
		nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);

	err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings);
	if (err)
		goto err_free_rings;

	err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings);
	if (err)
		goto err_free_rings;

	/* Step 2: Configure the NFP
	 * - Enable rings from 0 to tx_rings/rx_rings - 1.
	 * - Write MAC address (in case it changed)
	 * - Set the MTU
	 * - Set the Freelist buffer size
	 * - Enable the FW
	 */
	err = nfp_net_set_config_and_enable(nn);
	if (err)
		goto err_free_rings;

	/* Step 3: Enable for kernel
	 * - put some freelist descriptors on each RX ring
	 * - enable NAPI on each ring
	 * - enable all TX queues
	 * - set link state
	 */
	nfp_net_open_stack(nn);

	return 0;

err_free_rings:
	nfp_net_tx_rings_free(&nn->dp);
err_free_rx_rings:
	nfp_net_rx_rings_free(&nn->dp);
err_cleanup_vec:
	r = nn->dp.num_r_vecs;
err_cleanup_vec_p:
	while (r--)
		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
	nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
err_free_exn:
	nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
	return err;
}

/**
 * nfp_net_close_stack() - Quiesce the stack (part of close)
 * @nn:      NFP Net device to reconfigure
 */
static void nfp_net_close_stack(struct nfp_net *nn)
{
	unsigned int r;

	disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
	netif_carrier_off(nn->dp.netdev);
	nn->link_up = false;

	for (r = 0; r < nn->dp.num_r_vecs; r++) {
		disable_irq(nn->r_vecs[r].irq_vector);
		napi_disable(&nn->r_vecs[r].napi);
	}

	netif_tx_disable(nn->dp.netdev);
}

/**
 * nfp_net_close_free_all() - Free all runtime resources
 * @nn:      NFP Net device to free the resources of
 */
static void nfp_net_close_free_all(struct nfp_net *nn)
{
	unsigned int r;

	for (r = 0; r < nn->dp.num_rx_rings; r++) {
		nfp_net_rx_ring_bufs_free(&nn->dp, &nn->dp.rx_rings[r]);
		nfp_net_rx_ring_free(&nn->dp.rx_rings[r]);
	}
	for (r = 0; r < nn->dp.num_tx_rings; r++)
		nfp_net_tx_ring_free(&nn->dp.tx_rings[r]);
	for (r = 0; r < nn->dp.num_r_vecs; r++)
		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);

	kfree(nn->dp.rx_rings);
	kfree(nn->dp.tx_rings);

	nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
	nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
}

/**
 * nfp_net_netdev_close() - Called when the device is downed
 * @netdev:      netdev structure
 */
static int nfp_net_netdev_close(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	/* Step 1: Disable RX and TX rings from the Linux kernel perspective
	 */
	nfp_net_close_stack(nn);

	/* Step 2: Tell NFP
	 */
	nfp_net_clear_config_and_disable(nn);

	/* Step 3: Free resources
	 */
	nfp_net_close_free_all(nn);

	nn_dbg(nn, "%s down", netdev->name);
	return 0;
}

static void nfp_net_set_rx_mode(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u32 new_ctrl;

	new_ctrl = nn->dp.ctrl;

	if (netdev->flags & IFF_PROMISC) {
		if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
			new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
		else
			nn_warn(nn, "FW does not support promiscuous mode\n");
	} else {
		new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
	}

	if (new_ctrl == nn->dp.ctrl)
		return;

	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);

	nn->dp.ctrl = new_ctrl;
}

static void nfp_net_rss_init_itbl(struct nfp_net *nn)
{
	int i;

	for (i = 0; i < sizeof(nn->rss_itbl); i++)
		nn->rss_itbl[i] =
			ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings);
}

static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp)
{
	struct nfp_net_dp new_dp = *dp;

	*dp = nn->dp;
	nn->dp = new_dp;

	nn->dp.netdev->mtu = new_dp.mtu;

	if (!netif_is_rxfh_configured(nn->dp.netdev))
		nfp_net_rss_init_itbl(nn);
}

static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp)
{
	unsigned int r;
	int err;

	nfp_net_dp_swap(nn, dp);

	for (r = 0; r < nn->max_r_vecs; r++)
		nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);

	err = netif_set_real_num_rx_queues(nn->dp.netdev, nn->dp.num_rx_rings);
	if (err)
		return err;

	if (nn->dp.netdev->real_num_tx_queues != nn->dp.num_stack_tx_rings) {
		err = netif_set_real_num_tx_queues(nn->dp.netdev,
						   nn->dp.num_stack_tx_rings);
		if (err)
			return err;
	}

	return nfp_net_set_config_and_enable(nn);
}

struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
{
	struct nfp_net_dp *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	*new = nn->dp;

	/* Clear things which need to be recomputed */
	new->fl_bufsz = 0;
	new->tx_rings = NULL;
	new->rx_rings = NULL;
	new->num_r_vecs = 0;
	new->num_stack_tx_rings = 0;

	return new;
}

static int nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp)
{
	/* XDP-enabled tests */
	if (!dp->xdp_prog)
		return 0;
	if (dp->fl_bufsz > PAGE_SIZE) {
		nn_warn(nn, "MTU too large w/ XDP enabled\n");
		return -EINVAL;
	}
	if (dp->num_tx_rings > nn->max_tx_rings) {
		nn_warn(nn, "Insufficient number of TX rings w/ XDP enabled\n");
		return -EINVAL;
	}

	return 0;
}

int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp)
{
	int r, err;

	dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp);

	dp->num_stack_tx_rings = dp->num_tx_rings;
	if (dp->xdp_prog)
		dp->num_stack_tx_rings -= dp->num_rx_rings;

	dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings);

	err = nfp_net_check_config(nn, dp);
	if (err)
		goto exit_free_dp;

	if (!netif_running(dp->netdev)) {
		nfp_net_dp_swap(nn, dp);
		err = 0;
		goto exit_free_dp;
	}

	/* Prepare new rings */
	for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) {
		err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
		if (err) {
			dp->num_r_vecs = r;
			goto err_cleanup_vecs;
		}
	}

	err = nfp_net_rx_rings_prepare(nn, dp);
	if (err)
		goto err_cleanup_vecs;

	err = nfp_net_tx_rings_prepare(nn, dp);
	if (err)
		goto err_free_rx;

	/* Stop device, swap in new rings, try to start the firmware */
	nfp_net_close_stack(nn);
	nfp_net_clear_config_and_disable(nn);

	err = nfp_net_dp_swap_enable(nn, dp);
	if (err) {
		int err2;

		nfp_net_clear_config_and_disable(nn);

		/* Try with old configuration and old rings */
		err2 = nfp_net_dp_swap_enable(nn, dp);
		if (err2)
			nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
			       err, err2);
	}
	for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);

	nfp_net_rx_rings_free(dp);
	nfp_net_tx_rings_free(dp);

	nfp_net_open_stack(nn);
exit_free_dp:
	kfree(dp);

	return err;

err_free_rx:
	nfp_net_rx_rings_free(dp);
err_cleanup_vecs:
	for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
	kfree(dp);
	return err;
}

static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_net_dp *dp;

	dp = nfp_net_clone_dp(nn);
	if (!dp)
		return -ENOMEM;

	dp->mtu = new_mtu;

	return nfp_net_ring_reconfig(nn, dp);
}
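
/* Per-ring counters are read under the u64_stats fetch/retry loop so that
 * the 64-bit values are sampled consistently (also on 32-bit hosts) before
 * being accumulated into the rtnl_link_stats64 totals below.
 */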

static void nfp_net_stat64(struct net_device *netdev,
			   struct rtnl_link_stats64 *stats)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int r;

	for (r = 0; r < nn->dp.num_r_vecs; r++) {
		struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
		u64 data[3];
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&r_vec->rx_sync);
			data[0] = r_vec->rx_pkts;
			data[1] = r_vec->rx_bytes;
			data[2] = r_vec->rx_drops;
		} while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
		stats->rx_packets += data[0];
		stats->rx_bytes += data[1];
		stats->rx_dropped += data[2];

		do {
			start = u64_stats_fetch_begin(&r_vec->tx_sync);
			data[0] = r_vec->tx_pkts;
			data[1] = r_vec->tx_bytes;
			data[2] = r_vec->tx_errors;
		} while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
		stats->tx_packets += data[0];
		stats->tx_bytes += data[1];
		stats->tx_errors += data[2];
	}
}

static bool nfp_net_ebpf_capable(struct nfp_net *nn)
{
	if (nn->cap & NFP_NET_CFG_CTRL_BPF &&
	    nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI)
		return true;
	return false;
}

static int
nfp_net_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
		 struct tc_to_netdev *tc)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
		return -EOPNOTSUPP;
	if (proto != htons(ETH_P_ALL))
		return -EOPNOTSUPP;

	if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn)) {
		if (!nn->dp.bpf_offload_xdp)
			return nfp_net_bpf_offload(nn, tc->cls_bpf);
		else
			return -EBUSY;
	}

	return -EINVAL;
}

static int nfp_net_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct nfp_net *nn = netdev_priv(netdev);
	u32 new_ctrl;
	int err;

	/* Assume this is not called with features we have not advertised */

	new_ctrl = nn->dp.ctrl;

	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM)
			new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM;
	}

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
		if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
			new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_TXCSUM;
	}

	if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
			new_ctrl |= NFP_NET_CFG_CTRL_LSO;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_LSO;
	}

	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN;
	}

	if (changed & NETIF_F_HW_VLAN_CTAG_TX) {
		if (features & NETIF_F_HW_VLAN_CTAG_TX)
			new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN;
	}

	if (changed & NETIF_F_SG) {
		if (features & NETIF_F_SG)
			new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
	}

	if (changed & NETIF_F_HW_TC && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
		nn_err(nn, "Cannot disable HW TC offload while in use\n");
		return -EBUSY;
	}

	nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
	       netdev->features, features, changed);

	if (new_ctrl == nn->dp.ctrl)
		return 0;

	nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, new_ctrl);
	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		return err;

	nn->dp.ctrl = new_ctrl;

	return 0;
}
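
/* Note: for encapsulated frames the checks below retain checksum and GSO
 * offloads only for tunnel layouts the datapath handles (inner Ethernet
 * over VXLAN-sized UDP or GRE), and only while the inner header offset
 * fits the NFP_NET_LSO_MAX_HDR_SZ limit of the TX descriptor.
 */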

static netdev_features_t
nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
		       netdev_features_t features)
{
	u8 l4_hdr;

	/* We can't do TSO over double tagged packets (802.1AD) */
	features &= vlan_features_check(skb, features);

	if (!skb->encapsulation)
		return features;

	/* Ensure that inner L4 header offset fits into TX descriptor field */
	if (skb_is_gso(skb)) {
		u32 hdrlen;

		hdrlen = skb_inner_transport_header(skb) - skb->data +
			inner_tcp_hdrlen(skb);

		if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ))
			features &= ~NETIF_F_GSO_MASK;
	}

	/* VXLAN/GRE check */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	}

	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    (l4_hdr != IPPROTO_UDP && l4_hdr != IPPROTO_GRE) ||
	    (l4_hdr == IPPROTO_UDP &&
	     (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	      sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}

static int
nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int err;

	if (!nn->eth_port)
		return -EOPNOTSUPP;

	if (!nn->eth_port->is_split)
		err = snprintf(name, len, "p%d", nn->eth_port->label_port);
	else
		err = snprintf(name, len, "p%ds%d", nn->eth_port->label_port,
			       nn->eth_port->label_subport);
	if (err >= len)
		return -EINVAL;

	return 0;
}
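
/* Each 32-bit VXLAN port register holds two UDP ports, with the odd table
 * index in the upper half-word.  For example, with vxlan_ports[0] = 4789
 * and vxlan_ports[1] = 0 the first register is written as 0x000012b5.
 */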

/**
 * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
 * @nn:   NFP Net device to reconfigure
 * @idx:  Index into the port table where new port should be written
 * @port: UDP port to configure (pass zero to remove VXLAN port)
 */
static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
{
	int i;

	nn->vxlan_ports[idx] = port;

	if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_VXLAN))
		return;

	BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2)
		nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port),
			  be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 |
			  be16_to_cpu(nn->vxlan_ports[i]));

	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN);
}

/**
 * nfp_net_find_vxlan_idx() - find table entry of the port or a free one
 * @nn:   NFP Network structure
 * @port: UDP port to look for
 *
 * Return: if the port is already in the table -- its position;
 *	   if the port is not in the table -- free position to use;
 *	   if the table is full -- -ENOSPC.
 */
static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port)
{
	int i, free_idx = -ENOSPC;

	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
		if (nn->vxlan_ports[i] == port)
			return i;
		if (!nn->vxlan_usecnt[i])
			free_idx = i;
	}

	return free_idx;
}

static void nfp_net_add_vxlan_port(struct net_device *netdev,
				   struct udp_tunnel_info *ti)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int idx;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	idx = nfp_net_find_vxlan_idx(nn, ti->port);
	if (idx == -ENOSPC)
		return;

	if (!nn->vxlan_usecnt[idx]++)
		nfp_net_set_vxlan_port(nn, idx, ti->port);
}

static void nfp_net_del_vxlan_port(struct net_device *netdev,
				   struct udp_tunnel_info *ti)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int idx;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	idx = nfp_net_find_vxlan_idx(nn, ti->port);
	if (idx == -ENOSPC || !nn->vxlan_usecnt[idx])
		return;

	if (!--nn->vxlan_usecnt[idx])
		nfp_net_set_vxlan_port(nn, idx, 0);
}
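
/* XDP offload to the NFP reuses the TC cls_bpf offload path: a
 * tc_cls_bpf_offload command (ADD/REPLACE/DESTROY) is built around the XDP
 * program and passed to nfp_net_bpf_offload() below.
 */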

static int nfp_net_xdp_offload(struct nfp_net *nn, struct bpf_prog *prog)
{
	struct tc_cls_bpf_offload cmd = {
		.prog = prog,
	};
	int ret;

	if (!nfp_net_ebpf_capable(nn))
		return -EINVAL;

	if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF) {
		if (!nn->dp.bpf_offload_xdp)
			return prog ? -EBUSY : 0;
		cmd.command = prog ? TC_CLSBPF_REPLACE : TC_CLSBPF_DESTROY;
	} else {
		if (!prog)
			return 0;
		cmd.command = TC_CLSBPF_ADD;
	}

	ret = nfp_net_bpf_offload(nn, &cmd);
	/* Stop offload if replace not possible */
	if (ret && cmd.command == TC_CLSBPF_REPLACE)
		nfp_net_xdp_offload(nn, NULL);
	nn->dp.bpf_offload_xdp = prog && !ret;
	return ret;
}
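
/* The FW metadata prepend area in front of each packet is reused as part of
 * the XDP headroom, so only the difference between XDP_PACKET_HEADROOM and
 * the prepend offset is added as extra DMA offset when a program is
 * attached (see also the BUILD_BUG_ON() in nfp_net_netdev_init()).
 */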

static int nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog)
{
	struct bpf_prog *old_prog = nn->dp.xdp_prog;
	struct nfp_net_dp *dp;
	int err;

	if (!prog && !nn->dp.xdp_prog)
		return 0;
	if (prog && nn->dp.xdp_prog) {
		prog = xchg(&nn->dp.xdp_prog, prog);
		bpf_prog_put(prog);
		nfp_net_xdp_offload(nn, nn->dp.xdp_prog);
		return 0;
	}

	dp = nfp_net_clone_dp(nn);
	if (!dp)
		return -ENOMEM;

	dp->xdp_prog = prog;
	dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings;
	dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	if (prog)
		dp->rx_dma_off = XDP_PACKET_HEADROOM -
			(nn->dp.rx_offset ?: NFP_NET_MAX_PREPEND);
	else
		dp->rx_dma_off = 0;

	/* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
	err = nfp_net_ring_reconfig(nn, dp);
	if (err)
		return err;

	if (old_prog)
		bpf_prog_put(old_prog);

	nfp_net_xdp_offload(nn, nn->dp.xdp_prog);

	return 0;
}

static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return nfp_net_xdp_setup(nn, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_attached = !!nn->dp.xdp_prog;
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops nfp_net_netdev_ops = {
	.ndo_open		= nfp_net_netdev_open,
	.ndo_stop		= nfp_net_netdev_close,
	.ndo_start_xmit		= nfp_net_tx,
	.ndo_get_stats64	= nfp_net_stat64,
	.ndo_setup_tc		= nfp_net_setup_tc,
	.ndo_tx_timeout		= nfp_net_tx_timeout,
	.ndo_set_rx_mode	= nfp_net_set_rx_mode,
	.ndo_change_mtu		= nfp_net_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_set_features	= nfp_net_set_features,
	.ndo_features_check	= nfp_net_features_check,
	.ndo_get_phys_port_name	= nfp_net_get_phys_port_name,
	.ndo_udp_tunnel_add	= nfp_net_add_vxlan_port,
	.ndo_udp_tunnel_del	= nfp_net_del_vxlan_port,
	.ndo_xdp		= nfp_net_xdp,
};

/**
 * nfp_net_info() - Print general info about the NIC
 * @nn:      NFP Net device to print info about
 */
void nfp_net_info(struct nfp_net *nn)
{
	nn_info(nn, "Netronome NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
		nn->dp.is_vf ? "VF " : "",
		nn->dp.num_tx_rings, nn->max_tx_rings,
		nn->dp.num_rx_rings, nn->max_rx_rings);
	nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
		nn->fw_ver.resv, nn->fw_ver.class,
		nn->fw_ver.major, nn->fw_ver.minor,
		nn->max_mtu);
	nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
		nn->cap,
		nn->cap & NFP_NET_CFG_CTRL_PROMISC  ? "PROMISC "  : "",
		nn->cap & NFP_NET_CFG_CTRL_L2BC     ? "L2BCFILT " : "",
		nn->cap & NFP_NET_CFG_CTRL_L2MC     ? "L2MCFILT " : "",
		nn->cap & NFP_NET_CFG_CTRL_RXCSUM   ? "RXCSUM "   : "",
		nn->cap & NFP_NET_CFG_CTRL_TXCSUM   ? "TXCSUM "   : "",
		nn->cap & NFP_NET_CFG_CTRL_RXVLAN   ? "RXVLAN "   : "",
		nn->cap & NFP_NET_CFG_CTRL_TXVLAN   ? "TXVLAN "   : "",
		nn->cap & NFP_NET_CFG_CTRL_SCATTER  ? "SCATTER "  : "",
		nn->cap & NFP_NET_CFG_CTRL_GATHER   ? "GATHER "   : "",
		nn->cap & NFP_NET_CFG_CTRL_LSO      ? "TSO "      : "",
		nn->cap & NFP_NET_CFG_CTRL_RSS      ? "RSS "      : "",
		nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "",
		nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
		nn->cap & NFP_NET_CFG_CTRL_IRQMOD   ? "IRQMOD "   : "",
		nn->cap & NFP_NET_CFG_CTRL_VXLAN    ? "VXLAN "    : "",
		nn->cap & NFP_NET_CFG_CTRL_NVGRE    ? "NVGRE "    : "",
		nfp_net_ebpf_capable(nn)            ? "BPF "      : "");
}

/**
 * nfp_net_netdev_alloc() - Allocate netdev and related structure
 * @pdev:         PCI device
 * @max_tx_rings: Maximum number of TX rings supported by device
 * @max_rx_rings: Maximum number of RX rings supported by device
 *
 * This function allocates a netdev device and fills in the initial
 * part of the @struct nfp_net structure.
 *
 * Return: NFP Net device structure, or ERR_PTR on error.
 */
struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
				     unsigned int max_tx_rings,
				     unsigned int max_rx_rings)
{
	struct net_device *netdev;
	struct nfp_net *nn;

	netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
				    max_tx_rings, max_rx_rings);
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	SET_NETDEV_DEV(netdev, &pdev->dev);
	nn = netdev_priv(netdev);

	nn->dp.netdev = netdev;
	nn->dp.dev = &pdev->dev;
	nn->pdev = pdev;

	nn->max_tx_rings = max_tx_rings;
	nn->max_rx_rings = max_rx_rings;

	nn->dp.num_tx_rings = min_t(unsigned int,
				    max_tx_rings, num_online_cpus());
	nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings,
				    netif_get_num_default_rss_queues());

	nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings);
	nn->dp.num_r_vecs = min_t(unsigned int,
				  nn->dp.num_r_vecs, num_online_cpus());

	nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
	nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;

	spin_lock_init(&nn->reconfig_lock);
	spin_lock_init(&nn->rx_filter_lock);
	spin_lock_init(&nn->link_status_lock);

	setup_timer(&nn->reconfig_timer,
		    nfp_net_reconfig_timer, (unsigned long)nn);
	setup_timer(&nn->rx_filter_stats_timer,
		    nfp_net_filter_stats_timer, (unsigned long)nn);

	return nn;
}

/**
 * nfp_net_netdev_free() - Undo what @nfp_net_netdev_alloc() did
 * @nn:      NFP Net device to free
 */
void nfp_net_netdev_free(struct nfp_net *nn)
{
	free_netdev(nn->dp.netdev);
}

/**
 * nfp_net_rss_key_sz() - Get current size of the RSS key
 * @nn:      NFP Net device instance
 *
 * Return: size of the RSS key for currently selected hash function.
 */
unsigned int nfp_net_rss_key_sz(struct nfp_net *nn)
{
	switch (nn->rss_hfunc) {
	case ETH_RSS_HASH_TOP:
		return NFP_NET_CFG_RSS_KEY_SZ;
	case ETH_RSS_HASH_XOR:
		return 0;
	case ETH_RSS_HASH_CRC32:
		return 4;
	}

	nn_warn(nn, "Unknown hash function: %u\n", nn->rss_hfunc);
	return 0;
}
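
/* The RSS hash function is picked from the FW capability word below; if the
 * FW reports none (presumably a FW predating NFP_NET_CFG_RSS_CAP), Toeplitz
 * is assumed.  nn->rss_hfunc is kept as an ETH_RSS_HASH_* bit so it can be
 * matched against the cases in nfp_net_rss_key_sz() above.
 */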

/**
 * nfp_net_rss_init() - Set the initial RSS parameters
 * @nn:      NFP Net device to reconfigure
 */
static void nfp_net_rss_init(struct nfp_net *nn)
{
	unsigned long func_bit, rss_cap_hfunc;
	u32 reg;

	/* Read the RSS function capability and select first supported func */
	reg = nn_readl(nn, NFP_NET_CFG_RSS_CAP);
	rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, reg);
	if (!rss_cap_hfunc)
		rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC,
					  NFP_NET_CFG_RSS_TOEPLITZ);

	func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS);
	if (func_bit == NFP_NET_CFG_RSS_HFUNCS) {
		dev_warn(nn->dp.dev,
			 "Bad RSS config, defaulting to Toeplitz hash\n");
		func_bit = ETH_RSS_HASH_TOP_BIT;
	}
	nn->rss_hfunc = 1 << func_bit;

	netdev_rss_key_fill(nn->rss_key, nfp_net_rss_key_sz(nn));

	nfp_net_rss_init_itbl(nn);

	/* Enable IPv4/IPv6 TCP by default */
	nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
		      NFP_NET_CFG_RSS_IPV6_TCP |
		      FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc) |
		      NFP_NET_CFG_RSS_MASK;
}

/**
 * nfp_net_irqmod_init() - Set the initial IRQ moderation parameters
 * @nn:      NFP Net device to reconfigure
 */
static void nfp_net_irqmod_init(struct nfp_net *nn)
{
	nn->rx_coalesce_usecs      = 50;
	nn->rx_coalesce_max_frames = 64;
	nn->tx_coalesce_usecs      = 50;
	nn->tx_coalesce_max_frames = 64;
}

/**
 * nfp_net_netdev_init() - Initialise/finalise the netdev structure
 * @netdev:      netdev structure
 *
 * Return: 0 on success or negative errno on error.
 */
int nfp_net_netdev_init(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int err;

	/* XDP calls for 256 byte packet headroom which wouldn't fit in a u8.
	 * We, however, reuse the metadata prepend space for XDP buffers which
	 * is at least 1 byte long and as long as XDP headroom doesn't increase
	 * above 256 the *extra* XDP headroom will fit on 8 bits.
	 */
	BUILD_BUG_ON(XDP_PACKET_HEADROOM > 256);

	nn->dp.chained_metadata_format = nn->fw_ver.major > 3;

	nn->dp.rx_dma_dir = DMA_FROM_DEVICE;

	/* Get some of the read-only fields from the BAR */
	nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
	nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);

	nfp_net_write_mac_addr(nn);

	/* Determine RX packet/metadata boundary offset */
	if (nn->fw_ver.major >= 2) {
		u32 reg;

		reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
		if (reg > NFP_NET_MAX_PREPEND) {
			nn_err(nn, "Invalid rx offset: %d\n", reg);
			return -EINVAL;
		}
		nn->dp.rx_offset = reg;
	} else {
		nn->dp.rx_offset = NFP_NET_RX_OFFSET;
	}

	/* Set default MTU and Freelist buffer size */
	if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
		netdev->mtu = nn->max_mtu;
	else
		netdev->mtu = NFP_NET_DEFAULT_MTU;
	nn->dp.mtu = netdev->mtu;
	nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);

	/* Advertise/enable offloads based on capabilities
	 *
	 * Note: netdev->features show the currently enabled features
	 * and netdev->hw_features advertises which features are
	 * supported.  By default we enable most features.
	 */
	netdev->hw_features = NETIF_F_HIGHDMA;
	if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM) {
		netdev->hw_features |= NETIF_F_RXCSUM;
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
		netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_GATHER) {
		netdev->hw_features |= NETIF_F_SG;
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER;
	}
	if ((nn->cap & NFP_NET_CFG_CTRL_LSO) && nn->fw_ver.major > 2) {
		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_LSO;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
		netdev->hw_features |= NETIF_F_RXHASH;
		nfp_net_rss_init(nn);
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_RSS;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_VXLAN &&
	    nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
		if (nn->cap & NFP_NET_CFG_CTRL_LSO)
			netdev->hw_features |= NETIF_F_GSO_GRE |
					       NETIF_F_GSO_UDP_TUNNEL;
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE;

		netdev->hw_enc_features = netdev->hw_features;
	}

	netdev->vlan_features = netdev->hw_features;

	if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) {
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
	}

	netdev->features = netdev->hw_features;

	if (nfp_net_ebpf_capable(nn))
		netdev->hw_features |= NETIF_F_HW_TC;

	/* Advertise but disable TSO by default. */
	netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);

	/* Allow L2 Broadcast and Multicast through by default, if supported */
	if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;
	if (nn->cap & NFP_NET_CFG_CTRL_L2MC)
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2MC;

	/* Allow IRQ moderation, if supported */
	if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
		nfp_net_irqmod_init(nn);
		nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
	}

	/* Stash the re-configuration queue away.  First odd queue in TX Bar */
	nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;

	/* Make sure the FW knows the netdev is supposed to be disabled here */
	nn_writel(nn, NFP_NET_CFG_CTRL, 0);
	nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RING |
				   NFP_NET_CFG_UPDATE_GEN);
	if (err)
		return err;

	/* Finalise the netdev setup */
	netdev->netdev_ops = &nfp_net_netdev_ops;
	netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);

	/* MTU range: 68 - hw-specific max */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = nn->max_mtu;

	netif_carrier_off(netdev);

	nfp_net_set_ethtool_ops(netdev);
	nfp_net_vecs_init(netdev);

	return register_netdev(netdev);
}

/**
 * nfp_net_netdev_clean() - Undo what nfp_net_netdev_init() did.
 * @netdev:      netdev structure
 */
void nfp_net_netdev_clean(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	unregister_netdev(nn->dp.netdev);

	if (nn->dp.xdp_prog)
		bpf_prog_put(nn->dp.xdp_prog);
	if (nn->dp.bpf_offload_xdp)
		nfp_net_xdp_offload(nn, NULL);
}