/*
 * Copyright (C) 2015 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_common.c
 * Netronome network device driver: Common functions between PF and VF
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 *          Brad Petrus <brad.petrus@netronome.com>
 *          Chris Telfer <chris.telfer@netronome.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/msi.h>
#include <linux/ethtool.h>
#include <linux/log2.h>
#include <linux/if_vlan.h>
#include <linux/random.h>

#include <linux/ktime.h>

#include <net/pkt_cls.h>
#include <net/vxlan.h>

#include "nfp_net_ctrl.h"
#include "nfp_net.h"

/**
 * nfp_net_get_fw_version() - Read and parse the FW version
 * @fw_ver:   Output fw_version structure to read to
 * @ctrl_bar: Mapped address of the control BAR
 */
void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
			    void __iomem *ctrl_bar)
{
	u32 reg;

	reg = readl(ctrl_bar + NFP_NET_CFG_VERSION);
	put_unaligned_le32(reg, fw_ver);
}

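/* Illustration, assuming the struct nfp_net_fw_version layout from
 * nfp_net.h (minor, major, class, resv, low byte first): because the
 * version word is stored little-endian by put_unaligned_le32(), a
 * register value of 0x03010200 parses as resv=3, class=1, major=2,
 * minor=0.
 */
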
/* Firmware reconfig
 *
 * Firmware reconfig may take a while so we have two versions of it -
 * synchronous and asynchronous (posted).  All synchronous callers are holding
 * RTNL so we don't have to worry about serializing them.
 */
static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
{
	nn_writel(nn, NFP_NET_CFG_UPDATE, update);
	/* ensure update is written before pinging HW */
	nn_pci_flush(nn);
	nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
}

/* Pass 0 as update to run posted reconfigs. */
static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
{
	update |= nn->reconfig_posted;
	nn->reconfig_posted = 0;

	nfp_net_reconfig_start(nn, update);

	nn->reconfig_timer_active = true;
	mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);
}

static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
{
	u32 reg;

	reg = nn_readl(nn, NFP_NET_CFG_UPDATE);
	if (reg == 0)
		return true;
	if (reg & NFP_NET_CFG_UPDATE_ERR) {
		nn_err(nn, "Reconfig error: 0x%08x\n", reg);
		return true;
	} else if (last_check) {
		nn_err(nn, "Reconfig timeout: 0x%08x\n", reg);
		return true;
	}

	return false;
}

static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
{
	bool timed_out = false;

	/* Poll update field, waiting for NFP to ack the config */
	while (!nfp_net_reconfig_check_done(nn, timed_out)) {
		msleep(1);
		timed_out = time_is_before_eq_jiffies(deadline);
	}

	if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
		return -EIO;

	return timed_out ? -EIO : 0;
}

static void nfp_net_reconfig_timer(unsigned long data)
{
	struct nfp_net *nn = (void *)data;

	spin_lock_bh(&nn->reconfig_lock);

	nn->reconfig_timer_active = false;

	/* If sync caller is present it will take over from us */
	if (nn->reconfig_sync_present)
		goto done;

	/* Read reconfig status and report errors */
	nfp_net_reconfig_check_done(nn, true);

	if (nn->reconfig_posted)
		nfp_net_reconfig_start_async(nn, 0);
done:
	spin_unlock_bh(&nn->reconfig_lock);
}

/**
 * nfp_net_reconfig_post() - Post async reconfig request
 * @nn:      NFP Net device to reconfigure
 * @update:  The value for the update field in the BAR config
 *
 * Record FW reconfiguration request.  Reconfiguration will be kicked off
 * whenever reconfiguration machinery is idle.  Multiple requests can be
 * merged together.
 */
static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
{
	spin_lock_bh(&nn->reconfig_lock);

	/* Sync caller will kick off async reconf when it's done, just post */
	if (nn->reconfig_sync_present) {
		nn->reconfig_posted |= update;
		goto done;
	}

	/* Opportunistically check if the previous command is done */
	if (!nn->reconfig_timer_active ||
	    nfp_net_reconfig_check_done(nn, false))
		nfp_net_reconfig_start_async(nn, update);
	else
		nn->reconfig_posted |= update;
done:
	spin_unlock_bh(&nn->reconfig_lock);
}

/**
 * nfp_net_reconfig() - Reconfigure the firmware
 * @nn:      NFP Net device to reconfigure
 * @update:  The value for the update field in the BAR config
 *
 * Write the update word to the BAR and ping the reconfig queue.  Then
 * poll until the firmware has acknowledged the update by zeroing the
 * update word.
 *
 * Return: Negative errno on error, 0 on success
 */
int nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
	bool cancelled_timer = false;
	u32 pre_posted_requests;
	int ret;

	spin_lock_bh(&nn->reconfig_lock);

	nn->reconfig_sync_present = true;

	if (nn->reconfig_timer_active) {
		del_timer(&nn->reconfig_timer);
		nn->reconfig_timer_active = false;
		cancelled_timer = true;
	}
	pre_posted_requests = nn->reconfig_posted;
	nn->reconfig_posted = 0;

	spin_unlock_bh(&nn->reconfig_lock);

	if (cancelled_timer)
		nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);

	/* Run the posted reconfigs which were issued before we started */
	if (pre_posted_requests) {
		nfp_net_reconfig_start(nn, pre_posted_requests);
		nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
	}

	nfp_net_reconfig_start(nn, update);
	ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);

	spin_lock_bh(&nn->reconfig_lock);

	if (nn->reconfig_posted)
		nfp_net_reconfig_start_async(nn, 0);

	nn->reconfig_sync_present = false;

	spin_unlock_bh(&nn->reconfig_lock);

	return ret;
}

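/* Typical usage, mirroring the callers later in this file: write the new
 * state to the control BAR first, then post the matching update bits and
 * wait for the firmware to ack:
 *
 *	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
 *	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
 *	if (err)
 *		return err;
 *	nn->ctrl = new_ctrl;
 */
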
/* Interrupt configuration and handling
 */

/**
 * nfp_net_irq_unmask_msix() - Unmask MSI-X after automasking
 * @nn:       NFP Network structure
 * @entry_nr: MSI-X table entry
 *
 * Clear the MSI-X table mask bit for the given entry bypassing Linux irq
 * handling subsystem.  Use *only* to reenable automasked vectors.
 */
static void nfp_net_irq_unmask_msix(struct nfp_net *nn, unsigned int entry_nr)
{
	struct list_head *msi_head = &nn->pdev->dev.msi_list;
	struct msi_desc *entry;
	u32 off;

	/* All MSI-Xs have the same mask_base */
	entry = list_first_entry(msi_head, struct msi_desc, list);

	off = (PCI_MSIX_ENTRY_SIZE * entry_nr) +
		PCI_MSIX_ENTRY_VECTOR_CTRL;
	writel(0, entry->mask_base + off);
	readl(entry->mask_base);
}

/**
 * nfp_net_irq_unmask() - Unmask automasked interrupt
 * @nn:       NFP Network structure
 * @entry_nr: MSI-X table entry
 *
 * If MSI-X auto-masking is enabled clear the mask bit, otherwise
 * clear the ICR for the entry.
 */
static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
{
	if (nn->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
		nfp_net_irq_unmask_msix(nn, entry_nr);
		return;
	}

	nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
}

/**
 * nfp_net_msix_alloc() - Try to allocate MSI-X irqs
 * @nn:       NFP Network structure
 * @nr_vecs:  Number of MSI-X vectors to allocate
 *
 * For MSI-X we want at least NFP_NET_NON_Q_VECTORS + 1 vectors.
 *
 * Return: Number of MSI-X vectors obtained or 0 on error.
 */
static int nfp_net_msix_alloc(struct nfp_net *nn, int nr_vecs)
{
	struct pci_dev *pdev = nn->pdev;
	int nvecs;
	int i;

	for (i = 0; i < nr_vecs; i++)
		nn->irq_entries[i].entry = i;

	nvecs = pci_enable_msix_range(pdev, nn->irq_entries,
				      NFP_NET_NON_Q_VECTORS + 1, nr_vecs);
	if (nvecs < 0) {
		nn_warn(nn, "Failed to enable MSI-X. Wanted %d-%d (err=%d)\n",
			NFP_NET_NON_Q_VECTORS + 1, nr_vecs, nvecs);
		return 0;
	}

	return nvecs;
}

/**
 * nfp_net_irqs_wanted() - Work out how many interrupt vectors we want
 * @nn:       NFP Network structure
 *
 * We want a vector per CPU (or ring), whichever is smaller, plus
 * NFP_NET_NON_Q_VECTORS for LSC etc.
 *
 * Return: Number of interrupts wanted
 */
static int nfp_net_irqs_wanted(struct nfp_net *nn)
{
	int ncpus;
	int vecs;

	ncpus = num_online_cpus();

	vecs = max_t(int, nn->num_tx_rings, nn->num_rx_rings);
	vecs = min_t(int, vecs, ncpus);

	return vecs + NFP_NET_NON_Q_VECTORS;
}

/**
 * nfp_net_irqs_alloc() - allocates MSI-X irqs
 * @nn:       NFP Network structure
 *
 * Return: Number of irqs obtained or 0 on error.
 */
int nfp_net_irqs_alloc(struct nfp_net *nn)
{
	int wanted_irqs;

	wanted_irqs = nfp_net_irqs_wanted(nn);

	nn->num_irqs = nfp_net_msix_alloc(nn, wanted_irqs);
	if (nn->num_irqs == 0) {
		nn_err(nn, "Failed to allocate MSI-X IRQs\n");
		return 0;
	}

	nn->num_r_vecs = nn->num_irqs - NFP_NET_NON_Q_VECTORS;

	if (nn->num_irqs < wanted_irqs)
		nn_warn(nn, "Unable to allocate %d vectors. Got %d instead\n",
			wanted_irqs, nn->num_irqs);

	return nn->num_irqs;
}

/**
 * nfp_net_irqs_disable() - Disable interrupts
 * @nn:       NFP Network structure
 *
 * Undoes what @nfp_net_irqs_alloc() does.
 */
void nfp_net_irqs_disable(struct nfp_net *nn)
{
	pci_disable_msix(nn->pdev);
}

/**
 * nfp_net_irq_rxtx() - Interrupt service routine for RX/TX rings.
 * @irq:      Interrupt
 * @data:     Opaque data structure
 *
 * Return: Indicate if the interrupt has been handled.
 */
static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
{
	struct nfp_net_r_vector *r_vec = data;

	napi_schedule_irqoff(&r_vec->napi);

	/* The FW auto-masks any interrupt, either via the MASK bit in
	 * the MSI-X table or via the per entry ICR field.  So there
	 * is no need to disable interrupts here.
	 */
	return IRQ_HANDLED;
}

/**
 * nfp_net_read_link_status() - Reread link status from control BAR
 * @nn:       NFP Network structure
 */
static void nfp_net_read_link_status(struct nfp_net *nn)
{
	unsigned long flags;
	bool link_up;
	u32 sts;

	spin_lock_irqsave(&nn->link_status_lock, flags);

	sts = nn_readl(nn, NFP_NET_CFG_STS);
	link_up = !!(sts & NFP_NET_CFG_STS_LINK);

	if (nn->link_up == link_up)
		goto out;

	nn->link_up = link_up;

	if (nn->link_up) {
		netif_carrier_on(nn->netdev);
		netdev_info(nn->netdev, "NIC Link is Up\n");
	} else {
		netif_carrier_off(nn->netdev);
		netdev_info(nn->netdev, "NIC Link is Down\n");
	}
out:
	spin_unlock_irqrestore(&nn->link_status_lock, flags);
}

/**
 * nfp_net_irq_lsc() - Interrupt service routine for link state changes
 * @irq:      Interrupt
 * @data:     Opaque data structure
 *
 * Return: Indicate if the interrupt has been handled.
 */
static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
{
	struct nfp_net *nn = data;

	nfp_net_read_link_status(nn);

	nfp_net_irq_unmask(nn, NFP_NET_IRQ_LSC_IDX);

	return IRQ_HANDLED;
}

/**
 * nfp_net_irq_exn() - Interrupt service routine for exceptions
 * @irq:      Interrupt
 * @data:     Opaque data structure
 *
 * Return: Indicate if the interrupt has been handled.
 */
static irqreturn_t nfp_net_irq_exn(int irq, void *data)
{
	struct nfp_net *nn = data;

	nn_err(nn, "%s: UNIMPLEMENTED.\n", __func__);
	/* XXX TO BE IMPLEMENTED */
	return IRQ_HANDLED;
}

/**
 * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
 * @tx_ring:  TX ring structure
 * @r_vec:    IRQ vector servicing this ring
 * @idx:      Ring index
 */
static void
nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
		     struct nfp_net_r_vector *r_vec, unsigned int idx)
{
	struct nfp_net *nn = r_vec->nfp_net;

	tx_ring->idx = idx;
	tx_ring->r_vec = r_vec;

	tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
	tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
}

/**
 * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
 * @rx_ring:  RX ring structure
 * @r_vec:    IRQ vector servicing this ring
 * @idx:      Ring index
 */
static void
nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
		     struct nfp_net_r_vector *r_vec, unsigned int idx)
{
	struct nfp_net *nn = r_vec->nfp_net;

	rx_ring->idx = idx;
	rx_ring->r_vec = r_vec;

	rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
	rx_ring->rx_qcidx = rx_ring->fl_qcidx + (nn->stride_rx - 1);

	rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
	rx_ring->qcp_rx = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->rx_qcidx);
}

/**
 * nfp_net_irqs_assign() - Assign IRQs and setup rvecs.
 * @netdev:   netdev structure
 */
static void nfp_net_irqs_assign(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_net_r_vector *r_vec;
	int r;

	/* Assumes nn->num_tx_rings == nn->num_rx_rings */
	if (nn->num_tx_rings > nn->num_r_vecs) {
		nn_warn(nn, "More rings (%d) than vectors (%d).\n",
			nn->num_tx_rings, nn->num_r_vecs);
		nn->num_tx_rings = nn->num_r_vecs;
		nn->num_rx_rings = nn->num_r_vecs;
	}

	nn->lsc_handler = nfp_net_irq_lsc;
	nn->exn_handler = nfp_net_irq_exn;

	for (r = 0; r < nn->num_r_vecs; r++) {
		r_vec = &nn->r_vecs[r];
		r_vec->nfp_net = nn;
		r_vec->handler = nfp_net_irq_rxtx;
		r_vec->irq_idx = NFP_NET_NON_Q_VECTORS + r;

		cpumask_set_cpu(r, &r_vec->affinity_mask);
	}
}

/**
 * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
 * @nn:		NFP Network structure
 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
 * @format:	printf-style format to construct the interrupt name
 * @name:	Pointer to allocated space for interrupt name
 * @name_sz:	Size of space for interrupt name
 * @vector_idx:	Index of MSI-X vector used for this interrupt
 * @handler:	IRQ handler to register for this interrupt
 *
 * Return: 0 on success or negative errno on error.
 */
static int
nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
			const char *format, char *name, size_t name_sz,
			unsigned int vector_idx, irq_handler_t handler)
{
	struct msix_entry *entry;
	int err;

	entry = &nn->irq_entries[vector_idx];

	snprintf(name, name_sz, format, netdev_name(nn->netdev));
	err = request_irq(entry->vector, handler, 0, name, nn);
	if (err) {
		nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
		       entry->vector, err);
		return err;
	}
	nn_writeb(nn, ctrl_offset, vector_idx);

	return 0;
}

/**
 * nfp_net_aux_irq_free() - Free an auxiliary interrupt (LSC or EXN)
 * @nn:		NFP Network structure
 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
 * @vector_idx:	Index of MSI-X vector used for this interrupt
 */
static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
				 unsigned int vector_idx)
{
	nn_writeb(nn, ctrl_offset, 0xff);
	free_irq(nn->irq_entries[vector_idx].vector, nn);
}

/* Transmit
 *
 * One queue controller peripheral queue is used for transmit.  The
 * driver en-queues packets for transmit by advancing the write
 * pointer.  The device indicates that packets have transmitted by
 * advancing the read pointer.  The driver maintains a local copy of
 * the read and write pointer in @struct nfp_net_tx_ring.  The driver
 * keeps @wr_p in sync with the queue controller write pointer and can
 * determine how many packets have been transmitted by comparing its
 * copy of the read pointer @rd_p with the read pointer maintained by
 * the queue controller peripheral.
 */

/**
 * nfp_net_tx_full() - Check if the TX ring is full
 * @tx_ring: TX ring to check
 * @dcnt:    Number of descriptors that need to be enqueued (must be >= 1)
 *
 * This function checks, based on the *host copy* of read/write
 * pointers, if a given TX ring is full.  The real TX queue may have
 * some newly made available slots.
 *
 * Return: True if the ring is full.
 */
static inline int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
{
	return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
}

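/* Worked example for the free-running pointers above: with cnt = 4096,
 * wr_p = 5000 and rd_p = 1000, the ring holds wr_p - rd_p = 4000
 * descriptors, so nfp_net_tx_full(tx_ring, 97) is true (4000 >= 3999).
 * Unsigned arithmetic keeps the difference correct even after u32 wrap.
 */
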
/* Wrappers for deciding when to stop and restart TX queues */
static int nfp_net_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
{
	return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
}

static int nfp_net_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
{
	return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
}

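/* The two thresholds above are deliberately asymmetric: the queue is
 * stopped once MAX_SKB_FRAGS + 1 or fewer slots remain (not enough for a
 * worst-case skb), but only woken again once more than 4 * MAX_SKB_FRAGS
 * slots are free.  The gap avoids rapid stop/wake ping-pong under load.
 */
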
/**
 * nfp_net_tx_ring_stop() - stop tx ring
 * @nd_q:    netdev queue
 * @tx_ring: driver tx queue structure
 *
 * Safely stop TX ring.  Remember that while we are running .start_xmit()
 * someone else may be cleaning the TX ring completions so we need to be
 * extra careful here.
 */
static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
				 struct nfp_net_tx_ring *tx_ring)
{
	netif_tx_stop_queue(nd_q);

	/* We can race with the TX completion out of NAPI so recheck */
	smp_mb();
	if (unlikely(nfp_net_tx_ring_should_wake(tx_ring)))
		netif_tx_start_queue(nd_q);
}

/**
 * nfp_net_tx_tso() - Set up Tx descriptor for LSO
 * @nn:    NFP Net device
 * @r_vec: per-ring structure
 * @txbuf: Pointer to driver soft TX descriptor
 * @txd:   Pointer to HW TX descriptor
 * @skb:   Pointer to SKB
 *
 * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
 * Return error on packet header greater than maximum supported LSO header size.
 */
static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
			   struct nfp_net_tx_buf *txbuf,
			   struct nfp_net_tx_desc *txd, struct sk_buff *skb)
{
	u32 hdrlen;
	u16 mss;

	if (!skb_is_gso(skb))
		return;

	if (!skb->encapsulation)
		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
	else
		hdrlen = skb_inner_transport_header(skb) - skb->data +
			inner_tcp_hdrlen(skb);

	txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
	txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);

	mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
	txd->l4_offset = hdrlen;
	txd->mss = cpu_to_le16(mss);
	txd->flags |= PCIE_DESC_TX_LSO;

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_lso++;
	u64_stats_update_end(&r_vec->tx_sync);
}

/**
 * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor
 * @nn:    NFP Net device
 * @r_vec: per-ring structure
 * @txbuf: Pointer to driver soft TX descriptor
 * @txd:   Pointer to TX descriptor
 * @skb:   Pointer to SKB
 *
 * This function sets the TX checksum flags in the TX descriptor based
 * on the configuration and the protocol of the packet to be transmitted.
 */
static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
			    struct nfp_net_tx_buf *txbuf,
			    struct nfp_net_tx_desc *txd, struct sk_buff *skb)
{
	struct ipv6hdr *ipv6h;
	struct iphdr *iph;
	u8 l4_hdr;

	if (!(nn->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
		return;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return;

	txd->flags |= PCIE_DESC_TX_CSUM;
	if (skb->encapsulation)
		txd->flags |= PCIE_DESC_TX_ENCAP;

	iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
	ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);

	if (iph->version == 4) {
		txd->flags |= PCIE_DESC_TX_IP4_CSUM;
		l4_hdr = iph->protocol;
	} else if (ipv6h->version == 6) {
		l4_hdr = ipv6h->nexthdr;
	} else {
		nn_warn_ratelimit(nn, "partial checksum but ipv=%x!\n",
				  iph->version);
		return;
	}

	switch (l4_hdr) {
	case IPPROTO_TCP:
		txd->flags |= PCIE_DESC_TX_TCP_CSUM;
		break;
	case IPPROTO_UDP:
		txd->flags |= PCIE_DESC_TX_UDP_CSUM;
		break;
	default:
		nn_warn_ratelimit(nn, "partial checksum but l4 proto=%x!\n",
				  l4_hdr);
		return;
	}

	u64_stats_update_begin(&r_vec->tx_sync);
	if (skb->encapsulation)
		r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
	else
		r_vec->hw_csum_tx += txbuf->pkt_cnt;
	u64_stats_update_end(&r_vec->tx_sync);
}

/**
 * nfp_net_tx() - Main transmit entry point
 * @skb:    SKB to transmit
 * @netdev: netdev structure
 *
 * Return: NETDEV_TX_OK on success.
 */
static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	const struct skb_frag_struct *frag;
	struct nfp_net_r_vector *r_vec;
	struct nfp_net_tx_desc *txd, txdg;
	struct nfp_net_tx_buf *txbuf;
	struct nfp_net_tx_ring *tx_ring;
	struct netdev_queue *nd_q;
	dma_addr_t dma_addr;
	unsigned int fsize;
	int f, nr_frags;
	int wr_idx;
	u16 qidx;

	qidx = skb_get_queue_mapping(skb);
	tx_ring = &nn->tx_rings[qidx];
	r_vec = tx_ring->r_vec;
	nd_q = netdev_get_tx_queue(nn->netdev, qidx);

	nr_frags = skb_shinfo(skb)->nr_frags;

	if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
		nn_warn_ratelimit(nn, "TX ring %d busy. wrp=%u rdp=%u\n",
				  qidx, tx_ring->wr_p, tx_ring->rd_p);
		netif_tx_stop_queue(nd_q);
		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tx_busy++;
		u64_stats_update_end(&r_vec->tx_sync);
		return NETDEV_TX_BUSY;
	}

	/* Start with the head skbuf */
	dma_addr = dma_map_single(&nn->pdev->dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(&nn->pdev->dev, dma_addr))
		goto err_free;

	wr_idx = tx_ring->wr_p % tx_ring->cnt;

	/* Stash the soft descriptor of the head then initialize it */
	txbuf = &tx_ring->txbufs[wr_idx];
	txbuf->skb = skb;
	txbuf->dma_addr = dma_addr;
	txbuf->fidx = -1;
	txbuf->pkt_cnt = 1;
	txbuf->real_len = skb->len;

	/* Build TX descriptor */
	txd = &tx_ring->txds[wr_idx];
	txd->offset_eop = (nr_frags == 0) ? PCIE_DESC_TX_EOP : 0;
	txd->dma_len = cpu_to_le16(skb_headlen(skb));
	nfp_desc_set_dma_addr(txd, dma_addr);
	txd->data_len = cpu_to_le16(skb->len);

	txd->flags = 0;
	txd->mss = 0;
	txd->l4_offset = 0;

	nfp_net_tx_tso(nn, r_vec, txbuf, txd, skb);

	nfp_net_tx_csum(nn, r_vec, txbuf, txd, skb);

	if (skb_vlan_tag_present(skb) && nn->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
		txd->flags |= PCIE_DESC_TX_VLAN;
		txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
	}

	/* Gather DMA */
	if (nr_frags > 0) {
		/* all descs must match except for in addr, length and eop */
		txdg = *txd;

		for (f = 0; f < nr_frags; f++) {
			frag = &skb_shinfo(skb)->frags[f];
			fsize = skb_frag_size(frag);

			dma_addr = skb_frag_dma_map(&nn->pdev->dev, frag, 0,
						    fsize, DMA_TO_DEVICE);
			if (dma_mapping_error(&nn->pdev->dev, dma_addr))
				goto err_unmap;

			wr_idx = (wr_idx + 1) % tx_ring->cnt;
			tx_ring->txbufs[wr_idx].skb = skb;
			tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
			tx_ring->txbufs[wr_idx].fidx = f;

			txd = &tx_ring->txds[wr_idx];
			*txd = txdg;
			txd->dma_len = cpu_to_le16(fsize);
			nfp_desc_set_dma_addr(txd, dma_addr);
			txd->offset_eop =
				(f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0;
		}

		u64_stats_update_begin(&r_vec->tx_sync);
		r_vec->tx_gather++;
		u64_stats_update_end(&r_vec->tx_sync);
	}

	netdev_tx_sent_queue(nd_q, txbuf->real_len);

	tx_ring->wr_p += nr_frags + 1;
	if (nfp_net_tx_ring_should_stop(tx_ring))
		nfp_net_tx_ring_stop(nd_q, tx_ring);

	tx_ring->wr_ptr_add += nr_frags + 1;
	if (!skb->xmit_more || netif_xmit_stopped(nd_q)) {
		/* force memory write before we let HW know */
		wmb();
		nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
		tx_ring->wr_ptr_add = 0;
	}

	skb_tx_timestamp(skb);

	return NETDEV_TX_OK;

err_unmap:
	--f;
	while (f >= 0) {
		frag = &skb_shinfo(skb)->frags[f];
		dma_unmap_page(&nn->pdev->dev,
			       tx_ring->txbufs[wr_idx].dma_addr,
			       skb_frag_size(frag), DMA_TO_DEVICE);
		tx_ring->txbufs[wr_idx].skb = NULL;
		tx_ring->txbufs[wr_idx].dma_addr = 0;
		tx_ring->txbufs[wr_idx].fidx = -2;
		f--;
		wr_idx = wr_idx - 1;
		if (wr_idx < 0)
			wr_idx += tx_ring->cnt;
	}
	dma_unmap_single(&nn->pdev->dev, tx_ring->txbufs[wr_idx].dma_addr,
			 skb_headlen(skb), DMA_TO_DEVICE);
	tx_ring->txbufs[wr_idx].skb = NULL;
	tx_ring->txbufs[wr_idx].dma_addr = 0;
	tx_ring->txbufs[wr_idx].fidx = -2;
err_free:
	nn_warn_ratelimit(nn, "Failed to map DMA TX buffer\n");
	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_errors++;
	u64_stats_update_end(&r_vec->tx_sync);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * nfp_net_tx_complete() - Handle completed TX packets
 * @tx_ring:   TX ring structure
 */
static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	struct nfp_net *nn = r_vec->nfp_net;
	const struct skb_frag_struct *frag;
	struct netdev_queue *nd_q;
	u32 done_pkts = 0, done_bytes = 0;
	struct sk_buff *skb;
	int todo, nr_frags;
	u32 qcp_rd_p;
	int fidx;
	int idx;

	/* Work out how many descriptors have been transmitted */
	qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);

	if (qcp_rd_p == tx_ring->qcp_rd_p)
		return;

	if (qcp_rd_p > tx_ring->qcp_rd_p)
		todo = qcp_rd_p - tx_ring->qcp_rd_p;
	else
		todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p;

	while (todo--) {
		idx = tx_ring->rd_p % tx_ring->cnt;
		tx_ring->rd_p++;

		skb = tx_ring->txbufs[idx].skb;
		if (!skb)
			continue;

		nr_frags = skb_shinfo(skb)->nr_frags;
		fidx = tx_ring->txbufs[idx].fidx;

		if (fidx == -1) {
			/* unmap head */
			dma_unmap_single(&nn->pdev->dev,
					 tx_ring->txbufs[idx].dma_addr,
					 skb_headlen(skb), DMA_TO_DEVICE);

			done_pkts += tx_ring->txbufs[idx].pkt_cnt;
			done_bytes += tx_ring->txbufs[idx].real_len;
		} else {
			/* unmap fragment */
			frag = &skb_shinfo(skb)->frags[fidx];
			dma_unmap_page(&nn->pdev->dev,
				       tx_ring->txbufs[idx].dma_addr,
				       skb_frag_size(frag), DMA_TO_DEVICE);
		}

		/* check for last gather fragment */
		if (fidx == nr_frags - 1)
			dev_kfree_skb_any(skb);

		tx_ring->txbufs[idx].dma_addr = 0;
		tx_ring->txbufs[idx].skb = NULL;
		tx_ring->txbufs[idx].fidx = -2;
	}

	tx_ring->qcp_rd_p = qcp_rd_p;

	u64_stats_update_begin(&r_vec->tx_sync);
	r_vec->tx_bytes += done_bytes;
	r_vec->tx_pkts += done_pkts;
	u64_stats_update_end(&r_vec->tx_sync);

	nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
	netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
	if (nfp_net_tx_ring_should_wake(tx_ring)) {
		/* Make sure TX thread will see updated tx_ring->rd_p */
		smp_mb();

		if (unlikely(netif_tx_queue_stopped(nd_q)))
			netif_tx_wake_queue(nd_q);
	}

	WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
		  "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
		  tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
}

/**
 * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
 * @nn:      NFP Net device
 * @tx_ring: TX ring structure
 *
 * Assumes that the device is stopped
 */
static void
nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
{
	const struct skb_frag_struct *frag;
	struct netdev_queue *nd_q;
	struct pci_dev *pdev = nn->pdev;

	while (tx_ring->rd_p != tx_ring->wr_p) {
		int nr_frags, fidx, idx;
		struct sk_buff *skb;

		idx = tx_ring->rd_p % tx_ring->cnt;
		skb = tx_ring->txbufs[idx].skb;
		nr_frags = skb_shinfo(skb)->nr_frags;
		fidx = tx_ring->txbufs[idx].fidx;

		if (fidx == -1) {
			/* unmap head */
			dma_unmap_single(&pdev->dev,
					 tx_ring->txbufs[idx].dma_addr,
					 skb_headlen(skb), DMA_TO_DEVICE);
		} else {
			/* unmap fragment */
			frag = &skb_shinfo(skb)->frags[fidx];
			dma_unmap_page(&pdev->dev,
				       tx_ring->txbufs[idx].dma_addr,
				       skb_frag_size(frag), DMA_TO_DEVICE);
		}

		/* check for last gather fragment */
		if (fidx == nr_frags - 1)
			dev_kfree_skb_any(skb);

		tx_ring->txbufs[idx].dma_addr = 0;
		tx_ring->txbufs[idx].skb = NULL;
		tx_ring->txbufs[idx].fidx = -2;

		tx_ring->qcp_rd_p++;
		tx_ring->rd_p++;
	}

	memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt);
	tx_ring->wr_p = 0;
	tx_ring->rd_p = 0;
	tx_ring->qcp_rd_p = 0;
	tx_ring->wr_ptr_add = 0;

	nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
	netdev_tx_reset_queue(nd_q);
}

static void nfp_net_tx_timeout(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int i;

	for (i = 0; i < nn->num_tx_rings; i++) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
			continue;
		nn_warn(nn, "TX timeout on ring: %d\n", i);
	}
	nn_warn(nn, "TX watchdog timeout\n");
}

/* Receive processing
 */

/**
 * nfp_net_rx_space() - return the number of free slots on the RX ring
 * @rx_ring:   RX ring structure
 *
 * Make sure we leave at least one slot free.
 *
 * Return: True if there is space on the RX ring
 */
static inline int nfp_net_rx_space(struct nfp_net_rx_ring *rx_ring)
{
	return (rx_ring->cnt - 1) - (rx_ring->wr_p - rx_ring->rd_p);
}

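/* E.g. with cnt = 4096, wr_p = 100 and rd_p = 4, 96 buffers are in
 * flight and nfp_net_rx_space() returns 4095 - 96 = 3999.  One slot is
 * always kept free so a completely full ring is never confused with an
 * empty one.
 */
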
/**
 * nfp_net_rx_alloc_one() - Allocate and map skb for RX
 * @rx_ring:	RX ring structure of the skb
 * @dma_addr:	Pointer to storage for DMA address (output param)
 * @fl_bufsz:	size of freelist buffers
 *
 * This function allocates a new skb and maps it for DMA.
 *
 * Return: allocated skb or NULL on failure.
 */
static struct sk_buff *
nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr,
		     unsigned int fl_bufsz)
{
	struct nfp_net *nn = rx_ring->r_vec->nfp_net;
	struct sk_buff *skb;

	skb = netdev_alloc_skb(nn->netdev, fl_bufsz);
	if (!skb) {
		nn_warn_ratelimit(nn, "Failed to alloc receive SKB\n");
		return NULL;
	}

	*dma_addr = dma_map_single(&nn->pdev->dev, skb->data,
				   fl_bufsz, DMA_FROM_DEVICE);
	if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
		dev_kfree_skb_any(skb);
		nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
		return NULL;
	}

	return skb;
}

/**
 * nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings
 * @rx_ring:	RX ring structure
 * @skb:	Skb to put on rings
 * @dma_addr:	DMA address of skb mapping
 */
static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring,
				struct sk_buff *skb, dma_addr_t dma_addr)
{
	unsigned int wr_idx;

	wr_idx = rx_ring->wr_p % rx_ring->cnt;

	/* Stash SKB and DMA address away */
	rx_ring->rxbufs[wr_idx].skb = skb;
	rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;

	/* Fill freelist descriptor */
	rx_ring->rxds[wr_idx].fld.reserved = 0;
	rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
	nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld, dma_addr);

	rx_ring->wr_p++;
	rx_ring->wr_ptr_add++;
	if (rx_ring->wr_ptr_add >= NFP_NET_FL_BATCH) {
		/* Update write pointer of the freelist queue. Make
		 * sure all writes are flushed before telling the hardware.
		 */
		wmb();
		nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, rx_ring->wr_ptr_add);
		rx_ring->wr_ptr_add = 0;
	}
}

/**
 * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable
 * @rx_ring:	RX ring structure
 *
 * Warning: Do *not* call if ring buffers were never put on the FW freelist
 *	    (i.e. device was not enabled)!
 */
static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
{
	unsigned int wr_idx, last_idx;

	/* Move the empty entry to the end of the list */
	wr_idx = rx_ring->wr_p % rx_ring->cnt;
	last_idx = rx_ring->cnt - 1;
	rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
	rx_ring->rxbufs[wr_idx].skb = rx_ring->rxbufs[last_idx].skb;
	rx_ring->rxbufs[last_idx].dma_addr = 0;
	rx_ring->rxbufs[last_idx].skb = NULL;

	memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt);
	rx_ring->wr_p = 0;
	rx_ring->rd_p = 0;
	rx_ring->wr_ptr_add = 0;
}

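/* Because the freelist always keeps one slot unused, after disable the
 * single empty ("hole") entry sits at wr_p.  The swap above moves that
 * hole to the last slot so entries [0, cnt - 1) are contiguous buffers
 * again, which is the geometry nfp_net_rx_ring_bufs_free() expects.
 */
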
/**
 * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
 * @nn:		NFP Net device
 * @rx_ring:	RX ring to remove buffers from
 *
 * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
 * entries.  After device is disabled nfp_net_rx_ring_reset() must be called
 * to restore required ring geometry.
 */
static void
nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
{
	struct pci_dev *pdev = nn->pdev;
	unsigned int i;

	for (i = 0; i < rx_ring->cnt - 1; i++) {
		/* NULL skb can only happen when initial filling of the ring
		 * fails to allocate enough buffers and calls here to free
		 * already allocated ones.
		 */
		if (!rx_ring->rxbufs[i].skb)
			continue;

		dma_unmap_single(&pdev->dev, rx_ring->rxbufs[i].dma_addr,
				 rx_ring->bufsz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(rx_ring->rxbufs[i].skb);
		rx_ring->rxbufs[i].dma_addr = 0;
		rx_ring->rxbufs[i].skb = NULL;
	}
}

/**
 * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
 * @nn:		NFP Net device
 * @rx_ring:	RX ring to fill with buffers
 */
static int
nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
{
	struct nfp_net_rx_buf *rxbufs;
	unsigned int i;

	rxbufs = rx_ring->rxbufs;

	for (i = 0; i < rx_ring->cnt - 1; i++) {
		rxbufs[i].skb =
			nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr,
					     rx_ring->bufsz);
		if (!rxbufs[i].skb) {
			nfp_net_rx_ring_bufs_free(nn, rx_ring);
			return -ENOMEM;
		}
	}

	return 0;
}

/**
 * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
 * @rx_ring: RX ring to fill
 */
static void nfp_net_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
{
	unsigned int i;

	for (i = 0; i < rx_ring->cnt - 1; i++)
		nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[i].skb,
				    rx_ring->rxbufs[i].dma_addr);
}

/**
 * nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors
 * @flags: RX descriptor flags field in CPU byte order
 */
static int nfp_net_rx_csum_has_errors(u16 flags)
{
	u16 csum_all_checked, csum_all_ok;

	csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
	csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;

	return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
}

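/* Example: if only PCIE_DESC_RX_TCP_CSUM is set in flags (checked but
 * not OK), csum_all_checked has the TCP bit set while the shifted
 * csum_all_ok mask does not, so the comparison above reports an error.
 * When every checked protocol also has its _OK bit set, both sides are
 * equal and the function returns 0.
 */
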
/**
 * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags
 * @nn:		NFP Net device
 * @r_vec:	per-ring structure
 * @rxd:	Pointer to RX descriptor
 * @skb:	Pointer to SKB
 */
static void nfp_net_rx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
			    struct nfp_net_rx_desc *rxd, struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	if (!(nn->netdev->features & NETIF_F_RXCSUM))
		return;

	if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_error++;
		u64_stats_update_end(&r_vec->rx_sync);
		return;
	}

	/* Assume that the firmware will never report inner CSUM_OK unless outer
	 * L4 headers were successfully parsed. FW will always report zero UDP
	 * checksum as CSUM_OK.
	 */
	if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
	    rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
		__skb_incr_checksum_unnecessary(skb);
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_ok++;
		u64_stats_update_end(&r_vec->rx_sync);
	}

	if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
	    rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
		__skb_incr_checksum_unnecessary(skb);
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->hw_csum_rx_inner_ok++;
		u64_stats_update_end(&r_vec->rx_sync);
	}
}

static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb,
			     unsigned int type, __be32 *hash)
{
	if (!(netdev->features & NETIF_F_RXHASH))
		return;

	switch (type) {
	case NFP_NET_RSS_IPV4:
	case NFP_NET_RSS_IPV6:
	case NFP_NET_RSS_IPV6_EX:
		skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L3);
		break;
	default:
		skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L4);
		break;
	}
}

static void
nfp_net_set_hash_desc(struct net_device *netdev, struct sk_buff *skb,
		      struct nfp_net_rx_desc *rxd)
{
	struct nfp_net_rx_hash *rx_hash;

	if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
		return;

	rx_hash = (struct nfp_net_rx_hash *)(skb->data - sizeof(*rx_hash));

	nfp_net_set_hash(netdev, skb, get_unaligned_be32(&rx_hash->hash_type),
			 &rx_hash->hash);
}

static void *
nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
		   int meta_len)
{
	u8 *data = skb->data - meta_len;
	u32 meta_info;

	meta_info = get_unaligned_be32(data);
	data += 4;

	while (meta_info) {
		switch (meta_info & NFP_NET_META_FIELD_MASK) {
		case NFP_NET_META_HASH:
			meta_info >>= NFP_NET_META_FIELD_SIZE;
			nfp_net_set_hash(netdev, skb,
					 meta_info & NFP_NET_META_FIELD_MASK,
					 (__be32 *)data);
			data += 4;
			break;
		case NFP_NET_META_MARK:
			skb->mark = get_unaligned_be32(data);
			data += 4;
			break;
		default:
			return NULL;
		}

		meta_info >>= NFP_NET_META_FIELD_SIZE;
	}

	return data;
}

/**
 * nfp_net_rx() - receive up to @budget packets on @rx_ring
 * @rx_ring:   RX ring to receive from
 * @budget:    NAPI budget
 *
 * Note, this function is separated out from the napi poll function to
 * more cleanly separate packet receive code from other bookkeeping
 * functions performed in the napi poll function.
 *
 * There are differences between the NFP-3200 firmware and the
 * NFP-6000 firmware.  The NFP-3200 firmware uses a dedicated RX queue
 * to indicate that new packets have arrived.  The NFP-6000 does not
 * have this queue and uses the DD bit in the RX descriptor.  This
 * method cannot be used on the NFP-3200 as it causes a race
 * condition: the RX ring write pointer on the NFP-3200 is updated
 * after packets (and descriptors) have been DMAed.  If the DD bit is
 * used and subsequently the read pointer is updated, this may lead
 * the RX queue to underflow (if the firmware has not yet updated the
 * write pointer).  Therefore we use slightly ugly conditional code
 * below to handle the differences.  We may, in the future, update the
 * NFP-3200 firmware to behave the same as the firmware on the
 * NFP-6000.
 *
 * Return: Number of packets received.
 */
static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net *nn = r_vec->nfp_net;
	unsigned int data_len, meta_len;
	int avail = 0, pkts_polled = 0;
	struct sk_buff *skb, *new_skb;
	struct nfp_net_rx_desc *rxd;
	dma_addr_t new_dma_addr;
	u32 qcp_wr_p;
	int idx;

	if (nn->is_nfp3200) {
		/* Work out how many packets arrived */
		qcp_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_rx);
		idx = rx_ring->rd_p % rx_ring->cnt;

		if (qcp_wr_p == idx)
			/* No new packets */
			return 0;

		if (qcp_wr_p > idx)
			avail = qcp_wr_p - idx;
		else
			avail = qcp_wr_p + rx_ring->cnt - idx;
	} else {
		avail = budget + 1;
	}

	while (avail > 0 && pkts_polled < budget) {
		idx = rx_ring->rd_p % rx_ring->cnt;

		rxd = &rx_ring->rxds[idx];
		if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD)) {
			if (nn->is_nfp3200)
				nn_dbg(nn, "RX descriptor not valid (DD)%d:%u rxd[0]=%#x rxd[1]=%#x\n",
				       rx_ring->idx, idx,
				       rxd->vals[0], rxd->vals[1]);
			break;
		}
		/* Memory barrier to ensure that we won't do other reads
		 * before the DD bit.
		 */
		dma_rmb();

		rx_ring->rd_p++;
		pkts_polled++;
		avail--;

		skb = rx_ring->rxbufs[idx].skb;

		new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr,
					       nn->fl_bufsz);
		if (!new_skb) {
			nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[idx].skb,
					    rx_ring->rxbufs[idx].dma_addr);
			u64_stats_update_begin(&r_vec->rx_sync);
			r_vec->rx_drops++;
			u64_stats_update_end(&r_vec->rx_sync);
			continue;
		}

		dma_unmap_single(&nn->pdev->dev,
				 rx_ring->rxbufs[idx].dma_addr,
				 nn->fl_bufsz, DMA_FROM_DEVICE);

		nfp_net_rx_give_one(rx_ring, new_skb, new_dma_addr);

		/*         < meta_len >
		 *  <-- [rx_offset] -->
		 *  ---------------------------------------------------------
		 * | [XX] |  metadata  |             packet           | XXXX |
		 *  ---------------------------------------------------------
		 *         <---------------- data_len --------------->
		 *
		 * The rx_offset is fixed for all packets, the meta_len can vary
		 * on a packet by packet basis. If rx_offset is set to zero
		 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
		 * buffer and is immediately followed by the packet (no [XX]).
		 */
		meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
		data_len = le16_to_cpu(rxd->rxd.data_len);

		if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
			skb_reserve(skb, meta_len);
		else
			skb_reserve(skb, nn->rx_offset);
		skb_put(skb, data_len - meta_len);

		/* Stats update */
		u64_stats_update_begin(&r_vec->rx_sync);
		r_vec->rx_pkts++;
		r_vec->rx_bytes += skb->len;
		u64_stats_update_end(&r_vec->rx_sync);

		if (nn->fw_ver.major <= 3) {
			nfp_net_set_hash_desc(nn->netdev, skb, rxd);
		} else if (meta_len) {
			void *end;

			end = nfp_net_parse_meta(nn->netdev, skb, meta_len);
			if (unlikely(end != skb->data)) {
				u64_stats_update_begin(&r_vec->rx_sync);
				r_vec->rx_drops++;
				u64_stats_update_end(&r_vec->rx_sync);

				dev_kfree_skb_any(skb);
				nn_warn_ratelimit(nn, "invalid RX packet metadata\n");
				continue;
			}
		}

		skb_record_rx_queue(skb, rx_ring->idx);
		skb->protocol = eth_type_trans(skb, nn->netdev);

		nfp_net_rx_csum(nn, r_vec, rxd, skb);

		if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       le16_to_cpu(rxd->rxd.vlan));

		napi_gro_receive(&rx_ring->r_vec->napi, skb);
	}

	if (nn->is_nfp3200)
		nfp_qcp_rd_ptr_add(rx_ring->qcp_rx, pkts_polled);

	return pkts_polled;
}

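/* Worked example for the buffer layout handled in nfp_net_rx(): with a
 * fixed rx_offset of 32 and a descriptor reporting meta_len = 8 and
 * data_len = 1072, the packet itself is data_len - meta_len = 1064 bytes
 * starting 32 bytes into the buffer; skb_reserve(skb, 32) followed by
 * skb_put(skb, 1064) frames exactly that region.
 */
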
/**
 * nfp_net_poll() - napi poll function
 * @napi:    NAPI structure
 * @budget:  NAPI budget
 *
 * Return: number of packets polled.
 */
static int nfp_net_poll(struct napi_struct *napi, int budget)
{
	struct nfp_net_r_vector *r_vec =
		container_of(napi, struct nfp_net_r_vector, napi);
	struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
	struct nfp_net_tx_ring *tx_ring = r_vec->tx_ring;
	struct nfp_net *nn = r_vec->nfp_net;
	struct netdev_queue *txq;
	unsigned int pkts_polled;

	tx_ring = &nn->tx_rings[rx_ring->idx];
	txq = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
	nfp_net_tx_complete(tx_ring);

	pkts_polled = nfp_net_rx(rx_ring, budget);

	if (pkts_polled < budget) {
		napi_complete_done(napi, pkts_polled);
		nfp_net_irq_unmask(nn, r_vec->irq_idx);
	}

	return pkts_polled;
}

/* Setup and Configuration
 */

/**
 * nfp_net_tx_ring_free() - Free resources allocated to a TX ring
 * @tx_ring:   TX ring to free
 */
static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	struct nfp_net *nn = r_vec->nfp_net;
	struct pci_dev *pdev = nn->pdev;

	kfree(tx_ring->txbufs);

	if (tx_ring->txds)
		dma_free_coherent(&pdev->dev, tx_ring->size,
				  tx_ring->txds, tx_ring->dma);

	tx_ring->cnt = 0;
	tx_ring->txbufs = NULL;
	tx_ring->txds = NULL;
	tx_ring->dma = 0;
	tx_ring->size = 0;
}

/**
 * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
 * @tx_ring:   TX Ring structure to allocate
 * @cnt:       Ring buffer count
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt)
{
	struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
	struct nfp_net *nn = r_vec->nfp_net;
	struct pci_dev *pdev = nn->pdev;
	int sz;

	tx_ring->cnt = cnt;

	tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
	tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
					    &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->txds)
		goto err_alloc;

	sz = sizeof(*tx_ring->txbufs) * tx_ring->cnt;
	tx_ring->txbufs = kzalloc(sz, GFP_KERNEL);
	if (!tx_ring->txbufs)
		goto err_alloc;

	netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask, tx_ring->idx);

	nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p\n",
	       tx_ring->idx, tx_ring->qcidx,
	       tx_ring->cnt, (unsigned long long)tx_ring->dma, tx_ring->txds);

	return 0;

err_alloc:
	nfp_net_tx_ring_free(tx_ring);
	return -ENOMEM;
}

static struct nfp_net_tx_ring *
nfp_net_shadow_tx_rings_prepare(struct nfp_net *nn, u32 buf_cnt)
{
	struct nfp_net_tx_ring *rings;
	unsigned int r;

	rings = kcalloc(nn->num_tx_rings, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return NULL;

	for (r = 0; r < nn->num_tx_rings; r++) {
		nfp_net_tx_ring_init(&rings[r], nn->tx_rings[r].r_vec, r);

		if (nfp_net_tx_ring_alloc(&rings[r], buf_cnt))
			goto err_free_prev;
	}

	return rings;

err_free_prev:
	while (r--)
		nfp_net_tx_ring_free(&rings[r]);
	kfree(rings);
	return NULL;
}

static struct nfp_net_tx_ring *
nfp_net_shadow_tx_rings_swap(struct nfp_net *nn, struct nfp_net_tx_ring *rings)
{
	struct nfp_net_tx_ring *old = nn->tx_rings;
	unsigned int r;

	for (r = 0; r < nn->num_tx_rings; r++)
		old[r].r_vec->tx_ring = &rings[r];

	nn->tx_rings = rings;
	return old;
}

static void
nfp_net_shadow_tx_rings_free(struct nfp_net *nn, struct nfp_net_tx_ring *rings)
{
	unsigned int r;

	if (!rings)
		return;

	for (r = 0; r < nn->num_tx_rings; r++)
		nfp_net_tx_ring_free(&rings[r]);

	kfree(rings);
}

/**
 * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
 * @rx_ring:  RX ring to free
 */
static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net *nn = r_vec->nfp_net;
	struct pci_dev *pdev = nn->pdev;

	kfree(rx_ring->rxbufs);

	if (rx_ring->rxds)
		dma_free_coherent(&pdev->dev, rx_ring->size,
				  rx_ring->rxds, rx_ring->dma);

	rx_ring->cnt = 0;
	rx_ring->rxbufs = NULL;
	rx_ring->rxds = NULL;
	rx_ring->dma = 0;
	rx_ring->size = 0;
}

/**
 * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
 * @rx_ring:  RX ring to allocate
 * @fl_bufsz: Size of buffers to allocate
 * @cnt:      Ring buffer count
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int
nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz,
		      u32 cnt)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct nfp_net *nn = r_vec->nfp_net;
	struct pci_dev *pdev = nn->pdev;
	int sz;

	rx_ring->cnt = cnt;
	rx_ring->bufsz = fl_bufsz;

	rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
	rx_ring->rxds = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
					    &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->rxds)
		goto err_alloc;

	sz = sizeof(*rx_ring->rxbufs) * rx_ring->cnt;
	rx_ring->rxbufs = kzalloc(sz, GFP_KERNEL);
	if (!rx_ring->rxbufs)
		goto err_alloc;

	nn_dbg(nn, "RxQ%02d: FlQCidx=%02d RxQCidx=%02d cnt=%d dma=%#llx host=%p\n",
	       rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx,
	       rx_ring->cnt, (unsigned long long)rx_ring->dma, rx_ring->rxds);

	return 0;

err_alloc:
	nfp_net_rx_ring_free(rx_ring);
	return -ENOMEM;
}

static struct nfp_net_rx_ring *
nfp_net_shadow_rx_rings_prepare(struct nfp_net *nn, unsigned int fl_bufsz,
				u32 buf_cnt)
{
	struct nfp_net_rx_ring *rings;
	unsigned int r;

	rings = kcalloc(nn->num_rx_rings, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return NULL;

	for (r = 0; r < nn->num_rx_rings; r++) {
		nfp_net_rx_ring_init(&rings[r], nn->rx_rings[r].r_vec, r);

		if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, buf_cnt))
			goto err_free_prev;

		if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r]))
			goto err_free_ring;
	}

	return rings;

err_free_prev:
	while (r--) {
		nfp_net_rx_ring_bufs_free(nn, &rings[r]);
err_free_ring:
		nfp_net_rx_ring_free(&rings[r]);
	}
	kfree(rings);
	return NULL;
}

static struct nfp_net_rx_ring *
nfp_net_shadow_rx_rings_swap(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
{
	struct nfp_net_rx_ring *old = nn->rx_rings;
	unsigned int r;

	for (r = 0; r < nn->num_rx_rings; r++)
		old[r].r_vec->rx_ring = &rings[r];

	nn->rx_rings = rings;
	return old;
}

static void
nfp_net_shadow_rx_rings_free(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
{
	unsigned int r;

	if (!rings)
		return;

	for (r = 0; r < nn->num_r_vecs; r++) {
		nfp_net_rx_ring_bufs_free(nn, &rings[r]);
		nfp_net_rx_ring_free(&rings[r]);
	}

	kfree(rings);
}

static int
nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
		       int idx)
{
	struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
	int err;

	r_vec->tx_ring = &nn->tx_rings[idx];
	nfp_net_tx_ring_init(r_vec->tx_ring, r_vec, idx);

	r_vec->rx_ring = &nn->rx_rings[idx];
	nfp_net_rx_ring_init(r_vec->rx_ring, r_vec, idx);

	snprintf(r_vec->name, sizeof(r_vec->name),
		 "%s-rxtx-%d", nn->netdev->name, idx);
	err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec);
	if (err) {
		nn_err(nn, "Error requesting IRQ %d\n", entry->vector);
		return err;
	}
	disable_irq(entry->vector);

	/* Setup NAPI */
	netif_napi_add(nn->netdev, &r_vec->napi,
		       nfp_net_poll, NAPI_POLL_WEIGHT);

	irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask);

	nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, entry->vector, entry->entry);

	return 0;
}

static void
nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
{
	struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];

	irq_set_affinity_hint(entry->vector, NULL);
	netif_napi_del(&r_vec->napi);
	free_irq(entry->vector, r_vec);
}

/**
 * nfp_net_rss_write_itbl() - Write RSS indirection table to device
 * @nn:      NFP Net device to reconfigure
 */
void nfp_net_rss_write_itbl(struct nfp_net *nn)
{
	int i;

	for (i = 0; i < NFP_NET_CFG_RSS_ITBL_SZ; i += 4)
		nn_writel(nn, NFP_NET_CFG_RSS_ITBL + i,
			  get_unaligned_le32(nn->rss_itbl + i));
}

/**
 * nfp_net_rss_write_key() - Write RSS hash key to device
 * @nn:      NFP Net device to reconfigure
 */
void nfp_net_rss_write_key(struct nfp_net *nn)
{
	int i;

	for (i = 0; i < NFP_NET_CFG_RSS_KEY_SZ; i += 4)
		nn_writel(nn, NFP_NET_CFG_RSS_KEY + i,
			  get_unaligned_le32(nn->rss_key + i));
}

/**
 * nfp_net_coalesce_write_cfg() - Write irq coalescence configuration to HW
 * @nn:      NFP Net device to reconfigure
 */
void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
{
	u32 factor;
	u32 value;
	u8 i;

	/* Compute factor used to convert coalesce '_usecs' parameters to
	 * ME timestamp ticks.  There are 16 ME clock cycles for each timestamp
	 * count.
	 */
	factor = nn->me_freq_mhz / 16;

	/* copy RX interrupt coalesce parameters */
	value = (nn->rx_coalesce_max_frames << 16) |
		(factor * nn->rx_coalesce_usecs);
	for (i = 0; i < nn->num_r_vecs; i++)
		nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);

	/* copy TX interrupt coalesce parameters */
	value = (nn->tx_coalesce_max_frames << 16) |
		(factor * nn->tx_coalesce_usecs);
	for (i = 0; i < nn->num_r_vecs; i++)
		nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
}

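/* Worked example: with me_freq_mhz = 1200 the factor is 1200 / 16 = 75
 * ticks per microsecond, so rx_coalesce_usecs = 50 and
 * rx_coalesce_max_frames = 64 yield value = (64 << 16) | (75 * 50),
 * i.e. 64 frames in the high half-word and 3750 ticks in the low one.
 */
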
/**
 * nfp_net_write_mac_addr() - Write mac address to the device control BAR
 * @nn:      NFP Net device to reconfigure
 *
 * Writes the MAC address from the netdev to the device control BAR.  Does not
 * perform the required reconfig.  We do a bit of byte swapping dance because
 * firmware is LE.
 */
static void nfp_net_write_mac_addr(struct nfp_net *nn)
{
	nn_writel(nn, NFP_NET_CFG_MACADDR + 0,
		  get_unaligned_be32(nn->netdev->dev_addr));
	/* We can't do writew for NFP-3200 compatibility */
	nn_writel(nn, NFP_NET_CFG_MACADDR + 4,
		  get_unaligned_be16(nn->netdev->dev_addr + 4) << 16);
}

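/* Example of the swapping dance: for MAC 00:15:4d:12:34:56 the first
 * write stores the big-endian word 0x00154d12 at NFP_NET_CFG_MACADDR
 * and the second stores 0x34560000, i.e. the last two bytes shifted
 * into the top half of the following word.
 */
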
static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
{
	nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);

	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
}

/**
 * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
 * @nn:      NFP Net device to reconfigure
 */
static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
{
	u32 new_ctrl, update;
	unsigned int r;
	int err;

	new_ctrl = nn->ctrl;
	new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE;
	update = NFP_NET_CFG_UPDATE_GEN;
	update |= NFP_NET_CFG_UPDATE_MSIX;
	update |= NFP_NET_CFG_UPDATE_RING;

	if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;

	nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);

	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	err = nfp_net_reconfig(nn, update);
	if (err)
		nn_err(nn, "Could not disable device: %d\n", err);

	for (r = 0; r < nn->num_r_vecs; r++) {
		nfp_net_rx_ring_reset(nn->r_vecs[r].rx_ring);
		nfp_net_tx_ring_reset(nn, nn->r_vecs[r].tx_ring);
		nfp_net_vec_clear_ring_data(nn, r);
	}

	nn->ctrl = new_ctrl;
}

static void
nfp_net_vec_write_ring_data(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
			    unsigned int idx)
{
	/* Write the DMA address, size and MSI-X info to the device */
	nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), r_vec->rx_ring->dma);
	nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(r_vec->rx_ring->cnt));
	nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), r_vec->irq_idx);

	nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), r_vec->tx_ring->dma);
	nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(r_vec->tx_ring->cnt));
	nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), r_vec->irq_idx);
}

static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
{
	u32 new_ctrl, update = 0;
	unsigned int r;
	int err;

	new_ctrl = nn->ctrl;

	if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
		nfp_net_rss_write_key(nn);
		nfp_net_rss_write_itbl(nn);
		nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg);
		update |= NFP_NET_CFG_UPDATE_RSS;
	}

	if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
		nfp_net_coalesce_write_cfg(nn);

		new_ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
		update |= NFP_NET_CFG_UPDATE_IRQMOD;
	}

	for (r = 0; r < nn->num_r_vecs; r++)
		nfp_net_vec_write_ring_data(nn, &nn->r_vecs[r], r);

	nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ?
		  0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1);

	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ?
		  0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1);

	nfp_net_write_mac_addr(nn);

	nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu);
	nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz);

	/* Enable device */
	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
	update |= NFP_NET_CFG_UPDATE_GEN;
	update |= NFP_NET_CFG_UPDATE_MSIX;
	update |= NFP_NET_CFG_UPDATE_RING;
	if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	err = nfp_net_reconfig(nn, update);

	nn->ctrl = new_ctrl;

	for (r = 0; r < nn->num_r_vecs; r++)
		nfp_net_rx_ring_fill_freelist(nn->r_vecs[r].rx_ring);

	/* Since reconfiguration requests while NFP is down are ignored we
	 * have to wipe the entire VXLAN configuration and reinitialize it.
	 */
	if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) {
		memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
		memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
		udp_tunnel_get_rx_info(nn->netdev);
	}

	return err;
}

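/* The ring-enable writes above build a bitmask with one bit per active
 * ring: e.g. num_tx_rings = 8 gives ((u64)1 << 8) - 1 = 0xff, enabling
 * rings 0-7.  The 64-ring case is special-cased because shifting a u64
 * by 64 is undefined behaviour in C.
 */
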
/**
 * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
 * @nn:      NFP Net device to reconfigure
 */
static int nfp_net_set_config_and_enable(struct nfp_net *nn)
{
	int err;

	err = __nfp_net_set_config_and_enable(nn);
	if (err)
		nfp_net_clear_config_and_disable(nn);

	return err;
}

/**
 * nfp_net_open_stack() - Start the device from stack's perspective
 * @nn:      NFP Net device to reconfigure
 */
static void nfp_net_open_stack(struct nfp_net *nn)
{
	unsigned int r;

	for (r = 0; r < nn->num_r_vecs; r++) {
		napi_enable(&nn->r_vecs[r].napi);
		enable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
	}

	netif_tx_wake_all_queues(nn->netdev);

	enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
	nfp_net_read_link_status(nn);
}

static int nfp_net_netdev_open(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int err, r;

	if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) {
		nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl);
		return -EBUSY;
	}

	/* Step 1: Allocate resources for rings and the like
	 * - Request interrupts
	 * - Allocate RX and TX ring resources
	 * - Setup initial RSS table
	 */
	err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
				      nn->exn_name, sizeof(nn->exn_name),
				      NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
	if (err)
		return err;
	err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
				      nn->lsc_name, sizeof(nn->lsc_name),
				      NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
	if (err)
		goto err_free_exn;
	disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);

	nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
			       GFP_KERNEL);
	if (!nn->rx_rings) {
		err = -ENOMEM;
		goto err_free_lsc;
	}
	nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings),
			       GFP_KERNEL);
	if (!nn->tx_rings) {
		err = -ENOMEM;
		goto err_free_rx_rings;
	}

	for (r = 0; r < nn->num_r_vecs; r++) {
		err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
		if (err)
			goto err_free_prev_vecs;

		err = nfp_net_tx_ring_alloc(nn->r_vecs[r].tx_ring, nn->txd_cnt);
		if (err)
			goto err_cleanup_vec_p;

		err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring,
					    nn->fl_bufsz, nn->rxd_cnt);
		if (err)
			goto err_free_tx_ring_p;

		err = nfp_net_rx_ring_bufs_alloc(nn, nn->r_vecs[r].rx_ring);
		if (err)
			goto err_flush_rx_ring_p;
	}

	err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings);
	if (err)
		goto err_free_rings;

	err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings);
	if (err)
		goto err_free_rings;

	/* Step 2: Configure the NFP
	 * - Enable rings from 0 to tx_rings/rx_rings - 1.
	 * - Write MAC address (in case it changed)
	 * - Set the MTU
	 * - Set the Freelist buffer size
	 * - Enable the FW
	 */
	err = nfp_net_set_config_and_enable(nn);
	if (err)
		goto err_free_rings;

	/* Step 3: Enable for kernel
	 * - put some freelist descriptors on each RX ring
	 * - enable NAPI on each ring
	 * - enable all TX queues
	 * - set link state
	 */
	nfp_net_open_stack(nn);

	return 0;

err_free_rings:
	r = nn->num_r_vecs;
err_free_prev_vecs:
	while (r--) {
		nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
err_flush_rx_ring_p:
		nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
err_free_tx_ring_p:
		nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
err_cleanup_vec_p:
		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
	}
	kfree(nn->tx_rings);
err_free_rx_rings:
	kfree(nn->rx_rings);
err_free_lsc:
	nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
err_free_exn:
	nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
	return err;
}

/**
 * nfp_net_close_stack() - Quiesce the stack (part of close)
 * @nn:      NFP Net device to reconfigure
 */
static void nfp_net_close_stack(struct nfp_net *nn)
{
	unsigned int r;

	disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
	netif_carrier_off(nn->netdev);
	nn->link_up = false;

	for (r = 0; r < nn->num_r_vecs; r++) {
		disable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
		napi_disable(&nn->r_vecs[r].napi);
	}

	netif_tx_disable(nn->netdev);
}

/**
 * nfp_net_close_free_all() - Free all runtime resources
 * @nn:      NFP Net device to reconfigure
 */
static void nfp_net_close_free_all(struct nfp_net *nn)
{
	unsigned int r;

	for (r = 0; r < nn->num_r_vecs; r++) {
		nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
		nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
		nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
		nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
	}

	kfree(nn->rx_rings);
	kfree(nn->tx_rings);

	nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
	nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
}
/**
 * nfp_net_netdev_close() - Called when the device is downed
 * @netdev:      netdev structure
 */
static int nfp_net_netdev_close(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (!(nn->ctrl & NFP_NET_CFG_CTRL_ENABLE)) {
		nn_err(nn, "Dev is not up: 0x%08x\n", nn->ctrl);
		return 0;
	}

	/* Step 1: Disable RX and TX rings from the Linux kernel perspective
	 */
	nfp_net_close_stack(nn);

	/* Step 2: Tell NFP
	 */
	nfp_net_clear_config_and_disable(nn);

	/* Step 3: Free resources
	 */
	nfp_net_close_free_all(nn);

	nn_dbg(nn, "%s down", netdev->name);
	return 0;
}
static void nfp_net_set_rx_mode(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	u32 new_ctrl;

	new_ctrl = nn->ctrl;

	if (netdev->flags & IFF_PROMISC) {
		if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
			new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
		else
			nn_warn(nn, "FW does not support promiscuous mode\n");
	} else {
		new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
	}

	if (new_ctrl == nn->ctrl)
		return;

	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);

	nn->ctrl = new_ctrl;
}
static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
{
	unsigned int old_mtu, old_fl_bufsz, new_fl_bufsz;
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_net_rx_ring *tmp_rings;
	int err;

	if (new_mtu < 68 || new_mtu > nn->max_mtu) {
		nn_err(nn, "New MTU (%d) is not valid\n", new_mtu);
		return -EINVAL;
	}

	old_mtu = netdev->mtu;
	old_fl_bufsz = nn->fl_bufsz;
	new_fl_bufsz = NFP_NET_MAX_PREPEND + ETH_HLEN + VLAN_HLEN * 2 + new_mtu;

	if (!netif_running(netdev)) {
		netdev->mtu = new_mtu;
		nn->fl_bufsz = new_fl_bufsz;
		return 0;
	}

	/* Prepare new rings */
	tmp_rings = nfp_net_shadow_rx_rings_prepare(nn, new_fl_bufsz,
						    nn->rxd_cnt);
	if (!tmp_rings)
		return -ENOMEM;

	/* Stop device, swap in new rings, try to start the firmware */
	nfp_net_close_stack(nn);
	nfp_net_clear_config_and_disable(nn);

	tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);

	netdev->mtu = new_mtu;
	nn->fl_bufsz = new_fl_bufsz;

	err = nfp_net_set_config_and_enable(nn);
	if (err) {
		const int err_new = err;

		/* Try with old configuration and old rings */
		tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);

		netdev->mtu = old_mtu;
		nn->fl_bufsz = old_fl_bufsz;

		err = __nfp_net_set_config_and_enable(nn);
		if (err)
			nn_err(nn, "Can't restore MTU - FW communication failed (%d,%d)\n",
			       err, err_new);
	}

	nfp_net_shadow_rx_rings_free(nn, tmp_rings);

	nfp_net_open_stack(nn);

	return err;
}
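/* Worked example for the freelist buffer sizing above (illustrative
 * numbers, assuming NFP_NET_MAX_PREPEND is 64): for new_mtu = 1500 the
 * buffer size becomes 64 + 14 (ETH_HLEN) + 8 (2 * VLAN_HLEN) + 1500 =
 * 1586 bytes, i.e. room for a maximally prepended, double-tagged frame
 * at the new MTU.
 */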
int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
{
	struct nfp_net_tx_ring *tx_rings = NULL;
	struct nfp_net_rx_ring *rx_rings = NULL;
	u32 old_rxd_cnt, old_txd_cnt;
	int err;

	if (!netif_running(nn->netdev)) {
		nn->rxd_cnt = rxd_cnt;
		nn->txd_cnt = txd_cnt;
		return 0;
	}

	old_rxd_cnt = nn->rxd_cnt;
	old_txd_cnt = nn->txd_cnt;

	/* Prepare new rings */
	if (nn->rxd_cnt != rxd_cnt) {
		rx_rings = nfp_net_shadow_rx_rings_prepare(nn, nn->fl_bufsz,
							   rxd_cnt);
		if (!rx_rings)
			return -ENOMEM;
	}
	if (nn->txd_cnt != txd_cnt) {
		tx_rings = nfp_net_shadow_tx_rings_prepare(nn, txd_cnt);
		if (!tx_rings) {
			nfp_net_shadow_rx_rings_free(nn, rx_rings);
			return -ENOMEM;
		}
	}

	/* Stop device, swap in new rings, try to start the firmware */
	nfp_net_close_stack(nn);
	nfp_net_clear_config_and_disable(nn);

	if (rx_rings)
		rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings);
	if (tx_rings)
		tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings);

	nn->rxd_cnt = rxd_cnt;
	nn->txd_cnt = txd_cnt;

	err = nfp_net_set_config_and_enable(nn);
	if (err) {
		const int err_new = err;

		/* Try with old configuration and old rings */
		if (rx_rings)
			rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings);
		if (tx_rings)
			tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings);

		nn->rxd_cnt = old_rxd_cnt;
		nn->txd_cnt = old_txd_cnt;

		err = __nfp_net_set_config_and_enable(nn);
		if (err)
			nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
			       err, err_new);
	}

	nfp_net_shadow_rx_rings_free(nn, rx_rings);
	nfp_net_shadow_tx_rings_free(nn, tx_rings);

	nfp_net_open_stack(nn);

	return err;
}
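/* Note that both shadow swap helpers return the rings they displaced,
 * which is what makes the rollback path symmetric: swapping a second
 * time reinstates the original rings, and whatever is left in the
 * local rx_rings/tx_rings pointers afterwards is freed as the shadow
 * copy.
 */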
static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int r;

	for (r = 0; r < nn->num_r_vecs; r++) {
		struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
		u64 data[3];
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&r_vec->rx_sync);
			data[0] = r_vec->rx_pkts;
			data[1] = r_vec->rx_bytes;
			data[2] = r_vec->rx_drops;
		} while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
		stats->rx_packets += data[0];
		stats->rx_bytes += data[1];
		stats->rx_dropped += data[2];

		do {
			start = u64_stats_fetch_begin(&r_vec->tx_sync);
			data[0] = r_vec->tx_pkts;
			data[1] = r_vec->tx_bytes;
			data[2] = r_vec->tx_errors;
		} while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
		stats->tx_packets += data[0];
		stats->tx_bytes += data[1];
		stats->tx_errors += data[2];
	}

	return stats;
}
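/* The fetch_begin/fetch_retry pairs above follow the usual seqcount
 * read protocol: snapshot the counters, then retry if the writer
 * advanced the sequence mid-read.  This keeps the 64-bit counters
 * consistent on 32-bit hosts without taking a lock on the hot path.
 */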
static bool nfp_net_ebpf_capable(struct nfp_net *nn)
{
	if (nn->cap & NFP_NET_CFG_CTRL_BPF &&
	    nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI)
		return true;
	return false;
}
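/* Both conditions above must hold before TC offload is attempted: the
 * capability bit says the firmware has a BPF engine, and the ABI byte
 * says it speaks the same BPF ABI revision as this driver.
 */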
static int
nfp_net_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
		 struct tc_to_netdev *tc)
{
	struct nfp_net *nn = netdev_priv(netdev);

	if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
		return -ENOTSUPP;
	if (proto != htons(ETH_P_ALL))
		return -ENOTSUPP;

	if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn))
		return nfp_net_bpf_offload(nn, handle, proto, tc->cls_bpf);

	return -EINVAL;
}
static int nfp_net_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct nfp_net *nn = netdev_priv(netdev);
	u32 new_ctrl;
	int err;

	/* Assume this is not called with features we have not advertised */

	new_ctrl = nn->ctrl;

	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM)
			new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM;
	}

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
		if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
			new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_TXCSUM;
	}

	if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
			new_ctrl |= NFP_NET_CFG_CTRL_LSO;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_LSO;
	}

	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN;
	}

	if (changed & NETIF_F_HW_VLAN_CTAG_TX) {
		if (features & NETIF_F_HW_VLAN_CTAG_TX)
			new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN;
	}

	if (changed & NETIF_F_SG) {
		if (features & NETIF_F_SG)
			new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
		else
			new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
	}

	if (changed & NETIF_F_HW_TC && nn->ctrl & NFP_NET_CFG_CTRL_BPF) {
		nn_err(nn, "Cannot disable HW TC offload while in use\n");
		return -EBUSY;
	}

	nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
	       netdev->features, features, changed);

	if (new_ctrl == nn->ctrl)
		return 0;

	nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->ctrl, new_ctrl);
	nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		return err;

	nn->ctrl = new_ctrl;

	return 0;
}
static netdev_features_t
nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
		       netdev_features_t features)
{
	u8 l4_hdr;

	/* We can't do TSO over double tagged packets (802.1AD) */
	features &= vlan_features_check(skb, features);

	if (!skb->encapsulation)
		return features;

	/* Ensure that inner L4 header offset fits into TX descriptor field */
	if (skb_is_gso(skb)) {
		u32 hdrlen;

		hdrlen = skb_inner_transport_header(skb) - skb->data +
			inner_tcp_hdrlen(skb);

		if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ))
			features &= ~NETIF_F_GSO_MASK;
	}

	/* VXLAN/GRE check */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
	}

	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    (l4_hdr != IPPROTO_UDP && l4_hdr != IPPROTO_GRE) ||
	    (l4_hdr == IPPROTO_UDP &&
	     (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	      sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}
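/* For the UDP case above, the length check demands that the inner MAC
 * header start exactly sizeof(struct udphdr) + sizeof(struct vxlanhdr)
 * = 8 + 8 = 16 bytes past the outer transport header, i.e. a plain
 * VXLAN encapsulation with no intervening options or shims.
 */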
/**
 * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
 * @nn:   NFP Net device to reconfigure
 * @idx:  Index into the port table where new port should be written
 * @port: UDP port to configure (pass zero to remove VXLAN port)
 */
static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
{
	int i;

	nn->vxlan_ports[idx] = port;

	if (!(nn->ctrl & NFP_NET_CFG_CTRL_VXLAN))
		return;

	BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2)
		nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port),
			  be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 |
			  be16_to_cpu(nn->vxlan_ports[i]));

	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN);
}
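/* Packing example for the loop above (illustrative port values only):
 * each 32-bit word holds two ports, the even index in the low half and
 * the odd index in the high half.  With vxlan_ports[0] = 4789 and
 * vxlan_ports[1] = 8472, the first word written is (8472 << 16) | 4789.
 * The BUILD_BUG_ON() enforces the even table size this packing relies
 * on.
 */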
/**
 * nfp_net_find_vxlan_idx() - find table entry of the port or a free one
 * @nn:   NFP Network structure
 * @port: UDP port to look for
 *
 * Return: if the port is already in the table -- its position;
 *	   if the port is not in the table -- free position to use;
 *	   if the table is full -- -ENOSPC.
 */
static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port)
{
	int i, free_idx = -ENOSPC;

	for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
		if (nn->vxlan_ports[i] == port)
			return i;
		if (!nn->vxlan_usecnt[i])
			free_idx = i;
	}

	return free_idx;
}
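/* A found port and a free slot are both reported as a non-negative
 * index; only a full table yields -ENOSPC.  The callers below pair the
 * index with vxlan_usecnt[] to reference count each slot, writing the
 * port to hardware only on the 0 -> 1 transition and clearing it only
 * on 1 -> 0.
 */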
static void nfp_net_add_vxlan_port(struct net_device *netdev,
				   struct udp_tunnel_info *ti)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int idx;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	idx = nfp_net_find_vxlan_idx(nn, ti->port);
	if (idx == -ENOSPC)
		return;

	if (!nn->vxlan_usecnt[idx]++)
		nfp_net_set_vxlan_port(nn, idx, ti->port);
}
static void nfp_net_del_vxlan_port(struct net_device *netdev,
				   struct udp_tunnel_info *ti)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int idx;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	idx = nfp_net_find_vxlan_idx(nn, ti->port);
	if (idx == -ENOSPC || !nn->vxlan_usecnt[idx])
		return;

	if (!--nn->vxlan_usecnt[idx])
		nfp_net_set_vxlan_port(nn, idx, 0);
}
static const struct net_device_ops nfp_net_netdev_ops = {
	.ndo_open		= nfp_net_netdev_open,
	.ndo_stop		= nfp_net_netdev_close,
	.ndo_start_xmit		= nfp_net_tx,
	.ndo_get_stats64	= nfp_net_stat64,
	.ndo_setup_tc		= nfp_net_setup_tc,
	.ndo_tx_timeout		= nfp_net_tx_timeout,
	.ndo_set_rx_mode	= nfp_net_set_rx_mode,
	.ndo_change_mtu		= nfp_net_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_set_features	= nfp_net_set_features,
	.ndo_features_check	= nfp_net_features_check,
	.ndo_udp_tunnel_add	= nfp_net_add_vxlan_port,
	.ndo_udp_tunnel_del	= nfp_net_del_vxlan_port,
};
/**
 * nfp_net_info() - Print general info about the NIC
 * @nn:      NFP Net device
 */
void nfp_net_info(struct nfp_net *nn)
{
	nn_info(nn, "Netronome %s %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
		nn->is_nfp3200 ? "NFP-32xx" : "NFP-6xxx",
		nn->is_vf ? "VF " : "",
		nn->num_tx_rings, nn->max_tx_rings,
		nn->num_rx_rings, nn->max_rx_rings);
	nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
		nn->fw_ver.resv, nn->fw_ver.class,
		nn->fw_ver.major, nn->fw_ver.minor,
		nn->max_mtu);
	nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
		nn->cap,
		nn->cap & NFP_NET_CFG_CTRL_PROMISC  ? "PROMISC "  : "",
		nn->cap & NFP_NET_CFG_CTRL_L2BC     ? "L2BCFILT " : "",
		nn->cap & NFP_NET_CFG_CTRL_L2MC     ? "L2MCFILT " : "",
		nn->cap & NFP_NET_CFG_CTRL_RXCSUM   ? "RXCSUM "   : "",
		nn->cap & NFP_NET_CFG_CTRL_TXCSUM   ? "TXCSUM "   : "",
		nn->cap & NFP_NET_CFG_CTRL_RXVLAN   ? "RXVLAN "   : "",
		nn->cap & NFP_NET_CFG_CTRL_TXVLAN   ? "TXVLAN "   : "",
		nn->cap & NFP_NET_CFG_CTRL_SCATTER  ? "SCATTER "  : "",
		nn->cap & NFP_NET_CFG_CTRL_GATHER   ? "GATHER "   : "",
		nn->cap & NFP_NET_CFG_CTRL_LSO      ? "TSO "      : "",
		nn->cap & NFP_NET_CFG_CTRL_RSS      ? "RSS "      : "",
		nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "",
		nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
		nn->cap & NFP_NET_CFG_CTRL_IRQMOD   ? "IRQMOD "   : "",
		nn->cap & NFP_NET_CFG_CTRL_VXLAN    ? "VXLAN "    : "",
		nn->cap & NFP_NET_CFG_CTRL_NVGRE    ? "NVGRE "    : "",
		nfp_net_ebpf_capable(nn)            ? "BPF "      : "");
}
/**
 * nfp_net_netdev_alloc() - Allocate netdev and related structure
 * @pdev:         PCI device
 * @max_tx_rings: Maximum number of TX rings supported by device
 * @max_rx_rings: Maximum number of RX rings supported by device
 *
 * This function allocates a netdev device and fills in the initial
 * part of the @struct nfp_net structure.
 *
 * Return: NFP Net device structure, or ERR_PTR on error.
 */
struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
				     int max_tx_rings, int max_rx_rings)
{
	struct net_device *netdev;
	struct nfp_net *nn;
	int nqs;

	netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
				    max_tx_rings, max_rx_rings);
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	SET_NETDEV_DEV(netdev, &pdev->dev);
	nn = netdev_priv(netdev);

	nn->netdev = netdev;
	nn->pdev = pdev;

	nn->max_tx_rings = max_tx_rings;
	nn->max_rx_rings = max_rx_rings;

	nqs = netif_get_num_default_rss_queues();
	nn->num_tx_rings = min_t(int, nqs, max_tx_rings);
	nn->num_rx_rings = min_t(int, nqs, max_rx_rings);

	nn->txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
	nn->rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;

	spin_lock_init(&nn->reconfig_lock);
	spin_lock_init(&nn->rx_filter_lock);
	spin_lock_init(&nn->link_status_lock);

	setup_timer(&nn->reconfig_timer,
		    nfp_net_reconfig_timer, (unsigned long)nn);
	setup_timer(&nn->rx_filter_stats_timer,
		    nfp_net_filter_stats_timer, (unsigned long)nn);

	return nn;
}
/**
 * nfp_net_netdev_free() - Undo what @nfp_net_netdev_alloc() did
 * @nn:      NFP Net device to free
 */
void nfp_net_netdev_free(struct nfp_net *nn)
{
	free_netdev(nn->netdev);
}
/**
 * nfp_net_rss_init() - Set the initial RSS parameters
 * @nn:      NFP Net device to configure
 */
static void nfp_net_rss_init(struct nfp_net *nn)
{
	int i;

	netdev_rss_key_fill(nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ);

	for (i = 0; i < sizeof(nn->rss_itbl); i++)
		nn->rss_itbl[i] =
			ethtool_rxfh_indir_default(i, nn->num_rx_rings);

	/* Enable IPv4/IPv6 TCP by default */
	nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
		      NFP_NET_CFG_RSS_IPV6_TCP |
		      NFP_NET_CFG_RSS_TOEPLITZ |
		      NFP_NET_CFG_RSS_MASK;
}
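/* ethtool_rxfh_indir_default(i, n) evaluates to i % n, so the default
 * indirection table simply round-robins hash buckets across all active
 * RX rings.
 */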
/**
 * nfp_net_irqmod_init() - Set the initial IRQ moderation parameters
 * @nn:      NFP Net device to configure
 */
static void nfp_net_irqmod_init(struct nfp_net *nn)
{
	nn->rx_coalesce_usecs      = 50;
	nn->rx_coalesce_max_frames = 64;
	nn->tx_coalesce_usecs      = 50;
	nn->tx_coalesce_max_frames = 64;
}
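/* These defaults coalesce interrupts until either 50 usecs have passed
 * or 64 frames have accumulated, whichever comes first, trading a
 * small amount of latency for a bounded interrupt rate under load.
 */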
/**
 * nfp_net_netdev_init() - Initialise/finalise the netdev structure
 * @netdev:      netdev structure
 *
 * Return: 0 on success or negative errno on error.
 */
int nfp_net_netdev_init(struct net_device *netdev)
{
	struct nfp_net *nn = netdev_priv(netdev);
	int err;

	/* Get some of the read-only fields from the BAR */
	nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
	nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);

	nfp_net_write_mac_addr(nn);

	/* Set default MTU and Freelist buffer size */
	if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
		netdev->mtu = nn->max_mtu;
	else
		netdev->mtu = NFP_NET_DEFAULT_MTU;
	nn->fl_bufsz = NFP_NET_DEFAULT_RX_BUFSZ;

	/* Advertise/enable offloads based on capabilities
	 *
	 * Note: netdev->features show the currently enabled features
	 * and netdev->hw_features advertises which features are
	 * supported.  By default we enable most features.
	 */
	netdev->hw_features = NETIF_F_HIGHDMA;
	if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM) {
		netdev->hw_features |= NETIF_F_RXCSUM;
		nn->ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
		netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		nn->ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_GATHER) {
		netdev->hw_features |= NETIF_F_SG;
		nn->ctrl |= NFP_NET_CFG_CTRL_GATHER;
	}
	if ((nn->cap & NFP_NET_CFG_CTRL_LSO) && nn->fw_ver.major > 2) {
		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		nn->ctrl |= NFP_NET_CFG_CTRL_LSO;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
		netdev->hw_features |= NETIF_F_RXHASH;
		nfp_net_rss_init(nn);
		nn->ctrl |= NFP_NET_CFG_CTRL_RSS;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_VXLAN &&
	    nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
		if (nn->cap & NFP_NET_CFG_CTRL_LSO)
			netdev->hw_features |= NETIF_F_GSO_GRE |
					       NETIF_F_GSO_UDP_TUNNEL;
		nn->ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE;

		netdev->hw_enc_features = netdev->hw_features;
	}

	netdev->vlan_features = netdev->hw_features;

	if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) {
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
		nn->ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
	}
	if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
		nn->ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
	}

	netdev->features = netdev->hw_features;

	if (nfp_net_ebpf_capable(nn))
		netdev->hw_features |= NETIF_F_HW_TC;

	/* Advertise but disable TSO by default. */
	netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);

	/* Allow L2 Broadcast and Multicast through by default, if supported */
	if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
		nn->ctrl |= NFP_NET_CFG_CTRL_L2BC;
	if (nn->cap & NFP_NET_CFG_CTRL_L2MC)
		nn->ctrl |= NFP_NET_CFG_CTRL_L2MC;

	/* Allow IRQ moderation, if supported */
	if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
		nfp_net_irqmod_init(nn);
		nn->ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
	}

	/* On NFP-3200 enable MSI-X auto-masking, if supported and the
	 * interrupts are not shared.
	 */
	if (nn->is_nfp3200 && nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO)
		nn->ctrl |= NFP_NET_CFG_CTRL_MSIXAUTO;

	/* On NFP4000/NFP6000, determine RX packet/metadata boundary offset */
	if (nn->fw_ver.major >= 2)
		nn->rx_offset = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
	else
		nn->rx_offset = NFP_NET_RX_OFFSET;

	/* Stash the re-configuration queue away.  First odd queue in TX Bar */
	nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;

	/* Make sure the FW knows the netdev is supposed to be disabled here */
	nn_writel(nn, NFP_NET_CFG_CTRL, 0);
	nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
	nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RING |
				   NFP_NET_CFG_UPDATE_GEN);
	if (err)
		return err;

	/* Finalise the netdev setup */
	ether_setup(netdev);
	netdev->netdev_ops = &nfp_net_netdev_ops;
	netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
	netif_carrier_off(netdev);

	nfp_net_set_ethtool_ops(netdev);
	nfp_net_irqs_assign(netdev);

	return register_netdev(netdev);
}
/**
 * nfp_net_netdev_clean() - Undo what nfp_net_netdev_init() did.
 * @netdev:      netdev structure
 */
void nfp_net_netdev_clean(struct net_device *netdev)
{
	unregister_netdev(netdev);
}