1 /*
2 * Copyright (C) 2015 Netronome Systems, Inc.
3 *
4 * This software is dual licensed under the GNU General Public License Version 2,
5 * June 1991 as shown in the file COPYING in the top-level directory of this
6 * source tree or the BSD 2-Clause License provided below. You have the
7 * option to license this software under the complete terms of either license.
8 *
9 * The BSD 2-Clause License:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * 1. Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * 2. Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34 /*
35 * nfp_net_common.c
36 * Netronome network device driver: Common functions between PF and VF
37 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
38 * Jason McMullan <jason.mcmullan@netronome.com>
39 * Rolf Neugebauer <rolf.neugebauer@netronome.com>
40 * Brad Petrus <brad.petrus@netronome.com>
41 * Chris Telfer <chris.telfer@netronome.com>
42 */
43
44 #include <linux/module.h>
45 #include <linux/kernel.h>
46 #include <linux/init.h>
47 #include <linux/fs.h>
48 #include <linux/netdevice.h>
49 #include <linux/etherdevice.h>
50 #include <linux/interrupt.h>
51 #include <linux/ip.h>
52 #include <linux/ipv6.h>
53 #include <linux/pci.h>
54 #include <linux/pci_regs.h>
55 #include <linux/msi.h>
56 #include <linux/ethtool.h>
57 #include <linux/log2.h>
58 #include <linux/if_vlan.h>
59 #include <linux/random.h>
60
61 #include <linux/ktime.h>
62
63 #include <net/pkt_cls.h>
64 #include <net/vxlan.h>
65
66 #include "nfp_net_ctrl.h"
67 #include "nfp_net.h"
68
69 /**
70 * nfp_net_get_fw_version() - Read and parse the FW version
71 * @fw_ver: Output fw_version structure to fill in
72 * @ctrl_bar: Mapped address of the control BAR
73 */
74 void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
75 void __iomem *ctrl_bar)
76 {
77 u32 reg;
78
79 reg = readl(ctrl_bar + NFP_NET_CFG_VERSION);
80 put_unaligned_le32(reg, fw_ver);
81 }
82
83 /* Firmware reconfig
84 *
85 * Firmware reconfig may take a while so we have two versions of it -
86 * synchronous and asynchronous (posted). All synchronous callers are holding
87 * RTNL so we don't have to worry about serializing them.
88 */
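/* Illustrative usage sketch (not part of the driver): a typical synchronous
 * caller holds RTNL and may sleep, so it calls nfp_net_reconfig() and checks
 * the result directly, while atomic contexts post the request with
 * nfp_net_reconfig_post() and let the timer or the next synchronous caller
 * complete it.  NFP_NET_CFG_UPDATE_GEN and NFP_NET_CFG_UPDATE_RSS are update
 * bits defined in nfp_net_ctrl.h.
 *
 *	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
 *	if (err)
 *		nn_err(nn, "reconfig failed: %d\n", err);
 *
 *	nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_RSS);
 */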
89 static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
90 {
91 nn_writel(nn, NFP_NET_CFG_UPDATE, update);
92 /* ensure update is written before pinging HW */
93 nn_pci_flush(nn);
94 nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
95 }
96
97 /* Pass 0 as update to run posted reconfigs. */
98 static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
99 {
100 update |= nn->reconfig_posted;
101 nn->reconfig_posted = 0;
102
103 nfp_net_reconfig_start(nn, update);
104
105 nn->reconfig_timer_active = true;
106 mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);
107 }
108
109 static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
110 {
111 u32 reg;
112
113 reg = nn_readl(nn, NFP_NET_CFG_UPDATE);
114 if (reg == 0)
115 return true;
116 if (reg & NFP_NET_CFG_UPDATE_ERR) {
117 nn_err(nn, "Reconfig error: 0x%08x\n", reg);
118 return true;
119 } else if (last_check) {
120 nn_err(nn, "Reconfig timeout: 0x%08x\n", reg);
121 return true;
122 }
123
124 return false;
125 }
126
127 static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
128 {
129 bool timed_out = false;
130
131 /* Poll update field, waiting for NFP to ack the config */
132 while (!nfp_net_reconfig_check_done(nn, timed_out)) {
133 msleep(1);
134 timed_out = time_is_before_eq_jiffies(deadline);
135 }
136
137 if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
138 return -EIO;
139
140 return timed_out ? -EIO : 0;
141 }
142
143 static void nfp_net_reconfig_timer(unsigned long data)
144 {
145 struct nfp_net *nn = (void *)data;
146
147 spin_lock_bh(&nn->reconfig_lock);
148
149 nn->reconfig_timer_active = false;
150
151 /* If sync caller is present it will take over from us */
152 if (nn->reconfig_sync_present)
153 goto done;
154
155 /* Read reconfig status and report errors */
156 nfp_net_reconfig_check_done(nn, true);
157
158 if (nn->reconfig_posted)
159 nfp_net_reconfig_start_async(nn, 0);
160 done:
161 spin_unlock_bh(&nn->reconfig_lock);
162 }
163
164 /**
165 * nfp_net_reconfig_post() - Post async reconfig request
166 * @nn: NFP Net device to reconfigure
167 * @update: The value for the update field in the BAR config
168 *
169 * Record FW reconfiguration request. Reconfiguration will be kicked off
170 * whenever reconfiguration machinery is idle. Multiple requests can be
171 * merged together!
172 */
173 static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
174 {
175 spin_lock_bh(&nn->reconfig_lock);
176
177 /* Sync caller will kick off async reconf when it's done, just post */
178 if (nn->reconfig_sync_present) {
179 nn->reconfig_posted |= update;
180 goto done;
181 }
182
183 /* Opportunistically check if the previous command is done */
184 if (!nn->reconfig_timer_active ||
185 nfp_net_reconfig_check_done(nn, false))
186 nfp_net_reconfig_start_async(nn, update);
187 else
188 nn->reconfig_posted |= update;
189 done:
190 spin_unlock_bh(&nn->reconfig_lock);
191 }
192
193 /**
194 * nfp_net_reconfig() - Reconfigure the firmware
195 * @nn: NFP Net device to reconfigure
196 * @update: The value for the update field in the BAR config
197 *
198 * Write the update word to the BAR and ping the reconfig queue. Then
199 * poll until the firmware has acknowledged the update by zeroing the
200 * update word.
201 *
202 * Return: Negative errno on error, 0 on success
203 */
204 int nfp_net_reconfig(struct nfp_net *nn, u32 update)
205 {
206 bool cancelled_timer = false;
207 u32 pre_posted_requests;
208 int ret;
209
210 spin_lock_bh(&nn->reconfig_lock);
211
212 nn->reconfig_sync_present = true;
213
214 if (nn->reconfig_timer_active) {
215 del_timer(&nn->reconfig_timer);
216 nn->reconfig_timer_active = false;
217 cancelled_timer = true;
218 }
219 pre_posted_requests = nn->reconfig_posted;
220 nn->reconfig_posted = 0;
221
222 spin_unlock_bh(&nn->reconfig_lock);
223
224 if (cancelled_timer)
225 nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
226
227 /* Run the posted reconfigs which were issued before we started */
228 if (pre_posted_requests) {
229 nfp_net_reconfig_start(nn, pre_posted_requests);
230 nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
231 }
232
233 nfp_net_reconfig_start(nn, update);
234 ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
235
236 spin_lock_bh(&nn->reconfig_lock);
237
238 if (nn->reconfig_posted)
239 nfp_net_reconfig_start_async(nn, 0);
240
241 nn->reconfig_sync_present = false;
242
243 spin_unlock_bh(&nn->reconfig_lock);
244
245 return ret;
246 }
247
248 /* Interrupt configuration and handling
249 */
250
251 /**
252 * nfp_net_irq_unmask_msix() - Unmask MSI-X after automasking
253 * @nn: NFP Network structure
254 * @entry_nr: MSI-X table entry
255 *
256 * Clear the MSI-X table mask bit for the given entry, bypassing the Linux
257 * irq handling subsystem. Use *only* to re-enable automasked vectors.
258 */
259 static void nfp_net_irq_unmask_msix(struct nfp_net *nn, unsigned int entry_nr)
260 {
261 struct list_head *msi_head = &nn->pdev->dev.msi_list;
262 struct msi_desc *entry;
263 u32 off;
264
265 /* All MSI-Xs have the same mask_base */
266 entry = list_first_entry(msi_head, struct msi_desc, list);
267
268 off = (PCI_MSIX_ENTRY_SIZE * entry_nr) +
269 PCI_MSIX_ENTRY_VECTOR_CTRL;
270 writel(0, entry->mask_base + off);
271 readl(entry->mask_base);
272 }
273
274 /**
275 * nfp_net_irq_unmask() - Unmask automasked interrupt
276 * @nn: NFP Network structure
277 * @entry_nr: MSI-X table entry
278 *
279 * If MSI-X auto-masking is enabled clear the mask bit, otherwise
280 * clear the ICR for the entry.
281 */
282 static void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
283 {
284 if (nn->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
285 nfp_net_irq_unmask_msix(nn, entry_nr);
286 return;
287 }
288
289 nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
290 nn_pci_flush(nn);
291 }
292
293 /**
294 * nfp_net_msix_alloc() - Try to allocate MSI-X irqs
295 * @nn: NFP Network structure
296 * @nr_vecs: Number of MSI-X vectors to allocate
297 *
298 * For MSI-X we want at least NFP_NET_NON_Q_VECTORS + 1 vectors.
299 *
300 * Return: Number of MSI-X vectors obtained or 0 on error.
301 */
302 static int nfp_net_msix_alloc(struct nfp_net *nn, int nr_vecs)
303 {
304 struct pci_dev *pdev = nn->pdev;
305 int nvecs;
306 int i;
307
308 for (i = 0; i < nr_vecs; i++)
309 nn->irq_entries[i].entry = i;
310
311 nvecs = pci_enable_msix_range(pdev, nn->irq_entries,
312 NFP_NET_NON_Q_VECTORS + 1, nr_vecs);
313 if (nvecs < 0) {
314 nn_warn(nn, "Failed to enable MSI-X. Wanted %d-%d (err=%d)\n",
315 NFP_NET_NON_Q_VECTORS + 1, nr_vecs, nvecs);
316 return 0;
317 }
318
319 return nvecs;
320 }
321
322 /**
323 * nfp_net_irqs_wanted() - Work out how many interrupt vectors we want
324 * @nn: NFP Network structure
325 *
326 * We want one vector per ring or per CPU, whichever is smaller, plus
327 * NFP_NET_NON_Q_VECTORS for LSC etc.
328 *
329 * Return: Number of interrupts wanted
330 */
331 static int nfp_net_irqs_wanted(struct nfp_net *nn)
332 {
333 int ncpus;
334 int vecs;
335
336 ncpus = num_online_cpus();
337
338 vecs = max_t(int, nn->num_tx_rings, nn->num_rx_rings);
339 vecs = min_t(int, vecs, ncpus);
340
341 return vecs + NFP_NET_NON_Q_VECTORS;
342 }
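
/* Worked example (illustrative numbers): with 8 TX and 8 RX rings on a
 * 4-CPU host, vecs = min(max(8, 8), 4) = 4, so 4 + NFP_NET_NON_Q_VECTORS
 * vectors are requested; the non-queue vectors carry link state change
 * and exception interrupts rather than ring traffic.
 */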
343
344 /**
345 * nfp_net_irqs_alloc() - Allocate MSI-X IRQs
346 * @nn: NFP Network structure
347 *
348 * Return: Number of irqs obtained or 0 on error.
349 */
350 int nfp_net_irqs_alloc(struct nfp_net *nn)
351 {
352 int wanted_irqs;
353
354 wanted_irqs = nfp_net_irqs_wanted(nn);
355
356 nn->num_irqs = nfp_net_msix_alloc(nn, wanted_irqs);
357 if (nn->num_irqs == 0) {
358 nn_err(nn, "Failed to allocate MSI-X IRQs\n");
359 return 0;
360 }
361
362 nn->num_r_vecs = nn->num_irqs - NFP_NET_NON_Q_VECTORS;
363
364 if (nn->num_irqs < wanted_irqs)
365 nn_warn(nn, "Unable to allocate %d vectors. Got %d instead\n",
366 wanted_irqs, nn->num_irqs);
367
368 return nn->num_irqs;
369 }
370
371 /**
372 * nfp_net_irqs_disable() - Disable interrupts
373 * @nn: NFP Network structure
374 *
375 * Undoes what nfp_net_irqs_alloc() does.
376 */
377 void nfp_net_irqs_disable(struct nfp_net *nn)
378 {
379 pci_disable_msix(nn->pdev);
380 }
381
382 /**
383 * nfp_net_irq_rxtx() - Interrupt service routine for RX/TX rings.
384 * @irq: Interrupt
385 * @data: Opaque data structure
386 *
387 * Return: Indicate if the interrupt has been handled.
388 */
389 static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
390 {
391 struct nfp_net_r_vector *r_vec = data;
392
393 napi_schedule_irqoff(&r_vec->napi);
394
395 /* The FW auto-masks any interrupt, either via the MASK bit in
396 * the MSI-X table or via the per entry ICR field. So there
397 * is no need to disable interrupts here.
398 */
399 return IRQ_HANDLED;
400 }
401
402 /**
403 * nfp_net_read_link_status() - Reread link status from control BAR
404 * @nn: NFP Network structure
405 */
406 static void nfp_net_read_link_status(struct nfp_net *nn)
407 {
408 unsigned long flags;
409 bool link_up;
410 u32 sts;
411
412 spin_lock_irqsave(&nn->link_status_lock, flags);
413
414 sts = nn_readl(nn, NFP_NET_CFG_STS);
415 link_up = !!(sts & NFP_NET_CFG_STS_LINK);
416
417 if (nn->link_up == link_up)
418 goto out;
419
420 nn->link_up = link_up;
421
422 if (nn->link_up) {
423 netif_carrier_on(nn->netdev);
424 netdev_info(nn->netdev, "NIC Link is Up\n");
425 } else {
426 netif_carrier_off(nn->netdev);
427 netdev_info(nn->netdev, "NIC Link is Down\n");
428 }
429 out:
430 spin_unlock_irqrestore(&nn->link_status_lock, flags);
431 }
432
433 /**
434 * nfp_net_irq_lsc() - Interrupt service routine for link state changes
435 * @irq: Interrupt
436 * @data: Opaque data structure
437 *
438 * Return: Indicate if the interrupt has been handled.
439 */
440 static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
441 {
442 struct nfp_net *nn = data;
443
444 nfp_net_read_link_status(nn);
445
446 nfp_net_irq_unmask(nn, NFP_NET_IRQ_LSC_IDX);
447
448 return IRQ_HANDLED;
449 }
450
451 /**
452 * nfp_net_irq_exn() - Interrupt service routine for exceptions
453 * @irq: Interrupt
454 * @data: Opaque data structure
455 *
456 * Return: Indicate if the interrupt has been handled.
457 */
458 static irqreturn_t nfp_net_irq_exn(int irq, void *data)
459 {
460 struct nfp_net *nn = data;
461
462 nn_err(nn, "%s: UNIMPLEMENTED.\n", __func__);
463 /* XXX TO BE IMPLEMENTED */
464 return IRQ_HANDLED;
465 }
466
467 /**
468 * nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
469 * @tx_ring: TX ring structure
470 * @r_vec: IRQ vector servicing this ring
471 * @idx: Ring index
472 */
473 static void
474 nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
475 struct nfp_net_r_vector *r_vec, unsigned int idx)
476 {
477 struct nfp_net *nn = r_vec->nfp_net;
478
479 tx_ring->idx = idx;
480 tx_ring->r_vec = r_vec;
481
482 tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
483 tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
484 }
485
486 /**
487 * nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
488 * @rx_ring: RX ring structure
489 * @r_vec: IRQ vector servicing this ring
490 * @idx: Ring index
491 */
492 static void
493 nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
494 struct nfp_net_r_vector *r_vec, unsigned int idx)
495 {
496 struct nfp_net *nn = r_vec->nfp_net;
497
498 rx_ring->idx = idx;
499 rx_ring->r_vec = r_vec;
500
501 rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
502 rx_ring->rx_qcidx = rx_ring->fl_qcidx + (nn->stride_rx - 1);
503
504 rx_ring->qcp_fl = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->fl_qcidx);
505 rx_ring->qcp_rx = nn->rx_bar + NFP_QCP_QUEUE_OFF(rx_ring->rx_qcidx);
506 }
507
508 /**
509 * nfp_net_irqs_assign() - Assign IRQs and setup rvecs.
510 * @netdev: netdev structure
511 */
512 static void nfp_net_irqs_assign(struct net_device *netdev)
513 {
514 struct nfp_net *nn = netdev_priv(netdev);
515 struct nfp_net_r_vector *r_vec;
516 int r;
517
518 /* Assumes nn->num_tx_rings == nn->num_rx_rings */
519 if (nn->num_tx_rings > nn->num_r_vecs) {
520 nn_warn(nn, "More rings (%d) than vectors (%d).\n",
521 nn->num_tx_rings, nn->num_r_vecs);
522 nn->num_tx_rings = nn->num_r_vecs;
523 nn->num_rx_rings = nn->num_r_vecs;
524 }
525
526 nn->lsc_handler = nfp_net_irq_lsc;
527 nn->exn_handler = nfp_net_irq_exn;
528
529 for (r = 0; r < nn->num_r_vecs; r++) {
530 r_vec = &nn->r_vecs[r];
531 r_vec->nfp_net = nn;
532 r_vec->handler = nfp_net_irq_rxtx;
533 r_vec->irq_idx = NFP_NET_NON_Q_VECTORS + r;
534
535 cpumask_set_cpu(r, &r_vec->affinity_mask);
536 }
537 }
538
539 /**
540 * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
541 * @nn: NFP Network structure
542 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
543 * @format: printf-style format to construct the interrupt name
544 * @name: Pointer to allocated space for interrupt name
545 * @name_sz: Size of space for interrupt name
546 * @vector_idx: Index of MSI-X vector used for this interrupt
547 * @handler: IRQ handler to register for this interrupt
548 */
549 static int
550 nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
551 const char *format, char *name, size_t name_sz,
552 unsigned int vector_idx, irq_handler_t handler)
553 {
554 struct msix_entry *entry;
555 int err;
556
557 entry = &nn->irq_entries[vector_idx];
558
559 snprintf(name, name_sz, format, netdev_name(nn->netdev));
560 err = request_irq(entry->vector, handler, 0, name, nn);
561 if (err) {
562 nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
563 entry->vector, err);
564 return err;
565 }
566 nn_writeb(nn, ctrl_offset, vector_idx);
567
568 return 0;
569 }
570
571 /**
572 * nfp_net_aux_irq_free() - Free an auxiliary interrupt (LSC or EXN)
573 * @nn: NFP Network structure
574 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
575 * @vector_idx: Index of MSI-X vector used for this interrupt
576 */
577 static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
578 unsigned int vector_idx)
579 {
580 nn_writeb(nn, ctrl_offset, 0xff);
581 free_irq(nn->irq_entries[vector_idx].vector, nn);
582 }
583
584 /* Transmit
585 *
586 * One queue controller peripheral queue is used for transmit. The
587 * driver en-queues packets for transmit by advancing the write
588 * pointer. The device indicates that packets have transmitted by
589 * advancing the read pointer. The driver maintains a local copy of
590 * the read and write pointer in @struct nfp_net_tx_ring. The driver
591 * keeps @wr_p in sync with the queue controller write pointer and can
592 * determine how many packets have been transmitted by comparing its
593 * copy of the read pointer @rd_p with the read pointer maintained by
594 * the queue controller peripheral.
595 */
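
/* Worked example (illustrative numbers): with cnt = 256, wr_p = 300 and
 * rd_p = 60 there are 300 - 60 = 240 descriptors in flight and 16 free.
 * The pointers are free running; they are only reduced modulo cnt when
 * used as an array index (e.g. wr_idx = wr_p % cnt), so the occupancy
 * arithmetic above never has to deal with wrap-around explicitly.
 */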
596
597 /**
598 * nfp_net_tx_full() - Check if the TX ring is full
599 * @tx_ring: TX ring to check
600 * @dcnt: Number of descriptors that need to be enqueued (must be >= 1)
601 *
602 * This function checks, based on the *host copy* of the read/write
603 * pointers, whether a given TX ring is full. The real TX queue may
604 * have some newly available slots.
605 *
606 * Return: True if the ring is full.
607 */
608 static inline int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
609 {
610 return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
611 }
612
613 /* Wrappers for deciding when to stop and restart TX queues */
614 static int nfp_net_tx_ring_should_wake(struct nfp_net_tx_ring *tx_ring)
615 {
616 return !nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS * 4);
617 }
618
619 static int nfp_net_tx_ring_should_stop(struct nfp_net_tx_ring *tx_ring)
620 {
621 return nfp_net_tx_full(tx_ring, MAX_SKB_FRAGS + 1);
622 }
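
/* The stop and wake thresholds differ on purpose to provide hysteresis:
 * assuming the common MAX_SKB_FRAGS value of 17, the queue is stopped once
 * 18 (MAX_SKB_FRAGS + 1) or fewer descriptors are free and is only woken
 * again once more than 68 (MAX_SKB_FRAGS * 4) are free, which avoids rapid
 * stop/start toggling under load.
 */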
623
624 /**
625 * nfp_net_tx_ring_stop() - stop tx ring
626 * @nd_q: netdev queue
627 * @tx_ring: driver tx queue structure
628 *
629 * Safely stop TX ring. Remember that while we are running .start_xmit()
630 * someone else may be cleaning the TX ring completions so we need to be
631 * extra careful here.
632 */
633 static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q,
634 struct nfp_net_tx_ring *tx_ring)
635 {
636 netif_tx_stop_queue(nd_q);
637
638 /* We can race with the TX completion out of NAPI so recheck */
639 smp_mb();
640 if (unlikely(nfp_net_tx_ring_should_wake(tx_ring)))
641 netif_tx_start_queue(nd_q);
642 }
643
644 /**
645 * nfp_net_tx_tso() - Set up Tx descriptor for LSO
646 * @nn: NFP Net device
647 * @r_vec: per-ring structure
648 * @txbuf: Pointer to driver soft TX descriptor
649 * @txd: Pointer to HW TX descriptor
650 * @skb: Pointer to SKB
651 *
652 * Set up Tx descriptor for LSO, do nothing for non-LSO skbs.
653 * The header length is written to the descriptor's l4_offset field.
654 */
655 static void nfp_net_tx_tso(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
656 struct nfp_net_tx_buf *txbuf,
657 struct nfp_net_tx_desc *txd, struct sk_buff *skb)
658 {
659 u32 hdrlen;
660 u16 mss;
661
662 if (!skb_is_gso(skb))
663 return;
664
665 if (!skb->encapsulation)
666 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
667 else
668 hdrlen = skb_inner_transport_header(skb) - skb->data +
669 inner_tcp_hdrlen(skb);
670
671 txbuf->pkt_cnt = skb_shinfo(skb)->gso_segs;
672 txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1);
673
674 mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK;
675 txd->l4_offset = hdrlen;
676 txd->mss = cpu_to_le16(mss);
677 txd->flags |= PCIE_DESC_TX_LSO;
678
679 u64_stats_update_begin(&r_vec->tx_sync);
680 r_vec->tx_lso++;
681 u64_stats_update_end(&r_vec->tx_sync);
682 }
683
684 /**
685 * nfp_net_tx_csum() - Set TX CSUM offload flags in TX descriptor
686 * @nn: NFP Net device
687 * @r_vec: per-ring structure
688 * @txbuf: Pointer to driver soft TX descriptor
689 * @txd: Pointer to TX descriptor
690 * @skb: Pointer to SKB
691 *
692 * This function sets the TX checksum flags in the TX descriptor based
693 * on the configuration and the protocol of the packet to be transmitted.
694 */
695 static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
696 struct nfp_net_tx_buf *txbuf,
697 struct nfp_net_tx_desc *txd, struct sk_buff *skb)
698 {
699 struct ipv6hdr *ipv6h;
700 struct iphdr *iph;
701 u8 l4_hdr;
702
703 if (!(nn->ctrl & NFP_NET_CFG_CTRL_TXCSUM))
704 return;
705
706 if (skb->ip_summed != CHECKSUM_PARTIAL)
707 return;
708
709 txd->flags |= PCIE_DESC_TX_CSUM;
710 if (skb->encapsulation)
711 txd->flags |= PCIE_DESC_TX_ENCAP;
712
713 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
714 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
715
716 if (iph->version == 4) {
717 txd->flags |= PCIE_DESC_TX_IP4_CSUM;
718 l4_hdr = iph->protocol;
719 } else if (ipv6h->version == 6) {
720 l4_hdr = ipv6h->nexthdr;
721 } else {
722 nn_warn_ratelimit(nn, "partial checksum but ipv=%x!\n",
723 iph->version);
724 return;
725 }
726
727 switch (l4_hdr) {
728 case IPPROTO_TCP:
729 txd->flags |= PCIE_DESC_TX_TCP_CSUM;
730 break;
731 case IPPROTO_UDP:
732 txd->flags |= PCIE_DESC_TX_UDP_CSUM;
733 break;
734 default:
735 nn_warn_ratelimit(nn, "partial checksum but l4 proto=%x!\n",
736 l4_hdr);
737 return;
738 }
739
740 u64_stats_update_begin(&r_vec->tx_sync);
741 if (skb->encapsulation)
742 r_vec->hw_csum_tx_inner += txbuf->pkt_cnt;
743 else
744 r_vec->hw_csum_tx += txbuf->pkt_cnt;
745 u64_stats_update_end(&r_vec->tx_sync);
746 }
747
748 /**
749 * nfp_net_tx() - Main transmit entry point
750 * @skb: SKB to transmit
751 * @netdev: netdev structure
752 *
753 * Return: NETDEV_TX_OK (packet sent or dropped), NETDEV_TX_BUSY if the TX ring is full.
754 */
755 static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
756 {
757 struct nfp_net *nn = netdev_priv(netdev);
758 const struct skb_frag_struct *frag;
759 struct nfp_net_r_vector *r_vec;
760 struct nfp_net_tx_desc *txd, txdg;
761 struct nfp_net_tx_buf *txbuf;
762 struct nfp_net_tx_ring *tx_ring;
763 struct netdev_queue *nd_q;
764 dma_addr_t dma_addr;
765 unsigned int fsize;
766 int f, nr_frags;
767 int wr_idx;
768 u16 qidx;
769
770 qidx = skb_get_queue_mapping(skb);
771 tx_ring = &nn->tx_rings[qidx];
772 r_vec = tx_ring->r_vec;
773 nd_q = netdev_get_tx_queue(nn->netdev, qidx);
774
775 nr_frags = skb_shinfo(skb)->nr_frags;
776
777 if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) {
778 nn_warn_ratelimit(nn, "TX ring %d busy. wrp=%u rdp=%u\n",
779 qidx, tx_ring->wr_p, tx_ring->rd_p);
780 netif_tx_stop_queue(nd_q);
781 u64_stats_update_begin(&r_vec->tx_sync);
782 r_vec->tx_busy++;
783 u64_stats_update_end(&r_vec->tx_sync);
784 return NETDEV_TX_BUSY;
785 }
786
787 /* Start with the head skbuf */
788 dma_addr = dma_map_single(&nn->pdev->dev, skb->data, skb_headlen(skb),
789 DMA_TO_DEVICE);
790 if (dma_mapping_error(&nn->pdev->dev, dma_addr))
791 goto err_free;
792
793 wr_idx = tx_ring->wr_p % tx_ring->cnt;
794
795 /* Stash the soft descriptor of the head then initialize it */
796 txbuf = &tx_ring->txbufs[wr_idx];
797 txbuf->skb = skb;
798 txbuf->dma_addr = dma_addr;
799 txbuf->fidx = -1;
800 txbuf->pkt_cnt = 1;
801 txbuf->real_len = skb->len;
802
803 /* Build TX descriptor */
804 txd = &tx_ring->txds[wr_idx];
805 txd->offset_eop = (nr_frags == 0) ? PCIE_DESC_TX_EOP : 0;
806 txd->dma_len = cpu_to_le16(skb_headlen(skb));
807 nfp_desc_set_dma_addr(txd, dma_addr);
808 txd->data_len = cpu_to_le16(skb->len);
809
810 txd->flags = 0;
811 txd->mss = 0;
812 txd->l4_offset = 0;
813
814 nfp_net_tx_tso(nn, r_vec, txbuf, txd, skb);
815
816 nfp_net_tx_csum(nn, r_vec, txbuf, txd, skb);
817
818 if (skb_vlan_tag_present(skb) && nn->ctrl & NFP_NET_CFG_CTRL_TXVLAN) {
819 txd->flags |= PCIE_DESC_TX_VLAN;
820 txd->vlan = cpu_to_le16(skb_vlan_tag_get(skb));
821 }
822
823 /* Gather DMA */
824 if (nr_frags > 0) {
825 /* all descs must match except for in addr, length and eop */
826 txdg = *txd;
827
828 for (f = 0; f < nr_frags; f++) {
829 frag = &skb_shinfo(skb)->frags[f];
830 fsize = skb_frag_size(frag);
831
832 dma_addr = skb_frag_dma_map(&nn->pdev->dev, frag, 0,
833 fsize, DMA_TO_DEVICE);
834 if (dma_mapping_error(&nn->pdev->dev, dma_addr))
835 goto err_unmap;
836
837 wr_idx = (wr_idx + 1) % tx_ring->cnt;
838 tx_ring->txbufs[wr_idx].skb = skb;
839 tx_ring->txbufs[wr_idx].dma_addr = dma_addr;
840 tx_ring->txbufs[wr_idx].fidx = f;
841
842 txd = &tx_ring->txds[wr_idx];
843 *txd = txdg;
844 txd->dma_len = cpu_to_le16(fsize);
845 nfp_desc_set_dma_addr(txd, dma_addr);
846 txd->offset_eop =
847 (f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0;
848 }
849
850 u64_stats_update_begin(&r_vec->tx_sync);
851 r_vec->tx_gather++;
852 u64_stats_update_end(&r_vec->tx_sync);
853 }
854
855 netdev_tx_sent_queue(nd_q, txbuf->real_len);
856
857 tx_ring->wr_p += nr_frags + 1;
858 if (nfp_net_tx_ring_should_stop(tx_ring))
859 nfp_net_tx_ring_stop(nd_q, tx_ring);
860
861 tx_ring->wr_ptr_add += nr_frags + 1;
862 if (!skb->xmit_more || netif_xmit_stopped(nd_q)) {
863 /* force memory write before we let HW know */
864 wmb();
865 nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
866 tx_ring->wr_ptr_add = 0;
867 }
868
869 skb_tx_timestamp(skb);
870
871 return NETDEV_TX_OK;
872
873 err_unmap:
874 --f;
875 while (f >= 0) {
876 frag = &skb_shinfo(skb)->frags[f];
877 dma_unmap_page(&nn->pdev->dev,
878 tx_ring->txbufs[wr_idx].dma_addr,
879 skb_frag_size(frag), DMA_TO_DEVICE);
880 tx_ring->txbufs[wr_idx].skb = NULL;
881 tx_ring->txbufs[wr_idx].dma_addr = 0;
882 tx_ring->txbufs[wr_idx].fidx = -2;
883 wr_idx = wr_idx - 1;
884 if (wr_idx < 0)
885 wr_idx += tx_ring->cnt;
886 }
887 dma_unmap_single(&nn->pdev->dev, tx_ring->txbufs[wr_idx].dma_addr,
888 skb_headlen(skb), DMA_TO_DEVICE);
889 tx_ring->txbufs[wr_idx].skb = NULL;
890 tx_ring->txbufs[wr_idx].dma_addr = 0;
891 tx_ring->txbufs[wr_idx].fidx = -2;
892 err_free:
893 nn_warn_ratelimit(nn, "Failed to map DMA TX buffer\n");
894 u64_stats_update_begin(&r_vec->tx_sync);
895 r_vec->tx_errors++;
896 u64_stats_update_end(&r_vec->tx_sync);
897 dev_kfree_skb_any(skb);
898 return NETDEV_TX_OK;
899 }
900
901 /**
902 * nfp_net_tx_complete() - Handle completed TX packets
903 * @tx_ring: TX ring structure
906 */
907 static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
908 {
909 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
910 struct nfp_net *nn = r_vec->nfp_net;
911 const struct skb_frag_struct *frag;
912 struct netdev_queue *nd_q;
913 u32 done_pkts = 0, done_bytes = 0;
914 struct sk_buff *skb;
915 int todo, nr_frags;
916 u32 qcp_rd_p;
917 int fidx;
918 int idx;
919
920 /* Work out how many descriptors have been transmitted */
921 qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
922
923 if (qcp_rd_p == tx_ring->qcp_rd_p)
924 return;
925
926 if (qcp_rd_p > tx_ring->qcp_rd_p)
927 todo = qcp_rd_p - tx_ring->qcp_rd_p;
928 else
929 todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p;
930
931 while (todo--) {
932 idx = tx_ring->rd_p % tx_ring->cnt;
933 tx_ring->rd_p++;
934
935 skb = tx_ring->txbufs[idx].skb;
936 if (!skb)
937 continue;
938
939 nr_frags = skb_shinfo(skb)->nr_frags;
940 fidx = tx_ring->txbufs[idx].fidx;
941
942 if (fidx == -1) {
943 /* unmap head */
944 dma_unmap_single(&nn->pdev->dev,
945 tx_ring->txbufs[idx].dma_addr,
946 skb_headlen(skb), DMA_TO_DEVICE);
947
948 done_pkts += tx_ring->txbufs[idx].pkt_cnt;
949 done_bytes += tx_ring->txbufs[idx].real_len;
950 } else {
951 /* unmap fragment */
952 frag = &skb_shinfo(skb)->frags[fidx];
953 dma_unmap_page(&nn->pdev->dev,
954 tx_ring->txbufs[idx].dma_addr,
955 skb_frag_size(frag), DMA_TO_DEVICE);
956 }
957
958 /* check for last gather fragment */
959 if (fidx == nr_frags - 1)
960 dev_kfree_skb_any(skb);
961
962 tx_ring->txbufs[idx].dma_addr = 0;
963 tx_ring->txbufs[idx].skb = NULL;
964 tx_ring->txbufs[idx].fidx = -2;
965 }
966
967 tx_ring->qcp_rd_p = qcp_rd_p;
968
969 u64_stats_update_begin(&r_vec->tx_sync);
970 r_vec->tx_bytes += done_bytes;
971 r_vec->tx_pkts += done_pkts;
972 u64_stats_update_end(&r_vec->tx_sync);
973
974 nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
975 netdev_tx_completed_queue(nd_q, done_pkts, done_bytes);
976 if (nfp_net_tx_ring_should_wake(tx_ring)) {
977 /* Make sure TX thread will see updated tx_ring->rd_p */
978 smp_mb();
979
980 if (unlikely(netif_tx_queue_stopped(nd_q)))
981 netif_tx_wake_queue(nd_q);
982 }
983
984 WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
985 "TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
986 tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
987 }
988
989 /**
990 * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
991 * @nn: NFP Net device
992 * @tx_ring: TX ring structure
993 *
994 * Assumes that the device is stopped
995 */
996 static void
997 nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
998 {
999 const struct skb_frag_struct *frag;
1000 struct netdev_queue *nd_q;
1001 struct pci_dev *pdev = nn->pdev;
1002
1003 while (tx_ring->rd_p != tx_ring->wr_p) {
1004 int nr_frags, fidx, idx;
1005 struct sk_buff *skb;
1006
1007 idx = tx_ring->rd_p % tx_ring->cnt;
1008 skb = tx_ring->txbufs[idx].skb;
1009 nr_frags = skb_shinfo(skb)->nr_frags;
1010 fidx = tx_ring->txbufs[idx].fidx;
1011
1012 if (fidx == -1) {
1013 /* unmap head */
1014 dma_unmap_single(&pdev->dev,
1015 tx_ring->txbufs[idx].dma_addr,
1016 skb_headlen(skb), DMA_TO_DEVICE);
1017 } else {
1018 /* unmap fragment */
1019 frag = &skb_shinfo(skb)->frags[fidx];
1020 dma_unmap_page(&pdev->dev,
1021 tx_ring->txbufs[idx].dma_addr,
1022 skb_frag_size(frag), DMA_TO_DEVICE);
1023 }
1024
1025 /* check for last gather fragment */
1026 if (fidx == nr_frags - 1)
1027 dev_kfree_skb_any(skb);
1028
1029 tx_ring->txbufs[idx].dma_addr = 0;
1030 tx_ring->txbufs[idx].skb = NULL;
1031 tx_ring->txbufs[idx].fidx = -2;
1032
1033 tx_ring->qcp_rd_p++;
1034 tx_ring->rd_p++;
1035 }
1036
1037 memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt);
1038 tx_ring->wr_p = 0;
1039 tx_ring->rd_p = 0;
1040 tx_ring->qcp_rd_p = 0;
1041 tx_ring->wr_ptr_add = 0;
1042
1043 nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
1044 netdev_tx_reset_queue(nd_q);
1045 }
1046
1047 static void nfp_net_tx_timeout(struct net_device *netdev)
1048 {
1049 struct nfp_net *nn = netdev_priv(netdev);
1050 int i;
1051
1052 for (i = 0; i < nn->num_tx_rings; i++) {
1053 if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
1054 continue;
1055 nn_warn(nn, "TX timeout on ring: %d\n", i);
1056 }
1057 nn_warn(nn, "TX watchdog timeout\n");
1058 }
1059
1060 /* Receive processing
1061 */
1062
1063 /**
1064 * nfp_net_rx_space() - Return the number of free slots on the RX ring
1065 * @rx_ring: RX ring structure
1066 *
1067 * Make sure we leave at least one slot free.
1068 *
1069 * Return: Number of free slots on the RX ring (0 if none are available)
1070 */
1071 static inline int nfp_net_rx_space(struct nfp_net_rx_ring *rx_ring)
1072 {
1073 return (rx_ring->cnt - 1) - (rx_ring->wr_p - rx_ring->rd_p);
1074 }
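
/* Worked example (illustrative numbers): with cnt = 512, wr_p = 600 and
 * rd_p = 100 there are 500 buffers outstanding and nfp_net_rx_space()
 * returns (512 - 1) - 500 = 11, so at most cnt - 1 buffers are ever put
 * on the freelist.
 */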
1075
1076 /**
1077 * nfp_net_rx_alloc_one() - Allocate and map skb for RX
1078 * @rx_ring: RX ring structure of the skb
1079 * @dma_addr: Pointer to storage for DMA address (output param)
1080 * @fl_bufsz: size of freelist buffers
1081 *
1082 * This function will allocate a new skb and map it for DMA.
1083 *
1084 * Return: allocated skb or NULL on failure.
1085 */
1086 static struct sk_buff *
1087 nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr,
1088 unsigned int fl_bufsz)
1089 {
1090 struct nfp_net *nn = rx_ring->r_vec->nfp_net;
1091 struct sk_buff *skb;
1092
1093 skb = netdev_alloc_skb(nn->netdev, fl_bufsz);
1094 if (!skb) {
1095 nn_warn_ratelimit(nn, "Failed to alloc receive SKB\n");
1096 return NULL;
1097 }
1098
1099 *dma_addr = dma_map_single(&nn->pdev->dev, skb->data,
1100 fl_bufsz, DMA_FROM_DEVICE);
1101 if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
1102 dev_kfree_skb_any(skb);
1103 nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
1104 return NULL;
1105 }
1106
1107 return skb;
1108 }
1109
1110 /**
1111 * nfp_net_rx_give_one() - Put mapped skb on the software and hardware rings
1112 * @rx_ring: RX ring structure
1113 * @skb: Skb to put on rings
1114 * @dma_addr: DMA address of skb mapping
1115 */
1116 static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring,
1117 struct sk_buff *skb, dma_addr_t dma_addr)
1118 {
1119 unsigned int wr_idx;
1120
1121 wr_idx = rx_ring->wr_p % rx_ring->cnt;
1122
1123 /* Stash SKB and DMA address away */
1124 rx_ring->rxbufs[wr_idx].skb = skb;
1125 rx_ring->rxbufs[wr_idx].dma_addr = dma_addr;
1126
1127 /* Fill freelist descriptor */
1128 rx_ring->rxds[wr_idx].fld.reserved = 0;
1129 rx_ring->rxds[wr_idx].fld.meta_len_dd = 0;
1130 nfp_desc_set_dma_addr(&rx_ring->rxds[wr_idx].fld, dma_addr);
1131
1132 rx_ring->wr_p++;
1133 rx_ring->wr_ptr_add++;
1134 if (rx_ring->wr_ptr_add >= NFP_NET_FL_BATCH) {
1135 /* Update write pointer of the freelist queue. Make
1136 * sure all writes are flushed before telling the hardware.
1137 */
1138 wmb();
1139 nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, rx_ring->wr_ptr_add);
1140 rx_ring->wr_ptr_add = 0;
1141 }
1142 }
1143
1144 /**
1145 * nfp_net_rx_ring_reset() - Reflect in SW state of freelist after disable
1146 * @rx_ring: RX ring structure
1147 *
1148 * Warning: Do *not* call if ring buffers were never put on the FW freelist
1149 * (i.e. device was not enabled)!
1150 */
1151 static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
1152 {
1153 unsigned int wr_idx, last_idx;
1154
1155 /* Move the empty entry to the end of the list */
1156 wr_idx = rx_ring->wr_p % rx_ring->cnt;
1157 last_idx = rx_ring->cnt - 1;
1158 rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
1159 rx_ring->rxbufs[wr_idx].skb = rx_ring->rxbufs[last_idx].skb;
1160 rx_ring->rxbufs[last_idx].dma_addr = 0;
1161 rx_ring->rxbufs[last_idx].skb = NULL;
1162
1163 memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt);
1164 rx_ring->wr_p = 0;
1165 rx_ring->rd_p = 0;
1166 rx_ring->wr_ptr_add = 0;
1167 }
1168
1169 /**
1170 * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
1171 * @nn: NFP Net device
1172 * @rx_ring: RX ring to remove buffers from
1173 *
1174 * Assumes that the device is stopped and that buffers occupy entries
1175 * [0, ring->cnt - 1). After the device is disabled nfp_net_rx_ring_reset()
1176 * must be called to restore the required ring geometry.
1177 */
1178 static void
1179 nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
1180 {
1181 struct pci_dev *pdev = nn->pdev;
1182 unsigned int i;
1183
1184 for (i = 0; i < rx_ring->cnt - 1; i++) {
1185 /* NULL skb can only happen when initial filling of the ring
1186 * fails to allocate enough buffers and calls here to free
1187 * already allocated ones.
1188 */
1189 if (!rx_ring->rxbufs[i].skb)
1190 continue;
1191
1192 dma_unmap_single(&pdev->dev, rx_ring->rxbufs[i].dma_addr,
1193 rx_ring->bufsz, DMA_FROM_DEVICE);
1194 dev_kfree_skb_any(rx_ring->rxbufs[i].skb);
1195 rx_ring->rxbufs[i].dma_addr = 0;
1196 rx_ring->rxbufs[i].skb = NULL;
1197 }
1198 }
1199
1200 /**
1201 * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
1202 * @nn: NFP Net device
1203 * @rx_ring: RX ring to fill with buffers
1204 */
1205 static int
1206 nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
1207 {
1208 struct nfp_net_rx_buf *rxbufs;
1209 unsigned int i;
1210
1211 rxbufs = rx_ring->rxbufs;
1212
1213 for (i = 0; i < rx_ring->cnt - 1; i++) {
1214 rxbufs[i].skb =
1215 nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr,
1216 rx_ring->bufsz);
1217 if (!rxbufs[i].skb) {
1218 nfp_net_rx_ring_bufs_free(nn, rx_ring);
1219 return -ENOMEM;
1220 }
1221 }
1222
1223 return 0;
1224 }
1225
1226 /**
1227 * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
1228 * @rx_ring: RX ring to fill
1229 */
1230 static void nfp_net_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
1231 {
1232 unsigned int i;
1233
1234 for (i = 0; i < rx_ring->cnt - 1; i++)
1235 nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[i].skb,
1236 rx_ring->rxbufs[i].dma_addr);
1237 }
1238
1239 /**
1240 * nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors
1241 * @flags: RX descriptor flags field in CPU byte order
1242 */
1243 static int nfp_net_rx_csum_has_errors(u16 flags)
1244 {
1245 u16 csum_all_checked, csum_all_ok;
1246
1247 csum_all_checked = flags & __PCIE_DESC_RX_CSUM_ALL;
1248 csum_all_ok = flags & __PCIE_DESC_RX_CSUM_ALL_OK;
1249
1250 return csum_all_checked != (csum_all_ok << PCIE_DESC_RX_CSUM_OK_SHIFT);
1251 }
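
/* Illustrative reading of the check above: __PCIE_DESC_RX_CSUM_ALL gathers
 * the "checksum was verified" bits and __PCIE_DESC_RX_CSUM_ALL_OK the
 * corresponding "checksum was correct" bits; shifting the OK bits left by
 * PCIE_DESC_RX_CSUM_OK_SHIFT lines them up with the verified bits, so the
 * two values compare equal exactly when everything the hardware checked was
 * also reported good.
 */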
1252
1253 /**
1254 * nfp_net_rx_csum() - set SKB checksum field based on RX descriptor flags
1255 * @nn: NFP Net device
1256 * @r_vec: per-ring structure
1257 * @rxd: Pointer to RX descriptor
1258 * @skb: Pointer to SKB
1259 */
1260 static void nfp_net_rx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
1261 struct nfp_net_rx_desc *rxd, struct sk_buff *skb)
1262 {
1263 skb_checksum_none_assert(skb);
1264
1265 if (!(nn->netdev->features & NETIF_F_RXCSUM))
1266 return;
1267
1268 if (nfp_net_rx_csum_has_errors(le16_to_cpu(rxd->rxd.flags))) {
1269 u64_stats_update_begin(&r_vec->rx_sync);
1270 r_vec->hw_csum_rx_error++;
1271 u64_stats_update_end(&r_vec->rx_sync);
1272 return;
1273 }
1274
1275 /* Assume that the firmware will never report inner CSUM_OK unless outer
1276 * L4 headers were successfully parsed. FW will always report zero UDP
1277 * checksum as CSUM_OK.
1278 */
1279 if (rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK ||
1280 rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK) {
1281 __skb_incr_checksum_unnecessary(skb);
1282 u64_stats_update_begin(&r_vec->rx_sync);
1283 r_vec->hw_csum_rx_ok++;
1284 u64_stats_update_end(&r_vec->rx_sync);
1285 }
1286
1287 if (rxd->rxd.flags & PCIE_DESC_RX_I_TCP_CSUM_OK ||
1288 rxd->rxd.flags & PCIE_DESC_RX_I_UDP_CSUM_OK) {
1289 __skb_incr_checksum_unnecessary(skb);
1290 u64_stats_update_begin(&r_vec->rx_sync);
1291 r_vec->hw_csum_rx_inner_ok++;
1292 u64_stats_update_end(&r_vec->rx_sync);
1293 }
1294 }
1295
1296 static void nfp_net_set_hash(struct net_device *netdev, struct sk_buff *skb,
1297 unsigned int type, __be32 *hash)
1298 {
1299 if (!(netdev->features & NETIF_F_RXHASH))
1300 return;
1301
1302 switch (type) {
1303 case NFP_NET_RSS_IPV4:
1304 case NFP_NET_RSS_IPV6:
1305 case NFP_NET_RSS_IPV6_EX:
1306 skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L3);
1307 break;
1308 default:
1309 skb_set_hash(skb, get_unaligned_be32(hash), PKT_HASH_TYPE_L4);
1310 break;
1311 }
1312 }
1313
1314 static void
1315 nfp_net_set_hash_desc(struct net_device *netdev, struct sk_buff *skb,
1316 struct nfp_net_rx_desc *rxd)
1317 {
1318 struct nfp_net_rx_hash *rx_hash;
1319
1320 if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
1321 return;
1322
1323 rx_hash = (struct nfp_net_rx_hash *)(skb->data - sizeof(*rx_hash));
1324
1325 nfp_net_set_hash(netdev, skb, get_unaligned_be32(&rx_hash->hash_type),
1326 &rx_hash->hash);
1327 }
1328
1329 static void *
1330 nfp_net_parse_meta(struct net_device *netdev, struct sk_buff *skb,
1331 int meta_len)
1332 {
1333 u8 *data = skb->data - meta_len;
1334 u32 meta_info;
1335
1336 meta_info = get_unaligned_be32(data);
1337 data += 4;
1338
1339 while (meta_info) {
1340 switch (meta_info & NFP_NET_META_FIELD_MASK) {
1341 case NFP_NET_META_HASH:
1342 meta_info >>= NFP_NET_META_FIELD_SIZE;
1343 nfp_net_set_hash(netdev, skb,
1344 meta_info & NFP_NET_META_FIELD_MASK,
1345 (__be32 *)data);
1346 data += 4;
1347 break;
1348 case NFP_NET_META_MARK:
1349 skb->mark = get_unaligned_be32(data);
1350 data += 4;
1351 break;
1352 default:
1353 return NULL;
1354 }
1355
1356 meta_info >>= NFP_NET_META_FIELD_SIZE;
1357 }
1358
1359 return data;
1360 }
1361
1362 /**
1363 * nfp_net_rx() - receive up to @budget packets on @rx_ring
1364 * @rx_ring: RX ring to receive from
1365 * @budget: NAPI budget
1366 *
1367 * Note, this function is separated out from the napi poll function to
1368 * more cleanly separate packet receive code from other bookkeeping
1369 * functions performed in the napi poll function.
1370 *
1371 * There are differences between the NFP-3200 firmware and the
1372 * NFP-6000 firmware. The NFP-3200 firmware uses a dedicated RX queue
1373 * to indicate that new packets have arrived. The NFP-6000 does not
1374 * have this queue and uses the DD bit in the RX descriptor. This
1375 * method cannot be used on the NFP-3200 as it causes a race
1376 * condition: The RX ring write pointer on the NFP-3200 is updated
1377 * after packets (and descriptors) have been DMAed. If the DD bit is
1378 * used and subsequently the read pointer is updated this may lead to
1379 * the RX queue to underflow (if the firmware has not yet update the
1380 * write pointer). Therefore we use slightly ugly conditional code
1381 * below to handle the differences. We may, in the future update the
1382 * NFP-3200 firmware to behave the same as the firmware on the
1383 * NFP-6000.
1384 *
1385 * Return: Number of packets received.
1386 */
1387 static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
1388 {
1389 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1390 struct nfp_net *nn = r_vec->nfp_net;
1391 unsigned int data_len, meta_len;
1392 int avail = 0, pkts_polled = 0;
1393 struct sk_buff *skb, *new_skb;
1394 struct nfp_net_rx_desc *rxd;
1395 dma_addr_t new_dma_addr;
1396 u32 qcp_wr_p;
1397 int idx;
1398
1399 if (nn->is_nfp3200) {
1400 /* Work out how many packets arrived */
1401 qcp_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_rx);
1402 idx = rx_ring->rd_p % rx_ring->cnt;
1403
1404 if (qcp_wr_p == idx)
1405 /* No new packets */
1406 return 0;
1407
1408 if (qcp_wr_p > idx)
1409 avail = qcp_wr_p - idx;
1410 else
1411 avail = qcp_wr_p + rx_ring->cnt - idx;
1412 } else {
1413 avail = budget + 1;
1414 }
1415
1416 while (avail > 0 && pkts_polled < budget) {
1417 idx = rx_ring->rd_p % rx_ring->cnt;
1418
1419 rxd = &rx_ring->rxds[idx];
1420 if (!(rxd->rxd.meta_len_dd & PCIE_DESC_RX_DD)) {
1421 if (nn->is_nfp3200)
1422 nn_dbg(nn, "RX descriptor not valid (DD)%d:%u rxd[0]=%#x rxd[1]=%#x\n",
1423 rx_ring->idx, idx,
1424 rxd->vals[0], rxd->vals[1]);
1425 break;
1426 }
1427 /* Memory barrier to ensure that we won't do other reads
1428 * before the DD bit.
1429 */
1430 dma_rmb();
1431
1432 rx_ring->rd_p++;
1433 pkts_polled++;
1434 avail--;
1435
1436 skb = rx_ring->rxbufs[idx].skb;
1437
1438 new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr,
1439 nn->fl_bufsz);
1440 if (!new_skb) {
1441 nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[idx].skb,
1442 rx_ring->rxbufs[idx].dma_addr);
1443 u64_stats_update_begin(&r_vec->rx_sync);
1444 r_vec->rx_drops++;
1445 u64_stats_update_end(&r_vec->rx_sync);
1446 continue;
1447 }
1448
1449 dma_unmap_single(&nn->pdev->dev,
1450 rx_ring->rxbufs[idx].dma_addr,
1451 nn->fl_bufsz, DMA_FROM_DEVICE);
1452
1453 nfp_net_rx_give_one(rx_ring, new_skb, new_dma_addr);
1454
1455 /* < meta_len >
1456 * <-- [rx_offset] -->
1457 * ---------------------------------------------------------
1458 * | [XX] | metadata | packet | XXXX |
1459 * ---------------------------------------------------------
1460 * <---------------- data_len --------------->
1461 *
1462 * The rx_offset is fixed for all packets, the meta_len can vary
1463 * on a packet by packet basis. If rx_offset is set to zero
1464 * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
1465 * buffer and is immediately followed by the packet (no [XX]).
1466 */
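		/* Worked example (illustrative numbers): with a fixed rx_offset
		 * of 32 and meta_len = 8, the 8 bytes of metadata sit directly
		 * before the packet (buffer bytes 24..31), the packet starts at
		 * byte 32, and data_len - meta_len bytes of packet data are
		 * exposed to the stack by the skb_reserve()/skb_put() below.
		 */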
1467 meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
1468 data_len = le16_to_cpu(rxd->rxd.data_len);
1469
1470 if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
1471 skb_reserve(skb, meta_len);
1472 else
1473 skb_reserve(skb, nn->rx_offset);
1474 skb_put(skb, data_len - meta_len);
1475
1476 /* Stats update */
1477 u64_stats_update_begin(&r_vec->rx_sync);
1478 r_vec->rx_pkts++;
1479 r_vec->rx_bytes += skb->len;
1480 u64_stats_update_end(&r_vec->rx_sync);
1481
1482 if (nn->fw_ver.major <= 3) {
1483 nfp_net_set_hash_desc(nn->netdev, skb, rxd);
1484 } else if (meta_len) {
1485 void *end;
1486
1487 end = nfp_net_parse_meta(nn->netdev, skb, meta_len);
1488 if (unlikely(end != skb->data)) {
1489 u64_stats_update_begin(&r_vec->rx_sync);
1490 r_vec->rx_drops++;
1491 u64_stats_update_end(&r_vec->rx_sync);
1492
1493 dev_kfree_skb_any(skb);
1494 nn_warn_ratelimit(nn, "invalid RX packet metadata\n");
1495 continue;
1496 }
1497 }
1498
1499 skb_record_rx_queue(skb, rx_ring->idx);
1500 skb->protocol = eth_type_trans(skb, nn->netdev);
1501
1502 nfp_net_rx_csum(nn, r_vec, rxd, skb);
1503
1504 if (rxd->rxd.flags & PCIE_DESC_RX_VLAN)
1505 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1506 le16_to_cpu(rxd->rxd.vlan));
1507
1508 napi_gro_receive(&rx_ring->r_vec->napi, skb);
1509 }
1510
1511 if (nn->is_nfp3200)
1512 nfp_qcp_rd_ptr_add(rx_ring->qcp_rx, pkts_polled);
1513
1514 return pkts_polled;
1515 }
1516
1517 /**
1518 * nfp_net_poll() - napi poll function
1519 * @napi: NAPI structure
1520 * @budget: NAPI budget
1521 *
1522 * Return: number of packets polled.
1523 */
1524 static int nfp_net_poll(struct napi_struct *napi, int budget)
1525 {
1526 struct nfp_net_r_vector *r_vec =
1527 container_of(napi, struct nfp_net_r_vector, napi);
1528 struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
1529 struct nfp_net_tx_ring *tx_ring = r_vec->tx_ring;
1530 struct nfp_net *nn = r_vec->nfp_net;
1531 struct netdev_queue *txq;
1532 unsigned int pkts_polled;
1533
1534 tx_ring = &nn->tx_rings[rx_ring->idx];
1535 txq = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
1536 nfp_net_tx_complete(tx_ring);
1537
1538 pkts_polled = nfp_net_rx(rx_ring, budget);
1539
1540 if (pkts_polled < budget) {
1541 napi_complete_done(napi, pkts_polled);
1542 nfp_net_irq_unmask(nn, r_vec->irq_idx);
1543 }
1544
1545 return pkts_polled;
1546 }
1547
1548 /* Setup and Configuration
1549 */
1550
1551 /**
1552 * nfp_net_tx_ring_free() - Free resources allocated to a TX ring
1553 * @tx_ring: TX ring to free
1554 */
1555 static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
1556 {
1557 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
1558 struct nfp_net *nn = r_vec->nfp_net;
1559 struct pci_dev *pdev = nn->pdev;
1560
1561 kfree(tx_ring->txbufs);
1562
1563 if (tx_ring->txds)
1564 dma_free_coherent(&pdev->dev, tx_ring->size,
1565 tx_ring->txds, tx_ring->dma);
1566
1567 tx_ring->cnt = 0;
1568 tx_ring->txbufs = NULL;
1569 tx_ring->txds = NULL;
1570 tx_ring->dma = 0;
1571 tx_ring->size = 0;
1572 }
1573
1574 /**
1575 * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
1576 * @tx_ring: TX Ring structure to allocate
1577 * @cnt: Ring buffer count
1578 *
1579 * Return: 0 on success, negative errno otherwise.
1580 */
1581 static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt)
1582 {
1583 struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
1584 struct nfp_net *nn = r_vec->nfp_net;
1585 struct pci_dev *pdev = nn->pdev;
1586 int sz;
1587
1588 tx_ring->cnt = cnt;
1589
1590 tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
1591 tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
1592 &tx_ring->dma, GFP_KERNEL);
1593 if (!tx_ring->txds)
1594 goto err_alloc;
1595
1596 sz = sizeof(*tx_ring->txbufs) * tx_ring->cnt;
1597 tx_ring->txbufs = kzalloc(sz, GFP_KERNEL);
1598 if (!tx_ring->txbufs)
1599 goto err_alloc;
1600
1601 netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask, tx_ring->idx);
1602
1603 nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p\n",
1604 tx_ring->idx, tx_ring->qcidx,
1605 tx_ring->cnt, (unsigned long long)tx_ring->dma, tx_ring->txds);
1606
1607 return 0;
1608
1609 err_alloc:
1610 nfp_net_tx_ring_free(tx_ring);
1611 return -ENOMEM;
1612 }
1613
1614 static struct nfp_net_tx_ring *
1615 nfp_net_shadow_tx_rings_prepare(struct nfp_net *nn, u32 buf_cnt)
1616 {
1617 struct nfp_net_tx_ring *rings;
1618 unsigned int r;
1619
1620 rings = kcalloc(nn->num_tx_rings, sizeof(*rings), GFP_KERNEL);
1621 if (!rings)
1622 return NULL;
1623
1624 for (r = 0; r < nn->num_tx_rings; r++) {
1625 nfp_net_tx_ring_init(&rings[r], nn->tx_rings[r].r_vec, r);
1626
1627 if (nfp_net_tx_ring_alloc(&rings[r], buf_cnt))
1628 goto err_free_prev;
1629 }
1630
1631 return rings;
1632
1633 err_free_prev:
1634 while (r--)
1635 nfp_net_tx_ring_free(&rings[r]);
1636 kfree(rings);
1637 return NULL;
1638 }
1639
1640 static struct nfp_net_tx_ring *
1641 nfp_net_shadow_tx_rings_swap(struct nfp_net *nn, struct nfp_net_tx_ring *rings)
1642 {
1643 struct nfp_net_tx_ring *old = nn->tx_rings;
1644 unsigned int r;
1645
1646 for (r = 0; r < nn->num_tx_rings; r++)
1647 old[r].r_vec->tx_ring = &rings[r];
1648
1649 nn->tx_rings = rings;
1650 return old;
1651 }
1652
1653 static void
1654 nfp_net_shadow_tx_rings_free(struct nfp_net *nn, struct nfp_net_tx_ring *rings)
1655 {
1656 unsigned int r;
1657
1658 if (!rings)
1659 return;
1660
1661 for (r = 0; r < nn->num_tx_rings; r++)
1662 nfp_net_tx_ring_free(&rings[r]);
1663
1664 kfree(rings);
1665 }
1666
1667 /**
1668 * nfp_net_rx_ring_free() - Free resources allocated to a RX ring
1669 * @rx_ring: RX ring to free
1670 */
1671 static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
1672 {
1673 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1674 struct nfp_net *nn = r_vec->nfp_net;
1675 struct pci_dev *pdev = nn->pdev;
1676
1677 kfree(rx_ring->rxbufs);
1678
1679 if (rx_ring->rxds)
1680 dma_free_coherent(&pdev->dev, rx_ring->size,
1681 rx_ring->rxds, rx_ring->dma);
1682
1683 rx_ring->cnt = 0;
1684 rx_ring->rxbufs = NULL;
1685 rx_ring->rxds = NULL;
1686 rx_ring->dma = 0;
1687 rx_ring->size = 0;
1688 }
1689
1690 /**
1691 * nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
1692 * @rx_ring: RX ring to allocate
1693 * @fl_bufsz: Size of buffers to allocate
1694 * @cnt: Ring buffer count
1695 *
1696 * Return: 0 on success, negative errno otherwise.
1697 */
1698 static int
1699 nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz,
1700 u32 cnt)
1701 {
1702 struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
1703 struct nfp_net *nn = r_vec->nfp_net;
1704 struct pci_dev *pdev = nn->pdev;
1705 int sz;
1706
1707 rx_ring->cnt = cnt;
1708 rx_ring->bufsz = fl_bufsz;
1709
1710 rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
1711 rx_ring->rxds = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
1712 &rx_ring->dma, GFP_KERNEL);
1713 if (!rx_ring->rxds)
1714 goto err_alloc;
1715
1716 sz = sizeof(*rx_ring->rxbufs) * rx_ring->cnt;
1717 rx_ring->rxbufs = kzalloc(sz, GFP_KERNEL);
1718 if (!rx_ring->rxbufs)
1719 goto err_alloc;
1720
1721 nn_dbg(nn, "RxQ%02d: FlQCidx=%02d RxQCidx=%02d cnt=%d dma=%#llx host=%p\n",
1722 rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx,
1723 rx_ring->cnt, (unsigned long long)rx_ring->dma, rx_ring->rxds);
1724
1725 return 0;
1726
1727 err_alloc:
1728 nfp_net_rx_ring_free(rx_ring);
1729 return -ENOMEM;
1730 }
1731
1732 static struct nfp_net_rx_ring *
1733 nfp_net_shadow_rx_rings_prepare(struct nfp_net *nn, unsigned int fl_bufsz,
1734 u32 buf_cnt)
1735 {
1736 struct nfp_net_rx_ring *rings;
1737 unsigned int r;
1738
1739 rings = kcalloc(nn->num_rx_rings, sizeof(*rings), GFP_KERNEL);
1740 if (!rings)
1741 return NULL;
1742
1743 for (r = 0; r < nn->num_rx_rings; r++) {
1744 nfp_net_rx_ring_init(&rings[r], nn->rx_rings[r].r_vec, r);
1745
1746 if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, buf_cnt))
1747 goto err_free_prev;
1748
1749 if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r]))
1750 goto err_free_ring;
1751 }
1752
1753 return rings;
1754
1755 err_free_prev:
1756 while (r--) {
1757 nfp_net_rx_ring_bufs_free(nn, &rings[r]);
1758 err_free_ring:
1759 nfp_net_rx_ring_free(&rings[r]);
1760 }
1761 kfree(rings);
1762 return NULL;
1763 }
1764
1765 static struct nfp_net_rx_ring *
1766 nfp_net_shadow_rx_rings_swap(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
1767 {
1768 struct nfp_net_rx_ring *old = nn->rx_rings;
1769 unsigned int r;
1770
1771 for (r = 0; r < nn->num_rx_rings; r++)
1772 old[r].r_vec->rx_ring = &rings[r];
1773
1774 nn->rx_rings = rings;
1775 return old;
1776 }
1777
1778 static void
1779 nfp_net_shadow_rx_rings_free(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
1780 {
1781 unsigned int r;
1782
1783 if (!rings)
1784 return;
1785
1786 for (r = 0; r < nn->num_r_vecs; r++) {
1787 nfp_net_rx_ring_bufs_free(nn, &rings[r]);
1788 nfp_net_rx_ring_free(&rings[r]);
1789 }
1790
1791 kfree(rings);
1792 }
1793
1794 static int
1795 nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
1796 int idx)
1797 {
1798 struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
1799 int err;
1800
1801 r_vec->tx_ring = &nn->tx_rings[idx];
1802 nfp_net_tx_ring_init(r_vec->tx_ring, r_vec, idx);
1803
1804 r_vec->rx_ring = &nn->rx_rings[idx];
1805 nfp_net_rx_ring_init(r_vec->rx_ring, r_vec, idx);
1806
1807 snprintf(r_vec->name, sizeof(r_vec->name),
1808 "%s-rxtx-%d", nn->netdev->name, idx);
1809 err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec);
1810 if (err) {
1811 nn_err(nn, "Error requesting IRQ %d\n", entry->vector);
1812 return err;
1813 }
1814 disable_irq(entry->vector);
1815
1816 /* Setup NAPI */
1817 netif_napi_add(nn->netdev, &r_vec->napi,
1818 nfp_net_poll, NAPI_POLL_WEIGHT);
1819
1820 irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask);
1821
1822 nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, entry->vector, entry->entry);
1823
1824 return 0;
1825 }
1826
1827 static void
1828 nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
1829 {
1830 struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
1831
1832 irq_set_affinity_hint(entry->vector, NULL);
1833 netif_napi_del(&r_vec->napi);
1834 free_irq(entry->vector, r_vec);
1835 }
1836
1837 /**
1838 * nfp_net_rss_write_itbl() - Write RSS indirection table to device
1839 * @nn: NFP Net device to reconfigure
1840 */
1841 void nfp_net_rss_write_itbl(struct nfp_net *nn)
1842 {
1843 int i;
1844
1845 for (i = 0; i < NFP_NET_CFG_RSS_ITBL_SZ; i += 4)
1846 nn_writel(nn, NFP_NET_CFG_RSS_ITBL + i,
1847 get_unaligned_le32(nn->rss_itbl + i));
1848 }
1849
1850 /**
1851 * nfp_net_rss_write_key() - Write RSS hash key to device
1852 * @nn: NFP Net device to reconfigure
1853 */
1854 void nfp_net_rss_write_key(struct nfp_net *nn)
1855 {
1856 int i;
1857
1858 for (i = 0; i < NFP_NET_CFG_RSS_KEY_SZ; i += 4)
1859 nn_writel(nn, NFP_NET_CFG_RSS_KEY + i,
1860 get_unaligned_le32(nn->rss_key + i));
1861 }
1862
1863 /**
1864 * nfp_net_coalesce_write_cfg() - Write irq coalescence configuration to HW
1865 * @nn: NFP Net device to reconfigure
1866 */
1867 void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
1868 {
1869 u8 i;
1870 u32 factor;
1871 u32 value;
1872
1873 /* Compute factor used to convert coalesce '_usecs' parameters to
1874 * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
1875 * count.
1876 */
1877 factor = nn->me_freq_mhz / 16;
1878
1879 /* copy RX interrupt coalesce parameters */
1880 value = (nn->rx_coalesce_max_frames << 16) |
1881 (factor * nn->rx_coalesce_usecs);
1882 for (i = 0; i < nn->num_r_vecs; i++)
1883 nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);
1884
1885 /* copy TX interrupt coalesce parameters */
1886 value = (nn->tx_coalesce_max_frames << 16) |
1887 (factor * nn->tx_coalesce_usecs);
1888 for (i = 0; i < nn->num_r_vecs; i++)
1889 nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
1890 }
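
/* Worked example (illustrative numbers): with me_freq_mhz = 1200 the factor
 * is 1200 / 16 = 75 ticks per microsecond, so rx_coalesce_usecs = 50 and
 * rx_coalesce_max_frames = 64 would be programmed as (64 << 16) | (75 * 50).
 */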
1891
1892 /**
1893 * nfp_net_write_mac_addr() - Write mac address to the device control BAR
1894 * @nn: NFP Net device to reconfigure
1895 *
1896 * Writes the MAC address from the netdev to the device control BAR. Does not
1897 * perform the required reconfig. We do a bit of a byte-swapping dance because
1898 * the firmware is LE.
1899 */
1900 static void nfp_net_write_mac_addr(struct nfp_net *nn)
1901 {
1902 nn_writel(nn, NFP_NET_CFG_MACADDR + 0,
1903 get_unaligned_be32(nn->netdev->dev_addr));
1904 /* We can't do writew for NFP-3200 compatibility */
1905 nn_writel(nn, NFP_NET_CFG_MACADDR + 4,
1906 get_unaligned_be16(nn->netdev->dev_addr + 4) << 16);
1907 }
1908
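/* Clear the RX and TX ring address, size and MSI-X vector fields for ring
 * index @idx in the control BAR.
 */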
1909 static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
1910 {
1911 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
1912 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
1913 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);
1914
1915 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
1916 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
1917 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
1918 }
1919
1920 /**
1921 * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
1922 * @nn: NFP Net device to reconfigure
1923 */
1924 static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
1925 {
1926 u32 new_ctrl, update;
1927 unsigned int r;
1928 int err;
1929
1930 new_ctrl = nn->ctrl;
1931 new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE;
1932 update = NFP_NET_CFG_UPDATE_GEN;
1933 update |= NFP_NET_CFG_UPDATE_MSIX;
1934 update |= NFP_NET_CFG_UPDATE_RING;
1935
1936 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
1937 new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
1938
1939 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
1940 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
1941
1942 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
1943 err = nfp_net_reconfig(nn, update);
1944 if (err)
1945 nn_err(nn, "Could not disable device: %d\n", err);
1946
1947 for (r = 0; r < nn->num_r_vecs; r++) {
1948 nfp_net_rx_ring_reset(nn->r_vecs[r].rx_ring);
1949 nfp_net_tx_ring_reset(nn, nn->r_vecs[r].tx_ring);
1950 nfp_net_vec_clear_ring_data(nn, r);
1951 }
1952
1953 nn->ctrl = new_ctrl;
1954 }
1955
1956 static void
1957 nfp_net_vec_write_ring_data(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
1958 unsigned int idx)
1959 {
1960 /* Write the DMA address, size and MSI-X info to the device */
1961 nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), r_vec->rx_ring->dma);
1962 nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(r_vec->rx_ring->cnt));
1963 nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), r_vec->irq_idx);
1964
1965 nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), r_vec->tx_ring->dma);
1966 nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(r_vec->tx_ring->cnt));
1967 nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), r_vec->irq_idx);
1968 }
1969
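/**
 * __nfp_net_set_config_and_enable() - Write control BAR and enable NFP
 * @nn: NFP Net device to reconfigure
 *
 * Writes the RSS, interrupt moderation, ring, MAC address, MTU and freelist
 * buffer size configuration to the control BAR and asks the firmware to
 * apply it, then fills the RX freelists and, if VXLAN offload is enabled,
 * asks the stack to replay the tunnel port configuration.  Unlike
 * nfp_net_set_config_and_enable() it does not disable the device again if
 * the reconfig fails.
 *
 * Return: 0 on success or negative errno from nfp_net_reconfig().
 */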
1970 static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
1971 {
1972 u32 new_ctrl, update = 0;
1973 unsigned int r;
1974 int err;
1975
1976 new_ctrl = nn->ctrl;
1977
1978 if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
1979 nfp_net_rss_write_key(nn);
1980 nfp_net_rss_write_itbl(nn);
1981 nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg);
1982 update |= NFP_NET_CFG_UPDATE_RSS;
1983 }
1984
1985 if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
1986 nfp_net_coalesce_write_cfg(nn);
1987
1988 new_ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
1989 update |= NFP_NET_CFG_UPDATE_IRQMOD;
1990 }
1991
1992 for (r = 0; r < nn->num_r_vecs; r++)
1993 nfp_net_vec_write_ring_data(nn, &nn->r_vecs[r], r);
1994
1995 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ?
1996 0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1);
1997
1998 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ?
1999 0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1);
2000
2001 nfp_net_write_mac_addr(nn);
2002
2003 nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu);
2004 nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz);
2005
2006 /* Enable device */
2007 new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
2008 update |= NFP_NET_CFG_UPDATE_GEN;
2009 update |= NFP_NET_CFG_UPDATE_MSIX;
2010 update |= NFP_NET_CFG_UPDATE_RING;
2011 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
2012 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
2013
2014 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2015 err = nfp_net_reconfig(nn, update);
2016
2017 nn->ctrl = new_ctrl;
2018
2019 for (r = 0; r < nn->num_r_vecs; r++)
2020 nfp_net_rx_ring_fill_freelist(nn->r_vecs[r].rx_ring);
2021
2022 /* Since reconfiguration requests while the NFP is down are ignored, we
2023 * have to wipe the entire VXLAN configuration and reinitialize it.
2024 */
2025 if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) {
2026 memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
2027 memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
2028 udp_tunnel_get_rx_info(nn->netdev);
2029 }
2030
2031 return err;
2032 }
2033
2034 /**
2035 * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
2036 * @nn: NFP Net device to reconfigure
2037 */
2038 static int nfp_net_set_config_and_enable(struct nfp_net *nn)
2039 {
2040 int err;
2041
2042 err = __nfp_net_set_config_and_enable(nn);
2043 if (err)
2044 nfp_net_clear_config_and_disable(nn);
2045
2046 return err;
2047 }
2048
2049 /**
2050 * nfp_net_open_stack() - Start the device from the stack's perspective
2051 * @nn: NFP Net device to start
2052 */
2053 static void nfp_net_open_stack(struct nfp_net *nn)
2054 {
2055 unsigned int r;
2056
2057 for (r = 0; r < nn->num_r_vecs; r++) {
2058 napi_enable(&nn->r_vecs[r].napi);
2059 enable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
2060 }
2061
2062 netif_tx_wake_all_queues(nn->netdev);
2063
2064 enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2065 nfp_net_read_link_status(nn);
2066 }
2067
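/**
 * nfp_net_netdev_open() - Bring the device up (ndo_open)
 * @netdev: netdev structure
 *
 * Requests the auxiliary and per-ring interrupts, allocates TX/RX ring
 * resources, writes the configuration to the firmware and finally enables
 * NAPI and the TX queues.  All allocations are unwound on failure.
 *
 * Return: 0 on success or negative errno on error.
 */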
2068 static int nfp_net_netdev_open(struct net_device *netdev)
2069 {
2070 struct nfp_net *nn = netdev_priv(netdev);
2071 int err, r;
2072
2073 if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) {
2074 nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl);
2075 return -EBUSY;
2076 }
2077
2078 /* Step 1: Allocate resources for rings and the like
2079 * - Request interrupts
2080 * - Allocate RX and TX ring resources
2081 * - Setup initial RSS table
2082 */
2083 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
2084 nn->exn_name, sizeof(nn->exn_name),
2085 NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
2086 if (err)
2087 return err;
2088 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
2089 nn->lsc_name, sizeof(nn->lsc_name),
2090 NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
2091 if (err)
2092 goto err_free_exn;
2093 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2094
2095 nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
2096 GFP_KERNEL);
2097 if (!nn->rx_rings) {
2098 err = -ENOMEM;
2099 goto err_free_lsc;
2100 }
2101 nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings),
2102 GFP_KERNEL);
2103 if (!nn->tx_rings) {
2104 err = -ENOMEM;
2105 goto err_free_rx_rings;
2106 }
2107
2108 for (r = 0; r < nn->num_r_vecs; r++) {
2109 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
2110 if (err)
2111 goto err_free_prev_vecs;
2112
2113 err = nfp_net_tx_ring_alloc(nn->r_vecs[r].tx_ring, nn->txd_cnt);
2114 if (err)
2115 goto err_cleanup_vec_p;
2116
2117 err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring,
2118 nn->fl_bufsz, nn->rxd_cnt);
2119 if (err)
2120 goto err_free_tx_ring_p;
2121
2122 err = nfp_net_rx_ring_bufs_alloc(nn, nn->r_vecs[r].rx_ring);
2123 if (err)
2124 goto err_flush_rx_ring_p;
2125 }
2126
2127 err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings);
2128 if (err)
2129 goto err_free_rings;
2130
2131 err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings);
2132 if (err)
2133 goto err_free_rings;
2134
2135 /* Step 2: Configure the NFP
2136 * - Enable rings from 0 to tx_rings/rx_rings - 1.
2137 * - Write MAC address (in case it changed)
2138 * - Set the MTU
2139 * - Set the Freelist buffer size
2140 * - Enable the FW
2141 */
2142 err = nfp_net_set_config_and_enable(nn);
2143 if (err)
2144 goto err_free_rings;
2145
2146 /* Step 3: Enable for kernel
2147 * - put some freelist descriptors on each RX ring
2148 * - enable NAPI on each ring
2149 * - enable all TX queues
2150 * - set link state
2151 */
2152 nfp_net_open_stack(nn);
2153
2154 return 0;
2155
2156 err_free_rings:
2157 r = nn->num_r_vecs;
2158 err_free_prev_vecs:
2159 while (r--) {
2160 nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
2161 err_flush_rx_ring_p:
2162 nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
2163 err_free_tx_ring_p:
2164 nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
2165 err_cleanup_vec_p:
2166 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
2167 }
2168 kfree(nn->tx_rings);
2169 err_free_rx_rings:
2170 kfree(nn->rx_rings);
2171 err_free_lsc:
2172 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
2173 err_free_exn:
2174 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
2175 return err;
2176 }
2177
2178 /**
2179 * nfp_net_close_stack() - Quiesce the stack (part of close)
2180 * @nn: NFP Net device to quiesce
2181 */
2182 static void nfp_net_close_stack(struct nfp_net *nn)
2183 {
2184 unsigned int r;
2185
2186 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
2187 netif_carrier_off(nn->netdev);
2188 nn->link_up = false;
2189
2190 for (r = 0; r < nn->num_r_vecs; r++) {
2191 disable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
2192 napi_disable(&nn->r_vecs[r].napi);
2193 }
2194
2195 netif_tx_disable(nn->netdev);
2196 }
2197
2198 /**
2199 * nfp_net_close_free_all() - Free all runtime resources
2200 * @nn: NFP Net device to free resources of
2201 */
2202 static void nfp_net_close_free_all(struct nfp_net *nn)
2203 {
2204 unsigned int r;
2205
2206 for (r = 0; r < nn->num_r_vecs; r++) {
2207 nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
2208 nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
2209 nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
2210 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
2211 }
2212
2213 kfree(nn->rx_rings);
2214 kfree(nn->tx_rings);
2215
2216 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
2217 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
2218 }
2219
2220 /**
2221 * nfp_net_netdev_close() - Called when the device is brought down (ndo_stop)
2222 * @netdev: netdev structure
2223 */
2224 static int nfp_net_netdev_close(struct net_device *netdev)
2225 {
2226 struct nfp_net *nn = netdev_priv(netdev);
2227
2228 if (!(nn->ctrl & NFP_NET_CFG_CTRL_ENABLE)) {
2229 nn_err(nn, "Dev is not up: 0x%08x\n", nn->ctrl);
2230 return 0;
2231 }
2232
2233 /* Step 1: Disable RX and TX rings from the Linux kernel perspective
2234 */
2235 nfp_net_close_stack(nn);
2236
2237 /* Step 2: Tell NFP
2238 */
2239 nfp_net_clear_config_and_disable(nn);
2240
2241 /* Step 3: Free resources
2242 */
2243 nfp_net_close_free_all(nn);
2244
2245 nn_dbg(nn, "%s down\n", netdev->name);
2246 return 0;
2247 }
2248
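/* ndo_set_rx_mode callback.  Only promiscuous mode is configurable; the
 * control word update is posted as an asynchronous reconfig.
 */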
2249 static void nfp_net_set_rx_mode(struct net_device *netdev)
2250 {
2251 struct nfp_net *nn = netdev_priv(netdev);
2252 u32 new_ctrl;
2253
2254 new_ctrl = nn->ctrl;
2255
2256 if (netdev->flags & IFF_PROMISC) {
2257 if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
2258 new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
2259 else
2260 nn_warn(nn, "FW does not support promiscuous mode\n");
2261 } else {
2262 new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
2263 }
2264
2265 if (new_ctrl == nn->ctrl)
2266 return;
2267
2268 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2269 nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
2270
2271 nn->ctrl = new_ctrl;
2272 }
2273
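/**
 * nfp_net_change_mtu() - Change the device MTU (ndo_change_mtu)
 * @netdev:  netdev structure
 * @new_mtu: requested MTU
 *
 * If the interface is running, shadow RX rings sized for the new freelist
 * buffers are prepared and swapped in; should the firmware reject the new
 * configuration the old MTU and rings are restored.
 *
 * Return: 0 on success or negative errno on error.
 */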
2274 static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
2275 {
2276 unsigned int old_mtu, old_fl_bufsz, new_fl_bufsz;
2277 struct nfp_net *nn = netdev_priv(netdev);
2278 struct nfp_net_rx_ring *tmp_rings;
2279 int err;
2280
2281 if (new_mtu < 68 || new_mtu > nn->max_mtu) {
2282 nn_err(nn, "New MTU (%d) is not valid\n", new_mtu);
2283 return -EINVAL;
2284 }
2285
2286 old_mtu = netdev->mtu;
2287 old_fl_bufsz = nn->fl_bufsz;
2288 new_fl_bufsz = NFP_NET_MAX_PREPEND + ETH_HLEN + VLAN_HLEN * 2 + new_mtu;
2289
2290 if (!netif_running(netdev)) {
2291 netdev->mtu = new_mtu;
2292 nn->fl_bufsz = new_fl_bufsz;
2293 return 0;
2294 }
2295
2296 /* Prepare new rings */
2297 tmp_rings = nfp_net_shadow_rx_rings_prepare(nn, new_fl_bufsz,
2298 nn->rxd_cnt);
2299 if (!tmp_rings)
2300 return -ENOMEM;
2301
2302 /* Stop device, swap in new rings, try to start the firmware */
2303 nfp_net_close_stack(nn);
2304 nfp_net_clear_config_and_disable(nn);
2305
2306 tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
2307
2308 netdev->mtu = new_mtu;
2309 nn->fl_bufsz = new_fl_bufsz;
2310
2311 err = nfp_net_set_config_and_enable(nn);
2312 if (err) {
2313 const int err_new = err;
2314
2315 /* Try with old configuration and old rings */
2316 tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
2317
2318 netdev->mtu = old_mtu;
2319 nn->fl_bufsz = old_fl_bufsz;
2320
2321 err = __nfp_net_set_config_and_enable(nn);
2322 if (err)
2323 nn_err(nn, "Can't restore MTU - FW communication failed (%d,%d)\n",
2324 err_new, err);
2325 }
2326
2327 nfp_net_shadow_rx_rings_free(nn, tmp_rings);
2328
2329 nfp_net_open_stack(nn);
2330
2331 return err;
2332 }
2333
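/**
 * nfp_net_set_ring_size() - Change the RX/TX descriptor ring sizes
 * @nn:      NFP Net device to reconfigure
 * @rxd_cnt: Number of RX descriptors per ring
 * @txd_cnt: Number of TX descriptors per ring
 *
 * Uses the same shadow-ring scheme as nfp_net_change_mtu(): prepare new
 * rings, stop the device, swap the rings in and fall back to the old
 * configuration if the firmware refuses the new one.
 *
 * Return: 0 on success or negative errno on error.
 */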
2334 int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
2335 {
2336 struct nfp_net_tx_ring *tx_rings = NULL;
2337 struct nfp_net_rx_ring *rx_rings = NULL;
2338 u32 old_rxd_cnt, old_txd_cnt;
2339 int err;
2340
2341 if (!netif_running(nn->netdev)) {
2342 nn->rxd_cnt = rxd_cnt;
2343 nn->txd_cnt = txd_cnt;
2344 return 0;
2345 }
2346
2347 old_rxd_cnt = nn->rxd_cnt;
2348 old_txd_cnt = nn->txd_cnt;
2349
2350 /* Prepare new rings */
2351 if (nn->rxd_cnt != rxd_cnt) {
2352 rx_rings = nfp_net_shadow_rx_rings_prepare(nn, nn->fl_bufsz,
2353 rxd_cnt);
2354 if (!rx_rings)
2355 return -ENOMEM;
2356 }
2357 if (nn->txd_cnt != txd_cnt) {
2358 tx_rings = nfp_net_shadow_tx_rings_prepare(nn, txd_cnt);
2359 if (!tx_rings) {
2360 nfp_net_shadow_rx_rings_free(nn, rx_rings);
2361 return -ENOMEM;
2362 }
2363 }
2364
2365 /* Stop device, swap in new rings, try to start the firmware */
2366 nfp_net_close_stack(nn);
2367 nfp_net_clear_config_and_disable(nn);
2368
2369 if (rx_rings)
2370 rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings);
2371 if (tx_rings)
2372 tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings);
2373
2374 nn->rxd_cnt = rxd_cnt;
2375 nn->txd_cnt = txd_cnt;
2376
2377 err = nfp_net_set_config_and_enable(nn);
2378 if (err) {
2379 const int err_new = err;
2380
2381 /* Try with old configuration and old rings */
2382 if (rx_rings)
2383 rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings);
2384 if (tx_rings)
2385 tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings);
2386
2387 nn->rxd_cnt = old_rxd_cnt;
2388 nn->txd_cnt = old_txd_cnt;
2389
2390 err = __nfp_net_set_config_and_enable(nn);
2391 if (err)
2392 nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
2393 err_new, err);
2394 }
2395
2396 nfp_net_shadow_rx_rings_free(nn, rx_rings);
2397 nfp_net_shadow_tx_rings_free(nn, tx_rings);
2398
2399 nfp_net_open_stack(nn);
2400
2401 return err;
2402 }
2403
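/* ndo_get_stats64 callback - sum the per-ring-vector packet, byte and
 * error/drop counters, using the u64_stats seqcount for consistent reads.
 */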
2404 static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,
2405 struct rtnl_link_stats64 *stats)
2406 {
2407 struct nfp_net *nn = netdev_priv(netdev);
2408 int r;
2409
2410 for (r = 0; r < nn->num_r_vecs; r++) {
2411 struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
2412 u64 data[3];
2413 unsigned int start;
2414
2415 do {
2416 start = u64_stats_fetch_begin(&r_vec->rx_sync);
2417 data[0] = r_vec->rx_pkts;
2418 data[1] = r_vec->rx_bytes;
2419 data[2] = r_vec->rx_drops;
2420 } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
2421 stats->rx_packets += data[0];
2422 stats->rx_bytes += data[1];
2423 stats->rx_dropped += data[2];
2424
2425 do {
2426 start = u64_stats_fetch_begin(&r_vec->tx_sync);
2427 data[0] = r_vec->tx_pkts;
2428 data[1] = r_vec->tx_bytes;
2429 data[2] = r_vec->tx_errors;
2430 } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
2431 stats->tx_packets += data[0];
2432 stats->tx_bytes += data[1];
2433 stats->tx_errors += data[2];
2434 }
2435
2436 return stats;
2437 }
2438
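/* Check whether the firmware advertises BPF offload and speaks the ABI
 * version this driver was built against.
 */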
2439 static bool nfp_net_ebpf_capable(struct nfp_net *nn)
2440 {
2441 if (nn->cap & NFP_NET_CFG_CTRL_BPF &&
2442 nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI)
2443 return true;
2444 return false;
2445 }
2446
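/* ndo_setup_tc callback - only ingress cls_bpf offload on a BPF capable
 * firmware is supported.
 */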
2447 static int
2448 nfp_net_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
2449 struct tc_to_netdev *tc)
2450 {
2451 struct nfp_net *nn = netdev_priv(netdev);
2452
2453 if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
2454 return -ENOTSUPP;
2455 if (proto != htons(ETH_P_ALL))
2456 return -ENOTSUPP;
2457
2458 if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn))
2459 return nfp_net_bpf_offload(nn, handle, proto, tc->cls_bpf);
2460
2461 return -EINVAL;
2462 }
2463
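/* ndo_set_features callback - translate changed netdev feature flags into
 * control BAR bits and ask the firmware to apply them.  Disabling HW TC
 * offload is refused while a BPF program is offloaded.
 */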
2464 static int nfp_net_set_features(struct net_device *netdev,
2465 netdev_features_t features)
2466 {
2467 netdev_features_t changed = netdev->features ^ features;
2468 struct nfp_net *nn = netdev_priv(netdev);
2469 u32 new_ctrl;
2470 int err;
2471
2472 /* Assume this is not called with features we have not advertised */
2473
2474 new_ctrl = nn->ctrl;
2475
2476 if (changed & NETIF_F_RXCSUM) {
2477 if (features & NETIF_F_RXCSUM)
2478 new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
2479 else
2480 new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM;
2481 }
2482
2483 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
2484 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
2485 new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
2486 else
2487 new_ctrl &= ~NFP_NET_CFG_CTRL_TXCSUM;
2488 }
2489
2490 if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
2491 if (features & (NETIF_F_TSO | NETIF_F_TSO6))
2492 new_ctrl |= NFP_NET_CFG_CTRL_LSO;
2493 else
2494 new_ctrl &= ~NFP_NET_CFG_CTRL_LSO;
2495 }
2496
2497 if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
2498 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2499 new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
2500 else
2501 new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN;
2502 }
2503
2504 if (changed & NETIF_F_HW_VLAN_CTAG_TX) {
2505 if (features & NETIF_F_HW_VLAN_CTAG_TX)
2506 new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
2507 else
2508 new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN;
2509 }
2510
2511 if (changed & NETIF_F_SG) {
2512 if (features & NETIF_F_SG)
2513 new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
2514 else
2515 new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
2516 }
2517
2518 if (changed & NETIF_F_HW_TC && nn->ctrl & NFP_NET_CFG_CTRL_BPF) {
2519 nn_err(nn, "Cannot disable HW TC offload while in use\n");
2520 return -EBUSY;
2521 }
2522
2523 nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
2524 netdev->features, features, changed);
2525
2526 if (new_ctrl == nn->ctrl)
2527 return 0;
2528
2529 nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->ctrl, new_ctrl);
2530 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
2531 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
2532 if (err)
2533 return err;
2534
2535 nn->ctrl = new_ctrl;
2536
2537 return 0;
2538 }
2539
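/* ndo_features_check callback - strip checksum and GSO offloads from
 * encapsulated packets the device cannot handle: only VXLAN over UDP and
 * GRE with an inner Ethernet header are accepted, and for GSO the inner
 * headers must fit within the TX descriptor's header length field.
 */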
2540 static netdev_features_t
2541 nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
2542 netdev_features_t features)
2543 {
2544 u8 l4_hdr;
2545
2546 /* We can't do TSO over double tagged packets (802.1AD) */
2547 features &= vlan_features_check(skb, features);
2548
2549 if (!skb->encapsulation)
2550 return features;
2551
2552 /* Ensure that inner L4 header offset fits into TX descriptor field */
2553 if (skb_is_gso(skb)) {
2554 u32 hdrlen;
2555
2556 hdrlen = skb_inner_transport_header(skb) - skb->data +
2557 inner_tcp_hdrlen(skb);
2558
2559 if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ))
2560 features &= ~NETIF_F_GSO_MASK;
2561 }
2562
2563 /* VXLAN/GRE check */
2564 switch (vlan_get_protocol(skb)) {
2565 case htons(ETH_P_IP):
2566 l4_hdr = ip_hdr(skb)->protocol;
2567 break;
2568 case htons(ETH_P_IPV6):
2569 l4_hdr = ipv6_hdr(skb)->nexthdr;
2570 break;
2571 default:
2572 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2573 }
2574
2575 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
2576 skb->inner_protocol != htons(ETH_P_TEB) ||
2577 (l4_hdr != IPPROTO_UDP && l4_hdr != IPPROTO_GRE) ||
2578 (l4_hdr == IPPROTO_UDP &&
2579 (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
2580 sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
2581 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2582
2583 return features;
2584 }
2585
2586 /**
2587 * nfp_net_set_vxlan_port() - set vxlan port in SW and reconfigure HW
2588 * @nn: NFP Net device to reconfigure
2589 * @idx: Index into the port table where new port should be written
2590 * @port: UDP port to configure (pass zero to remove VXLAN port)
2591 */
2592 static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
2593 {
2594 int i;
2595
2596 nn->vxlan_ports[idx] = port;
2597
2598 if (!(nn->ctrl & NFP_NET_CFG_CTRL_VXLAN))
2599 return;
2600
2601 BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
2602 for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2)
2603 nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(port),
2604 be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 |
2605 be16_to_cpu(nn->vxlan_ports[i]));
2606
2607 nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN);
2608 }
2609
2610 /**
2611 * nfp_net_find_vxlan_idx() - find table entry of the port or a free one
2612 * @nn: NFP Network structure
2613 * @port: UDP port to look for
2614 *
2615 * Return: if the port is already in the table -- its position;
2616 * if the port is not in the table -- a free position to use;
2617 * if the table is full -- -ENOSPC.
2618 */
2619 static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port)
2620 {
2621 int i, free_idx = -ENOSPC;
2622
2623 for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i++) {
2624 if (nn->vxlan_ports[i] == port)
2625 return i;
2626 if (!nn->vxlan_usecnt[i])
2627 free_idx = i;
2628 }
2629
2630 return free_idx;
2631 }
2632
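/* udp_tunnel add/del callbacks - reference count offloaded VXLAN ports and
 * rewrite the firmware port table when a port is first added or last
 * removed.
 */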
2633 static void nfp_net_add_vxlan_port(struct net_device *netdev,
2634 struct udp_tunnel_info *ti)
2635 {
2636 struct nfp_net *nn = netdev_priv(netdev);
2637 int idx;
2638
2639 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2640 return;
2641
2642 idx = nfp_net_find_vxlan_idx(nn, ti->port);
2643 if (idx == -ENOSPC)
2644 return;
2645
2646 if (!nn->vxlan_usecnt[idx]++)
2647 nfp_net_set_vxlan_port(nn, idx, ti->port);
2648 }
2649
2650 static void nfp_net_del_vxlan_port(struct net_device *netdev,
2651 struct udp_tunnel_info *ti)
2652 {
2653 struct nfp_net *nn = netdev_priv(netdev);
2654 int idx;
2655
2656 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2657 return;
2658
2659 idx = nfp_net_find_vxlan_idx(nn, ti->port);
2660 if (idx == -ENOSPC || !nn->vxlan_usecnt[idx])
2661 return;
2662
2663 if (!--nn->vxlan_usecnt[idx])
2664 nfp_net_set_vxlan_port(nn, idx, 0);
2665 }
2666
2667 static const struct net_device_ops nfp_net_netdev_ops = {
2668 .ndo_open = nfp_net_netdev_open,
2669 .ndo_stop = nfp_net_netdev_close,
2670 .ndo_start_xmit = nfp_net_tx,
2671 .ndo_get_stats64 = nfp_net_stat64,
2672 .ndo_setup_tc = nfp_net_setup_tc,
2673 .ndo_tx_timeout = nfp_net_tx_timeout,
2674 .ndo_set_rx_mode = nfp_net_set_rx_mode,
2675 .ndo_change_mtu = nfp_net_change_mtu,
2676 .ndo_set_mac_address = eth_mac_addr,
2677 .ndo_set_features = nfp_net_set_features,
2678 .ndo_features_check = nfp_net_features_check,
2679 .ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
2680 .ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
2681 };
2682
2683 /**
2684 * nfp_net_info() - Print general info about the NIC
2685 * @nn: NFP Net device to print info about
2686 */
2687 void nfp_net_info(struct nfp_net *nn)
2688 {
2689 nn_info(nn, "Netronome %s %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
2690 nn->is_nfp3200 ? "NFP-32xx" : "NFP-6xxx",
2691 nn->is_vf ? "VF " : "",
2692 nn->num_tx_rings, nn->max_tx_rings,
2693 nn->num_rx_rings, nn->max_rx_rings);
2694 nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
2695 nn->fw_ver.resv, nn->fw_ver.class,
2696 nn->fw_ver.major, nn->fw_ver.minor,
2697 nn->max_mtu);
2698 nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
2699 nn->cap,
2700 nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
2701 nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
2702 nn->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
2703 nn->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
2704 nn->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
2705 nn->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
2706 nn->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
2707 nn->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
2708 nn->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
2709 nn->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
2710 nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "",
2711 nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "",
2712 nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
2713 nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
2714 nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
2715 nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "",
2716 nfp_net_ebpf_capable(nn) ? "BPF " : "");
2717 }
2718
2719 /**
2720 * nfp_net_netdev_alloc() - Allocate netdev and related structure
2721 * @pdev: PCI device
2722 * @max_tx_rings: Maximum number of TX rings supported by device
2723 * @max_rx_rings: Maximum number of RX rings supported by device
2724 *
2725 * This function allocates a netdev device and fills in the initial
2726 * part of the &struct nfp_net structure.
2727 *
2728 * Return: NFP Net device structure, or ERR_PTR on error.
2729 */
2730 struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
2731 int max_tx_rings, int max_rx_rings)
2732 {
2733 struct net_device *netdev;
2734 struct nfp_net *nn;
2735 int nqs;
2736
2737 netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
2738 max_tx_rings, max_rx_rings);
2739 if (!netdev)
2740 return ERR_PTR(-ENOMEM);
2741
2742 SET_NETDEV_DEV(netdev, &pdev->dev);
2743 nn = netdev_priv(netdev);
2744
2745 nn->netdev = netdev;
2746 nn->pdev = pdev;
2747
2748 nn->max_tx_rings = max_tx_rings;
2749 nn->max_rx_rings = max_rx_rings;
2750
2751 nqs = netif_get_num_default_rss_queues();
2752 nn->num_tx_rings = min_t(int, nqs, max_tx_rings);
2753 nn->num_rx_rings = min_t(int, nqs, max_rx_rings);
2754
2755 nn->txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
2756 nn->rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
2757
2758 spin_lock_init(&nn->reconfig_lock);
2759 spin_lock_init(&nn->rx_filter_lock);
2760 spin_lock_init(&nn->link_status_lock);
2761
2762 setup_timer(&nn->reconfig_timer,
2763 nfp_net_reconfig_timer, (unsigned long)nn);
2764 setup_timer(&nn->rx_filter_stats_timer,
2765 nfp_net_filter_stats_timer, (unsigned long)nn);
2766
2767 return nn;
2768 }
2769
2770 /**
2771 * nfp_net_netdev_free() - Undo what nfp_net_netdev_alloc() did
2772 * @nn: NFP Net device to free
2773 */
2774 void nfp_net_netdev_free(struct nfp_net *nn)
2775 {
2776 free_netdev(nn->netdev);
2777 }
2778
2779 /**
2780 * nfp_net_rss_init() - Set the initial RSS parameters
2781 * @nn: NFP Net device to reconfigure
2782 */
2783 static void nfp_net_rss_init(struct nfp_net *nn)
2784 {
2785 int i;
2786
2787 netdev_rss_key_fill(nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ);
2788
2789 for (i = 0; i < sizeof(nn->rss_itbl); i++)
2790 nn->rss_itbl[i] =
2791 ethtool_rxfh_indir_default(i, nn->num_rx_rings);
2792
2793 /* Enable IPv4/IPv6 TCP by default */
2794 nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
2795 NFP_NET_CFG_RSS_IPV6_TCP |
2796 NFP_NET_CFG_RSS_TOEPLITZ |
2797 NFP_NET_CFG_RSS_MASK;
2798 }
2799
2800 /**
2801 * nfp_net_irqmod_init() - Set the initial IRQ moderation parameters
2802 * @nn: NFP Net device to reconfigure
2803 */
2804 static void nfp_net_irqmod_init(struct nfp_net *nn)
2805 {
2806 nn->rx_coalesce_usecs = 50;
2807 nn->rx_coalesce_max_frames = 64;
2808 nn->tx_coalesce_usecs = 50;
2809 nn->tx_coalesce_max_frames = 64;
2810 }
2811
2812 /**
2813 * nfp_net_netdev_init() - Initialise/finalise the netdev structure
2814 * @netdev: netdev structure
2815 *
2816 * Return: 0 on success or negative errno on error.
2817 */
2818 int nfp_net_netdev_init(struct net_device *netdev)
2819 {
2820 struct nfp_net *nn = netdev_priv(netdev);
2821 int err;
2822
2823 /* Get some of the read-only fields from the BAR */
2824 nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
2825 nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
2826
2827 nfp_net_write_mac_addr(nn);
2828
2829 /* Set default MTU and Freelist buffer size */
2830 if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
2831 netdev->mtu = nn->max_mtu;
2832 else
2833 netdev->mtu = NFP_NET_DEFAULT_MTU;
2834 nn->fl_bufsz = NFP_NET_DEFAULT_RX_BUFSZ;
2835
2836 /* Advertise/enable offloads based on capabilities
2837 *
2838 * Note: netdev->features shows the currently enabled features
2839 * and netdev->hw_features advertises which features are
2840 * supported. By default we enable most features.
2841 */
2842 netdev->hw_features = NETIF_F_HIGHDMA;
2843 if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM) {
2844 netdev->hw_features |= NETIF_F_RXCSUM;
2845 nn->ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
2846 }
2847 if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
2848 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2849 nn->ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
2850 }
2851 if (nn->cap & NFP_NET_CFG_CTRL_GATHER) {
2852 netdev->hw_features |= NETIF_F_SG;
2853 nn->ctrl |= NFP_NET_CFG_CTRL_GATHER;
2854 }
2855 if ((nn->cap & NFP_NET_CFG_CTRL_LSO) && nn->fw_ver.major > 2) {
2856 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
2857 nn->ctrl |= NFP_NET_CFG_CTRL_LSO;
2858 }
2859 if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
2860 netdev->hw_features |= NETIF_F_RXHASH;
2861 nfp_net_rss_init(nn);
2862 nn->ctrl |= NFP_NET_CFG_CTRL_RSS;
2863 }
2864 if (nn->cap & NFP_NET_CFG_CTRL_VXLAN &&
2865 nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
2866 if (nn->cap & NFP_NET_CFG_CTRL_LSO)
2867 netdev->hw_features |= NETIF_F_GSO_GRE |
2868 NETIF_F_GSO_UDP_TUNNEL;
2869 nn->ctrl |= NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE;
2870
2871 netdev->hw_enc_features = netdev->hw_features;
2872 }
2873
2874 netdev->vlan_features = netdev->hw_features;
2875
2876 if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN) {
2877 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
2878 nn->ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
2879 }
2880 if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN) {
2881 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
2882 nn->ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
2883 }
2884
2885 netdev->features = netdev->hw_features;
2886
2887 if (nfp_net_ebpf_capable(nn))
2888 netdev->hw_features |= NETIF_F_HW_TC;
2889
2890 /* Advertise but disable TSO by default. */
2891 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
2892
2893 /* Allow L2 Broadcast and Multicast through by default, if supported */
2894 if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
2895 nn->ctrl |= NFP_NET_CFG_CTRL_L2BC;
2896 if (nn->cap & NFP_NET_CFG_CTRL_L2MC)
2897 nn->ctrl |= NFP_NET_CFG_CTRL_L2MC;
2898
2899 /* Allow IRQ moderation, if supported */
2900 if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
2901 nfp_net_irqmod_init(nn);
2902 nn->ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
2903 }
2904
2905 /* On the NFP-3200, enable MSI-X auto-masking if supported and the
2906 * interrupts are not shared.
2907 */
2908 if (nn->is_nfp3200 && nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO)
2909 nn->ctrl |= NFP_NET_CFG_CTRL_MSIXAUTO;
2910
2911 /* On NFP4000/NFP6000, determine RX packet/metadata boundary offset */
2912 if (nn->fw_ver.major >= 2)
2913 nn->rx_offset = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
2914 else
2915 nn->rx_offset = NFP_NET_RX_OFFSET;
2916
2917 /* Stash the re-configuration queue away. First odd queue in TX Bar */
2918 nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
2919
2920 /* Make sure the FW knows the netdev is supposed to be disabled here */
2921 nn_writel(nn, NFP_NET_CFG_CTRL, 0);
2922 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
2923 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
2924 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RING |
2925 NFP_NET_CFG_UPDATE_GEN);
2926 if (err)
2927 return err;
2928
2929 /* Finalise the netdev setup */
2930 ether_setup(netdev);
2931 netdev->netdev_ops = &nfp_net_netdev_ops;
2932 netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);
2933 netif_carrier_off(netdev);
2934
2935 nfp_net_set_ethtool_ops(netdev);
2936 nfp_net_irqs_assign(netdev);
2937
2938 return register_netdev(netdev);
2939 }
2940
2941 /**
2942 * nfp_net_netdev_clean() - Undo what nfp_net_netdev_init() did.
2943 * @netdev: netdev structure
2944 */
2945 void nfp_net_netdev_clean(struct net_device *netdev)
2946 {
2947 unregister_netdev(netdev);
2948 }