/*
 * Copyright (c) 2014-2018 Netronome Systems, Inc.
 * All rights reserved.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *  this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *  notice, this list of conditions and the following disclaimer in the
 *  documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 *  contributors may be used to endorse or promote products derived from this
 *  software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * vim:shiftwidth=8:noexpandtab
 *
 * @file dpdk/pmd/nfp_net.c
 *
 * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
 */
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_version.h>
#include <rte_string_fns.h>
#include <rte_alarm.h>
#include <rte_spinlock.h>
#include <rte_service_component.h>

#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_hwinfo.h"
#include "nfpcore/nfp_mip.h"
#include "nfpcore/nfp_rtsym.h"
#include "nfpcore/nfp_nsp.h"

#include "nfp_net_pmd.h"
#include "nfp_net_logs.h"
#include "nfp_net_ctrl.h"

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
/* Prototypes */
static void nfp_net_close(struct rte_eth_dev *dev);
static int nfp_net_configure(struct rte_eth_dev *dev);
static void nfp_net_dev_interrupt_handler(void *param);
static void nfp_net_dev_interrupt_delayed_handler(void *param);
static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static void nfp_net_infos_get(struct rte_eth_dev *dev,
                              struct rte_eth_dev_info *dev_info);
static int nfp_net_init(struct rte_eth_dev *eth_dev);
static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static void nfp_net_promisc_enable(struct rte_eth_dev *dev);
static void nfp_net_promisc_disable(struct rte_eth_dev *dev);
static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq);
static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
                                       uint16_t queue_idx);
static uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                                  uint16_t nb_pkts);
static void nfp_net_rx_queue_release(void *rxq);
static int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                                  uint16_t nb_desc, unsigned int socket_id,
                                  const struct rte_eth_rxconf *rx_conf,
                                  struct rte_mempool *mp);
static int nfp_net_tx_free_bufs(struct nfp_net_txq *txq);
static void nfp_net_tx_queue_release(void *txq);
static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                                  uint16_t nb_desc, unsigned int socket_id,
                                  const struct rte_eth_txconf *tx_conf);
static int nfp_net_start(struct rte_eth_dev *dev);
static int nfp_net_stats_get(struct rte_eth_dev *dev,
                             struct rte_eth_stats *stats);
static void nfp_net_stats_reset(struct rte_eth_dev *dev);
static void nfp_net_stop(struct rte_eth_dev *dev);
static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                                  uint16_t nb_pkts);
static int nfp_net_rss_config_default(struct rte_eth_dev *dev);
static int nfp_net_rss_hash_update(struct rte_eth_dev *dev,
                                   struct rte_eth_rss_conf *rss_conf);
static int nfp_net_rss_reta_write(struct rte_eth_dev *dev,
                                  struct rte_eth_rss_reta_entry64 *reta_conf,
                                  uint16_t reta_size);
static int nfp_net_rss_hash_write(struct rte_eth_dev *dev,
                                  struct rte_eth_rss_conf *rss_conf);
static int nfp_set_mac_addr(struct rte_eth_dev *dev,
                            struct ether_addr *mac_addr);
/* The offset of the queue controller queues in the PCIe Target */
#define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))

/* Maximum value which can be added to a queue with one transaction */
#define NFP_QCP_MAX_ADD 0x7f

#define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \
        (uint64_t)((mb)->buf_iova + RTE_PKTMBUF_HEADROOM)
/* nfp_qcp_ptr - Read or Write Pointer of a queue */
enum nfp_qcp_ptr {
        NFP_QCP_READ_PTR = 0,
        NFP_QCP_WRITE_PTR
};
/*
 * nfp_qcp_ptr_add - Add the value to the selected pointer of a queue
 * @q: Base address for queue structure
 * @ptr: Add to the Read or Write pointer
 * @val: Value to add to the queue pointer
 *
 * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
 */
static inline void
nfp_qcp_ptr_add(uint8_t *q, enum nfp_qcp_ptr ptr, uint32_t val)
{
        uint32_t off;

        if (ptr == NFP_QCP_READ_PTR)
                off = NFP_QCP_QUEUE_ADD_RPTR;
        else
                off = NFP_QCP_QUEUE_ADD_WPTR;

        while (val > NFP_QCP_MAX_ADD) {
                nn_writel(rte_cpu_to_le_32(NFP_QCP_MAX_ADD), q + off);
                val -= NFP_QCP_MAX_ADD;
        }

        nn_writel(rte_cpu_to_le_32(val), q + off);
}
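/*
 * Illustrative example (not part of the driver logic): a request larger
 * than NFP_QCP_MAX_ADD (0x7f = 127) is split into several writes.
 *
 *   nfp_qcp_ptr_add(q, NFP_QCP_WRITE_PTR, 200);
 *   // 1st write: nn_writel(127, q + NFP_QCP_QUEUE_ADD_WPTR), val -> 73
 *   // 2nd write: nn_writel(73,  q + NFP_QCP_QUEUE_ADD_WPTR)
 */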
/*
 * nfp_qcp_read - Read the current Read/Write pointer value for a queue
 * @q: Base address for queue structure
 * @ptr: Read or Write pointer
 */
static inline uint32_t
nfp_qcp_read(uint8_t *q, enum nfp_qcp_ptr ptr)
{
        uint32_t off;
        uint32_t val;

        if (ptr == NFP_QCP_READ_PTR)
                off = NFP_QCP_QUEUE_STS_LO;
        else
                off = NFP_QCP_QUEUE_STS_HI;

        val = rte_cpu_to_le_32(nn_readl(q + off));

        if (ptr == NFP_QCP_READ_PTR)
                return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
        else
                return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
}
/*
 * Functions to read/write from/to Config BAR
 * Performs any endian conversion necessary.
 */
static inline uint8_t
nn_cfg_readb(struct nfp_net_hw *hw, int off)
{
        return nn_readb(hw->ctrl_bar + off);
}

static inline void
nn_cfg_writeb(struct nfp_net_hw *hw, int off, uint8_t val)
{
        nn_writeb(val, hw->ctrl_bar + off);
}

static inline uint32_t
nn_cfg_readl(struct nfp_net_hw *hw, int off)
{
        return rte_le_to_cpu_32(nn_readl(hw->ctrl_bar + off));
}

static inline void
nn_cfg_writel(struct nfp_net_hw *hw, int off, uint32_t val)
{
        nn_writel(rte_cpu_to_le_32(val), hw->ctrl_bar + off);
}

static inline uint64_t
nn_cfg_readq(struct nfp_net_hw *hw, int off)
{
        return rte_le_to_cpu_64(nn_readq(hw->ctrl_bar + off));
}

static inline void
nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val)
{
        nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
}
static void
nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
{
        unsigned i;

        if (rxq->rxbufs == NULL)
                return;

        for (i = 0; i < rxq->rx_count; i++) {
                if (rxq->rxbufs[i].mbuf) {
                        rte_pktmbuf_free_seg(rxq->rxbufs[i].mbuf);
                        rxq->rxbufs[i].mbuf = NULL;
                }
        }
}

static void
nfp_net_rx_queue_release(void *rx_queue)
{
        struct nfp_net_rxq *rxq = rx_queue;

        if (rxq) {
                nfp_net_rx_queue_release_mbufs(rxq);
                rte_free(rxq->rxbufs);
                rte_free(rxq);
        }
}

static void
nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq)
{
        nfp_net_rx_queue_release_mbufs(rxq);
        rxq->rd_p = 0;
        rxq->nb_rx_hold = 0;
}
static void
nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
{
        unsigned i;

        if (txq->txbufs == NULL)
                return;

        for (i = 0; i < txq->tx_count; i++) {
                if (txq->txbufs[i].mbuf) {
                        rte_pktmbuf_free_seg(txq->txbufs[i].mbuf);
                        txq->txbufs[i].mbuf = NULL;
                }
        }
}

static void
nfp_net_tx_queue_release(void *tx_queue)
{
        struct nfp_net_txq *txq = tx_queue;

        if (txq) {
                nfp_net_tx_queue_release_mbufs(txq);
                rte_free(txq->txbufs);
                rte_free(txq);
        }
}

static void
nfp_net_reset_tx_queue(struct nfp_net_txq *txq)
{
        nfp_net_tx_queue_release_mbufs(txq);
        txq->wr_p = 0;
        txq->rd_p = 0;
}
static int
__nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
{
        int cnt;
        uint32_t new;
        struct timespec wait;

        PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...",
                    hw->qcp_cfg);

        if (hw->qcp_cfg == NULL)
                rte_panic("Bad configuration queue pointer\n");

        nfp_qcp_ptr_add(hw->qcp_cfg, NFP_QCP_WRITE_PTR, 1);

        wait.tv_sec = 0;
        wait.tv_nsec = 1000000;

        PMD_DRV_LOG(DEBUG, "Polling for update ack...");

        /* Poll update field, waiting for NFP to ack the config */
        for (cnt = 0; ; cnt++) {
                new = nn_cfg_readl(hw, NFP_NET_CFG_UPDATE);
                if (new == 0)
                        break;
                if (new & NFP_NET_CFG_UPDATE_ERR) {
                        PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x", new);
                        return -1;
                }
                if (cnt >= NFP_NET_POLL_TIMEOUT) {
                        PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
                                          " %dms", update, cnt);
                        rte_panic("Exiting\n");
                }
                nanosleep(&wait, 0); /* waiting for a 1ms */
        }

        PMD_DRV_LOG(DEBUG, "Ack DONE");
        return 0;
}
/*
 * Reconfigure the NIC
 * @hw: device to reconfigure
 * @ctrl: The value for the ctrl field in the BAR config
 * @update: The value for the update field in the BAR config
 *
 * Write the update word to the BAR and ping the reconfig queue. Then poll
 * until the firmware has acknowledged the update by zeroing the update word.
 */
static int
nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
{
        uint32_t err;

        PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x",
                    ctrl, update);

        rte_spinlock_lock(&hw->reconfig_lock);

        nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
        nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);

        rte_wmb();

        err = __nfp_net_reconfig(hw, update);

        rte_spinlock_unlock(&hw->reconfig_lock);

        if (!err)
                return 0;

        /*
         * A reconfig error returned here is one the caller can handle.
         * Otherwise, rte_panic is called inside __nfp_net_reconfig
         */
        PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x",
                     ctrl, update);
        return -EIO;
}
/*
 * Configure an Ethernet device. This function must be invoked first
 * before any other function in the Ethernet API. This function can
 * also be re-invoked when a device is in the stopped state.
 */
static int
nfp_net_configure(struct rte_eth_dev *dev)
{
        struct rte_eth_conf *dev_conf;
        struct rte_eth_rxmode *rxmode;
        struct rte_eth_txmode *txmode;
        struct nfp_net_hw *hw;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /*
         * A DPDK app sends info about how many queues to use and how
         * those queues need to be configured. This is used by the
         * DPDK core and it makes sure no more queues than those
         * advertised by the driver are requested. This function is
         * called after that internal process.
         */

        PMD_INIT_LOG(DEBUG, "Configure");

        dev_conf = &dev->data->dev_conf;
        rxmode = &dev_conf->rxmode;
        txmode = &dev_conf->txmode;

        /* Checking TX mode */
        if (txmode->mq_mode) {
                PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
                return -EINVAL;
        }

        /* Checking RX mode */
        if (rxmode->mq_mode & ETH_MQ_RX_RSS &&
            !(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
                PMD_INIT_LOG(INFO, "RSS not supported");
                return -EINVAL;
        }

        return 0;
}
static void
nfp_net_enable_queues(struct rte_eth_dev *dev)
{
        struct nfp_net_hw *hw;
        uint64_t enabled_queues = 0;
        int i;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* Enabling the required TX queues in the device */
        for (i = 0; i < dev->data->nb_tx_queues; i++)
                enabled_queues |= (1 << i);

        nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);

        enabled_queues = 0;

        /* Enabling the required RX queues in the device */
        for (i = 0; i < dev->data->nb_rx_queues; i++)
                enabled_queues |= (1 << i);

        nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
}
static void
nfp_net_disable_queues(struct rte_eth_dev *dev)
{
        struct nfp_net_hw *hw;
        uint32_t new_ctrl, update = 0;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, 0);
        nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0);

        new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE;
        update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
                 NFP_NET_CFG_UPDATE_MSIX;

        if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
                new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;

        /* If the reconfig fails, avoid changing the cached hw state */
        if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
                return;

        hw->ctrl = new_ctrl;
}
static int
nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
{
        int i;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (nfp_net_rx_fill_freelist(dev->data->rx_queues[i]) < 0)
                        return -1;
        }
        return 0;
}
static void
nfp_net_params_setup(struct nfp_net_hw *hw)
{
        nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
        nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
}

static void
nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
{
        hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
}
#define ETH_ADDR_LEN          6

static void
nfp_eth_copy_mac(uint8_t *dst, const uint8_t *src)
{
        int i;

        for (i = 0; i < ETH_ADDR_LEN; i++)
                dst[i] = src[i];
}
static void
nfp_net_pf_read_mac(struct nfp_net_hw *hw, int port)
{
        struct nfp_eth_table *nfp_eth_table;

        nfp_eth_table = nfp_eth_read_ports(hw->cpp);
        /*
         * hw points to port0 private data. We need hw now pointing to
         * right port.
         */
        hw += port;
        nfp_eth_copy_mac((uint8_t *)&hw->mac_addr,
                         (uint8_t *)&nfp_eth_table->ports[port].mac_addr);
        free(nfp_eth_table);
}
static void
nfp_net_vf_read_mac(struct nfp_net_hw *hw)
{
        uint32_t tmp;

        tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
        memcpy(&hw->mac_addr[0], &tmp, 4);

        tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
        memcpy(&hw->mac_addr[4], &tmp, 2);
}
static void
nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
{
        uint32_t mac0 = *(uint32_t *)mac;
        uint16_t mac1;

        nn_writel(rte_cpu_to_be_32(mac0), hw->ctrl_bar + NFP_NET_CFG_MACADDR);

        mac += 4;
        mac1 = *(uint16_t *)mac;
        nn_writew(rte_cpu_to_be_16(mac1),
                  hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6);
}
static int
nfp_set_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
        struct nfp_net_hw *hw;
        uint32_t update, ctrl;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
            !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) {
                PMD_INIT_LOG(INFO, "MAC address unable to change when"
                                   " port enabled");
                return -EBUSY;
        }

        /* Writing new MAC to the specific port BAR address */
        nfp_net_write_mac(hw, (uint8_t *)mac_addr);

        /* Signal the NIC about the change */
        update = NFP_NET_CFG_UPDATE_MACADDR;
        ctrl = hw->ctrl;
        if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
            (hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
                ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR;
        if (nfp_net_reconfig(hw, ctrl, update) < 0) {
                PMD_INIT_LOG(INFO, "MAC address update failed");
                return -EIO;
        }
        return 0;
}
static int
nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
                           struct rte_intr_handle *intr_handle)
{
        struct nfp_net_hw *hw;
        int i;

        if (!intr_handle->intr_vec) {
                intr_handle->intr_vec =
                        rte_zmalloc("intr_vec",
                                    dev->data->nb_rx_queues * sizeof(int), 0);
                if (!intr_handle->intr_vec) {
                        PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
                                     " intr_vec", dev->data->nb_rx_queues);
                        return -ENOMEM;
                }
        }

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
                PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
                /* UIO just supports one queue and no LSC */
                nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
                intr_handle->intr_vec[0] = 0;
        } else {
                PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        /*
                         * The first msix vector is reserved for non
                         * efd interrupts.
                         */
                        nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
                        intr_handle->intr_vec[i] = i + 1;
                        PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d", i,
                                     intr_handle->intr_vec[i]);
                }
        }

        /* Avoiding TX interrupts */
        hw->ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;
        return 0;
}
static uint32_t
nfp_check_offloads(struct rte_eth_dev *dev)
{
        struct nfp_net_hw *hw;
        struct rte_eth_conf *dev_conf;
        struct rte_eth_rxmode *rxmode;
        struct rte_eth_txmode *txmode;
        uint32_t ctrl = 0;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        dev_conf = &dev->data->dev_conf;
        rxmode = &dev_conf->rxmode;
        txmode = &dev_conf->txmode;

        if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
                if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
                        ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
        }

        if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
                if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
                        ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
        }

        if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
                hw->mtu = rxmode->max_rx_pkt_len;

        if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
                ctrl |= NFP_NET_CFG_CTRL_TXVLAN;

        /* L2 broadcast */
        if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
                ctrl |= NFP_NET_CFG_CTRL_L2BC;

        /* L2 multicast */
        if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
                ctrl |= NFP_NET_CFG_CTRL_L2MC;

        /* TX checksum offload */
        if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
            txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
            txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
                ctrl |= NFP_NET_CFG_CTRL_TXCSUM;

        /* LSO offload */
        if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
                if (hw->cap & NFP_NET_CFG_CTRL_LSO)
                        ctrl |= NFP_NET_CFG_CTRL_LSO;
                else
                        ctrl |= NFP_NET_CFG_CTRL_LSO2;
        }

        /* RX gather */
        if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
                ctrl |= NFP_NET_CFG_CTRL_GATHER;

        return ctrl;
}
static int
nfp_net_start(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        uint32_t new_ctrl, update = 0;
        struct nfp_net_hw *hw;
        struct rte_eth_conf *dev_conf;
        struct rte_eth_rxmode *rxmode;
        uint32_t intr_vector;
        int ret;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_LOG(DEBUG, "Start");

        /* Disabling queues just in case... */
        nfp_net_disable_queues(dev);

        /* Enabling the required queues in the device */
        nfp_net_enable_queues(dev);

        /* check and configure queue intr-vector mapping */
        if (dev->data->dev_conf.intr_conf.rxq != 0) {
                if (hw->pf_multiport_enabled) {
                        PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
                                          "with NFP multiport PF");
                        return -EINVAL;
                }
                if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
                        /*
                         * Better not to share LSC with RX interrupts.
                         * Unregistering LSC interrupt handler
                         */
                        rte_intr_callback_unregister(&pci_dev->intr_handle,
                                nfp_net_dev_interrupt_handler, (void *)dev);

                        if (dev->data->nb_rx_queues > 1) {
                                PMD_INIT_LOG(ERR, "PMD rx interrupt only "
                                                  "supports 1 queue with UIO");
                                return -EIO;
                        }
                }
                intr_vector = dev->data->nb_rx_queues;
                if (rte_intr_efd_enable(intr_handle, intr_vector))
                        return -1;

                nfp_configure_rx_interrupt(dev, intr_handle);
                update = NFP_NET_CFG_UPDATE_MSIX;
        }

        rte_intr_enable(intr_handle);

        new_ctrl = nfp_check_offloads(dev);

        /* Writing configuration parameters in the device */
        nfp_net_params_setup(hw);

        dev_conf = &dev->data->dev_conf;
        rxmode = &dev_conf->rxmode;

        if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
                nfp_net_rss_config_default(dev);
                update |= NFP_NET_CFG_UPDATE_RSS;
                new_ctrl |= NFP_NET_CFG_CTRL_RSS;
        }

        /* Enable device */
        new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

        update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

        if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
                new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

        nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
        if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
                return -EIO;

        /*
         * Allocating rte mbufs for configured rx queues.
         * This requires queues being enabled before.
         */
        if (nfp_net_rx_freelist_setup(dev) < 0) {
                ret = -ENOMEM;
                goto error;
        }

        if (hw->is_pf) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                        /* Configure the physical port up */
                        nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 1);
                else
                        nfp_eth_set_configured(dev->process_private,
                                               hw->pf_port_idx, 1);
        }

        hw->ctrl = new_ctrl;

        return 0;

error:
        /*
         * An error returned by this function should mean the app
         * exiting and then the system releasing all the memory
         * allocated even memory coming from hugepages.
         *
         * The device could be enabled at this point with some queues
         * ready for getting packets. This is true if the call to
         * nfp_net_rx_freelist_setup() succeeds for some queues but
         * fails for subsequent queues.
         *
         * This should make the app exiting but better if we tell the
         * device first.
         */
        nfp_net_disable_queues(dev);

        return ret;
}
/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static void
nfp_net_stop(struct rte_eth_dev *dev)
{
        int i;
        struct nfp_net_hw *hw;

        PMD_INIT_LOG(DEBUG, "Stop");

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        nfp_net_disable_queues(dev);

        /* Clear queues */
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                nfp_net_reset_tx_queue(
                        (struct nfp_net_txq *)dev->data->tx_queues[i]);
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                nfp_net_reset_rx_queue(
                        (struct nfp_net_rxq *)dev->data->rx_queues[i]);
        }

        if (hw->is_pf) {
                if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                        /* Configure the physical port down */
                        nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 0);
                else
                        nfp_eth_set_configured(dev->process_private,
                                               hw->pf_port_idx, 0);
        }
}
/* Set the link up. */
static int
nfp_net_set_link_up(struct rte_eth_dev *dev)
{
        struct nfp_net_hw *hw;

        PMD_DRV_LOG(DEBUG, "Set link up");

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (!hw->is_pf)
                return -ENOTSUP;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                /* Configure the physical port up */
                return nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 1);
        else
                return nfp_eth_set_configured(dev->process_private,
                                              hw->pf_port_idx, 1);
}
/* Set the link down. */
static int
nfp_net_set_link_down(struct rte_eth_dev *dev)
{
        struct nfp_net_hw *hw;

        PMD_DRV_LOG(DEBUG, "Set link down");

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (!hw->is_pf)
                return -ENOTSUP;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                /* Configure the physical port down */
                return nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 0);
        else
                return nfp_eth_set_configured(dev->process_private,
                                              hw->pf_port_idx, 0);
}
/* Reset and stop device. The device can not be restarted. */
static void
nfp_net_close(struct rte_eth_dev *dev)
{
        struct nfp_net_hw *hw;
        struct rte_pci_device *pci_dev;
        int i;

        PMD_INIT_LOG(DEBUG, "Close");

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        /*
         * We assume that the DPDK application is stopping all the
         * threads/queues before calling the device close function.
         */

        nfp_net_disable_queues(dev);

        /* Clear queues */
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                nfp_net_reset_tx_queue(
                        (struct nfp_net_txq *)dev->data->tx_queues[i]);
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                nfp_net_reset_rx_queue(
                        (struct nfp_net_rxq *)dev->data->rx_queues[i]);
        }

        rte_intr_disable(&pci_dev->intr_handle);
        nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);

        /* unregister callback func from eal lib */
        rte_intr_callback_unregister(&pci_dev->intr_handle,
                                     nfp_net_dev_interrupt_handler,
                                     (void *)dev);

        /*
         * The ixgbe PMD driver disables the pcie master on the
         * device. The i40e does not...
         */
}
static void
nfp_net_promisc_enable(struct rte_eth_dev *dev)
{
        uint32_t new_ctrl, update = 0;
        struct nfp_net_hw *hw;

        PMD_DRV_LOG(DEBUG, "Promiscuous mode enable");

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
                PMD_INIT_LOG(INFO, "Promiscuous mode not supported");
                return;
        }

        if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
                PMD_DRV_LOG(INFO, "Promiscuous mode already enabled");
                return;
        }

        new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
        update = NFP_NET_CFG_UPDATE_GEN;

        /*
         * DPDK sets promiscuous mode on just after this call assuming
         * it can not fail ...
         */
        if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
                return;

        hw->ctrl = new_ctrl;
}
static void
nfp_net_promisc_disable(struct rte_eth_dev *dev)
{
        uint32_t new_ctrl, update = 0;
        struct nfp_net_hw *hw;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
                PMD_DRV_LOG(INFO, "Promiscuous mode already disabled");
                return;
        }

        new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
        update = NFP_NET_CFG_UPDATE_GEN;

        /*
         * DPDK sets promiscuous mode off just before this call
         * assuming it can not fail ...
         */
        if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
                return;

        hw->ctrl = new_ctrl;
}
/*
 * return 0 means link status changed, -1 means not changed
 *
 * Wait to complete is needed as it can take up to 9 seconds to get the Link
 * status.
 */
static int
nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
        struct nfp_net_hw *hw;
        struct rte_eth_link link;
        uint32_t nn_link_status;
        int ret;

        static const uint32_t ls_to_ethtool[] = {
                [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
                [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = ETH_SPEED_NUM_NONE,
                [NFP_NET_CFG_STS_LINK_RATE_1G]          = ETH_SPEED_NUM_1G,
                [NFP_NET_CFG_STS_LINK_RATE_10G]         = ETH_SPEED_NUM_10G,
                [NFP_NET_CFG_STS_LINK_RATE_25G]         = ETH_SPEED_NUM_25G,
                [NFP_NET_CFG_STS_LINK_RATE_40G]         = ETH_SPEED_NUM_40G,
                [NFP_NET_CFG_STS_LINK_RATE_50G]         = ETH_SPEED_NUM_50G,
                [NFP_NET_CFG_STS_LINK_RATE_100G]        = ETH_SPEED_NUM_100G,
        };

        PMD_DRV_LOG(DEBUG, "Link update");

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS);

        memset(&link, 0, sizeof(struct rte_eth_link));

        if (nn_link_status & NFP_NET_CFG_STS_LINK)
                link.link_status = ETH_LINK_UP;

        link.link_duplex = ETH_LINK_FULL_DUPLEX;

        nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
                         NFP_NET_CFG_STS_LINK_RATE_MASK;

        if (nn_link_status >= RTE_DIM(ls_to_ethtool))
                link.link_speed = ETH_SPEED_NUM_NONE;
        else
                link.link_speed = ls_to_ethtool[nn_link_status];

        ret = rte_eth_linkstatus_set(dev, &link);
        if (ret == 0) {
                if (link.link_status)
                        PMD_DRV_LOG(INFO, "NIC Link is Up");
                else
                        PMD_DRV_LOG(INFO, "NIC Link is Down");
        }
        return ret;
}
static int
nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        int i;
        struct nfp_net_hw *hw;
        struct rte_eth_stats nfp_dev_stats;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */

        memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));

        /* reading per RX ring stats */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
                        break;

                nfp_dev_stats.q_ipackets[i] =
                        nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));

                nfp_dev_stats.q_ipackets[i] -=
                        hw->eth_stats_base.q_ipackets[i];

                nfp_dev_stats.q_ibytes[i] =
                        nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);

                nfp_dev_stats.q_ibytes[i] -=
                        hw->eth_stats_base.q_ibytes[i];
        }

        /* reading per TX ring stats */
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
                        break;

                nfp_dev_stats.q_opackets[i] =
                        nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));

                nfp_dev_stats.q_opackets[i] -=
                        hw->eth_stats_base.q_opackets[i];

                nfp_dev_stats.q_obytes[i] =
                        nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);

                nfp_dev_stats.q_obytes[i] -=
                        hw->eth_stats_base.q_obytes[i];
        }

        nfp_dev_stats.ipackets =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);

        nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;

        nfp_dev_stats.ibytes =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);

        nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;

        nfp_dev_stats.opackets =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);

        nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;

        nfp_dev_stats.obytes =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);

        nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;

        /* reading general device stats */
        nfp_dev_stats.ierrors =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);

        nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;

        nfp_dev_stats.oerrors =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);

        nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;

        /* RX ring mbuf allocation failures */
        nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;

        nfp_dev_stats.imissed =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);

        nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;

        if (stats) {
                memcpy(stats, &nfp_dev_stats, sizeof(*stats));
                return 0;
        }
        return -EINVAL;
}
static void
nfp_net_stats_reset(struct rte_eth_dev *dev)
{
        int i;
        struct nfp_net_hw *hw;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /*
         * hw->eth_stats_base records the per counter starting point.
         * Lets update it now.
         */

        /* reading per RX ring stats */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
                        break;

                hw->eth_stats_base.q_ipackets[i] =
                        nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));

                hw->eth_stats_base.q_ibytes[i] =
                        nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
        }

        /* reading per TX ring stats */
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
                        break;

                hw->eth_stats_base.q_opackets[i] =
                        nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));

                hw->eth_stats_base.q_obytes[i] =
                        nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
        }

        hw->eth_stats_base.ipackets =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);

        hw->eth_stats_base.ibytes =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);

        hw->eth_stats_base.opackets =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);

        hw->eth_stats_base.obytes =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);

        /* reading general device stats */
        hw->eth_stats_base.ierrors =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);

        hw->eth_stats_base.oerrors =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);

        /* RX ring mbuf allocation failures */
        dev->data->rx_mbuf_alloc_failed = 0;

        hw->eth_stats_base.imissed =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
}
static void
nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct nfp_net_hw *hw;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
        dev_info->min_rx_bufsize = ETHER_MIN_MTU;
        dev_info->max_rx_pktlen = hw->max_mtu;
        /* Next should change when PF support is implemented */
        dev_info->max_mac_addrs = 1;

        if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
                dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;

        if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
                dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM |
                                             DEV_RX_OFFLOAD_UDP_CKSUM |
                                             DEV_RX_OFFLOAD_TCP_CKSUM;

        dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;

        if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
                dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;

        if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
                dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
                                             DEV_TX_OFFLOAD_UDP_CKSUM |
                                             DEV_TX_OFFLOAD_TCP_CKSUM;

        if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)
                dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;

        if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
                dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS;

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
                        .pthresh = DEFAULT_RX_PTHRESH,
                        .hthresh = DEFAULT_RX_HTHRESH,
                        .wthresh = DEFAULT_RX_WTHRESH,
                },
                .rx_free_thresh = DEFAULT_RX_FREE_THRESH,
                .rx_drop_en = 0,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_thresh = {
                        .pthresh = DEFAULT_TX_PTHRESH,
                        .hthresh = DEFAULT_TX_HTHRESH,
                        .wthresh = DEFAULT_TX_WTHRESH,
                },
                .tx_free_thresh = DEFAULT_TX_FREE_THRESH,
                .tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
        };

        dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
                                           ETH_RSS_NONFRAG_IPV4_TCP |
                                           ETH_RSS_NONFRAG_IPV4_UDP |
                                           ETH_RSS_IPV6 |
                                           ETH_RSS_NONFRAG_IPV6_TCP |
                                           ETH_RSS_NONFRAG_IPV6_UDP;

        dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
        dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;

        dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
                               ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
                               ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
}
static const uint32_t *
nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
{
        static const uint32_t ptypes[] = {
                /* refers to nfp_net_set_hash() */
                RTE_PTYPE_INNER_L3_IPV4,
                RTE_PTYPE_INNER_L3_IPV6,
                RTE_PTYPE_INNER_L3_IPV6_EXT,
                RTE_PTYPE_INNER_L4_MASK,
                RTE_PTYPE_UNKNOWN
        };

        if (dev->rx_pkt_burst == nfp_net_recv_pkts)
                return ptypes;
        return NULL;
}
static uint32_t
nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
{
        struct nfp_net_rxq *rxq;
        struct nfp_net_rx_desc *rxds;
        uint32_t idx;
        uint32_t count;

        rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];

        idx = rxq->rd_p;

        count = 0;

        /*
         * Other PMDs are just checking the DD bit in intervals of 4
         * descriptors and counting all four if the first has the DD
         * bit on. Of course, this is not accurate but can be good for
         * performance. But ideally that should be done in descriptors
         * chunks belonging to the same cache line.
         */

        while (count < rxq->rx_count) {
                rxds = &rxq->rxds[idx];
                if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
                        break;

                count++;
                idx++;

                /* Wrapping? */
                if ((idx) == rxq->rx_count)
                        idx = 0;
        }

        return count;
}
static int
nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
        struct rte_pci_device *pci_dev;
        struct nfp_net_hw *hw;
        int base = 0;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
                base = 1;

        /* Make sure all updates are written before un-masking */
        rte_wmb();
        nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id),
                      NFP_NET_CFG_ICR_UNMASKED);
        return 0;
}
static int
nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
        struct rte_pci_device *pci_dev;
        struct nfp_net_hw *hw;
        int base = 0;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
                base = 1;

        /* Make sure all updates are written before masking */
        rte_wmb();
        nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), 0x1);
        return 0;
}
static void
nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
{
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_eth_link link;

        rte_eth_linkstatus_get(dev, &link);
        if (link.link_status)
                PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
                            dev->data->port_id, link.link_speed,
                            link.link_duplex == ETH_LINK_FULL_DUPLEX
                            ? "full-duplex" : "half-duplex");
        else
                PMD_DRV_LOG(INFO, " Port %d: Link Down",
                            dev->data->port_id);

        PMD_DRV_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
                    pci_dev->addr.domain, pci_dev->addr.bus,
                    pci_dev->addr.devid, pci_dev->addr.function);
}
/* Interrupt configuration and handling */

/*
 * nfp_net_irq_unmask - Unmask an interrupt
 *
 * If MSI-X auto-masking is enabled clear the mask bit, otherwise
 * clear the ICR for the entry.
 */
static void
nfp_net_irq_unmask(struct rte_eth_dev *dev)
{
        struct nfp_net_hw *hw;
        struct rte_pci_device *pci_dev;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        pci_dev = RTE_ETH_DEV_TO_PCI(dev);

        if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
                /* If MSI-X auto-masking is used, clear the entry */
                rte_wmb();
                rte_intr_enable(&pci_dev->intr_handle);
        } else {
                /* Make sure all updates are written before un-masking */
                rte_wmb();
                nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
                              NFP_NET_CFG_ICR_UNMASKED);
        }
}
static void
nfp_net_dev_interrupt_handler(void *param)
{
        int64_t timeout;
        struct rte_eth_link link;
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

        PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!");

        rte_eth_linkstatus_get(dev, &link);

        nfp_net_link_update(dev, 0);

        /* likely to up */
        if (!link.link_status) {
                /* handle it 1 sec later, wait it being stable */
                timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
                /* likely to down */
        } else {
                /* handle it 4 sec later, wait it being stable */
                timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
        }

        if (rte_eal_alarm_set(timeout * 1000,
                              nfp_net_dev_interrupt_delayed_handler,
                              (void *)dev) < 0) {
                PMD_INIT_LOG(ERR, "Error setting alarm");
                /* Unmasking */
                nfp_net_irq_unmask(dev);
        }
}
/*
 * Interrupt handler to be registered as an alarm callback for delayed
 * handling of a specific interrupt, waiting for the NIC state to become
 * stable. As the NFP interrupt state is not stable right after the link
 * goes down, it needs to wait 4 seconds to get the stable status.
 *
 * @param handle   Pointer to interrupt handle.
 * @param param    The address of parameter (struct rte_eth_dev *)
 *
 * @return void
 */
static void
nfp_net_dev_interrupt_delayed_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

        nfp_net_link_update(dev, 0);
        _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);

        nfp_net_dev_link_status_print(dev);

        /* Unmasking */
        nfp_net_irq_unmask(dev);
}
static int
nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct nfp_net_hw *hw;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        /* check that mtu is within the allowed range */
        if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu))
                return -EINVAL;

        /* mtu setting is forbidden if port is started */
        if (dev->data->dev_started) {
                PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
                            dev->data->port_id);
                return -EBUSY;
        }

        /* switch to jumbo mode if needed */
        if ((uint32_t)mtu > ETHER_MAX_LEN)
                dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->data->dev_conf.rxmode.offloads &=
                        ~DEV_RX_OFFLOAD_JUMBO_FRAME;

        /* update max frame size */
        dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu;

        /* writing to configuration space */
        nn_cfg_writel(hw, NFP_NET_CFG_MTU, (uint32_t)mtu);

        hw->mtu = mtu;

        return 0;
}
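/*
 * Worked example (illustrative): with ETHER_MAX_LEN = 1518, a request for
 * mtu = 9000 sets DEV_RX_OFFLOAD_JUMBO_FRAME and writes 9000 to the
 * NFP_NET_CFG_MTU word; a later request for mtu = 1500 clears the jumbo
 * flag again, since 1500 <= 1518.
 */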
static int
nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
                       uint16_t queue_idx, uint16_t nb_desc,
                       unsigned int socket_id,
                       const struct rte_eth_rxconf *rx_conf,
                       struct rte_mempool *mp)
{
        const struct rte_memzone *tz;
        struct nfp_net_rxq *rxq;
        struct nfp_net_hw *hw;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        /* Validating number of descriptors */
        if (((nb_desc * sizeof(struct nfp_net_rx_desc)) % 128) != 0 ||
            (nb_desc > NFP_NET_MAX_RX_DESC) ||
            (nb_desc < NFP_NET_MIN_RX_DESC)) {
                PMD_DRV_LOG(ERR, "Wrong nb_desc value");
                return -EINVAL;
        }

        /*
         * Free memory prior to re-allocation if needed. This is the case after
         * calling nfp_net_stop
         */
        if (dev->data->rx_queues[queue_idx]) {
                nfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]);
                dev->data->rx_queues[queue_idx] = NULL;
        }

        /* Allocating rx queue data structure */
        rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq == NULL)
                return -ENOMEM;

        /* Hw queues mapping based on firmware configuration */
        rxq->qidx = queue_idx;
        rxq->fl_qcidx = queue_idx * hw->stride_rx;
        rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1);
        rxq->qcp_fl = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->fl_qcidx);
        rxq->qcp_rx = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->rx_qcidx);

        /*
         * Tracking mbuf size for detecting a potential mbuf overflow due to
         * RX offset
         */
        rxq->mem_pool = mp;
        rxq->mbuf_size = rxq->mem_pool->elt_size;
        rxq->mbuf_size -= (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM);
        hw->flbufsz = rxq->mbuf_size;

        rxq->rx_count = nb_desc;
        rxq->port_id = dev->data->port_id;
        rxq->rx_free_thresh = rx_conf->rx_free_thresh;
        rxq->drop_en = rx_conf->rx_drop_en;

        /*
         * Allocate RX ring hardware descriptors. A memzone large enough to
         * handle the maximum ring size is allocated in order to allow for
         * resizing in later calls to the queue setup function.
         */
        tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
                                      sizeof(struct nfp_net_rx_desc) *
                                      NFP_NET_MAX_RX_DESC, NFP_MEMZONE_ALIGN,
                                      socket_id);

        if (tz == NULL) {
                PMD_DRV_LOG(ERR, "Error allocating rx dma");
                nfp_net_rx_queue_release(rxq);
                return -ENOMEM;
        }

        /* Saving physical and virtual addresses for the RX ring */
        rxq->dma = (uint64_t)tz->iova;
        rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;

        /* mbuf pointers array for referencing mbufs linked to RX descriptors */
        rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
                                         sizeof(*rxq->rxbufs) * nb_desc,
                                         RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq->rxbufs == NULL) {
                nfp_net_rx_queue_release(rxq);
                return -ENOMEM;
        }

        PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
                   rxq->rxbufs, rxq->rxds, (unsigned long int)rxq->dma);

        nfp_net_reset_rx_queue(rxq);

        dev->data->rx_queues[queue_idx] = rxq;
        rxq->hw = hw;

        /*
         * Telling the HW about the physical address of the RX ring and number
         * of descriptors in log2 format
         */
        nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
        nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));

        return 0;
}
static int
nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
{
        struct nfp_net_rx_buff *rxe = rxq->rxbufs;
        uint64_t dma_addr;
        unsigned i;

        PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors",
                   rxq->rx_count);

        for (i = 0; i < rxq->rx_count; i++) {
                struct nfp_net_rx_desc *rxd;
                struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool);

                if (mbuf == NULL) {
                        PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
                                    (unsigned)rxq->qidx);
                        return -ENOMEM;
                }

                dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(mbuf));

                rxd = &rxq->rxds[i];
                rxd->fld.dd = 0;
                rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
                rxd->fld.dma_addr_lo = dma_addr & 0xffffffff;
                rxe[i].mbuf = mbuf;
                PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64, i, dma_addr);
        }

        /* Make sure all writes are flushed before telling the hardware */
        rte_wmb();

        /* Not advertising the whole ring as the firmware gets confused if so */
        PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u",
                   rxq->rx_count - 1);

        nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1);

        return 0;
}
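/*
 * Illustrative example: for a 1024-entry ring only 1023 free-list entries
 * are advertised, presumably so a completely full ring never looks
 * identical to an empty one (write pointer == read pointer):
 *
 *   nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, 1024 - 1);
 */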
static int
nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                       uint16_t nb_desc, unsigned int socket_id,
                       const struct rte_eth_txconf *tx_conf)
{
        const struct rte_memzone *tz;
        struct nfp_net_txq *txq;
        uint16_t tx_free_thresh;
        struct nfp_net_hw *hw;

        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();

        /* Validating number of descriptors */
        if (((nb_desc * sizeof(struct nfp_net_tx_desc)) % 128) != 0 ||
            (nb_desc > NFP_NET_MAX_TX_DESC) ||
            (nb_desc < NFP_NET_MIN_TX_DESC)) {
                PMD_DRV_LOG(ERR, "Wrong nb_desc value");
                return -EINVAL;
        }

        tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
                                    tx_conf->tx_free_thresh :
                                    DEFAULT_TX_FREE_THRESH);

        if (tx_free_thresh > (nb_desc)) {
                PMD_DRV_LOG(ERR,
                        "tx_free_thresh must be less than the number of TX "
                        "descriptors. (tx_free_thresh=%u port=%d "
                        "queue=%d)", (unsigned int)tx_free_thresh,
                        dev->data->port_id, (int)queue_idx);
                return -(EINVAL);
        }

        /*
         * Free memory prior to re-allocation if needed. This is the case after
         * calling nfp_net_stop
         */
        if (dev->data->tx_queues[queue_idx]) {
                PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
                           queue_idx);
                nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]);
                dev->data->tx_queues[queue_idx] = NULL;
        }

        /* Allocating tx queue data structure */
        txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (txq == NULL) {
                PMD_DRV_LOG(ERR, "Error allocating tx dma");
                return -ENOMEM;
        }

        /*
         * Allocate TX ring hardware descriptors. A memzone large enough to
         * handle the maximum ring size is allocated in order to allow for
         * resizing in later calls to the queue setup function.
         */
        tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
                                      sizeof(struct nfp_net_tx_desc) *
                                      NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN,
                                      socket_id);
        if (tz == NULL) {
                PMD_DRV_LOG(ERR, "Error allocating tx dma");
                nfp_net_tx_queue_release(txq);
                return -ENOMEM;
        }

        txq->tx_count = nb_desc;
        txq->tx_free_thresh = tx_free_thresh;
        txq->tx_pthresh = tx_conf->tx_thresh.pthresh;
        txq->tx_hthresh = tx_conf->tx_thresh.hthresh;
        txq->tx_wthresh = tx_conf->tx_thresh.wthresh;

        /* queue mapping based on firmware configuration */
        txq->qidx = queue_idx;
        txq->tx_qcidx = queue_idx * hw->stride_tx;
        txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);

        txq->port_id = dev->data->port_id;

        /* Saving physical and virtual addresses for the TX ring */
        txq->dma = (uint64_t)tz->iova;
        txq->txds = (struct nfp_net_tx_desc *)tz->addr;

        /* mbuf pointers array for referencing mbufs linked to TX descriptors */
        txq->txbufs = rte_zmalloc_socket("txq->txbufs",
                                         sizeof(*txq->txbufs) * nb_desc,
                                         RTE_CACHE_LINE_SIZE, socket_id);
        if (txq->txbufs == NULL) {
                nfp_net_tx_queue_release(txq);
                return -ENOMEM;
        }

        PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
                   txq->txbufs, txq->txds, (unsigned long int)txq->dma);

        nfp_net_reset_tx_queue(txq);

        dev->data->tx_queues[queue_idx] = txq;
        txq->hw = hw;

        /*
         * Telling the HW about the physical address of the TX ring and number
         * of descriptors in log2 format
         */
        nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
        nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));

        return 0;
}
/* nfp_net_tx_tso - Set TX descriptor for TSO */
static inline void
nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
               struct rte_mbuf *mb)
{
        uint64_t ol_flags;
        struct nfp_net_hw *hw = txq->hw;

        if (!(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY))
                goto clean_txd;

        ol_flags = mb->ol_flags;

        if (!(ol_flags & PKT_TX_TCP_SEG))
                goto clean_txd;

        txd->l3_offset = mb->l2_len;
        txd->l4_offset = mb->l2_len + mb->l3_len;
        txd->lso_hdrlen = mb->l2_len + mb->l3_len + mb->l4_len;
        txd->mss = rte_cpu_to_le_16(mb->tso_segsz);
        txd->flags = PCIE_DESC_TX_LSO;
        return;

clean_txd:
        txd->flags = 0;
        txd->l3_offset = 0;
        txd->l4_offset = 0;
        txd->lso_hdrlen = 0;
        txd->mss = 0;
}
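/*
 * Worked example (illustrative): a TSO packet with a 14-byte Ethernet
 * header, 20-byte IPv4 header and 20-byte TCP header (mb->l2_len = 14,
 * mb->l3_len = 20, mb->l4_len = 20) and tso_segsz = 1448 results in:
 *
 *   txd->l3_offset  = 14;   // start of the IP header
 *   txd->l4_offset  = 34;   // start of the TCP header
 *   txd->lso_hdrlen = 54;   // headers replicated in every segment
 *   txd->mss        = rte_cpu_to_le_16(1448);
 */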
/* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */
static inline void
nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
                 struct rte_mbuf *mb)
{
        uint64_t ol_flags;
        struct nfp_net_hw *hw = txq->hw;

        if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM))
                return;

        ol_flags = mb->ol_flags;

        /* IPv6 does not need checksum */
        if (ol_flags & PKT_TX_IP_CKSUM)
                txd->flags |= PCIE_DESC_TX_IP4_CSUM;

        switch (ol_flags & PKT_TX_L4_MASK) {
        case PKT_TX_UDP_CKSUM:
                txd->flags |= PCIE_DESC_TX_UDP_CSUM;
                break;
        case PKT_TX_TCP_CKSUM:
                txd->flags |= PCIE_DESC_TX_TCP_CSUM;
                break;
        }

        if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
                txd->flags |= PCIE_DESC_TX_CSUM;
}
/* nfp_net_rx_cksum - set mbuf checksum flags based on RX descriptor flags */
static inline void
nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
                 struct rte_mbuf *mb)
{
        struct nfp_net_hw *hw = rxq->hw;

        if (!(hw->ctrl & NFP_NET_CFG_CTRL_RXCSUM))
                return;

        /* If IPv4 and IP checksum error, fail */
        if (unlikely((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
            !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK)))
                mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
        else
                mb->ol_flags |= PKT_RX_IP_CKSUM_GOOD;

        /* If neither UDP nor TCP return */
        if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
            !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM))
                return;

        if (likely(rxd->rxd.flags & PCIE_DESC_RX_L4_CSUM_OK))
                mb->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
        else
                mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
}
#define NFP_HASH_OFFSET      ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 4)
#define NFP_HASH_TYPE_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 8)

#define NFP_DESC_META_LEN(d) (d->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK)
/*
 * nfp_net_set_hash - Set mbuf hash data
 *
 * The RSS hash and hash-type are pre-pended to the packet data.
 * Extract and decode it and set the mbuf fields.
 */
static inline void
nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
                 struct rte_mbuf *mbuf)
{
        struct nfp_net_hw *hw = rxq->hw;
        uint8_t *meta_offset;
        uint32_t meta_info;
        uint32_t hash = 0;
        uint32_t hash_type = 0;

        if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
                return;

        /* this is true for new firmwares */
        if (likely(((hw->cap & NFP_NET_CFG_CTRL_RSS2) ||
            (NFD_CFG_MAJOR_VERSION_of(hw->ver) == 4)) &&
             NFP_DESC_META_LEN(rxd))) {
                /*
                 * new metadata api:
                 * <----  32 bit  ----->
                 * m    field type word
                 * e     data field #2
                 * t     data field #1
                 * a     data field #0
                 * ====================
                 *    packet data
                 *
                 * Field type word contains up to 8 4bit field types
                 * A 4bit field type refers to a data field word
                 * A data field word can have several 4bit field types
                 */
                meta_offset = rte_pktmbuf_mtod(mbuf, uint8_t *);
                meta_offset -= NFP_DESC_META_LEN(rxd);
                meta_info = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
                meta_offset += 4;
                /* NFP PMD just supports metadata for hashing */
                switch (meta_info & NFP_NET_META_FIELD_MASK) {
                case NFP_NET_META_HASH:
                        /* next field type is about the hash type */
                        meta_info >>= NFP_NET_META_FIELD_SIZE;
                        /* hash value is in the data field */
                        hash = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
                        hash_type = meta_info & NFP_NET_META_FIELD_MASK;
                        break;
                default:
                        /* Unsupported metadata can be a performance issue */
                        return;
                }
        } else {
                if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
                        return;

                hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
                hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
        }

        mbuf->hash.rss = hash;
        mbuf->ol_flags |= PKT_RX_RSS_HASH;

        switch (hash_type) {
        case NFP_NET_RSS_IPV4:
                mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
                break;
        case NFP_NET_RSS_IPV6:
                mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
                break;
        case NFP_NET_RSS_IPV6_EX:
                mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
                break;
        case NFP_NET_RSS_IPV4_TCP:
                mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
                break;
        case NFP_NET_RSS_IPV6_TCP:
                mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
                break;
        case NFP_NET_RSS_IPV4_UDP:
                mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
                break;
        case NFP_NET_RSS_IPV6_UDP:
                mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
                break;
        default:
                mbuf->packet_type |= RTE_PTYPE_INNER_L4_MASK;
        }
}
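/*
 * Illustrative example of the metadata layout parsed above: an 8-byte
 * metadata area carrying only a hash would sit in front of the packet
 * data as
 *
 *   meta_offset[0..3]: field type word; lowest 4 bits = NFP_NET_META_HASH,
 *                      next 4 bits = the hash type
 *   meta_offset[4..7]: big-endian 32-bit RSS hash value
 *
 * so after the NFP_NET_META_HASH case, hash_type holds the 4-bit type and
 * hash holds the value from the first data field word.
 */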
static inline void
nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
{
        rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
}
/*
 * RX path design:
 *
 * There are some decisions to take:
 * 1) How to check DD RX descriptors bit
 * 2) How and when to allocate new mbufs
 *
 * Current implementation checks just one single DD bit each loop. As each
 * descriptor is 8 bytes, it is likely a good idea to check descriptors in
 * a single cache line instead. Tests with this change have not shown any
 * performance improvement but it requires further investigation. For example,
 * depending on which descriptor is next, the number of descriptors could be
 * less than 8 for just checking those in the same cache line. This implies
 * extra work which could be counterproductive by itself. Indeed, last firmware
 * changes are just doing this: writing several descriptors with the DD bit
 * for saving PCIe bandwidth and DMA operations from the NFP.
 *
 * Mbuf allocation is done when a new packet is received. Then the descriptor
 * is automatically linked with the new mbuf and the old one is given to the
 * user. The main drawback with this design is that mbuf allocation is heavier
 * than using bulk allocations allowed by DPDK with rte_mempool_get_bulk. From
 * the cache point of view, allocating the mbuf early, as done now, does not
 * seem to have any benefit at all. Again, tests with this change have not
 * shown any improvement. Also, rte_mempool_get_bulk returns all or nothing,
 * so the implications of this type of allocation should be studied more
 * carefully.
 */
static uint16_t
nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        struct nfp_net_rxq *rxq;
        struct nfp_net_rx_desc *rxds;
        struct nfp_net_rx_buff *rxb;
        struct nfp_net_hw *hw;
        struct rte_mbuf *mb;
        struct rte_mbuf *new_mb;
        uint16_t nb_hold;
        uint64_t dma_addr;
        uint16_t avail;

        rxq = rx_queue;
        if (unlikely(rxq == NULL)) {
                /*
                 * DPDK just checks the queue is lower than max queues
                 * enabled. But the queue needs to be configured
                 */
                RTE_LOG_DP(ERR, PMD, "RX Bad queue\n");
                return -EINVAL;
        }

        hw = rxq->hw;
        avail = 0;
        nb_hold = 0;

        while (avail < nb_pkts) {
                rxb = &rxq->rxbufs[rxq->rd_p];
                if (unlikely(rxb == NULL)) {
                        RTE_LOG_DP(ERR, PMD, "rxb does not exist!\n");
                        break;
                }

                rxds = &rxq->rxds[rxq->rd_p];
                if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
                        break;

                /*
                 * Memory barrier to ensure that we won't do other
                 * reads before the DD bit.
                 */
                rte_rmb();

                /*
                 * We got a packet. Let's alloc a new mbuf for refilling the
                 * free descriptor ring as soon as possible
                 */
                new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
                if (unlikely(new_mb == NULL)) {
                        RTE_LOG_DP(DEBUG, PMD,
                        "RX mbuf alloc failed port_id=%u queue_id=%u\n",
                                rxq->port_id, (unsigned int)rxq->qidx);
                        nfp_net_mbuf_alloc_failed(rxq);
                        break;
                }

                nb_hold++;

                /*
                 * Grab the mbuf and refill the descriptor with the
                 * previously allocated mbuf
                 */
                mb = rxb->mbuf;
                rxb->mbuf = new_mb;

                PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u",
                           rxds->rxd.data_len, rxq->mbuf_size);

                /* Size of this segment */
                mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
                /* Size of the whole packet. We just support 1 segment */
                mb->pkt_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);

                if (unlikely((mb->data_len + hw->rx_offset) >
                             rxq->mbuf_size)) {
                        /*
                         * This should not happen and the user has the
                         * responsibility of avoiding it. But we have
                         * to give some info about the error
                         */
                        RTE_LOG_DP(ERR, PMD,
                                "mbuf overflow likely due to the RX offset.\n"
                                "\t\tYour mbuf size should have extra space for"
                                " RX offset=%u bytes.\n"
                                "\t\tCurrently you just have %u bytes available"
                                " but the received packet is %u bytes long",
                                hw->rx_offset,
                                rxq->mbuf_size - hw->rx_offset,
                                mb->data_len);
                        return -EINVAL;
                }

                /* Filling the received mbuf with packet info */
                if (hw->rx_offset)
                        mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
                else
                        mb->data_off = RTE_PKTMBUF_HEADROOM +
                                       NFP_DESC_META_LEN(rxds);

                /* No scatter mode supported */
                mb->nb_segs = 1;
                mb->next = NULL;

                mb->port = rxq->port_id;

                /* Checking the RSS flag */
                nfp_net_set_hash(rxq, rxds, mb);

                /* Checking the checksum flag */
                nfp_net_rx_cksum(rxq, rxds, mb);

                if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
                    (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
                        mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
                        mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
                }

                /* Adding the mbuf to the mbuf array passed by the app */
                rx_pkts[avail++] = mb;

                /* Now resetting and updating the descriptor */
                rxds->vals[0] = 0;
                rxds->vals[1] = 0;
                dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb));
                rxds->fld.dd = 0;
                rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
                rxds->fld.dma_addr_lo = dma_addr & 0xffffffff;

                rxq->rd_p++;
                if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping?*/
                        rxq->rd_p = 0;
        }

        if (nb_hold == 0)
                return nb_hold;

        PMD_RX_LOG(DEBUG, "RX  port_id=%u queue_id=%u, %d packets received",
                   rxq->port_id, (unsigned int)rxq->qidx, nb_hold);

        nb_hold += rxq->nb_rx_hold;

        /*
         * FL descriptors needs to be written before incrementing the
         * FL queue WR pointer
         */
        rte_wmb();
        if (nb_hold > rxq->rx_free_thresh) {
                PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u",
                           rxq->port_id, (unsigned int)rxq->qidx,
                           (unsigned)nb_hold, (unsigned)avail);
                nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
                nb_hold = 0;
        }
        rxq->nb_rx_hold = nb_hold;

        return avail;
}
/*
 * nfp_net_tx_free_bufs - Check for descriptors with a complete
 * status
 * @txq: TX queue to work with
 * Returns number of descriptors freed
 */
static int
nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
{
        uint32_t qcp_rd_p;
        int todo;

        PMD_TX_LOG(DEBUG, "queue %u. Check for descriptor with a complete"
                   " status", txq->qidx);

        /* Work out how many packets have been sent */
        qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR);

        if (qcp_rd_p == txq->rd_p) {
                PMD_TX_LOG(DEBUG, "queue %u: It seems harrier is not sending "
                           "packets (%u, %u)", txq->qidx,
                           qcp_rd_p, txq->rd_p);
                return 0;
        }

        if (qcp_rd_p > txq->rd_p)
                todo = qcp_rd_p - txq->rd_p;
        else
                todo = qcp_rd_p + txq->tx_count - txq->rd_p;

        PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u",
                   qcp_rd_p, txq->rd_p, txq->rd_p);

        if (todo == 0)
                return todo;

        txq->rd_p += todo;
        if (unlikely(txq->rd_p >= txq->tx_count))
                txq->rd_p -= txq->tx_count;

        return todo;
}
/* Leaving always free descriptors for avoiding wrapping confusion */
static inline
uint32_t nfp_free_tx_desc(struct nfp_net_txq *txq)
{
        if (txq->wr_p >= txq->rd_p)
                return txq->tx_count - (txq->wr_p - txq->rd_p) - 8;
        else
                return txq->rd_p - txq->wr_p - 8;
}
2211 * nfp_net_txq_full - Check if the TX queue free descriptors
2212 * is below tx_free_threshold
2214 * @txq: TX queue to check
2216 * This function uses the host copy* of read/write pointers
2219 uint32_t nfp_net_txq_full(struct nfp_net_txq
*txq
)
2221 return (nfp_free_tx_desc(txq
) < txq
->tx_free_thresh
);
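
/*
 * The two helpers above always keep 8 descriptors out of use so that a
 * full ring can never look identical to an empty one (wr_p == rd_p).
 * A minimal standalone sketch of the same arithmetic, with made-up ring
 * sizes and pointer positions; kept under #if 0 so it is not built.
 */
#if 0
#include <assert.h>
#include <stdint.h>

static uint32_t
free_descs(uint32_t tx_count, uint32_t wr_p, uint32_t rd_p)
{
	if (wr_p >= rd_p)
		return tx_count - (wr_p - rd_p) - 8;
	return rd_p - wr_p - 8;
}

int
main(void)
{
	assert(free_descs(1024, 0, 0) == 1016);		/* empty ring */
	assert(free_descs(1024, 100, 40) == 956);	/* wr_p ahead */
	assert(free_descs(1024, 10, 900) == 882);	/* wr_p wrapped */
	return 0;
}
#endif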
static uint16_t
nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct nfp_net_txq *txq;
	struct nfp_net_hw *hw;
	struct nfp_net_tx_desc *txds, txd;
	struct rte_mbuf *pkt;
	uint64_t dma_addr;
	int pkt_size, dma_size;
	uint16_t free_descs, issued_descs;
	struct rte_mbuf **lmbuf;
	int i;

	txq = tx_queue;
	hw = txq->hw;
	txds = &txq->txds[txq->wr_p];

	PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets",
		   txq->qidx, txq->wr_p, nb_pkts);

	if ((nfp_free_tx_desc(txq) < nb_pkts) || (nfp_net_txq_full(txq)))
		nfp_net_tx_free_bufs(txq);

	free_descs = (uint16_t)nfp_free_tx_desc(txq);
	if (unlikely(free_descs == 0))
		return 0;

	pkt = *tx_pkts;

	i = 0;
	issued_descs = 0;
	PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets",
		   txq->qidx, nb_pkts);
	/* Sending packets */
	while ((i < nb_pkts) && free_descs) {
		/* Grabbing the mbuf linked to the current descriptor */
		lmbuf = &txq->txbufs[txq->wr_p].mbuf;

		/* Warming the cache for releasing the mbuf later on */
		RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);

		pkt = *(tx_pkts + i);

		if (unlikely((pkt->nb_segs > 1) &&
			     !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
			PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set");
			rte_panic("Multisegment packet unsupported\n");
		}

		/* Checking if we have enough descriptors */
		if (unlikely(pkt->nb_segs > free_descs))
			goto xmit_end;

		/*
		 * Checksum and VLAN flags just in the first descriptor for a
		 * multisegment packet, but TSO info needs to be in all of them.
		 */
		txd.data_len = pkt->pkt_len;
		nfp_net_tx_tso(txq, &txd, pkt);
		nfp_net_tx_cksum(txq, &txd, pkt);

		if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
		    (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
			txd.flags |= PCIE_DESC_TX_VLAN;
			txd.vlan = pkt->vlan_tci;
		}

		/*
		 * mbuf data_len is the data in one segment and pkt_len data
		 * in the whole packet. When the packet is just one segment,
		 * then data_len = pkt_len
		 */
		pkt_size = pkt->pkt_len;

		while (pkt) {
			/* Copying TSO, VLAN and cksum info */
			*txds = txd;

			/* Releasing mbuf used by this descriptor previously*/
			if (*lmbuf)
				rte_pktmbuf_free_seg(*lmbuf);

			/*
			 * Linking mbuf with descriptor for being released
			 * next time descriptor is used
			 */
			*lmbuf = pkt;

			dma_size = pkt->data_len;
			dma_addr = rte_mbuf_data_iova(pkt);
			PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
				   "%" PRIx64 "", dma_addr);

			/* Filling descriptors fields */
			txds->dma_len = dma_size;
			txds->data_len = txd.data_len;
			txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
			txds->dma_addr_lo = (dma_addr & 0xffffffff);
			ASSERT(free_descs > 0);
			free_descs--;

			txq->wr_p++;
			if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping?*/
				txq->wr_p = 0;

			pkt_size -= dma_size;

			/*
			 * Set EOP on the last descriptor of the packet;
			 * single-segment packets hit this on their only one
			 */
			if (likely(!pkt_size))
				txds->offset_eop = PCIE_DESC_TX_EOP;
			else
				txds->offset_eop = 0;

			pkt = pkt->next;
			/* Referencing next free TX descriptor */
			txds = &txq->txds[txq->wr_p];
			lmbuf = &txq->txbufs[txq->wr_p].mbuf;
			issued_descs++;
		}
		i++;
	}

xmit_end:
	/* Increment write pointers. Force memory write before we let HW know */
	rte_wmb();
	nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, issued_descs);

	return i;
}
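
/*
 * nfp_net_xmit_pkts() is installed as eth_dev->tx_pkt_burst, so an
 * application reaches it through rte_eth_tx_burst(). A minimal sketch of
 * the usual retry loop; port 0 / queue 0 are placeholder values, not
 * anything mandated by this PMD. Kept under #if 0 so it is not built.
 */
#if 0
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static void
send_all(struct rte_mbuf **pkts, uint16_t n)
{
	uint16_t sent = 0;

	/* Retry the unsent tail until the whole batch is queued */
	while (sent < n)
		sent += rte_eth_tx_burst(0, 0, pkts + sent, n - sent);
}
#endif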
static int
nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	uint32_t new_ctrl, update;
	struct nfp_net_hw *hw;
	int ret;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	new_ctrl = 0;

	if ((mask & ETH_VLAN_FILTER_OFFLOAD) ||
	    (mask & ETH_VLAN_EXTEND_OFFLOAD))
		PMD_DRV_LOG(INFO, "No support for ETH_VLAN_FILTER_OFFLOAD or"
			" ETH_VLAN_EXTEND_OFFLOAD");

	/* Enable vlan strip if it is not configured yet */
	if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
	    !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
		new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;

	/* Disable vlan strip only if it is configured */
	if (!(mask & ETH_VLAN_STRIP_OFFLOAD) &&
	    (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
		new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;

	if (new_ctrl == 0)
		return 0;

	update = NFP_NET_CFG_UPDATE_GEN;

	ret = nfp_net_reconfig(hw, new_ctrl, update);
	if (ret == 0)
		hw->ctrl = new_ctrl;

	return ret;
}
static int
nfp_net_rss_reta_write(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size)
{
	uint32_t reta, mask;
	int i, j;
	int idx, shift;
	struct nfp_net_hw *hw =
		NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number supported by hardware "
			"(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
		return -EINVAL;
	}

	/*
	 * Update Redirection Table. There are 128 8bit-entries which can be
	 * managed as 32 32bit-entries
	 */
	for (i = 0; i < reta_size; i += 4) {
		/* Handling 4 RSS entries per loop */
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);

		if (!mask)
			continue;

		reta = 0;
		/* If all 4 entries were set, no need to read the RETA register */
		if (mask != 0xF)
			reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);

		for (j = 0; j < 4; j++) {
			if (!(mask & (0x1 << j)))
				continue;
			if (mask != 0xF)
				/* Clearing the entry bits */
				reta &= ~(0xFF << (8 * j));
			reta |= reta_conf[idx].reta[shift + j] << (8 * j);
		}
		nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift,
			      reta);
	}
	return 0;
}
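
/*
 * Each 32-bit word written above packs four 8-bit RETA entries, least
 * significant byte first. A standalone sketch of that packing with
 * invented queue numbers; kept under #if 0 so it is not built.
 */
#if 0
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint8_t entries[4] = { 0, 1, 2, 3 };	/* queues for 4 entries */
	uint32_t reta = 0;
	int j;

	for (j = 0; j < 4; j++)
		reta |= (uint32_t)entries[j] << (8 * j);

	assert(reta == 0x03020100);
	return 0;
}
#endif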
/* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */
static int
nfp_net_reta_update(struct rte_eth_dev *dev,
		    struct rte_eth_rss_reta_entry64 *reta_conf,
		    uint16_t reta_size)
{
	struct nfp_net_hw *hw =
		NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t update;
	int ret;

	if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
		return -EINVAL;

	ret = nfp_net_rss_reta_write(dev, reta_conf, reta_size);
	if (ret != 0)
		return ret;

	update = NFP_NET_CFG_UPDATE_RSS;

	if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
		return -EIO;

	return 0;
}
/* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device. */
static int
nfp_net_reta_query(struct rte_eth_dev *dev,
		   struct rte_eth_rss_reta_entry64 *reta_conf,
		   uint16_t reta_size)
{
	uint8_t i, j, mask;
	int idx, shift;
	uint32_t reta;
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
		return -EINVAL;

	if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
			"(%d) doesn't match the number supported by hardware "
			"(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
		return -EINVAL;
	}

	/*
	 * Reading Redirection Table. There are 128 8bit-entries which can be
	 * managed as 32 32bit-entries
	 */
	for (i = 0; i < reta_size; i += 4) {
		/* Handling 4 RSS entries per loop */
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);

		if (!mask)
			continue;

		reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) +
				    shift);
		for (j = 0; j < 4; j++) {
			if (!(mask & (0x1 << j)))
				continue;
			/* Keep the full 8-bit queue entry (0xFF, not 0xF) */
			reta_conf[idx].reta[shift + j] =
				(uint8_t)((reta >> (8 * j)) & 0xFF);
		}
	}
	return 0;
}
static int
nfp_net_rss_hash_write(struct rte_eth_dev *dev,
		       struct rte_eth_rss_conf *rss_conf)
{
	struct nfp_net_hw *hw;
	uint64_t rss_hf;
	uint32_t cfg_rss_ctrl = 0;
	uint8_t key;
	int i;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Writing the key byte by byte */
	for (i = 0; i < rss_conf->rss_key_len; i++) {
		memcpy(&key, &rss_conf->rss_key[i], 1);
		nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
	}

	rss_hf = rss_conf->rss_hf;

	if (rss_hf & ETH_RSS_IPV4)
		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;

	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;

	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;

	if (rss_hf & ETH_RSS_IPV6)
		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;

	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;

	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;

	cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
	cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;

	/* Configuring where to apply the RSS hash */
	nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);

	/* Writing the key size */
	nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);

	return 0;
}
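
/*
 * At the ethdev level this write path is reached through
 * rte_eth_dev_rss_hash_update(). A minimal sketch with a placeholder
 * all-zero 40-byte key (a real deployment would use a proper key, and
 * its length must not exceed NFP_NET_CFG_RSS_KEY_SZ for this PMD).
 * Kept under #if 0 so it is not built.
 */
#if 0
#include <rte_ethdev.h>

static int
set_rss(uint16_t port_id)
{
	static uint8_t key[40];		/* placeholder key */
	struct rte_eth_rss_conf conf = {
		.rss_key = key,
		.rss_key_len = sizeof(key),
		.rss_hf = ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP,
	};

	return rte_eth_dev_rss_hash_update(port_id, &conf);
}
#endif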
static int
nfp_net_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	uint32_t update;
	uint64_t rss_hf;
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	rss_hf = rss_conf->rss_hf;

	/* Checking if RSS is enabled */
	if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) {
		if (rss_hf != 0) { /* Enable RSS? */
			PMD_DRV_LOG(ERR, "RSS unsupported");
			return -EINVAL;
		}
		return 0; /* Nothing to do */
	}

	if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
		PMD_DRV_LOG(ERR, "hash key too long");
		return -EINVAL;
	}

	nfp_net_rss_hash_write(dev, rss_conf);

	update = NFP_NET_CFG_UPDATE_RSS;

	if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
		return -EIO;

	return 0;
}
static int
nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	uint64_t rss_hf;
	uint32_t cfg_rss_ctrl;
	uint8_t key;
	int i;
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
		return -EINVAL;

	rss_hf = rss_conf->rss_hf;
	cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);

	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP;

	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;

	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;

	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;

	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;

	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_NONFRAG_IPV6_UDP;

	/* Propagate the hash functions read back to the caller */
	rss_conf->rss_hf = rss_hf;

	/* Reading the key size */
	rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);

	/* Reading the key byte by byte */
	for (i = 0; i < rss_conf->rss_key_len; i++) {
		key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
		memcpy(&rss_conf->rss_key[i], &key, 1);
	}

	return 0;
}
static int
nfp_net_rss_config_default(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *dev_conf;
	struct rte_eth_rss_conf rss_conf;
	struct rte_eth_rss_reta_entry64 nfp_reta_conf[2];
	uint16_t rx_queues = dev->data->nb_rx_queues;
	uint16_t queue;
	int i, j, ret;

	PMD_DRV_LOG(INFO, "setting default RSS conf for %u queues",
		rx_queues);

	nfp_reta_conf[0].mask = ~0x0;
	nfp_reta_conf[1].mask = ~0x0;

	queue = 0;
	for (i = 0; i < 0x40; i += 8) {
		for (j = i; j < (i + 8); j++) {
			nfp_reta_conf[0].reta[j] = queue;
			nfp_reta_conf[1].reta[j] = queue++;
			queue %= rx_queues;
		}
	}
	ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80);
	if (ret != 0)
		return ret;

	dev_conf = &dev->data->dev_conf;
	if (!dev_conf) {
		PMD_DRV_LOG(INFO, "wrong rss conf");
		return -EINVAL;
	}
	rss_conf = dev_conf->rx_adv_conf.rss_conf;

	ret = nfp_net_rss_hash_write(dev, &rss_conf);

	return ret;
}
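
/*
 * The nested loop above spreads the 128 RETA entries round-robin over the
 * configured RX queues. A standalone sketch of the resulting layout for a
 * made-up 4-queue configuration; kept under #if 0 so it is not built.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint8_t reta[128];
	uint16_t rx_queues = 4, queue = 0;
	int i;

	for (i = 0; i < 128; i++) {
		reta[i] = queue++;
		queue %= rx_queues;
	}

	/* Prints "0 1 2 3 0": the pattern repeats every rx_queues entries */
	printf("%u %u %u %u %u\n", reta[0], reta[1], reta[2], reta[3], reta[4]);
	return 0;
}
#endif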
/* Initialise and register driver with DPDK Application */
static const struct eth_dev_ops nfp_net_eth_dev_ops = {
	.dev_configure		= nfp_net_configure,
	.dev_start		= nfp_net_start,
	.dev_stop		= nfp_net_stop,
	.dev_set_link_up	= nfp_net_set_link_up,
	.dev_set_link_down	= nfp_net_set_link_down,
	.dev_close		= nfp_net_close,
	.promiscuous_enable	= nfp_net_promisc_enable,
	.promiscuous_disable	= nfp_net_promisc_disable,
	.link_update		= nfp_net_link_update,
	.stats_get		= nfp_net_stats_get,
	.stats_reset		= nfp_net_stats_reset,
	.dev_infos_get		= nfp_net_infos_get,
	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
	.mtu_set		= nfp_net_dev_mtu_set,
	.mac_addr_set		= nfp_set_mac_addr,
	.vlan_offload_set	= nfp_net_vlan_offload_set,
	.reta_update		= nfp_net_reta_update,
	.reta_query		= nfp_net_reta_query,
	.rss_hash_update	= nfp_net_rss_hash_update,
	.rss_hash_conf_get	= nfp_net_rss_hash_conf_get,
	.rx_queue_setup		= nfp_net_rx_queue_setup,
	.rx_queue_release	= nfp_net_rx_queue_release,
	.rx_queue_count		= nfp_net_rx_queue_count,
	.tx_queue_setup		= nfp_net_tx_queue_setup,
	.tx_queue_release	= nfp_net_tx_queue_release,
	.rx_queue_intr_enable	= nfp_rx_queue_intr_enable,
	.rx_queue_intr_disable	= nfp_rx_queue_intr_disable,
};
/*
 * Every eth_dev gets its own private data, but before nfp_net_init that
 * private data references the private data of all the PF ports. This is due
 * to how the vNIC bars are mapped based on the first port, so all ports need
 * info about port 0 private data. Inside nfp_net_init the private data
 * pointer is changed to the right address for each port once the bars have
 * been mapped.
 *
 * This function helps to find out which port, and therefore which offset
 * inside the private data array, to use.
 */
static int
get_pf_port_number(char *name)
{
	char *pf_str = name;
	int size = 0;

	while ((*pf_str != '_') && (*pf_str != '\0') && (size++ < 30))
		pf_str++;

	if (size == 30)
		/*
		 * This should not happen at all and it would mean major
		 * implementation fault.
		 */
		rte_panic("nfp_net: problem with pf device name\n");

	/* Expecting _portX with X within [0,7] */
	pf_str += 5;

	return (int)strtol(pf_str, NULL, 10);
}
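
/*
 * A quick standalone check of the name parsing above, using an invented
 * device name of the expected "<pci addr>_portX" shape; kept under #if 0
 * so it is not built.
 */
#if 0
#include <assert.h>
#include <stdlib.h>
#include <string.h>

/* Same idea as get_pf_port_number(): find '_', skip "_port", parse X */
static int
pf_port_number(const char *name)
{
	const char *p = strchr(name, '_');

	return p ? (int)strtol(p + 5, NULL, 10) : -1;
}

int
main(void)
{
	assert(pf_port_number("0000:04:00.0_port3") == 3);
	return 0;
}
#endif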
static int
nfp_net_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct nfp_net_hw *hw, *hwport0;

	uint64_t tx_bar_off = 0, rx_bar_off = 0;
	uint32_t start_q;
	int stride = 4;
	int port = 0;
	int err;

	PMD_INIT_FUNC_TRACE();

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* NFP can not handle DMA addresses requiring more than 40 bits */
	if (rte_mem_check_dma_mask(40)) {
		RTE_LOG(ERR, PMD, "device %s can not be used:",
			pci_dev->device.name);
		RTE_LOG(ERR, PMD, "\trestricted dma mask to 40 bits!\n");
		return -ENODEV;
	}

	if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
	    (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
		port = get_pf_port_number(eth_dev->data->name);
		if (port < 0 || port > 7) {
			PMD_DRV_LOG(ERR, "Port value is wrong");
			return -ENODEV;
		}

		PMD_INIT_LOG(DEBUG, "Working with PF port value %d", port);

		/* This points to port 0 private data */
		hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

		/* This points to the specific port private data */
		hw = &hwport0[port];
	} else {
		hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
		hwport0 = 0;
	}

	eth_dev->dev_ops = &nfp_net_eth_dev_ops;
	eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
	eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pci_dev);
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
		     pci_dev->id.vendor_id, pci_dev->id.device_id,
		     pci_dev->addr.domain, pci_dev->addr.bus,
		     pci_dev->addr.devid, pci_dev->addr.function);

	hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
	if (hw->ctrl_bar == NULL) {
		PMD_DRV_LOG(ERR,
			"hw->ctrl_bar is NULL. BAR0 not configured");
		return -ENODEV;
	}

	if (hw->is_pf && port == 0) {
		hw->ctrl_bar = nfp_rtsym_map(hw->sym_tbl, "_pf0_net_bar0",
					     hw->total_ports * 32768,
					     &hw->ctrl_area);
		if (!hw->ctrl_bar) {
			printf("nfp_rtsym_map fails for _pf0_net_ctrl_bar");
			return -EIO;
		}

		PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
	}

	if (port > 0) {
		if (!hwport0->ctrl_bar)
			return -ENODEV;

		/* address based on port0 offset */
		hw->ctrl_bar = hwport0->ctrl_bar +
			       (port * NFP_PF_CSR_SLICE_SIZE);
	}

	PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);

	hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
	hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);

	/* Work out where in the BAR the queues start. */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_NFP4000_PF_NIC:
	case PCI_DEVICE_ID_NFP6000_PF_NIC:
	case PCI_DEVICE_ID_NFP6000_VF_NIC:
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
		tx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
		rx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
		break;
	default:
		PMD_DRV_LOG(ERR, "nfp_net: no device ID matching");
		err = -ENODEV;
		goto dev_err_ctrl_map;
	}

	PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
	PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);

	if (hw->is_pf && port == 0) {
		/* configure access to tx/rx vNIC BARs */
		hwport0->hw_queues = nfp_cpp_map_area(hw->cpp, 0, 0,
						      NFP_PCIE_QUEUE(0),
						      NFP_QCP_QUEUE_AREA_SZ,
						      &hw->hwqueues_area);

		if (!hwport0->hw_queues) {
			printf("nfp_rtsym_map fails for net.qc");
			err = -EIO;
			goto dev_err_ctrl_map;
		}

		PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p",
			     hwport0->hw_queues);
	}

	if (hw->is_pf) {
		hw->tx_bar = hwport0->hw_queues + tx_bar_off;
		hw->rx_bar = hwport0->hw_queues + rx_bar_off;
		eth_dev->data->dev_private = hw;
	} else {
		hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
			     tx_bar_off;
		hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
			     rx_bar_off;
	}

	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
		     hw->ctrl_bar, hw->tx_bar, hw->rx_bar);

	nfp_net_cfg_queue_setup(hw);
	/* Get some of the read-only fields from the config BAR */
	hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
	hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
	hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
	hw->mtu = ETHER_MTU;

	/* VLAN insertion is incompatible with LSOv2 */
	if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
		hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;

	if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
		hw->rx_offset = NFP_NET_RX_OFFSET;
	else
		hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);

	PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
		     NFD_CFG_MAJOR_VERSION_of(hw->ver),
		     NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu);

	PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap,
		     hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
		     hw->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
		     hw->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
		     hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
		     hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
		     hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
		     hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
		     hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
		     hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
		     hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
		     hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
		     hw->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSOv2 " : "",
		     hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "",
		     hw->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSSv2 " : "");

	hw->ctrl = 0;

	hw->stride_rx = stride;
	hw->stride_tx = stride;

	PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
		     hw->max_rx_queues, hw->max_tx_queues);

	/* Initializing spinlock for reconfigs */
	rte_spinlock_init(&hw->reconfig_lock);

	/* Allocating memory for mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate memory for MAC address");
		err = -ENOMEM;
		goto dev_err_queues_map;
	}

	if (hw->is_pf) {
		nfp_net_pf_read_mac(hwport0, port);
		nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
	} else {
		nfp_net_vf_read_mac(hw);
	}

	if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr)) {
		PMD_INIT_LOG(INFO, "Using random mac address for port %d",
			     port);
		/* Using random mac addresses for VFs */
		eth_random_addr(&hw->mac_addr[0]);
		nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
	}

	/* Copying mac address to DPDK eth_dev struct */
	ether_addr_copy((struct ether_addr *)hw->mac_addr,
			&eth_dev->data->mac_addrs[0]);

	if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;

	PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
		     "mac=%02x:%02x:%02x:%02x:%02x:%02x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id,
		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* Registering LSC interrupt handler */
		rte_intr_callback_register(&pci_dev->intr_handle,
					   nfp_net_dev_interrupt_handler,
					   (void *)eth_dev);
		/* Telling the firmware about the LSC interrupt entry */
		nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
		/* Recording current stats counters values */
		nfp_net_stats_reset(eth_dev);
	}

	return 0;

dev_err_queues_map:
	nfp_cpp_area_free(hw->hwqueues_area);
dev_err_ctrl_map:
	nfp_cpp_area_free(hw->ctrl_area);

	return err;
}
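
/*
 * The tx/rx BAR offsets above are simply the start queue index times the
 * per-queue QCP window size. A standalone sketch assuming the 0x800-byte
 * NFP_QCP_QUEUE_ADDR_SZ from nfp_net_pmd.h and a made-up start queue;
 * kept under #if 0 so it is not built.
 */
#if 0
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	const uint64_t qcp_sz = 0x800;	/* assumed NFP_QCP_QUEUE_ADDR_SZ */
	uint32_t start_q = 64;		/* example NFP_NET_CFG_START_TXQ */

	assert((uint64_t)start_q * qcp_sz == 0x20000);
	return 0;
}
#endif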
#define NFP_CPP_MEMIO_BOUNDARY (1 << 20)

/*
 * Serving a write request to NFP from host programs. The request
 * sends the write size and the CPP target. The bridge makes use
 * of CPP interface handler configured by the PMD setup.
 */
static int
nfp_cpp_bridge_serve_write(int sockfd, struct nfp_cpp *cpp)
{
	struct nfp_cpp_area *area;
	off_t offset, nfp_offset;
	uint32_t cpp_id, pos, len;
	uint32_t tmpbuf[16];
	size_t count, curlen, totlen = 0;
	int err = 0;

	PMD_CPP_LOG(DEBUG, "%s: offset size %lu, count_size: %lu\n", __func__,
		sizeof(off_t), sizeof(size_t));

	/* Reading the count param */
	err = recv(sockfd, &count, sizeof(off_t), 0);
	if (err != sizeof(off_t))
		return -EINVAL;

	curlen = count;

	/* Reading the offset param */
	err = recv(sockfd, &offset, sizeof(off_t), 0);
	if (err != sizeof(off_t))
		return -EINVAL;

	/* Obtain target's CPP ID and offset in target */
	cpp_id = (offset >> 40) << 8;
	nfp_offset = offset & ((1ull << 40) - 1);

	PMD_CPP_LOG(DEBUG, "%s: count %lu and offset %ld\n", __func__, count,
		offset);
	PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %ld\n", __func__,
		cpp_id, nfp_offset);

	/* Adjust length if not aligned */
	if (((nfp_offset + (off_t)count - 1) & ~(NFP_CPP_MEMIO_BOUNDARY - 1)) !=
	    (nfp_offset & ~(NFP_CPP_MEMIO_BOUNDARY - 1))) {
		curlen = NFP_CPP_MEMIO_BOUNDARY -
			(nfp_offset & (NFP_CPP_MEMIO_BOUNDARY - 1));
	}

	while (count > 0) {
		/* configure a CPP PCIe2CPP BAR for mapping the CPP target */
		area = nfp_cpp_area_alloc_with_name(cpp, cpp_id, "nfp.cdev",
						    nfp_offset, curlen);
		if (!area) {
			RTE_LOG(ERR, PMD, "%s: area alloc fail\n", __func__);
			return -EIO;
		}

		/* mapping the target */
		err = nfp_cpp_area_acquire(area);
		if (err < 0) {
			RTE_LOG(ERR, PMD, "area acquire failed\n");
			nfp_cpp_area_free(area);
			return -EIO;
		}

		for (pos = 0; pos < curlen; pos += len) {
			len = curlen - pos;
			if (len > sizeof(tmpbuf))
				len = sizeof(tmpbuf);

			PMD_CPP_LOG(DEBUG, "%s: Receive %u of %lu\n", __func__,
				    len, count);
			err = recv(sockfd, tmpbuf, len, MSG_WAITALL);
			if (err != (int)len) {
				RTE_LOG(ERR, PMD,
					"%s: error when receiving, %d of %lu\n",
					__func__, err, count);
				nfp_cpp_area_release(area);
				nfp_cpp_area_free(area);
				return -EIO;
			}
			err = nfp_cpp_area_write(area, pos, tmpbuf, len);
			if (err < 0) {
				RTE_LOG(ERR, PMD, "nfp_cpp_area_write error\n");
				nfp_cpp_area_release(area);
				nfp_cpp_area_free(area);
				return -EIO;
			}
		}

		nfp_offset += pos;
		totlen += pos;
		nfp_cpp_area_release(area);
		nfp_cpp_area_free(area);

		count -= pos;
		curlen = (count > NFP_CPP_MEMIO_BOUNDARY) ?
			 NFP_CPP_MEMIO_BOUNDARY : count;
	}

	return 0;
}
/*
 * Serving a read request to NFP from host programs. The request
 * sends the read size and the CPP target. The bridge makes use
 * of CPP interface handler configured by the PMD setup. The read
 * data is sent to the requester using the same socket.
 */
static int
nfp_cpp_bridge_serve_read(int sockfd, struct nfp_cpp *cpp)
{
	struct nfp_cpp_area *area;
	off_t offset, nfp_offset;
	uint32_t cpp_id, pos, len;
	uint32_t tmpbuf[16];
	size_t count, curlen, totlen = 0;
	int err = 0;

	PMD_CPP_LOG(DEBUG, "%s: offset size %lu, count_size: %lu\n", __func__,
		sizeof(off_t), sizeof(size_t));

	/* Reading the count param */
	err = recv(sockfd, &count, sizeof(off_t), 0);
	if (err != sizeof(off_t))
		return -EINVAL;

	curlen = count;

	/* Reading the offset param */
	err = recv(sockfd, &offset, sizeof(off_t), 0);
	if (err != sizeof(off_t))
		return -EINVAL;

	/* Obtain target's CPP ID and offset in target */
	cpp_id = (offset >> 40) << 8;
	nfp_offset = offset & ((1ull << 40) - 1);

	PMD_CPP_LOG(DEBUG, "%s: count %lu and offset %ld\n", __func__, count,
		offset);
	PMD_CPP_LOG(DEBUG, "%s: cpp_id %08x and nfp_offset %ld\n", __func__,
		cpp_id, nfp_offset);

	/* Adjust length if not aligned */
	if (((nfp_offset + (off_t)count - 1) & ~(NFP_CPP_MEMIO_BOUNDARY - 1)) !=
	    (nfp_offset & ~(NFP_CPP_MEMIO_BOUNDARY - 1))) {
		curlen = NFP_CPP_MEMIO_BOUNDARY -
			(nfp_offset & (NFP_CPP_MEMIO_BOUNDARY - 1));
	}

	while (count > 0) {
		area = nfp_cpp_area_alloc_with_name(cpp, cpp_id, "nfp.cdev",
						    nfp_offset, curlen);
		if (!area) {
			RTE_LOG(ERR, PMD, "%s: area alloc failed\n", __func__);
			return -EIO;
		}

		err = nfp_cpp_area_acquire(area);
		if (err < 0) {
			RTE_LOG(ERR, PMD, "area acquire failed\n");
			nfp_cpp_area_free(area);
			return -EIO;
		}

		for (pos = 0; pos < curlen; pos += len) {
			len = curlen - pos;
			if (len > sizeof(tmpbuf))
				len = sizeof(tmpbuf);

			err = nfp_cpp_area_read(area, pos, tmpbuf, len);
			if (err < 0) {
				RTE_LOG(ERR, PMD, "nfp_cpp_area_read error\n");
				nfp_cpp_area_release(area);
				nfp_cpp_area_free(area);
				return -EIO;
			}
			PMD_CPP_LOG(DEBUG, "%s: sending %u of %lu\n", __func__,
				    len, count);

			err = send(sockfd, tmpbuf, len, 0);
			if (err != (int)len) {
				RTE_LOG(ERR, PMD,
					"%s: error when sending: %d of %lu\n",
					__func__, err, count);
				nfp_cpp_area_release(area);
				nfp_cpp_area_free(area);
				return -EIO;
			}
		}

		nfp_offset += pos;
		totlen += pos;
		nfp_cpp_area_release(area);
		nfp_cpp_area_free(area);

		count -= pos;
		curlen = (count > NFP_CPP_MEMIO_BOUNDARY) ?
			 NFP_CPP_MEMIO_BOUNDARY : count;
	}
	return 0;
}
#define NFP_IOCTL 'n'
#define NFP_IOCTL_CPP_IDENTIFICATION _IOW(NFP_IOCTL, 0x8f, uint32_t)
/*
 * Serving an ioctl command from host NFP tools. This usually goes to a
 * kernel char driver, but that driver is not available when the PF is
 * bound to the PMD. Currently just one ioctl command is served and it
 * does not require any CPP access at all.
 */
static int
nfp_cpp_bridge_serve_ioctl(int sockfd, struct nfp_cpp *cpp)
{
	uint32_t cmd, ident_size, tmp;
	int err;

	/* Reading now the IOCTL command */
	err = recv(sockfd, &cmd, 4, 0);
	if (err != 4) {
		RTE_LOG(ERR, PMD, "%s: read error from socket\n", __func__);
		return -EIO;
	}

	/* Only supporting NFP_IOCTL_CPP_IDENTIFICATION */
	if (cmd != NFP_IOCTL_CPP_IDENTIFICATION) {
		RTE_LOG(ERR, PMD, "%s: unknown cmd %d\n", __func__, cmd);
		return -EINVAL;
	}

	err = recv(sockfd, &ident_size, 4, 0);
	if (err != 4) {
		RTE_LOG(ERR, PMD, "%s: read error from socket\n", __func__);
		return -EIO;
	}

	tmp = nfp_cpp_model(cpp);

	PMD_CPP_LOG(DEBUG, "%s: sending NFP model %08x\n", __func__, tmp);

	err = send(sockfd, &tmp, 4, 0);
	if (err != 4) {
		RTE_LOG(ERR, PMD, "%s: error writing to socket\n", __func__);
		return -EIO;
	}

	tmp = cpp->interface;

	PMD_CPP_LOG(DEBUG, "%s: sending NFP interface %08x\n", __func__, tmp);

	err = send(sockfd, &tmp, 4, 0);
	if (err != 4) {
		RTE_LOG(ERR, PMD, "%s: error writing to socket\n", __func__);
		return -EIO;
	}

	return 0;
}
#define NFP_BRIDGE_OP_READ	20
#define NFP_BRIDGE_OP_WRITE	30
#define NFP_BRIDGE_OP_IOCTL	40

/*
 * This is the code to be executed by a service core. The CPP bridge
 * interface is based on a unix socket: the read, write and ioctl requests
 * usually received by a kernel char driver are handled by the CPP bridge
 * instead, so NFP host tools can run through a wrapper library while being
 * completely unaware that the CPP bridge, and not the NFP kernel char
 * driver, performs the CPP accesses.
 */
static int32_t
nfp_cpp_bridge_service_func(void *args)
{
	struct sockaddr address;
	struct nfp_cpp *cpp = args;
	int sockfd, datafd, op, ret;

	unlink("/tmp/nfp_cpp");
	sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
	if (sockfd < 0) {
		RTE_LOG(ERR, PMD, "%s: socket creation error. Service failed\n",
			__func__);
		return -EIO;
	}

	memset(&address, 0, sizeof(struct sockaddr));

	address.sa_family = AF_UNIX;
	strcpy(address.sa_data, "/tmp/nfp_cpp");

	ret = bind(sockfd, (const struct sockaddr *)&address,
		   sizeof(struct sockaddr));
	if (ret < 0) {
		RTE_LOG(ERR, PMD, "%s: bind error (%d). Service failed\n",
			__func__, errno);
		close(sockfd);
		return ret;
	}

	ret = listen(sockfd, 20);
	if (ret < 0) {
		RTE_LOG(ERR, PMD, "%s: listen error(%d). Service failed\n",
			__func__, errno);
		close(sockfd);
		return ret;
	}

	for (;;) {
		datafd = accept(sockfd, NULL, NULL);
		if (datafd < 0) {
			RTE_LOG(ERR, PMD, "%s: accept call error (%d)\n",
				__func__, errno);
			RTE_LOG(ERR, PMD, "%s: service failed\n", __func__);
			close(sockfd);
			return -EIO;
		}

		while (1) {
			ret = recv(datafd, &op, 4, 0);
			if (ret <= 0) {
				PMD_CPP_LOG(DEBUG, "%s: socket close\n",
					    __func__);
				break;
			}

			PMD_CPP_LOG(DEBUG, "%s: getting op %u\n", __func__, op);

			if (op == NFP_BRIDGE_OP_READ)
				nfp_cpp_bridge_serve_read(datafd, cpp);

			if (op == NFP_BRIDGE_OP_WRITE)
				nfp_cpp_bridge_serve_write(datafd, cpp);

			if (op == NFP_BRIDGE_OP_IOCTL)
				nfp_cpp_bridge_serve_ioctl(datafd, cpp);

			if (op == 0)
				break;
		}
		close(datafd);
	}

	return 0;
}
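
/*
 * Registration alone (see nfp_pf_create_dev below) only makes the service
 * known to rte_service. A sketch of how an application could run it on a
 * service lcore, assuming lcore 1 was started as a service core and `id`
 * is the value stored in hw->nfp_cpp_service_id. Kept under #if 0 so it
 * is not built.
 */
#if 0
#include <rte_service.h>
#include <rte_service_component.h>

static void
run_cpp_bridge(uint32_t id)
{
	rte_service_component_runstate_set(id, 1);
	rte_service_runstate_set(id, 1);
	rte_service_map_lcore_set(id, 1, 1);
	rte_service_lcore_start(1);
}
#endif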
static int
nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports,
		  struct nfp_cpp *cpp, struct nfp_hwinfo *hwinfo,
		  int phys_port, struct nfp_rtsym_table *sym_tbl, void **priv)
{
	struct rte_eth_dev *eth_dev;
	struct nfp_net_hw *hw = NULL;
	char *port_name;
	struct rte_service_spec service;
	int retval;

	port_name = rte_zmalloc("nfp_pf_port_name", 100, 0);
	if (!port_name)
		return -ENOMEM;

	if (ports > 1)
		snprintf(port_name, 100, "%s_port%d", dev->device.name, port);
	else
		strlcat(port_name, dev->device.name, 100);

	/* Allocate an eth_dev for this phyport */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev = rte_eth_dev_allocate(port_name);
		if (!eth_dev) {
			rte_free(port_name);
			return -ENODEV;
		}

		/* Allocate memory for this phyport */
		*priv = rte_zmalloc(port_name,
				    sizeof(struct nfp_net_adapter) *
				    ports, RTE_CACHE_LINE_SIZE);
		if (!*priv) {
			rte_free(port_name);
			rte_eth_dev_release_port(eth_dev);
			return -ENOMEM;
		}

		eth_dev->data->dev_private = *priv;

		/*
		 * dev_private pointing to port0 dev_private because we need
		 * to configure vNIC bars based on port0 at nfp_net_init.
		 * Then dev_private is adjusted per port.
		 */
		hw = (struct nfp_net_hw *)(eth_dev->data->dev_private) + port;
		hw->cpp = cpp;
		hw->hwinfo = hwinfo;
		hw->sym_tbl = sym_tbl;
		hw->pf_port_idx = phys_port;
		hw->is_pf = 1;
		if (ports > 1)
			hw->pf_multiport_enabled = 1;

		hw->total_ports = ports;
	} else {
		eth_dev = rte_eth_dev_attach_secondary(port_name);
		if (!eth_dev) {
			RTE_LOG(ERR, EAL, "secondary process attach failed, "
				"ethdev doesn't exist");
			rte_free(port_name);
			return -ENODEV;
		}
		eth_dev->process_private = cpp;
	}

	eth_dev->device = &dev->device;
	rte_eth_copy_pci_info(eth_dev, dev);

	retval = nfp_net_init(eth_dev);

	if (retval) {
		retval = -ENODEV;
		goto probe_failed;
	} else {
		rte_eth_dev_probing_finish(eth_dev);
	}

	rte_free(port_name);

	if (port == 0) {
		/*
		 * The rte_service needs to be created just once per PMD.
		 * And the cpp handler needs to be linked to the service.
		 * Secondary processes will be used for debugging DPDK apps
		 * when requiring to use the CPP interface for accessing NFP
		 * components. And the cpp handler for secondary processes is
		 * available at this point.
		 */
		memset(&service, 0, sizeof(struct rte_service_spec));
		snprintf(service.name, sizeof(service.name), "nfp_cpp_service");
		service.callback = nfp_cpp_bridge_service_func;
		service.callback_userdata = (void *)cpp;

		hw = (struct nfp_net_hw *)(eth_dev->data->dev_private);

		if (rte_service_component_register(&service,
						   &hw->nfp_cpp_service_id))
			RTE_LOG(ERR, PMD, "NFP CPP bridge service register() failed");
		else
			RTE_LOG(DEBUG, PMD, "NFP CPP bridge service registered");
	}

	return retval;

probe_failed:
	rte_free(port_name);
	/* free ports private data if primary process */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);

	rte_eth_dev_release_port(eth_dev);

	return retval;
}
#define DEFAULT_FW_PATH       "/lib/firmware/netronome"

static int
nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
{
	struct nfp_cpp *cpp = nsp->cpp;
	int fw_f;
	char *fw_buf;
	char fw_name[125];
	char serial[40];
	struct stat file_stat;
	off_t fsize, bytes;

	/* Looking for firmware file in order of priority */

	/* First try to find a firmware image specific for this device */
	snprintf(serial, sizeof(serial),
		"serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
		cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3],
		cpp->serial[4], cpp->serial[5], cpp->interface >> 8,
		cpp->interface & 0xff);

	snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH,
		serial);

	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	fw_f = open(fw_name, O_RDONLY);
	if (fw_f >= 0)
		goto read_fw;

	/* Then try the PCI name */
	snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH,
		dev->device.name);

	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	fw_f = open(fw_name, O_RDONLY);
	if (fw_f >= 0)
		goto read_fw;

	/* Finally try the card type and media */
	snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card);
	PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
	fw_f = open(fw_name, O_RDONLY);
	if (fw_f < 0) {
		PMD_DRV_LOG(INFO, "Firmware file %s not found.", fw_name);
		return -ENOENT;
	}

read_fw:
	if (fstat(fw_f, &file_stat) < 0) {
		PMD_DRV_LOG(INFO, "Firmware file %s size is unknown", fw_name);
		close(fw_f);
		return -ENOENT;
	}

	fsize = file_stat.st_size;
	PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %" PRIu64 "",
		fw_name, (uint64_t)fsize);

	fw_buf = malloc((size_t)fsize);
	if (!fw_buf) {
		PMD_DRV_LOG(INFO, "malloc failed for fw buffer");
		close(fw_f);
		return -ENOMEM;
	}
	memset(fw_buf, 0, fsize);

	bytes = read(fw_f, fw_buf, fsize);
	if (bytes != fsize) {
		PMD_DRV_LOG(INFO, "Reading fw to buffer failed. "
			"Just %" PRIu64 " of %" PRIu64 " bytes read",
			(uint64_t)bytes, (uint64_t)fsize);
		free(fw_buf);
		close(fw_f);
		return -EIO;
	}

	PMD_DRV_LOG(INFO, "Uploading the firmware ...");
	nfp_nsp_load_fw(nsp, fw_buf, bytes);
	PMD_DRV_LOG(INFO, "Done");

	free(fw_buf);
	close(fw_f);

	return 0;
}
static int
nfp_fw_setup(struct rte_pci_device *dev, struct nfp_cpp *cpp,
	     struct nfp_eth_table *nfp_eth_table, struct nfp_hwinfo *hwinfo)
{
	struct nfp_nsp *nsp;
	const char *nfp_fw_model;
	char card_desc[100];
	int err = 0;

	nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");

	if (nfp_fw_model) {
		PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model);
	} else {
		PMD_DRV_LOG(ERR, "firmware model NOT found");
		return -EIO;
	}

	if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
		PMD_DRV_LOG(ERR, "NFP ethernet table reports wrong ports: %u",
			nfp_eth_table->count);
		return -EIO;
	}

	PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports",
		nfp_eth_table->count);

	PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed);

	snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw",
		nfp_fw_model, nfp_eth_table->count,
		nfp_eth_table->ports[0].speed / 1000);

	nsp = nfp_nsp_open(cpp);
	if (!nsp) {
		PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
		return -EIO;
	}

	nfp_nsp_device_soft_reset(nsp);
	err = nfp_fw_upload(dev, nsp, card_desc);

	nfp_nsp_close(nsp);
	return err;
}
static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			    struct rte_pci_device *dev)
{
	struct nfp_cpp *cpp;
	struct nfp_hwinfo *hwinfo;
	struct nfp_rtsym_table *sym_tbl;
	struct nfp_eth_table *nfp_eth_table = NULL;
	int total_ports;
	void *priv = 0;
	int ret = -ENODEV;
	int err;
	int i;

	if (!dev)
		return ret;

	/*
	 * When the device is bound to UIO, it could be used, by mistake,
	 * by two DPDK apps, and the UIO driver does not prevent it. This
	 * could lead to a serious problem when configuring the NFP CPP
	 * interface. Here we avoid this by telling the CPP init code to
	 * use a lock file if UIO is being used.
	 */
	if (dev->kdrv == RTE_KDRV_VFIO)
		cpp = nfp_cpp_from_device_name(dev, 0);
	else
		cpp = nfp_cpp_from_device_name(dev, 1);

	if (!cpp) {
		PMD_DRV_LOG(ERR, "A CPP handle can not be obtained");
		ret = -EIO;
		goto error;
	}

	hwinfo = nfp_hwinfo_read(cpp);
	if (!hwinfo) {
		PMD_DRV_LOG(ERR, "Error reading hwinfo table");
		return -EIO;
	}

	nfp_eth_table = nfp_eth_read_ports(cpp);
	if (!nfp_eth_table) {
		PMD_DRV_LOG(ERR, "Error reading NFP ethernet table");
		return -EIO;
	}

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		if (nfp_fw_setup(dev, cpp, nfp_eth_table, hwinfo)) {
			PMD_DRV_LOG(INFO, "Error when uploading firmware");
			ret = -EIO;
			goto error;
		}
	}

	/* Now the symbol table should be there */
	sym_tbl = nfp_rtsym_table_read(cpp);
	if (!sym_tbl) {
		PMD_DRV_LOG(ERR, "Something is wrong with the firmware"
			" symbol table");
		ret = -EIO;
		goto error;
	}

	total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
	if (total_ports != (int)nfp_eth_table->count) {
		PMD_DRV_LOG(ERR, "Inconsistent number of ports");
		ret = -EIO;
		goto error;
	}
	PMD_INIT_LOG(INFO, "Total pf ports: %d", total_ports);

	if (total_ports <= 0 || total_ports > 8) {
		PMD_DRV_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
		ret = -ENODEV;
		goto error;
	}

	for (i = 0; i < total_ports; i++) {
		ret = nfp_pf_create_dev(dev, i, total_ports, cpp, hwinfo,
					nfp_eth_table->ports[i].index,
					sym_tbl, &priv);
		if (ret)
			break;
	}

error:
	free(nfp_eth_table);
	return ret;
}
int nfp_logtype_init;
int nfp_logtype_driver;

static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
			       PCI_DEVICE_ID_NFP4000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
			       PCI_DEVICE_ID_NFP6000_PF_NIC)
	},
	{
		.vendor_id = 0,
	},
};

static const struct rte_pci_id pci_id_nfp_vf_net_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
			       PCI_DEVICE_ID_NFP6000_VF_NIC)
	},
	{
		.vendor_id = 0,
	},
};
static int eth_nfp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			     struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct nfp_net_adapter), nfp_net_init);
}

static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	struct nfp_net_hw *hw, *hwport0;
	int port = 0;

	eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
	if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
	    (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
		port = get_pf_port_number(eth_dev->data->name);
		/*
		 * hotplug is not possible with multiport PF although freeing
		 * data structures can be done for first port.
		 */
		if (port != 0)
			return -ENOTSUP;
		hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
		hw = &hwport0[port];
		nfp_cpp_area_free(hw->ctrl_area);
		nfp_cpp_area_free(hw->hwqueues_area);
		free(hw->hwinfo);
		free(hw->sym_tbl);
		nfp_cpp_free(hw->cpp);
	} else {
		hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	}
	/* hotplug is not possible with multiport PF */
	if (hw->pf_multiport_enabled)
		return -ENOTSUP;
	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}
static struct rte_pci_driver rte_nfp_net_pf_pmd = {
	.id_table = pci_id_nfp_pf_net_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = nfp_pf_pci_probe,
	.remove = eth_nfp_pci_remove,
};

static struct rte_pci_driver rte_nfp_net_vf_pmd = {
	.id_table = pci_id_nfp_vf_net_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_nfp_pci_probe,
	.remove = eth_nfp_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd);
RTE_PMD_REGISTER_PCI(net_nfp_vf, rte_nfp_net_vf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map);
RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
RTE_INIT(nfp_init_log)
{
	nfp_logtype_init = rte_log_register("pmd.net.nfp.init");
	if (nfp_logtype_init >= 0)
		rte_log_set_level(nfp_logtype_init, RTE_LOG_NOTICE);
	nfp_logtype_driver = rte_log_register("pmd.net.nfp.driver");
	if (nfp_logtype_driver >= 0)
		rte_log_set_level(nfp_logtype_driver, RTE_LOG_NOTICE);
}
/*
 * Local variables:
 * c-file-style: "Linux"
 * indent-tabs-mode: t
 * End:
 */