/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2013-2017 Wind River Systems, Inc.
 */

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_memory.h>
#include <rte_io.h>

#include "rte_avp_common.h"
#include "rte_avp_fifo.h"

#include "avp_logs.h"

int avp_logtype_driver;

static int avp_dev_create(struct rte_pci_device *pci_dev,
			  struct rte_eth_dev *eth_dev);

static int avp_dev_configure(struct rte_eth_dev *dev);
static int avp_dev_start(struct rte_eth_dev *dev);
static void avp_dev_stop(struct rte_eth_dev *dev);
static void avp_dev_close(struct rte_eth_dev *dev);
static void avp_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static int avp_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int avp_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static void avp_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void avp_dev_promiscuous_disable(struct rte_eth_dev *dev);

static int avp_dev_rx_queue_setup(struct rte_eth_dev *dev,
				  uint16_t rx_queue_id,
				  uint16_t nb_rx_desc,
				  unsigned int socket_id,
				  const struct rte_eth_rxconf *rx_conf,
				  struct rte_mempool *pool);

static int avp_dev_tx_queue_setup(struct rte_eth_dev *dev,
				  uint16_t tx_queue_id,
				  uint16_t nb_tx_desc,
				  unsigned int socket_id,
				  const struct rte_eth_txconf *tx_conf);

static uint16_t avp_recv_scattered_pkts(void *rx_queue,
					struct rte_mbuf **rx_pkts,
					uint16_t nb_pkts);

static uint16_t avp_recv_pkts(void *rx_queue,
			      struct rte_mbuf **rx_pkts,
			      uint16_t nb_pkts);

static uint16_t avp_xmit_scattered_pkts(void *tx_queue,
					struct rte_mbuf **tx_pkts,
					uint16_t nb_pkts);

static uint16_t avp_xmit_pkts(void *tx_queue,
			      struct rte_mbuf **tx_pkts,
			      uint16_t nb_pkts);

static void avp_dev_rx_queue_release(void *rxq);
static void avp_dev_tx_queue_release(void *txq);

static int avp_dev_stats_get(struct rte_eth_dev *dev,
			     struct rte_eth_stats *stats);
static void avp_dev_stats_reset(struct rte_eth_dev *dev);

#define AVP_MAX_RX_BURST 64
#define AVP_MAX_TX_BURST 64
#define AVP_MAX_MAC_ADDRS 1
#define AVP_MIN_RX_BUFSIZE ETHER_MIN_LEN

/*
 * Defines the number of microseconds to wait before checking the response
 * queue for completion.
 */
#define AVP_REQUEST_DELAY_USECS (5000)

/*
 * Defines the number of times to check the response queue for completion
 * before declaring a timeout.
 */
#define AVP_MAX_REQUEST_RETRY (100)

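/*
 * Taken together, a request may therefore wait up to
 * AVP_REQUEST_DELAY_USECS * AVP_MAX_REQUEST_RETRY = 5000us * 100 = ~500ms
 * before avp_dev_process_request() declares a timeout.
 */
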
/* Defines the current PCI driver version number */
#define AVP_DPDK_DRIVER_VERSION RTE_AVP_CURRENT_GUEST_VERSION

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_avp_map[] = {
	{ .vendor_id = RTE_AVP_PCI_VENDOR_ID,
	  .device_id = RTE_AVP_PCI_DEVICE_ID,
	  .subsystem_vendor_id = RTE_AVP_PCI_SUB_VENDOR_ID,
	  .subsystem_device_id = RTE_AVP_PCI_SUB_DEVICE_ID,
	  .class_id = RTE_CLASS_ANY_ID,
	},

	{ .vendor_id = 0, /* sentinel */
	},
};

/*
 * dev_ops for avp, bare necessities for basic operation
 */
static const struct eth_dev_ops avp_eth_dev_ops = {
	.dev_configure       = avp_dev_configure,
	.dev_start           = avp_dev_start,
	.dev_stop            = avp_dev_stop,
	.dev_close           = avp_dev_close,
	.dev_infos_get       = avp_dev_info_get,
	.vlan_offload_set    = avp_vlan_offload_set,
	.stats_get           = avp_dev_stats_get,
	.stats_reset         = avp_dev_stats_reset,
	.link_update         = avp_dev_link_update,
	.promiscuous_enable  = avp_dev_promiscuous_enable,
	.promiscuous_disable = avp_dev_promiscuous_disable,
	.rx_queue_setup      = avp_dev_rx_queue_setup,
	.rx_queue_release    = avp_dev_rx_queue_release,
	.tx_queue_setup      = avp_dev_tx_queue_setup,
	.tx_queue_release    = avp_dev_tx_queue_release,
};

/**@{ AVP device flags */
#define AVP_F_PROMISC (1 << 1)
#define AVP_F_CONFIGURED (1 << 2)
#define AVP_F_LINKUP (1 << 3)
#define AVP_F_DETACHED (1 << 4)
/**@} */

/* Ethernet device validation marker */
#define AVP_ETHDEV_MAGIC 0x92972862

/*
 * Defines the AVP device attributes which are attached to an RTE ethernet
 * device
 */
struct avp_dev {
	uint32_t magic; /**< Memory validation marker */
	uint64_t device_id; /**< Unique system identifier */
	struct ether_addr ethaddr; /**< Host specified MAC address */
	struct rte_eth_dev_data *dev_data;
	/**< Back pointer to ethernet device data */
	volatile uint32_t flags; /**< Device operational flags */
	uint16_t port_id; /**< Ethernet port identifier */
	struct rte_mempool *pool; /**< pkt mbuf mempool */
	unsigned int guest_mbuf_size; /**< local pool mbuf size */
	unsigned int host_mbuf_size; /**< host mbuf size */
	unsigned int max_rx_pkt_len; /**< maximum receive unit */
	uint32_t host_features; /**< Supported feature bitmap */
	uint32_t features; /**< Enabled feature bitmap */
	unsigned int num_tx_queues; /**< Negotiated number of transmit queues */
	unsigned int max_tx_queues; /**< Maximum number of transmit queues */
	unsigned int num_rx_queues; /**< Negotiated number of receive queues */
	unsigned int max_rx_queues; /**< Maximum number of receive queues */

	struct rte_avp_fifo *tx_q[RTE_AVP_MAX_QUEUES]; /**< TX queue */
	struct rte_avp_fifo *rx_q[RTE_AVP_MAX_QUEUES]; /**< RX queue */
	struct rte_avp_fifo *alloc_q[RTE_AVP_MAX_QUEUES];
	/**< Allocated mbufs queue */
	struct rte_avp_fifo *free_q[RTE_AVP_MAX_QUEUES];
	/**< To be freed mbufs queue */

	/* mutual exclusion over the 'flag' and 'resp_q/req_q' fields */
	rte_spinlock_t lock;

	/* For request & response */
	struct rte_avp_fifo *req_q; /**< Request queue */
	struct rte_avp_fifo *resp_q; /**< Response queue */
	void *host_sync_addr; /**< (host) Req/Resp Mem address */
	void *sync_addr; /**< Req/Resp Mem address */
	void *host_mbuf_addr; /**< (host) MBUF pool start address */
	void *mbuf_addr; /**< MBUF pool start address */
} __rte_cache_aligned;

/* RTE ethernet private data */
struct avp_adapter {
	struct avp_dev avp;
} __rte_cache_aligned;

/* 32-bit MMIO register write */
#define AVP_WRITE32(_value, _addr) rte_write32_relaxed((_value), (_addr))

/* 32-bit MMIO register read */
#define AVP_READ32(_addr) rte_read32_relaxed((_addr))

/* Macro to cast the ethernet device private data to an AVP object */
#define AVP_DEV_PRIVATE_TO_HW(adapter) \
	(&((struct avp_adapter *)adapter)->avp)

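/*
 * Typical usage, as seen throughout this file:
 *
 *	struct avp_dev *avp =
 *		AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 */
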
/*
 * Defines the structure of an AVP device queue for the purpose of handling the
 * receive and transmit burst callback functions
 */
struct avp_queue {
	struct rte_eth_dev_data *dev_data;
	/**< Backpointer to ethernet device data */
	struct avp_dev *avp; /**< Backpointer to AVP device */
	uint16_t queue_id;
	/**< Queue identifier used for indexing current queue */
	uint16_t queue_base;
	/**< Base queue identifier for queue servicing */
	uint16_t queue_limit;
	/**< Maximum queue identifier for queue servicing */
	uint64_t packets;
	uint64_t bytes;
	uint64_t errors;
};

/* send a request and wait for a response
 *
 * @warning must be called while holding the avp->lock spinlock.
 */
static int
avp_dev_process_request(struct avp_dev *avp, struct rte_avp_request *request)
{
	unsigned int retry = AVP_MAX_REQUEST_RETRY;
	void *resp_addr = NULL;
	unsigned int count;
	int ret;

	PMD_DRV_LOG(DEBUG, "Sending request %u to host\n", request->req_id);

	request->result = -ENOTSUP;

	/* Discard any stale responses before starting a new request */
	while (avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1))
		PMD_DRV_LOG(DEBUG, "Discarding stale response\n");

	rte_memcpy(avp->sync_addr, request, sizeof(*request));
	count = avp_fifo_put(avp->req_q, &avp->host_sync_addr, 1);
	if (count < 1) {
		PMD_DRV_LOG(ERR, "Cannot send request %u to host\n",
			    request->req_id);
		ret = -EBUSY;
		goto done;
	}

	while (retry--) {
		/* wait for a response */
		usleep(AVP_REQUEST_DELAY_USECS);

		count = avp_fifo_count(avp->resp_q);
		if (count >= 1) {
			/* response received */
			break;
		}

		if ((count < 1) && (retry == 0)) {
			PMD_DRV_LOG(ERR, "Timeout while waiting for a response for %u\n",
				    request->req_id);
			ret = -ETIME;
			goto done;
		}
	}

	/* retrieve the response */
	count = avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1);
	if ((count != 1) || (resp_addr != avp->host_sync_addr)) {
		PMD_DRV_LOG(ERR, "Invalid response from host, count=%u resp=%p host_sync_addr=%p\n",
			    count, resp_addr, avp->host_sync_addr);
		ret = -ENODATA;
		goto done;
	}

	/* copy to user buffer */
	rte_memcpy(request, avp->sync_addr, sizeof(*request));
	ret = 0;

	PMD_DRV_LOG(DEBUG, "Result %d received for request %u\n",
		    request->result, request->req_id);

done:
	return ret;
}

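/*
 * A note on the addressing above: the request is staged in the shared sync
 * area.  'sync_addr' is that area as seen in guest virtual address space,
 * while 'host_sync_addr' is the host's pointer to the same memory; it is the
 * host pointer that travels through the req_q/resp_q fifos, and the response
 * is then read back out of the same sync area.
 */
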
static int
avp_dev_ctrl_set_link_state(struct rte_eth_dev *eth_dev, unsigned int state)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_request request;
	int ret;

	/* setup a link state change request */
	memset(&request, 0, sizeof(request));
	request.req_id = RTE_AVP_REQ_CFG_NETWORK_IF;
	request.if_up = state;

	ret = avp_dev_process_request(avp, &request);

	return ret == 0 ? request.result : ret;
}

static int
avp_dev_ctrl_set_config(struct rte_eth_dev *eth_dev,
			struct rte_avp_device_config *config)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_request request;
	int ret;

	/* setup a configure request */
	memset(&request, 0, sizeof(request));
	request.req_id = RTE_AVP_REQ_CFG_DEVICE;
	memcpy(&request.config, config, sizeof(request.config));

	ret = avp_dev_process_request(avp, &request);

	return ret == 0 ? request.result : ret;
}

static int
avp_dev_ctrl_shutdown(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_request request;
	int ret;

	/* setup a shutdown request */
	memset(&request, 0, sizeof(request));
	request.req_id = RTE_AVP_REQ_SHUTDOWN_DEVICE;

	ret = avp_dev_process_request(avp, &request);

	return ret == 0 ? request.result : ret;
}

/* translate from host mbuf virtual address to guest virtual address */
static inline void *
avp_dev_translate_buffer(struct avp_dev *avp, void *host_mbuf_address)
{
	return RTE_PTR_ADD(RTE_PTR_SUB(host_mbuf_address,
				       (uintptr_t)avp->host_mbuf_addr),
			   (uintptr_t)avp->mbuf_addr);
}

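/*
 * A minimal sketch of the arithmetic (illustrative values only): if the host
 * mbuf pool starts at host VA 0x7f0000000000 and the same pool is mapped in
 * the guest at VA 0x400000000000, then a host buffer at 0x7f0000001000
 * resolves to 0x400000001000, i.e.:
 *
 *	guest_va = host_va - avp->host_mbuf_addr + avp->mbuf_addr
 */
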
/* translate from host physical address to guest virtual address */
static void *
avp_dev_translate_address(struct rte_eth_dev *eth_dev,
			  rte_iova_t host_phys_addr)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_mem_resource *resource;
	struct rte_avp_memmap_info *info;
	struct rte_avp_memmap *map;
	off_t offset;
	void *addr;
	unsigned int i;

	addr = pci_dev->mem_resource[RTE_AVP_PCI_MEMORY_BAR].addr;
	resource = &pci_dev->mem_resource[RTE_AVP_PCI_MEMMAP_BAR];
	info = (struct rte_avp_memmap_info *)resource->addr;

	offset = 0;
	for (i = 0; i < info->nb_maps; i++) {
		/* search all segments looking for a matching address */
		map = &info->maps[i];

		if ((host_phys_addr >= map->phys_addr) &&
		    (host_phys_addr < (map->phys_addr + map->length))) {
			/* address is within this segment */
			offset += (host_phys_addr - map->phys_addr);
			addr = RTE_PTR_ADD(addr, (uintptr_t)offset);

			PMD_DRV_LOG(DEBUG, "Translating host physical 0x%" PRIx64 " to guest virtual 0x%p\n",
				    host_phys_addr, addr);

			return addr;
		}
		offset += map->length;
	}

	return NULL;
}

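/*
 * A worked example (illustrative values only): given two memmap segments,
 * segment 0 with phys_addr=0x1000/length=0x1000 and segment 1 with
 * phys_addr=0x8000/length=0x2000, a host physical address of 0x8100 falls in
 * segment 1, so the returned pointer is the memory BAR address plus 0x1000
 * (segment 0's accumulated length) plus 0x100 (the offset into segment 1).
 */
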
/* verify that the incoming device version is compatible with our version */
static int
avp_dev_version_check(uint32_t version)
{
	uint32_t driver = RTE_AVP_STRIP_MINOR_VERSION(AVP_DPDK_DRIVER_VERSION);
	uint32_t device = RTE_AVP_STRIP_MINOR_VERSION(version);

	if (device <= driver) {
		/* the host driver version is less than or equal to ours */
		return 0;
	}

	return 1;
}

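/*
 * In other words, compatibility is decided on the stripped (non-minor)
 * version only: a host reporting the same or an older stripped version than
 * AVP_DPDK_DRIVER_VERSION is accepted (return 0), a newer one is rejected
 * (return 1); minor version differences never fail the check.
 */
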
/* verify that memory regions have expected version and validation markers */
static int
avp_dev_check_regions(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_avp_memmap_info *memmap;
	struct rte_avp_device_info *info;
	struct rte_mem_resource *resource;
	unsigned int i;

	/* Dump resource info for debug */
	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
		resource = &pci_dev->mem_resource[i];
		if ((resource->phys_addr == 0) || (resource->len == 0))
			continue;

		PMD_DRV_LOG(DEBUG, "resource[%u]: phys=0x%" PRIx64 " len=%" PRIu64 " addr=%p\n",
			    i, resource->phys_addr,
			    resource->len, resource->addr);

		switch (i) {
		case RTE_AVP_PCI_MEMMAP_BAR:
			memmap = (struct rte_avp_memmap_info *)resource->addr;
			if ((memmap->magic != RTE_AVP_MEMMAP_MAGIC) ||
			    (memmap->version != RTE_AVP_MEMMAP_VERSION)) {
				PMD_DRV_LOG(ERR, "Invalid memmap magic 0x%08x and version %u\n",
					    memmap->magic, memmap->version);
				return -EINVAL;
			}
			break;

		case RTE_AVP_PCI_DEVICE_BAR:
			info = (struct rte_avp_device_info *)resource->addr;
			if ((info->magic != RTE_AVP_DEVICE_MAGIC) ||
			    avp_dev_version_check(info->version)) {
				PMD_DRV_LOG(ERR, "Invalid device info magic 0x%08x or version 0x%08x > 0x%08x\n",
					    info->magic, info->version,
					    AVP_DPDK_DRIVER_VERSION);
				return -EINVAL;
			}
			break;

		case RTE_AVP_PCI_MEMORY_BAR:
		case RTE_AVP_PCI_MMIO_BAR:
			if (resource->addr == NULL) {
				PMD_DRV_LOG(ERR, "Missing address space for BAR%u\n",
					    i);
				return -EINVAL;
			}
			break;

		case RTE_AVP_PCI_MSIX_BAR:
		default:
			/* no validation required */
			break;
		}
	}

	return 0;
}

static int
avp_dev_detach(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int ret;

	PMD_DRV_LOG(NOTICE, "Detaching port %u from AVP device 0x%" PRIx64 "\n",
		    eth_dev->data->port_id, avp->device_id);

	rte_spinlock_lock(&avp->lock);

	if (avp->flags & AVP_F_DETACHED) {
		PMD_DRV_LOG(NOTICE, "port %u already detached\n",
			    eth_dev->data->port_id);
		ret = 0;
		goto unlock;
	}

	/* shutdown the device first so the host stops sending us packets. */
	ret = avp_dev_ctrl_shutdown(eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to send/recv shutdown to host, ret=%d\n",
			    ret);
		avp->flags &= ~AVP_F_DETACHED;
		goto unlock;
	}

	avp->flags |= AVP_F_DETACHED;
	rte_wmb();

	/* wait for queues to acknowledge the presence of the detach flag */
	rte_delay_ms(1);

	ret = 0;

unlock:
	rte_spinlock_unlock(&avp->lock);
	return ret;
}

static void
_avp_set_rx_queue_mappings(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct avp_dev *avp =
		AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct avp_queue *rxq;
	uint16_t queue_count;
	uint16_t remainder;

	rxq = (struct avp_queue *)eth_dev->data->rx_queues[rx_queue_id];

	/*
	 * Must map all AVP fifos as evenly as possible between the configured
	 * device queues.  Each device queue will service a subset of the AVP
	 * fifos.  If there is an odd number of device queues the first set of
	 * device queues will get the extra AVP fifos.
	 */
	queue_count = avp->num_rx_queues / eth_dev->data->nb_rx_queues;
	remainder = avp->num_rx_queues % eth_dev->data->nb_rx_queues;
	if (rx_queue_id < remainder) {
		/* these queues must service one extra FIFO */
		rxq->queue_base = rx_queue_id * (queue_count + 1);
		rxq->queue_limit = rxq->queue_base + (queue_count + 1) - 1;
	} else {
		/* these queues service the regular number of FIFO */
		rxq->queue_base = ((remainder * (queue_count + 1)) +
				   ((rx_queue_id - remainder) * queue_count));
		rxq->queue_limit = rxq->queue_base + queue_count - 1;
	}

	PMD_DRV_LOG(DEBUG, "rxq %u at %p base %u limit %u\n",
		    rx_queue_id, rxq, rxq->queue_base, rxq->queue_limit);

	rxq->queue_id = rxq->queue_base;
}

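/*
 * Worked example: with avp->num_rx_queues = 5 AVP fifos and
 * eth_dev->data->nb_rx_queues = 2 device queues, queue_count = 2 and
 * remainder = 1, so device queue 0 services fifos 0-2 (one extra) and
 * device queue 1 services fifos 3-4.
 */
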
static void
_avp_set_queue_counts(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_device_info *host_info;
	void *addr;

	addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
	host_info = (struct rte_avp_device_info *)addr;

	/*
	 * the transmit direction is not negotiated beyond respecting the max
	 * number of queues because the host can handle arbitrary guest tx
	 * queues (host rx queues).
	 */
	avp->num_tx_queues = eth_dev->data->nb_tx_queues;

	/*
	 * the receive direction is more restrictive.  The host requires a
	 * minimum number of guest rx queues (host tx queues) therefore
	 * negotiate a value that is at least as large as the host minimum
	 * requirement.  If the host and guest values are not identical then a
	 * mapping will be established in the receive_queue_setup function.
	 */
	avp->num_rx_queues = RTE_MAX(host_info->min_rx_queues,
				     eth_dev->data->nb_rx_queues);

	PMD_DRV_LOG(DEBUG, "Requesting %u Tx and %u Rx queues from host\n",
		    avp->num_tx_queues, avp->num_rx_queues);
}

static int
avp_dev_attach(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_device_config config;
	unsigned int i;
	int ret;

	PMD_DRV_LOG(NOTICE, "Attaching port %u to AVP device 0x%" PRIx64 "\n",
		    eth_dev->data->port_id, avp->device_id);

	rte_spinlock_lock(&avp->lock);

	if (!(avp->flags & AVP_F_DETACHED)) {
		PMD_DRV_LOG(NOTICE, "port %u already attached\n",
			    eth_dev->data->port_id);
		ret = 0;
		goto unlock;
	}

	/*
	 * make sure that the detached flag is set prior to reconfiguring the
	 * queues.
	 */
	avp->flags |= AVP_F_DETACHED;
	rte_wmb();

	/*
	 * re-run the device create utility which will parse the new host info
	 * and setup the AVP device queue pointers.
	 */
	ret = avp_dev_create(RTE_ETH_DEV_TO_PCI(eth_dev), eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to re-create AVP device, ret=%d\n",
			    ret);
		goto unlock;
	}

	if (avp->flags & AVP_F_CONFIGURED) {
		/*
		 * Update the receive queue mapping to handle cases where the
		 * source and destination hosts have different queue
		 * requirements.  As long as the DETACHED flag is asserted the
		 * queue table should not be referenced so it should be safe to
		 * update it.
		 */
		_avp_set_queue_counts(eth_dev);
		for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
			_avp_set_rx_queue_mappings(eth_dev, i);

		/*
		 * Update the host with our config details so that it knows the
		 * device is active.
		 */
		memset(&config, 0, sizeof(config));
		config.device_id = avp->device_id;
		config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK;
		config.driver_version = AVP_DPDK_DRIVER_VERSION;
		config.features = avp->features;
		config.num_tx_queues = avp->num_tx_queues;
		config.num_rx_queues = avp->num_rx_queues;
		config.if_up = !!(avp->flags & AVP_F_LINKUP);

		ret = avp_dev_ctrl_set_config(eth_dev, &config);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
				    ret);
			goto unlock;
		}
	}

	rte_wmb();
	avp->flags &= ~AVP_F_DETACHED;

	ret = 0;

unlock:
	rte_spinlock_unlock(&avp->lock);
	return ret;
}

static void
avp_dev_interrupt_handler(void *data)
{
	struct rte_eth_dev *eth_dev = data;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
	uint32_t status, value;
	int ret;

	if (registers == NULL)
		rte_panic("no mapped MMIO register space\n");

	/* read the interrupt status register
	 * note: this register clears on read so all raised interrupts must be
	 *    handled or remembered for later processing
	 */
	status = AVP_READ32(
		RTE_PTR_ADD(registers,
			    RTE_AVP_INTERRUPT_STATUS_OFFSET));

	if (status & RTE_AVP_MIGRATION_INTERRUPT_MASK) {
		/* handle interrupt based on current status */
		value = AVP_READ32(
			RTE_PTR_ADD(registers,
				    RTE_AVP_MIGRATION_STATUS_OFFSET));
		switch (value) {
		case RTE_AVP_MIGRATION_DETACHED:
			ret = avp_dev_detach(eth_dev);
			break;
		case RTE_AVP_MIGRATION_ATTACHED:
			ret = avp_dev_attach(eth_dev);
			break;
		default:
			PMD_DRV_LOG(ERR, "unexpected migration status, status=%u\n",
				    value);
			ret = -EINVAL;
		}

		/* acknowledge the request by writing out our current status */
		value = (ret == 0 ? value : RTE_AVP_MIGRATION_ERROR);
		AVP_WRITE32(value,
			    RTE_PTR_ADD(registers,
					RTE_AVP_MIGRATION_ACK_OFFSET));

		PMD_DRV_LOG(NOTICE, "AVP migration interrupt handled\n");
	}

	if (status & ~RTE_AVP_MIGRATION_INTERRUPT_MASK)
		PMD_DRV_LOG(WARNING, "AVP unexpected interrupt, status=0x%08x\n",
			    status);

	/* re-enable UIO interrupt handling */
	ret = rte_intr_enable(&pci_dev->intr_handle);
	if (ret < 0)
		PMD_DRV_LOG(ERR, "Failed to re-enable UIO interrupts, ret=%d\n",
			    ret);
}

static int
avp_dev_enable_interrupts(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
	int ret;

	if (registers == NULL)
		return -EINVAL;

	/* enable UIO interrupt handling */
	ret = rte_intr_enable(&pci_dev->intr_handle);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to enable UIO interrupts, ret=%d\n",
			    ret);
		return ret;
	}

	/* inform the device that all interrupts are enabled */
	AVP_WRITE32(RTE_AVP_APP_INTERRUPTS_MASK,
		    RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET));

	return 0;
}

static int
avp_dev_disable_interrupts(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
	int ret;

	if (registers == NULL)
		return 0;

	/* inform the device that all interrupts are disabled */
	AVP_WRITE32(RTE_AVP_NO_INTERRUPTS_MASK,
		    RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET));

	/* disable UIO interrupt handling */
	ret = rte_intr_disable(&pci_dev->intr_handle);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to disable UIO interrupts, ret=%d\n",
			    ret);
		return ret;
	}

	return 0;
}

static int
avp_dev_setup_interrupts(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	int ret;

	/* register a callback handler with UIO for interrupt notifications */
	ret = rte_intr_callback_register(&pci_dev->intr_handle,
					 avp_dev_interrupt_handler,
					 (void *)eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to register UIO interrupt callback, ret=%d\n",
			    ret);
		return ret;
	}

	/* enable interrupt processing */
	return avp_dev_enable_interrupts(eth_dev);
}

static int
avp_dev_migration_pending(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
	uint32_t value;

	if (registers == NULL)
		return 0;

	value = AVP_READ32(RTE_PTR_ADD(registers,
				       RTE_AVP_MIGRATION_STATUS_OFFSET));
	if (value == RTE_AVP_MIGRATION_DETACHED) {
		/* migration is in progress; ack it if we have not already */
		AVP_WRITE32(value,
			    RTE_PTR_ADD(registers,
					RTE_AVP_MIGRATION_ACK_OFFSET));
		return 1;
	}
	return 0;
}

/*
 * create an AVP device using the supplied device info by first translating it
 * to guest address space(s).
 */
static int
avp_dev_create(struct rte_pci_device *pci_dev,
	       struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_device_info *host_info;
	struct rte_mem_resource *resource;
	unsigned int i;

	resource = &pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR];
	if (resource->addr == NULL) {
		PMD_DRV_LOG(ERR, "BAR%u is not mapped\n",
			    RTE_AVP_PCI_DEVICE_BAR);
		return -EFAULT;
	}
	host_info = (struct rte_avp_device_info *)resource->addr;

	if ((host_info->magic != RTE_AVP_DEVICE_MAGIC) ||
	    avp_dev_version_check(host_info->version)) {
		PMD_DRV_LOG(ERR, "Invalid AVP PCI device, magic 0x%08x version 0x%08x > 0x%08x\n",
			    host_info->magic, host_info->version,
			    AVP_DPDK_DRIVER_VERSION);
		return -EINVAL;
	}

	PMD_DRV_LOG(DEBUG, "AVP host device is v%u.%u.%u\n",
		    RTE_AVP_GET_RELEASE_VERSION(host_info->version),
		    RTE_AVP_GET_MAJOR_VERSION(host_info->version),
		    RTE_AVP_GET_MINOR_VERSION(host_info->version));

	PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u TX queue(s)\n",
		    host_info->min_tx_queues, host_info->max_tx_queues);
	PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u RX queue(s)\n",
		    host_info->min_rx_queues, host_info->max_rx_queues);
	PMD_DRV_LOG(DEBUG, "AVP host supports features 0x%08x\n",
		    host_info->features);

	if (avp->magic != AVP_ETHDEV_MAGIC) {
		/*
		 * First time initialization (i.e., not during a VM
		 * migration)
		 */
		memset(avp, 0, sizeof(*avp));
		avp->magic = AVP_ETHDEV_MAGIC;
		avp->dev_data = eth_dev->data;
		avp->port_id = eth_dev->data->port_id;
		avp->host_mbuf_size = host_info->mbuf_size;
		avp->host_features = host_info->features;
		rte_spinlock_init(&avp->lock);
		memcpy(&avp->ethaddr.addr_bytes[0],
		       host_info->ethaddr, ETHER_ADDR_LEN);
		/* adjust max values to not exceed our max */
		avp->max_tx_queues =
			RTE_MIN(host_info->max_tx_queues, RTE_AVP_MAX_QUEUES);
		avp->max_rx_queues =
			RTE_MIN(host_info->max_rx_queues, RTE_AVP_MAX_QUEUES);
	} else {
		/* Re-attaching during migration */

		/* TODO... requires validation of host values */
		if ((host_info->features & avp->features) != avp->features) {
			PMD_DRV_LOG(ERR, "AVP host features mismatched; 0x%08x, host=0x%08x\n",
				    avp->features, host_info->features);
			/* this should not be possible; continue for now */
		}
	}

	/* the device id is allowed to change over migrations */
	avp->device_id = host_info->device_id;

	/* translate incoming host addresses to guest address space */
	PMD_DRV_LOG(DEBUG, "AVP first host tx queue at 0x%" PRIx64 "\n",
		    host_info->tx_phys);
	PMD_DRV_LOG(DEBUG, "AVP first host alloc queue at 0x%" PRIx64 "\n",
		    host_info->alloc_phys);
	for (i = 0; i < avp->max_tx_queues; i++) {
		avp->tx_q[i] = avp_dev_translate_address(eth_dev,
			host_info->tx_phys + (i * host_info->tx_size));

		avp->alloc_q[i] = avp_dev_translate_address(eth_dev,
			host_info->alloc_phys + (i * host_info->alloc_size));
	}

	PMD_DRV_LOG(DEBUG, "AVP first host rx queue at 0x%" PRIx64 "\n",
		    host_info->rx_phys);
	PMD_DRV_LOG(DEBUG, "AVP first host free queue at 0x%" PRIx64 "\n",
		    host_info->free_phys);
	for (i = 0; i < avp->max_rx_queues; i++) {
		avp->rx_q[i] = avp_dev_translate_address(eth_dev,
			host_info->rx_phys + (i * host_info->rx_size));
		avp->free_q[i] = avp_dev_translate_address(eth_dev,
			host_info->free_phys + (i * host_info->free_size));
	}

	PMD_DRV_LOG(DEBUG, "AVP host request queue at 0x%" PRIx64 "\n",
		    host_info->req_phys);
	PMD_DRV_LOG(DEBUG, "AVP host response queue at 0x%" PRIx64 "\n",
		    host_info->resp_phys);
	PMD_DRV_LOG(DEBUG, "AVP host sync address at 0x%" PRIx64 "\n",
		    host_info->sync_phys);
	PMD_DRV_LOG(DEBUG, "AVP host mbuf address at 0x%" PRIx64 "\n",
		    host_info->mbuf_phys);
	avp->req_q = avp_dev_translate_address(eth_dev, host_info->req_phys);
	avp->resp_q = avp_dev_translate_address(eth_dev, host_info->resp_phys);
	avp->sync_addr =
		avp_dev_translate_address(eth_dev, host_info->sync_phys);
	avp->mbuf_addr =
		avp_dev_translate_address(eth_dev, host_info->mbuf_phys);

	/*
	 * store the host mbuf virtual address so that we can calculate
	 * relative offsets for each mbuf as they are processed
	 */
	avp->host_mbuf_addr = host_info->mbuf_va;
	avp->host_sync_addr = host_info->sync_va;

	/*
	 * store the maximum packet length that is supported by the host.
	 */
	avp->max_rx_pkt_len = host_info->max_rx_pkt_len;
	PMD_DRV_LOG(DEBUG, "AVP host max receive packet length is %u\n",
		    host_info->max_rx_pkt_len);

	return 0;
}

/*
 * This function is based on probe() function in avp_pci.c
 * It returns 0 on success.
 */
static int
eth_avp_dev_init(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp =
		AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_pci_device *pci_dev;
	int ret;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	eth_dev->dev_ops = &avp_eth_dev_ops;
	eth_dev->rx_pkt_burst = &avp_recv_pkts;
	eth_dev->tx_pkt_burst = &avp_xmit_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/*
		 * no setup required on secondary processes.  All data is saved
		 * in dev_private by the primary process.  All resources should
		 * be mapped to the same virtual address so all pointers should
		 * be valid.
		 */
		if (eth_dev->data->scattered_rx) {
			PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
			eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
			eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
		}
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Check current migration status */
	if (avp_dev_migration_pending(eth_dev)) {
		PMD_DRV_LOG(ERR, "VM live migration operation in progress\n");
		return -EBUSY;
	}

	/* Check BAR resources */
	ret = avp_dev_check_regions(eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to validate BAR resources, ret=%d\n",
			    ret);
		return ret;
	}

	/* Enable interrupts */
	ret = avp_dev_setup_interrupts(eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to enable interrupts, ret=%d\n", ret);
		return ret;
	}

	/* Handle each subtype */
	ret = avp_dev_create(pci_dev, eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to create device, ret=%d\n", ret);
		return ret;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate %d bytes needed to store MAC addresses\n",
			    ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	/* Get a mac from device config */
	ether_addr_copy(&avp->ethaddr, &eth_dev->data->mac_addrs[0]);

	return 0;
}

static int
eth_avp_dev_uninit(struct rte_eth_dev *eth_dev)
{
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	if (eth_dev->data == NULL)
		return 0;

	ret = avp_dev_disable_interrupts(eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to disable interrupts, ret=%d\n", ret);
		return ret;
	}

	return 0;
}

static int
eth_avp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		  struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct avp_adapter),
					     eth_avp_dev_init);
}

static int
eth_avp_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev,
					      eth_avp_dev_uninit);
}

static struct rte_pci_driver rte_avp_pmd = {
	.id_table = pci_id_avp_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_avp_pci_probe,
	.remove = eth_avp_pci_remove,
};

static int
avp_dev_enable_scattered(struct rte_eth_dev *eth_dev,
			 struct avp_dev *avp)
{
	unsigned int max_rx_pkt_len;

	max_rx_pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;

	if ((max_rx_pkt_len > avp->guest_mbuf_size) ||
	    (max_rx_pkt_len > avp->host_mbuf_size)) {
		/*
		 * If the guest MTU is greater than either the host or guest
		 * buffers then chained mbufs have to be enabled in the TX
		 * direction.  It is assumed that the application will not need
		 * to send packets larger than their max_rx_pkt_len (MRU).
		 */
		return 1;
	}

	if ((avp->max_rx_pkt_len > avp->guest_mbuf_size) ||
	    (avp->max_rx_pkt_len > avp->host_mbuf_size)) {
		/*
		 * If the host MRU is greater than its own mbuf size or the
		 * guest mbuf size then chained mbufs have to be enabled in the
		 * RX direction.
		 */
		return 1;
	}

	return 0;
}

static int
avp_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		       uint16_t rx_queue_id,
		       uint16_t nb_rx_desc,
		       unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *pool)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct avp_queue *rxq;

	if (rx_queue_id >= eth_dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "RX queue id is out of range: rx_queue_id=%u, nb_rx_queues=%u\n",
			    rx_queue_id, eth_dev->data->nb_rx_queues);
		return -EINVAL;
	}

	/* Save mbuf pool pointer */
	avp->pool = pool;

	/* Save the local mbuf size */
	mbp_priv = rte_mempool_get_priv(pool);
	avp->guest_mbuf_size = (uint16_t)(mbp_priv->mbuf_data_room_size);
	avp->guest_mbuf_size -= RTE_PKTMBUF_HEADROOM;

	if (avp_dev_enable_scattered(eth_dev, avp)) {
		if (!eth_dev->data->scattered_rx) {
			PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
			eth_dev->data->scattered_rx = 1;
			eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
			eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
		}
	}

	PMD_DRV_LOG(DEBUG, "AVP max_rx_pkt_len=(%u,%u) mbuf_size=(%u,%u)\n",
		    avp->max_rx_pkt_len,
		    eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
		    avp->host_mbuf_size,
		    avp->guest_mbuf_size);

	/* allocate a queue object */
	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct avp_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate new Rx queue object\n");
		return -ENOMEM;
	}

	/* save back pointers to AVP and Ethernet devices */
	rxq->avp = avp;
	rxq->dev_data = eth_dev->data;
	eth_dev->data->rx_queues[rx_queue_id] = (void *)rxq;

	/* setup the queue receive mapping for the current queue. */
	_avp_set_rx_queue_mappings(eth_dev, rx_queue_id);

	PMD_DRV_LOG(DEBUG, "Rx queue %u setup at %p\n", rx_queue_id, rxq);

	return 0;
}

static int
avp_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
		       uint16_t tx_queue_id,
		       uint16_t nb_tx_desc,
		       unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct avp_queue *txq;

	if (tx_queue_id >= eth_dev->data->nb_tx_queues) {
		PMD_DRV_LOG(ERR, "TX queue id is out of range: tx_queue_id=%u, nb_tx_queues=%u\n",
			    tx_queue_id, eth_dev->data->nb_tx_queues);
		return -EINVAL;
	}

	/* allocate a queue object */
	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct avp_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate new Tx queue object\n");
		return -ENOMEM;
	}

	/* only the configured set of transmit queues are used */
	txq->queue_id = tx_queue_id;
	txq->queue_base = tx_queue_id;
	txq->queue_limit = tx_queue_id;

	/* save back pointers to AVP and Ethernet devices */
	txq->avp = avp;
	txq->dev_data = eth_dev->data;
	eth_dev->data->tx_queues[tx_queue_id] = (void *)txq;

	PMD_DRV_LOG(DEBUG, "Tx queue %u setup at %p\n", tx_queue_id, txq);

	return 0;
}

static inline int
_avp_cmp_ether_addr(struct ether_addr *a, struct ether_addr *b)
{
	uint16_t *_a = (uint16_t *)&a->addr_bytes[0];
	uint16_t *_b = (uint16_t *)&b->addr_bytes[0];

	return (_a[0] ^ _b[0]) | (_a[1] ^ _b[1]) | (_a[2] ^ _b[2]);
}

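/*
 * The comparison above treats the 6-byte MAC address as three 16-bit words;
 * XOR yields zero for identical words, so the OR of the three terms is zero
 * if and only if the two addresses are equal (mirroring memcmp() == 0).
 */
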
static inline int
_avp_mac_filter(struct avp_dev *avp, struct rte_mbuf *m)
{
	struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);

	if (likely(_avp_cmp_ether_addr(&avp->ethaddr, &eth->d_addr) == 0)) {
		/* allow all packets destined to our address */
		return 0;
	}

	if (likely(is_broadcast_ether_addr(&eth->d_addr))) {
		/* allow all broadcast packets */
		return 0;
	}

	if (likely(is_multicast_ether_addr(&eth->d_addr))) {
		/* allow all multicast packets */
		return 0;
	}

	if (avp->flags & AVP_F_PROMISC) {
		/* allow all packets when in promiscuous mode */
		return 0;
	}

	return -1;
}

#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS
static inline void
__avp_dev_buffer_sanity_check(struct avp_dev *avp, struct rte_avp_desc *buf)
{
	struct rte_avp_desc *first_buf;
	struct rte_avp_desc *pkt_buf;
	unsigned int pkt_len;
	unsigned int nb_segs;
	void *pkt_data;
	unsigned int i;

	first_buf = avp_dev_translate_buffer(avp, buf);

	i = 0;
	pkt_len = 0;
	nb_segs = first_buf->nb_segs;
	do {
		/* Adjust pointers for guest addressing */
		pkt_buf = avp_dev_translate_buffer(avp, buf);
		if (pkt_buf == NULL)
			rte_panic("bad buffer: segment %u has an invalid address %p\n",
				  i, buf);
		pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
		if (pkt_data == NULL)
			rte_panic("bad buffer: segment %u has a NULL data pointer\n",
				  i);
		if (pkt_buf->data_len == 0)
			rte_panic("bad buffer: segment %u has 0 data length\n",
				  i);
		pkt_len += pkt_buf->data_len;
		nb_segs--;
		i++;
	} while (nb_segs && (buf = pkt_buf->next) != NULL);

	if (nb_segs != 0)
		rte_panic("bad buffer: expected %u segments found %u\n",
			  first_buf->nb_segs, (first_buf->nb_segs - nb_segs));
	if (pkt_len != first_buf->pkt_len)
		rte_panic("bad buffer: expected length %u found %u\n",
			  first_buf->pkt_len, pkt_len);
}

#define avp_dev_buffer_sanity_check(a, b) \
	__avp_dev_buffer_sanity_check((a), (b))

#else /* RTE_LIBRTE_AVP_DEBUG_BUFFERS */

#define avp_dev_buffer_sanity_check(a, b) do {} while (0)

#endif

/*
 * Copy a host buffer chain to a set of mbufs.  This function assumes that
 * there are exactly the required number of mbufs to copy all source bytes.
 */
static inline struct rte_mbuf *
avp_dev_copy_from_buffers(struct avp_dev *avp,
			  struct rte_avp_desc *buf,
			  struct rte_mbuf **mbufs,
			  unsigned int count)
{
	struct rte_mbuf *m_previous = NULL;
	struct rte_avp_desc *pkt_buf;
	unsigned int total_length = 0;
	unsigned int copy_length;
	unsigned int src_offset;
	struct rte_mbuf *m;
	uint16_t ol_flags;
	uint16_t vlan_tci;
	void *pkt_data;
	unsigned int i;

	avp_dev_buffer_sanity_check(avp, buf);

	/* setup the first source buffer */
	pkt_buf = avp_dev_translate_buffer(avp, buf);
	pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
	total_length = pkt_buf->pkt_len;
	src_offset = 0;

	if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
		ol_flags = PKT_RX_VLAN;
		vlan_tci = pkt_buf->vlan_tci;
	} else {
		ol_flags = 0;
		vlan_tci = 0;
	}

	for (i = 0; (i < count) && (buf != NULL); i++) {
		/* fill each destination buffer */
		m = mbufs[i];

		if (m_previous != NULL)
			m_previous->next = m;

		m_previous = m;

		do {
			/*
			 * Copy as many source buffers as will fit in the
			 * destination buffer.
			 */
			copy_length = RTE_MIN((avp->guest_mbuf_size -
					       rte_pktmbuf_data_len(m)),
					      (pkt_buf->data_len -
					       src_offset));
			rte_memcpy(RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),
					       rte_pktmbuf_data_len(m)),
				   RTE_PTR_ADD(pkt_data, src_offset),
				   copy_length);
			rte_pktmbuf_data_len(m) += copy_length;
			src_offset += copy_length;

			if (likely(src_offset == pkt_buf->data_len)) {
				/* need a new source buffer */
				buf = pkt_buf->next;
				if (buf != NULL) {
					pkt_buf = avp_dev_translate_buffer(
						avp, buf);
					pkt_data = avp_dev_translate_buffer(
						avp, pkt_buf->data);
					src_offset = 0;
				}
			}

			if (unlikely(rte_pktmbuf_data_len(m) ==
				     avp->guest_mbuf_size)) {
				/* need a new destination mbuf */
				break;
			}

		} while (buf != NULL);
	}

	m = mbufs[0];
	m->ol_flags = ol_flags;
	m->nb_segs = count;
	rte_pktmbuf_pkt_len(m) = total_length;
	m->vlan_tci = vlan_tci;

	__rte_mbuf_sanity_check(m, 1);

	return m;
}

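/*
 * Callers size 'mbufs' with a ceiling division, as in
 * avp_recv_scattered_pkts() below:
 *
 *	required = (buf_len + guest_mbuf_size - 1) / guest_mbuf_size;
 *
 * so, for example, a 5000-byte packet with a 2048-byte guest mbuf data room
 * needs (5000 + 2047) / 2048 = 3 mbufs.
 */
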
static uint16_t
avp_recv_scattered_pkts(void *rx_queue,
			struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	struct avp_queue *rxq = (struct avp_queue *)rx_queue;
	struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
	struct rte_mbuf *mbufs[RTE_AVP_MAX_MBUF_SEGMENTS];
	struct avp_dev *avp = rxq->avp;
	struct rte_avp_desc *pkt_buf;
	struct rte_avp_fifo *free_q;
	struct rte_avp_fifo *rx_q;
	struct rte_avp_desc *buf;
	unsigned int count, avail, n;
	unsigned int guest_mbuf_size;
	struct rte_mbuf *m;
	unsigned int required;
	unsigned int buf_len;
	unsigned int port_id;
	unsigned int i;

	if (unlikely(avp->flags & AVP_F_DETACHED)) {
		/* VM live migration in progress */
		return 0;
	}

	guest_mbuf_size = avp->guest_mbuf_size;
	port_id = avp->port_id;
	rx_q = avp->rx_q[rxq->queue_id];
	free_q = avp->free_q[rxq->queue_id];

	/* setup next queue to service */
	rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
		(rxq->queue_id + 1) : rxq->queue_base;

	/* determine how many slots are available in the free queue */
	count = avp_fifo_free_count(free_q);

	/* determine how many packets are available in the rx queue */
	avail = avp_fifo_count(rx_q);

	/* determine how many packets can be received */
	count = RTE_MIN(count, avail);
	count = RTE_MIN(count, nb_pkts);
	count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);

	if (unlikely(count == 0)) {
		/* no free buffers, or no buffers on the rx queue */
		return 0;
	}

	/* retrieve pending packets */
	n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
	PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
		   count, rx_q);

	count = 0;
	for (i = 0; i < n; i++) {
		/* prefetch next entry while processing current one */
		if (i + 1 < n) {
			pkt_buf = avp_dev_translate_buffer(avp,
							   avp_bufs[i + 1]);
			rte_prefetch0(pkt_buf);
		}
		buf = avp_bufs[i];

		/* Peek into the first buffer to determine the total length */
		pkt_buf = avp_dev_translate_buffer(avp, buf);
		buf_len = pkt_buf->pkt_len;

		/* Allocate enough mbufs to receive the entire packet */
		required = (buf_len + guest_mbuf_size - 1) / guest_mbuf_size;
		if (rte_pktmbuf_alloc_bulk(avp->pool, mbufs, required)) {
			rxq->dev_data->rx_mbuf_alloc_failed++;
			break;
		}

		/* Copy the data from the buffers to our mbufs */
		m = avp_dev_copy_from_buffers(avp, buf, mbufs, required);

		/* finalize mbuf */
		m->port = port_id;

		if (_avp_mac_filter(avp, m) != 0) {
			/* silently discard packets not destined to our MAC */
			rte_pktmbuf_free(m);
			continue;
		}

		/* return new mbuf to caller */
		rx_pkts[count++] = m;
		rxq->bytes += buf_len;
	}

	rxq->packets += count;

	/* return the buffers to the free queue */
	avp_fifo_put(free_q, (void **)&avp_bufs[0], n);

	return count;
}

static uint16_t
avp_recv_pkts(void *rx_queue,
	      struct rte_mbuf **rx_pkts,
	      uint16_t nb_pkts)
{
	struct avp_queue *rxq = (struct avp_queue *)rx_queue;
	struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
	struct avp_dev *avp = rxq->avp;
	struct rte_avp_desc *pkt_buf;
	struct rte_avp_fifo *free_q;
	struct rte_avp_fifo *rx_q;
	unsigned int count, avail, n;
	unsigned int pkt_len;
	struct rte_mbuf *m;
	char *pkt_data;
	unsigned int i;

	if (unlikely(avp->flags & AVP_F_DETACHED)) {
		/* VM live migration in progress */
		return 0;
	}

	rx_q = avp->rx_q[rxq->queue_id];
	free_q = avp->free_q[rxq->queue_id];

	/* setup next queue to service */
	rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
		(rxq->queue_id + 1) : rxq->queue_base;

	/* determine how many slots are available in the free queue */
	count = avp_fifo_free_count(free_q);

	/* determine how many packets are available in the rx queue */
	avail = avp_fifo_count(rx_q);

	/* determine how many packets can be received */
	count = RTE_MIN(count, avail);
	count = RTE_MIN(count, nb_pkts);
	count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);

	if (unlikely(count == 0)) {
		/* no free buffers, or no buffers on the rx queue */
		return 0;
	}

	/* retrieve pending packets */
	n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
	PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
		   count, rx_q);

	count = 0;
	for (i = 0; i < n; i++) {
		/* prefetch next entry while processing current one */
		if (i < n - 1) {
			pkt_buf = avp_dev_translate_buffer(avp,
							   avp_bufs[i + 1]);
			rte_prefetch0(pkt_buf);
		}

		/* Adjust host pointers for guest addressing */
		pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);
		pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
		pkt_len = pkt_buf->pkt_len;

		if (unlikely((pkt_len > avp->guest_mbuf_size) ||
			     (pkt_buf->nb_segs > 1))) {
			/*
			 * application should be using the scattered receive
			 * function since it was configured with a scattered
			 * rx mode.
			 */
			rxq->errors++;
			continue;
		}

		/* process each packet to be transmitted */
		m = rte_pktmbuf_alloc(avp->pool);
		if (unlikely(m == NULL)) {
			rxq->dev_data->rx_mbuf_alloc_failed++;
			break;
		}

		/* copy data out of the host buffer to our buffer */
		m->data_off = RTE_PKTMBUF_HEADROOM;
		rte_memcpy(rte_pktmbuf_mtod(m, void *), pkt_data, pkt_len);

		/* initialize the local mbuf */
		rte_pktmbuf_data_len(m) = pkt_len;
		rte_pktmbuf_pkt_len(m) = pkt_len;
		m->port = avp->port_id;

		if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
			m->ol_flags = PKT_RX_VLAN;
			m->vlan_tci = pkt_buf->vlan_tci;
		}

		if (_avp_mac_filter(avp, m) != 0) {
			/* silently discard packets not destined to our MAC */
			rte_pktmbuf_free(m);
			continue;
		}

		/* return new mbuf to caller */
		rx_pkts[count++] = m;
		rxq->bytes += pkt_len;
	}

	rxq->packets += count;

	/* return the buffers to the free queue */
	avp_fifo_put(free_q, (void **)&avp_bufs[0], n);

	return count;
}

/*
 * Copy a chained mbuf to a set of host buffers.  This function assumes that
 * there are sufficient destination buffers to contain the entire source
 * packet.
 */
static inline uint16_t
avp_dev_copy_to_buffers(struct avp_dev *avp,
			struct rte_mbuf *mbuf,
			struct rte_avp_desc **buffers,
			unsigned int count)
{
	struct rte_avp_desc *previous_buf = NULL;
	struct rte_avp_desc *first_buf = NULL;
	struct rte_avp_desc *pkt_buf;
	struct rte_avp_desc *buf;
	size_t total_length;
	struct rte_mbuf *m;
	size_t copy_length;
	size_t src_offset;
	char *pkt_data;
	unsigned int i;

	__rte_mbuf_sanity_check(mbuf, 1);

	m = mbuf;
	src_offset = 0;
	total_length = rte_pktmbuf_pkt_len(m);
	for (i = 0; (i < count) && (m != NULL); i++) {
		/* fill each destination buffer */
		buf = buffers[i];

		if (i < count - 1) {
			/* prefetch next entry while processing this one */
			pkt_buf = avp_dev_translate_buffer(avp, buffers[i + 1]);
			rte_prefetch0(pkt_buf);
		}

		/* Adjust pointers for guest addressing */
		pkt_buf = avp_dev_translate_buffer(avp, buf);
		pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);

		/* setup the buffer chain */
		if (previous_buf != NULL)
			previous_buf->next = buf;
		else
			first_buf = pkt_buf;

		previous_buf = pkt_buf;

		do {
			/*
			 * copy as many source mbuf segments as will fit in the
			 * destination buffer.
			 */
			copy_length = RTE_MIN((avp->host_mbuf_size -
					       pkt_buf->data_len),
					      (rte_pktmbuf_data_len(m) -
					       src_offset));
			rte_memcpy(RTE_PTR_ADD(pkt_data, pkt_buf->data_len),
				   RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),
					       src_offset),
				   copy_length);
			pkt_buf->data_len += copy_length;
			src_offset += copy_length;

			if (likely(src_offset == rte_pktmbuf_data_len(m))) {
				/* need a new source buffer */
				m = m->next;
				src_offset = 0;
			}

			if (unlikely(pkt_buf->data_len ==
				     avp->host_mbuf_size)) {
				/* need a new destination buffer */
				break;
			}

		} while (m != NULL);
	}

	first_buf->nb_segs = count;
	first_buf->pkt_len = total_length;

	if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
		first_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
		first_buf->vlan_tci = mbuf->vlan_tci;
	}

	avp_dev_buffer_sanity_check(avp, buffers[0]);

	return total_length;
}

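/*
 * This is the transmit-side mirror of avp_dev_copy_from_buffers(): the
 * caller (avp_xmit_scattered_pkts() below) sizes 'buffers' with the same
 * ceiling division against avp->host_mbuf_size, so e.g. a 3000-byte mbuf
 * chain with a 2048-byte host buffer needs (3000 + 2047) / 2048 = 2 host
 * buffers.
 */
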
static uint16_t
avp_xmit_scattered_pkts(void *tx_queue,
			struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct rte_avp_desc *avp_bufs[(AVP_MAX_TX_BURST *
				       RTE_AVP_MAX_MBUF_SEGMENTS)];
	struct avp_queue *txq = (struct avp_queue *)tx_queue;
	struct rte_avp_desc *tx_bufs[AVP_MAX_TX_BURST];
	struct avp_dev *avp = txq->avp;
	struct rte_avp_fifo *alloc_q;
	struct rte_avp_fifo *tx_q;
	unsigned int count, avail, n;
	unsigned int orig_nb_pkts;
	struct rte_mbuf *m;
	unsigned int required;
	unsigned int segments;
	unsigned int tx_bytes;
	unsigned int i;

	orig_nb_pkts = nb_pkts;
	if (unlikely(avp->flags & AVP_F_DETACHED)) {
		/* VM live migration in progress */
		/* TODO ... buffer for X packets then drop? */
		txq->errors += nb_pkts;
		return 0;
	}

	tx_q = avp->tx_q[txq->queue_id];
	alloc_q = avp->alloc_q[txq->queue_id];

	/* limit the number of transmitted packets to the max burst size */
	if (unlikely(nb_pkts > AVP_MAX_TX_BURST))
		nb_pkts = AVP_MAX_TX_BURST;

	/* determine how many buffers are available to copy into */
	avail = avp_fifo_count(alloc_q);
	if (unlikely(avail > (AVP_MAX_TX_BURST *
			      RTE_AVP_MAX_MBUF_SEGMENTS)))
		avail = AVP_MAX_TX_BURST * RTE_AVP_MAX_MBUF_SEGMENTS;

	/* determine how many slots are available in the transmit queue */
	count = avp_fifo_free_count(tx_q);

	/* determine how many packets can be sent */
	nb_pkts = RTE_MIN(count, nb_pkts);

	/* determine how many packets will fit in the available buffers */
	count = 0;
	segments = 0;
	for (i = 0; i < nb_pkts; i++) {
		m = tx_pkts[i];
		if (likely(i < (unsigned int)nb_pkts - 1)) {
			/* prefetch next entry while processing this one */
			rte_prefetch0(tx_pkts[i + 1]);
		}
		required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) /
			avp->host_mbuf_size;

		if (unlikely((required == 0) ||
			     (required > RTE_AVP_MAX_MBUF_SEGMENTS)))
			break;
		else if (unlikely(required + segments > avail))
			break;
		segments += required;
		count++;
	}
	nb_pkts = count;

	if (unlikely(nb_pkts == 0)) {
		/* no available buffers, or no space on the tx queue */
		txq->errors += orig_nb_pkts;
		return 0;
	}

	PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
		   nb_pkts, tx_q);

	/* retrieve sufficient send buffers */
	n = avp_fifo_get(alloc_q, (void **)&avp_bufs, segments);
	if (unlikely(n != segments)) {
		PMD_TX_LOG(DEBUG, "Failed to allocate buffers "
			   "n=%u, segments=%u, orig=%u\n",
			   n, segments, orig_nb_pkts);
		txq->errors += orig_nb_pkts;
		return 0;
	}

	tx_bytes = 0;
	count = 0;
	for (i = 0; i < nb_pkts; i++) {
		/* process each packet to be transmitted */
		m = tx_pkts[i];

		/* determine how many buffers are required for this packet */
		required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) /
			avp->host_mbuf_size;

		tx_bytes += avp_dev_copy_to_buffers(avp, m,
						    &avp_bufs[count], required);
		tx_bufs[i] = avp_bufs[count];
		count += required;

		/* free the original mbuf */
		rte_pktmbuf_free(m);
	}

	txq->packets += nb_pkts;
	txq->bytes += tx_bytes;

#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS
	for (i = 0; i < nb_pkts; i++)
		avp_dev_buffer_sanity_check(avp, tx_bufs[i]);
#endif

	/* send the packets */
	n = avp_fifo_put(tx_q, (void **)&tx_bufs[0], nb_pkts);
	if (unlikely(n != orig_nb_pkts))
		txq->errors += (orig_nb_pkts - n);

	return n;
}

static uint16_t
avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct avp_queue *txq = (struct avp_queue *)tx_queue;
	struct rte_avp_desc *avp_bufs[AVP_MAX_TX_BURST];
	struct avp_dev *avp = txq->avp;
	struct rte_avp_desc *pkt_buf;
	struct rte_avp_fifo *alloc_q;
	struct rte_avp_fifo *tx_q;
	unsigned int count, avail, n;
	struct rte_mbuf *m;
	unsigned int pkt_len;
	unsigned int tx_bytes;
	char *pkt_data;
	unsigned int i;

	if (unlikely(avp->flags & AVP_F_DETACHED)) {
		/* VM live migration in progress */
		/* TODO ... buffer for X packets then drop?! */
		txq->errors++;
		return 0;
	}

	tx_q = avp->tx_q[txq->queue_id];
	alloc_q = avp->alloc_q[txq->queue_id];

	/* limit the number of transmitted packets to the max burst size */
	if (unlikely(nb_pkts > AVP_MAX_TX_BURST))
		nb_pkts = AVP_MAX_TX_BURST;

	/* determine how many buffers are available to copy into */
	avail = avp_fifo_count(alloc_q);

	/* determine how many slots are available in the transmit queue */
	count = avp_fifo_free_count(tx_q);

	/* determine how many packets can be sent */
	count = RTE_MIN(count, avail);
	count = RTE_MIN(count, nb_pkts);

	if (unlikely(count == 0)) {
		/* no available buffers, or no space on the tx queue */
		txq->errors += nb_pkts;
		return 0;
	}

	PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
		   count, tx_q);

	/* retrieve sufficient send buffers */
	n = avp_fifo_get(alloc_q, (void **)&avp_bufs, count);
	if (unlikely(n != count)) {
		txq->errors++;
		return 0;
	}

	tx_bytes = 0;
	for (i = 0; i < count; i++) {
		/* prefetch next entry while processing the current one */
		if (i < count - 1) {
			pkt_buf = avp_dev_translate_buffer(avp,
							   avp_bufs[i + 1]);
			rte_prefetch0(pkt_buf);
		}

		/* process each packet to be transmitted */
		m = tx_pkts[i];

		/* Adjust pointers for guest addressing */
		pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);
		pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
		pkt_len = rte_pktmbuf_pkt_len(m);

		if (unlikely((pkt_len > avp->guest_mbuf_size) ||
			     (pkt_len > avp->host_mbuf_size))) {
			/*
			 * application should be using the scattered transmit
			 * function; send it truncated to avoid the performance
			 * hit of having to manage returning the already
			 * allocated buffer to the free list.  This should not
			 * happen since the application should have set the
			 * max_rx_pkt_len based on its MTU and it should be
			 * policing its own packet sizes.
			 */
			txq->errors++;
			pkt_len = RTE_MIN(avp->guest_mbuf_size,
					  avp->host_mbuf_size);
		}

		/* copy data out of our mbuf and into the AVP buffer */
		rte_memcpy(pkt_data, rte_pktmbuf_mtod(m, void *), pkt_len);
		pkt_buf->pkt_len = pkt_len;
		pkt_buf->data_len = pkt_len;
		pkt_buf->nb_segs = 1;
		pkt_buf->next = NULL;

		if (m->ol_flags & PKT_TX_VLAN_PKT) {
			pkt_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
			pkt_buf->vlan_tci = m->vlan_tci;
		}

		tx_bytes += pkt_len;

		/* free the original mbuf */
		rte_pktmbuf_free(m);
	}

	txq->packets += count;
	txq->bytes += tx_bytes;

	/* send the packets */
	n = avp_fifo_put(tx_q, (void **)&avp_bufs[0], count);

	return n;
}

static void
avp_dev_rx_queue_release(void *rx_queue)
{
	struct avp_queue *rxq = (struct avp_queue *)rx_queue;
	struct avp_dev *avp = rxq->avp;
	struct rte_eth_dev_data *data = avp->dev_data;
	unsigned int i;

	for (i = 0; i < avp->num_rx_queues; i++) {
		if (data->rx_queues[i] == rxq)
			data->rx_queues[i] = NULL;
	}
}

static void
avp_dev_tx_queue_release(void *tx_queue)
{
	struct avp_queue *txq = (struct avp_queue *)tx_queue;
	struct avp_dev *avp = txq->avp;
	struct rte_eth_dev_data *data = avp->dev_data;
	unsigned int i;

	for (i = 0; i < avp->num_tx_queues; i++) {
		if (data->tx_queues[i] == txq)
			data->tx_queues[i] = NULL;
	}
}

static int
avp_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_device_info *host_info;
	struct rte_avp_device_config config;
	int mask = 0;
	void *addr;
	int ret;

	rte_spinlock_lock(&avp->lock);
	if (avp->flags & AVP_F_DETACHED) {
		PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
		ret = -ENOTSUP;
		goto unlock;
	}

	addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
	host_info = (struct rte_avp_device_info *)addr;

	/* Setup required number of queues */
	_avp_set_queue_counts(eth_dev);

	mask = (ETH_VLAN_STRIP_MASK |
		ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK);
	ret = avp_vlan_offload_set(eth_dev, mask);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "VLAN offload set failed by host, ret=%d\n",
			    ret);
		goto unlock;
	}

	/* update device config */
	memset(&config, 0, sizeof(config));
	config.device_id = host_info->device_id;
	config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK;
	config.driver_version = AVP_DPDK_DRIVER_VERSION;
	config.features = avp->features;
	config.num_tx_queues = avp->num_tx_queues;
	config.num_rx_queues = avp->num_rx_queues;

	ret = avp_dev_ctrl_set_config(eth_dev, &config);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
			    ret);
		goto unlock;
	}

	avp->flags |= AVP_F_CONFIGURED;
	ret = 0;

unlock:
	rte_spinlock_unlock(&avp->lock);
	return ret;
}

static int
avp_dev_start(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&avp->lock);
	if (avp->flags & AVP_F_DETACHED) {
		PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
		ret = -ENOTSUP;
		goto unlock;
	}

	/* update link state */
	ret = avp_dev_ctrl_set_link_state(eth_dev, 1);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Link state change failed by host, ret=%d\n",
			    ret);
		goto unlock;
	}

	/* remember current link state */
	avp->flags |= AVP_F_LINKUP;

	ret = 0;

unlock:
	rte_spinlock_unlock(&avp->lock);
	return ret;
}

static void
avp_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&avp->lock);
	if (avp->flags & AVP_F_DETACHED) {
		PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
		goto unlock;
	}

	/* remember current link state */
	avp->flags &= ~AVP_F_LINKUP;

	/* update link state */
	ret = avp_dev_ctrl_set_link_state(eth_dev, 0);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Link state change failed by host, ret=%d\n",
			    ret);
	}

unlock:
	rte_spinlock_unlock(&avp->lock);
}

static void
avp_dev_close(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int ret;

	rte_spinlock_lock(&avp->lock);
	if (avp->flags & AVP_F_DETACHED) {
		PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
		goto unlock;
	}

	/* remember current link state */
	avp->flags &= ~AVP_F_LINKUP;
	avp->flags &= ~AVP_F_CONFIGURED;

	ret = avp_dev_disable_interrupts(eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to disable interrupts\n");
		/* continue */
	}

	/* update device state */
	ret = avp_dev_ctrl_shutdown(eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Device shutdown failed by host, ret=%d\n",
			    ret);
		/* continue */
	}

unlock:
	rte_spinlock_unlock(&avp->lock);
}

static int
avp_dev_link_update(struct rte_eth_dev *eth_dev,
		    __rte_unused int wait_to_complete)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_eth_link *link = &eth_dev->data->dev_link;

	link->link_speed = ETH_SPEED_NUM_10G;
	link->link_duplex = ETH_LINK_FULL_DUPLEX;
	link->link_status = !!(avp->flags & AVP_F_LINKUP);

	return -1;
}

static void
avp_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	rte_spinlock_lock(&avp->lock);
	if ((avp->flags & AVP_F_PROMISC) == 0) {
		avp->flags |= AVP_F_PROMISC;
		PMD_DRV_LOG(DEBUG, "Promiscuous mode enabled on %u\n",
			    eth_dev->data->port_id);
	}
	rte_spinlock_unlock(&avp->lock);
}

static void
avp_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	rte_spinlock_lock(&avp->lock);
	if ((avp->flags & AVP_F_PROMISC) != 0) {
		avp->flags &= ~AVP_F_PROMISC;
		PMD_DRV_LOG(DEBUG, "Promiscuous mode disabled on %u\n",
			    eth_dev->data->port_id);
	}
	rte_spinlock_unlock(&avp->lock);
}

static void
avp_dev_info_get(struct rte_eth_dev *eth_dev,
		 struct rte_eth_dev_info *dev_info)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	dev_info->max_rx_queues = avp->max_rx_queues;
	dev_info->max_tx_queues = avp->max_tx_queues;
	dev_info->min_rx_bufsize = AVP_MIN_RX_BUFSIZE;
	dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
	dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
	if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
		dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
	}
}

static int
avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_eth_conf *dev_conf = &eth_dev->data->dev_conf;
	uint64_t offloads = dev_conf->rxmode.offloads;

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
			if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
				avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
			else
				avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
		} else {
			PMD_DRV_LOG(ERR, "VLAN strip offload not supported\n");
		}
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
			PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
	}

	return 0;
}

static int
avp_dev_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	unsigned int i;

	for (i = 0; i < avp->num_rx_queues; i++) {
		struct avp_queue *rxq = avp->dev_data->rx_queues[i];

		if (rxq) {
			stats->ipackets += rxq->packets;
			stats->ibytes += rxq->bytes;
			stats->ierrors += rxq->errors;

			if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
				stats->q_ipackets[i] += rxq->packets;
				stats->q_ibytes[i] += rxq->bytes;
				stats->q_errors[i] += rxq->errors;
			}
		}
	}

	for (i = 0; i < avp->num_tx_queues; i++) {
		struct avp_queue *txq = avp->dev_data->tx_queues[i];

		if (txq) {
			stats->opackets += txq->packets;
			stats->obytes += txq->bytes;
			stats->oerrors += txq->errors;

			if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
				stats->q_opackets[i] += txq->packets;
				stats->q_obytes[i] += txq->bytes;
				stats->q_errors[i] += txq->errors;
			}
		}
	}

	return 0;
}

static void
avp_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	unsigned int i;

	for (i = 0; i < avp->num_rx_queues; i++) {
		struct avp_queue *rxq = avp->dev_data->rx_queues[i];

		if (rxq) {
			rxq->bytes = 0;
			rxq->packets = 0;
			rxq->errors = 0;
		}
	}

	for (i = 0; i < avp->num_tx_queues; i++) {
		struct avp_queue *txq = avp->dev_data->tx_queues[i];

		if (txq) {
			txq->bytes = 0;
			txq->packets = 0;
			txq->errors = 0;
		}
	}
}

RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map);

RTE_INIT(avp_init_log)
{
	avp_logtype_driver = rte_log_register("pmd.net.avp.driver");
	if (avp_logtype_driver >= 0)
		rte_log_set_level(avp_logtype_driver, RTE_LOG_NOTICE);
}