/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2013-2017 Wind River Systems, Inc.
 */

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>

#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_spinlock.h>
#include <rte_byteorder.h>
#include <rte_dev.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_io.h>

#include "rte_avp_common.h"
#include "rte_avp_fifo.h"

#include "avp_logs.h"

int avp_logtype_driver;

static int avp_dev_create(struct rte_pci_device *pci_dev,
			  struct rte_eth_dev *eth_dev);

static int avp_dev_configure(struct rte_eth_dev *dev);
static int avp_dev_start(struct rte_eth_dev *dev);
static void avp_dev_stop(struct rte_eth_dev *dev);
static void avp_dev_close(struct rte_eth_dev *dev);
static void avp_dev_info_get(struct rte_eth_dev *dev,
			     struct rte_eth_dev_info *dev_info);
static int avp_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int avp_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static void avp_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void avp_dev_promiscuous_disable(struct rte_eth_dev *dev);

static int avp_dev_rx_queue_setup(struct rte_eth_dev *dev,
				  uint16_t rx_queue_id,
				  uint16_t nb_rx_desc,
				  unsigned int socket_id,
				  const struct rte_eth_rxconf *rx_conf,
				  struct rte_mempool *pool);

static int avp_dev_tx_queue_setup(struct rte_eth_dev *dev,
				  uint16_t tx_queue_id,
				  uint16_t nb_tx_desc,
				  unsigned int socket_id,
				  const struct rte_eth_txconf *tx_conf);

static uint16_t avp_recv_scattered_pkts(void *rx_queue,
					struct rte_mbuf **rx_pkts,
					uint16_t nb_pkts);

static uint16_t avp_recv_pkts(void *rx_queue,
			      struct rte_mbuf **rx_pkts,
			      uint16_t nb_pkts);

static uint16_t avp_xmit_scattered_pkts(void *tx_queue,
					struct rte_mbuf **tx_pkts,
					uint16_t nb_pkts);

static uint16_t avp_xmit_pkts(void *tx_queue,
			      struct rte_mbuf **tx_pkts,
			      uint16_t nb_pkts);

static void avp_dev_rx_queue_release(void *rxq);
static void avp_dev_tx_queue_release(void *txq);

static int avp_dev_stats_get(struct rte_eth_dev *dev,
			     struct rte_eth_stats *stats);
static void avp_dev_stats_reset(struct rte_eth_dev *dev);


#define AVP_MAX_RX_BURST 64
#define AVP_MAX_TX_BURST 64
#define AVP_MAX_MAC_ADDRS 1
#define AVP_MIN_RX_BUFSIZE ETHER_MIN_LEN


/*
 * Defines the number of microseconds to wait before checking the response
 * queue for completion.
 */
#define AVP_REQUEST_DELAY_USECS (5000)

/*
 * Defines the number of times to check the response queue for completion
 * before declaring a timeout.
 */
#define AVP_MAX_REQUEST_RETRY (100)

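/*
 * Worked example of the resulting worst-case control-plane wait
 * (illustrative arithmetic only, derived from the two constants above):
 * a request is polled once per AVP_REQUEST_DELAY_USECS and abandoned
 * after AVP_MAX_REQUEST_RETRY polls, so a host that never answers is
 * detected after roughly 100 * 5000 usecs = 500000 usecs (500 ms).
 */
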
/* Defines the current PCI driver version number */
#define AVP_DPDK_DRIVER_VERSION RTE_AVP_CURRENT_GUEST_VERSION

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_avp_map[] = {
	{ .vendor_id = RTE_AVP_PCI_VENDOR_ID,
	  .device_id = RTE_AVP_PCI_DEVICE_ID,
	  .subsystem_vendor_id = RTE_AVP_PCI_SUB_VENDOR_ID,
	  .subsystem_device_id = RTE_AVP_PCI_SUB_DEVICE_ID,
	  .class_id = RTE_CLASS_ANY_ID,
	},

	{ .vendor_id = 0, /* sentinel */
	},
};

/*
 * dev_ops for avp, bare necessities for basic operation
 */
static const struct eth_dev_ops avp_eth_dev_ops = {
	.dev_configure = avp_dev_configure,
	.dev_start = avp_dev_start,
	.dev_stop = avp_dev_stop,
	.dev_close = avp_dev_close,
	.dev_infos_get = avp_dev_info_get,
	.vlan_offload_set = avp_vlan_offload_set,
	.stats_get = avp_dev_stats_get,
	.stats_reset = avp_dev_stats_reset,
	.link_update = avp_dev_link_update,
	.promiscuous_enable = avp_dev_promiscuous_enable,
	.promiscuous_disable = avp_dev_promiscuous_disable,
	.rx_queue_setup = avp_dev_rx_queue_setup,
	.rx_queue_release = avp_dev_rx_queue_release,
	.tx_queue_setup = avp_dev_tx_queue_setup,
	.tx_queue_release = avp_dev_tx_queue_release,
};

/**@{ AVP device flags */
#define AVP_F_PROMISC (1 << 1)
#define AVP_F_CONFIGURED (1 << 2)
#define AVP_F_LINKUP (1 << 3)
#define AVP_F_DETACHED (1 << 4)
/**@} */

/* Ethernet device validation marker */
#define AVP_ETHDEV_MAGIC 0x92972862

/*
 * Defines the AVP device attributes which are attached to an RTE ethernet
 * device
 */
struct avp_dev {
	uint32_t magic; /**< Memory validation marker */
	uint64_t device_id; /**< Unique system identifier */
	struct ether_addr ethaddr; /**< Host specified MAC address */
	struct rte_eth_dev_data *dev_data;
	/**< Back pointer to ethernet device data */
	volatile uint32_t flags; /**< Device operational flags */
	uint16_t port_id; /**< Ethernet port identifier */
	struct rte_mempool *pool; /**< pkt mbuf mempool */
	unsigned int guest_mbuf_size; /**< local pool mbuf size */
	unsigned int host_mbuf_size; /**< host mbuf size */
	unsigned int max_rx_pkt_len; /**< maximum receive unit */
	uint32_t host_features; /**< Supported feature bitmap */
	uint32_t features; /**< Enabled feature bitmap */
	unsigned int num_tx_queues; /**< Negotiated number of transmit queues */
	unsigned int max_tx_queues; /**< Maximum number of transmit queues */
	unsigned int num_rx_queues; /**< Negotiated number of receive queues */
	unsigned int max_rx_queues; /**< Maximum number of receive queues */

	struct rte_avp_fifo *tx_q[RTE_AVP_MAX_QUEUES]; /**< TX queue */
	struct rte_avp_fifo *rx_q[RTE_AVP_MAX_QUEUES]; /**< RX queue */
	struct rte_avp_fifo *alloc_q[RTE_AVP_MAX_QUEUES];
	/**< Allocated mbufs queue */
	struct rte_avp_fifo *free_q[RTE_AVP_MAX_QUEUES];
	/**< To be freed mbufs queue */

	/* mutual exclusion over the 'flags' and 'resp_q/req_q' fields */
	rte_spinlock_t lock;

	/* For request & response */
	struct rte_avp_fifo *req_q; /**< Request queue */
	struct rte_avp_fifo *resp_q; /**< Response queue */
	void *host_sync_addr; /**< (host) Req/Resp Mem address */
	void *sync_addr; /**< Req/Resp Mem address */
	void *host_mbuf_addr; /**< (host) MBUF pool start address */
	void *mbuf_addr; /**< MBUF pool start address */
} __rte_cache_aligned;

/* RTE ethernet private data */
struct avp_adapter {
	struct avp_dev avp;
} __rte_cache_aligned;


/* 32-bit MMIO register write */
#define AVP_WRITE32(_value, _addr) rte_write32_relaxed((_value), (_addr))

/* 32-bit MMIO register read */
#define AVP_READ32(_addr) rte_read32_relaxed((_addr))

/* Macro to cast the ethernet device private data to an AVP object */
#define AVP_DEV_PRIVATE_TO_HW(adapter) \
	(&((struct avp_adapter *)adapter)->avp)

/*
 * Defines the structure of an AVP device queue for the purpose of handling
 * the receive and transmit burst callback functions
 */
struct avp_queue {
	struct rte_eth_dev_data *dev_data;
	/**< Backpointer to ethernet device data */
	struct avp_dev *avp; /**< Backpointer to AVP device */
	uint16_t queue_id;
	/**< Queue identifier used for indexing current queue */
	uint16_t queue_base;
	/**< Base queue identifier for queue servicing */
	uint16_t queue_limit;
	/**< Maximum queue identifier for queue servicing */

	uint64_t packets;
	uint64_t bytes;
	uint64_t errors;
};

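/*
 * Illustrative note (not driver code): queue_base and queue_limit bound
 * the set of AVP fifos serviced by one device queue, and queue_id is the
 * fifo polled on the next burst.  For example, with queue_base = 0 and
 * queue_limit = 2 the receive path visits fifos 0, 1, 2, 0, 1, 2, ...
 * advancing one fifo per burst call (see the queue_id update in
 * avp_recv_pkts() below).
 */
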
/* send a request and wait for a response
 *
 * @warning must be called while holding the avp->lock spinlock.
 */
static int
avp_dev_process_request(struct avp_dev *avp, struct rte_avp_request *request)
{
	unsigned int retry = AVP_MAX_REQUEST_RETRY;
	void *resp_addr = NULL;
	unsigned int count;
	int ret;

	PMD_DRV_LOG(DEBUG, "Sending request %u to host\n", request->req_id);

	request->result = -ENOTSUP;

	/* Discard any stale responses before starting a new request */
	while (avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1))
		PMD_DRV_LOG(DEBUG, "Discarding stale response\n");

	rte_memcpy(avp->sync_addr, request, sizeof(*request));
	count = avp_fifo_put(avp->req_q, &avp->host_sync_addr, 1);
	if (count < 1) {
		PMD_DRV_LOG(ERR, "Cannot send request %u to host\n",
			    request->req_id);
		ret = -EBUSY;
		goto done;
	}

	while (retry--) {
		/* wait for a response */
		usleep(AVP_REQUEST_DELAY_USECS);

		count = avp_fifo_count(avp->resp_q);
		if (count >= 1) {
			/* response received */
			break;
		}

		if ((count < 1) && (retry == 0)) {
			PMD_DRV_LOG(ERR, "Timeout while waiting for a response for %u\n",
				    request->req_id);
			ret = -ETIME;
			goto done;
		}
	}

	/* retrieve the response */
	count = avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1);
	if ((count != 1) || (resp_addr != avp->host_sync_addr)) {
		PMD_DRV_LOG(ERR, "Invalid response from host, count=%u resp=%p host_sync_addr=%p\n",
			    count, resp_addr, avp->host_sync_addr);
		ret = -ENODATA;
		goto done;
	}

	/* copy to user buffer */
	rte_memcpy(request, avp->sync_addr, sizeof(*request));
	ret = 0;

	PMD_DRV_LOG(DEBUG, "Result %d received for request %u\n",
		    request->result, request->req_id);

done:
	return ret;
}

static int
avp_dev_ctrl_set_link_state(struct rte_eth_dev *eth_dev, unsigned int state)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_request request;
	int ret;

	/* setup a link state change request */
	memset(&request, 0, sizeof(request));
	request.req_id = RTE_AVP_REQ_CFG_NETWORK_IF;
	request.if_up = state;

	ret = avp_dev_process_request(avp, &request);

	return ret == 0 ? request.result : ret;
}

static int
avp_dev_ctrl_set_config(struct rte_eth_dev *eth_dev,
			struct rte_avp_device_config *config)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_request request;
	int ret;

	/* setup a configure request */
	memset(&request, 0, sizeof(request));
	request.req_id = RTE_AVP_REQ_CFG_DEVICE;
	memcpy(&request.config, config, sizeof(request.config));

	ret = avp_dev_process_request(avp, &request);

	return ret == 0 ? request.result : ret;
}

static int
avp_dev_ctrl_shutdown(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_request request;
	int ret;

	/* setup a shutdown request */
	memset(&request, 0, sizeof(request));
	request.req_id = RTE_AVP_REQ_SHUTDOWN_DEVICE;

	ret = avp_dev_process_request(avp, &request);

	return ret == 0 ? request.result : ret;
}

/* translate from host mbuf virtual address to guest virtual address */
static inline void *
avp_dev_translate_buffer(struct avp_dev *avp, void *host_mbuf_address)
{
	return RTE_PTR_ADD(RTE_PTR_SUB(host_mbuf_address,
				       (uintptr_t)avp->host_mbuf_addr),
			   (uintptr_t)avp->mbuf_addr);
}

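/*
 * Worked example for the translation above (illustrative addresses only):
 * if the host mapped its mbuf pool at 0x7f0000000000 and the same pool is
 * mapped into this guest at 0x400000000, then a host buffer pointer
 * 0x7f0000001000 translates to
 * 0x400000000 + (0x7f0000001000 - 0x7f0000000000) = 0x400001000.
 */
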
/* translate from host physical address to guest virtual address */
static void *
avp_dev_translate_address(struct rte_eth_dev *eth_dev,
			  rte_iova_t host_phys_addr)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_mem_resource *resource;
	struct rte_avp_memmap_info *info;
	struct rte_avp_memmap *map;
	off_t offset;
	void *addr;
	unsigned int i;

	addr = pci_dev->mem_resource[RTE_AVP_PCI_MEMORY_BAR].addr;
	resource = &pci_dev->mem_resource[RTE_AVP_PCI_MEMMAP_BAR];
	info = (struct rte_avp_memmap_info *)resource->addr;

	offset = 0;
	for (i = 0; i < info->nb_maps; i++) {
		/* search all segments looking for a matching address */
		map = &info->maps[i];

		if ((host_phys_addr >= map->phys_addr) &&
		    (host_phys_addr < (map->phys_addr + map->length))) {
			/* address is within this segment */
			offset += (host_phys_addr - map->phys_addr);
			addr = RTE_PTR_ADD(addr, (uintptr_t)offset);

			PMD_DRV_LOG(DEBUG, "Translating host physical 0x%" PRIx64 " to guest virtual 0x%p\n",
				    host_phys_addr, addr);

			return addr;
		}
		offset += map->length;
	}

	return NULL;
}

/* verify that the incoming device version is compatible with our version */
static int
avp_dev_version_check(uint32_t version)
{
	uint32_t driver = RTE_AVP_STRIP_MINOR_VERSION(AVP_DPDK_DRIVER_VERSION);
	uint32_t device = RTE_AVP_STRIP_MINOR_VERSION(version);

	if (device <= driver) {
		/* the host driver version is less than or equal to ours */
		return 0;
	}

	return 1;
}

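/*
 * Illustrative example (assuming RTE_AVP_STRIP_MINOR_VERSION() masks off
 * the minor version field, as its name suggests): a host reporting v2.1
 * checks as compatible with a v2.5 driver because both strip to 2, while
 * a host reporting v3.0 strips to 3 > 2 and is rejected.
 */
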
/* verify that memory regions have expected version and validation markers */
static int
avp_dev_check_regions(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_avp_memmap_info *memmap;
	struct rte_avp_device_info *info;
	struct rte_mem_resource *resource;
	unsigned int i;

	/* Dump resource info for debug */
	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
		resource = &pci_dev->mem_resource[i];
		if ((resource->phys_addr == 0) || (resource->len == 0))
			continue;

		PMD_DRV_LOG(DEBUG, "resource[%u]: phys=0x%" PRIx64 " len=%" PRIu64 " addr=%p\n",
			    i, resource->phys_addr,
			    resource->len, resource->addr);

		switch (i) {
		case RTE_AVP_PCI_MEMMAP_BAR:
			memmap = (struct rte_avp_memmap_info *)resource->addr;
			if ((memmap->magic != RTE_AVP_MEMMAP_MAGIC) ||
			    (memmap->version != RTE_AVP_MEMMAP_VERSION)) {
				PMD_DRV_LOG(ERR, "Invalid memmap magic 0x%08x and version %u\n",
					    memmap->magic, memmap->version);
				return -EINVAL;
			}
			break;

		case RTE_AVP_PCI_DEVICE_BAR:
			info = (struct rte_avp_device_info *)resource->addr;
			if ((info->magic != RTE_AVP_DEVICE_MAGIC) ||
			    avp_dev_version_check(info->version)) {
				PMD_DRV_LOG(ERR, "Invalid device info magic 0x%08x or version 0x%08x > 0x%08x\n",
					    info->magic, info->version,
					    AVP_DPDK_DRIVER_VERSION);
				return -EINVAL;
			}
			break;

		case RTE_AVP_PCI_MEMORY_BAR:
		case RTE_AVP_PCI_MMIO_BAR:
			if (resource->addr == NULL) {
				PMD_DRV_LOG(ERR, "Missing address space for BAR%u\n",
					    i);
				return -EINVAL;
			}
			break;

		case RTE_AVP_PCI_MSIX_BAR:
		default:
			/* no validation required */
			break;
		}
	}

	return 0;
}

static int
avp_dev_detach(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	int ret;

	PMD_DRV_LOG(NOTICE, "Detaching port %u from AVP device 0x%" PRIx64 "\n",
		    eth_dev->data->port_id, avp->device_id);

	rte_spinlock_lock(&avp->lock);

	if (avp->flags & AVP_F_DETACHED) {
		PMD_DRV_LOG(NOTICE, "port %u already detached\n",
			    eth_dev->data->port_id);
		ret = 0;
		goto unlock;
	}

	/* shutdown the device first so the host stops sending us packets. */
	ret = avp_dev_ctrl_shutdown(eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to send/recv shutdown to host, ret=%d\n",
			    ret);
		avp->flags &= ~AVP_F_DETACHED;
		goto unlock;
	}

	avp->flags |= AVP_F_DETACHED;
	rte_wmb();

	/* wait for queues to acknowledge the presence of the detach flag */
	rte_delay_ms(1);

	ret = 0;

unlock:
	rte_spinlock_unlock(&avp->lock);
	return ret;
}

static void
_avp_set_rx_queue_mappings(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct avp_dev *avp =
		AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct avp_queue *rxq;
	uint16_t queue_count;
	uint16_t remainder;

	rxq = (struct avp_queue *)eth_dev->data->rx_queues[rx_queue_id];

	/*
	 * Must map all AVP fifos as evenly as possible between the configured
	 * device queues.  Each device queue will service a subset of the AVP
	 * fifos.  If the fifos do not divide evenly among the device queues
	 * then the first device queues will each service one extra AVP fifo.
	 */
	queue_count = avp->num_rx_queues / eth_dev->data->nb_rx_queues;
	remainder = avp->num_rx_queues % eth_dev->data->nb_rx_queues;
	if (rx_queue_id < remainder) {
		/* these queues must service one extra FIFO */
		rxq->queue_base = rx_queue_id * (queue_count + 1);
		rxq->queue_limit = rxq->queue_base + (queue_count + 1) - 1;
	} else {
		/* these queues service the regular number of FIFOs */
		rxq->queue_base = ((remainder * (queue_count + 1)) +
				   ((rx_queue_id - remainder) * queue_count));
		rxq->queue_limit = rxq->queue_base + queue_count - 1;
	}

	PMD_DRV_LOG(DEBUG, "rxq %u at %p base %u limit %u\n",
		    rx_queue_id, rxq, rxq->queue_base, rxq->queue_limit);

	rxq->queue_id = rxq->queue_base;
}

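/*
 * Worked example for the mapping above (illustrative values only): with
 * avp->num_rx_queues = 5 host fifos and nb_rx_queues = 2 device queues,
 * queue_count = 2 and remainder = 1, so device queue 0 services fifos
 * 0..2 (base 0, limit 2) and device queue 1 services fifos 3..4
 * (base 3, limit 4).
 */
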
static void
_avp_set_queue_counts(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_device_info *host_info;
	void *addr;

	addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
	host_info = (struct rte_avp_device_info *)addr;

	/*
	 * the transmit direction is not negotiated beyond respecting the max
	 * number of queues because the host can handle arbitrary guest tx
	 * queues (host rx queues).
	 */
	avp->num_tx_queues = eth_dev->data->nb_tx_queues;

	/*
	 * the receive direction is more restrictive.  The host requires a
	 * minimum number of guest rx queues (host tx queues) therefore
	 * negotiate a value that is at least as large as the host minimum
	 * requirement.  If the host and guest values are not identical then a
	 * mapping will be established in the receive_queue_setup function.
	 */
	avp->num_rx_queues = RTE_MAX(host_info->min_rx_queues,
				     eth_dev->data->nb_rx_queues);

	PMD_DRV_LOG(DEBUG, "Requesting %u Tx and %u Rx queues from host\n",
		    avp->num_tx_queues, avp->num_rx_queues);
}

static int
avp_dev_attach(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_device_config config;
	unsigned int i;
	int ret;

	PMD_DRV_LOG(NOTICE, "Attaching port %u to AVP device 0x%" PRIx64 "\n",
		    eth_dev->data->port_id, avp->device_id);

	rte_spinlock_lock(&avp->lock);

	if (!(avp->flags & AVP_F_DETACHED)) {
		PMD_DRV_LOG(NOTICE, "port %u already attached\n",
			    eth_dev->data->port_id);
		ret = 0;
		goto unlock;
	}

	/*
	 * make sure that the detached flag is set prior to reconfiguring the
	 * queues.
	 */
	avp->flags |= AVP_F_DETACHED;
	rte_wmb();

	/*
	 * re-run the device create utility which will parse the new host info
	 * and setup the AVP device queue pointers.
	 */
	ret = avp_dev_create(RTE_ETH_DEV_TO_PCI(eth_dev), eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to re-create AVP device, ret=%d\n",
			    ret);
		goto unlock;
	}

	if (avp->flags & AVP_F_CONFIGURED) {
		/*
		 * Update the receive queue mapping to handle cases where the
		 * source and destination hosts have different queue
		 * requirements.  As long as the DETACHED flag is asserted the
		 * queue table should not be referenced so it should be safe to
		 * update it.
		 */
		_avp_set_queue_counts(eth_dev);
		for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
			_avp_set_rx_queue_mappings(eth_dev, i);

		/*
		 * Update the host with our config details so that it knows the
		 * device is active.
		 */
		memset(&config, 0, sizeof(config));
		config.device_id = avp->device_id;
		config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK;
		config.driver_version = AVP_DPDK_DRIVER_VERSION;
		config.features = avp->features;
		config.num_tx_queues = avp->num_tx_queues;
		config.num_rx_queues = avp->num_rx_queues;
		config.if_up = !!(avp->flags & AVP_F_LINKUP);

		ret = avp_dev_ctrl_set_config(eth_dev, &config);
		if (ret < 0) {
			PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
				    ret);
			goto unlock;
		}
	}

	rte_wmb();
	avp->flags &= ~AVP_F_DETACHED;

	ret = 0;

unlock:
	rte_spinlock_unlock(&avp->lock);
	return ret;
}

static void
avp_dev_interrupt_handler(void *data)
{
	struct rte_eth_dev *eth_dev = data;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
	uint32_t status, value;
	int ret;

	if (registers == NULL)
		rte_panic("no mapped MMIO register space\n");

	/* read the interrupt status register
	 * note: this register clears on read so all raised interrupts must be
	 * handled or remembered for later processing
	 */
	status = AVP_READ32(
		RTE_PTR_ADD(registers,
			    RTE_AVP_INTERRUPT_STATUS_OFFSET));

	if (status & RTE_AVP_MIGRATION_INTERRUPT_MASK) {
		/* handle interrupt based on current status */
		value = AVP_READ32(
			RTE_PTR_ADD(registers,
				    RTE_AVP_MIGRATION_STATUS_OFFSET));
		switch (value) {
		case RTE_AVP_MIGRATION_DETACHED:
			ret = avp_dev_detach(eth_dev);
			break;
		case RTE_AVP_MIGRATION_ATTACHED:
			ret = avp_dev_attach(eth_dev);
			break;
		default:
			PMD_DRV_LOG(ERR, "unexpected migration status, status=%u\n",
				    value);
			ret = -EINVAL;
		}

		/* acknowledge the request by writing out our current status */
		value = (ret == 0 ? value : RTE_AVP_MIGRATION_ERROR);
		AVP_WRITE32(value,
			    RTE_PTR_ADD(registers,
					RTE_AVP_MIGRATION_ACK_OFFSET));

		PMD_DRV_LOG(NOTICE, "AVP migration interrupt handled\n");
	}

	if (status & ~RTE_AVP_MIGRATION_INTERRUPT_MASK)
		PMD_DRV_LOG(WARNING, "AVP unexpected interrupt, status=0x%08x\n",
			    status);

	/* re-enable UIO interrupt handling */
	ret = rte_intr_enable(&pci_dev->intr_handle);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to re-enable UIO interrupts, ret=%d\n",
			    ret);
		/* continue */
	}
}

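/*
 * Summary of the live-migration handshake implemented above (derived
 * directly from the handler):
 *
 *   1. the host raises a migration interrupt in the status register
 *   2. the guest reads RTE_AVP_MIGRATION_STATUS_OFFSET to learn the
 *      requested direction
 *   3. the guest runs avp_dev_detach() or avp_dev_attach() accordingly
 *   4. the guest acknowledges by writing the handled status (or
 *      RTE_AVP_MIGRATION_ERROR on failure) to RTE_AVP_MIGRATION_ACK_OFFSET
 */
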
static int
avp_dev_enable_interrupts(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
	int ret;

	if (registers == NULL)
		return -EINVAL;

	/* enable UIO interrupt handling */
	ret = rte_intr_enable(&pci_dev->intr_handle);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to enable UIO interrupts, ret=%d\n",
			    ret);
		return ret;
	}

	/* inform the device that all interrupts are enabled */
	AVP_WRITE32(RTE_AVP_APP_INTERRUPTS_MASK,
		    RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET));

	return 0;
}

static int
avp_dev_disable_interrupts(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
	int ret;

	if (registers == NULL)
		return 0;

	/* inform the device that all interrupts are disabled */
	AVP_WRITE32(RTE_AVP_NO_INTERRUPTS_MASK,
		    RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET));

	/* disable UIO interrupt handling */
	ret = rte_intr_disable(&pci_dev->intr_handle);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to disable UIO interrupts, ret=%d\n",
			    ret);
		return ret;
	}

	return 0;
}

static int
avp_dev_setup_interrupts(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	int ret;

	/* register a callback handler with UIO for interrupt notifications */
	ret = rte_intr_callback_register(&pci_dev->intr_handle,
					 avp_dev_interrupt_handler,
					 (void *)eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to register UIO interrupt callback, ret=%d\n",
			    ret);
		return ret;
	}

	/* enable interrupt processing */
	return avp_dev_enable_interrupts(eth_dev);
}

static int
avp_dev_migration_pending(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
	uint32_t value;

	if (registers == NULL)
		return 0;

	value = AVP_READ32(RTE_PTR_ADD(registers,
				       RTE_AVP_MIGRATION_STATUS_OFFSET));
	if (value == RTE_AVP_MIGRATION_DETACHED) {
		/* migration is in progress; ack it if we have not already */
		AVP_WRITE32(value,
			    RTE_PTR_ADD(registers,
					RTE_AVP_MIGRATION_ACK_OFFSET));
		return 1;
	}
	return 0;
}

/*
 * create an AVP device using the supplied device info by first translating
 * it to guest address space(s).
 */
static int
avp_dev_create(struct rte_pci_device *pci_dev,
	       struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_avp_device_info *host_info;
	struct rte_mem_resource *resource;
	unsigned int i;

	resource = &pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR];
	if (resource->addr == NULL) {
		PMD_DRV_LOG(ERR, "BAR%u is not mapped\n",
			    RTE_AVP_PCI_DEVICE_BAR);
		return -EFAULT;
	}
	host_info = (struct rte_avp_device_info *)resource->addr;

	if ((host_info->magic != RTE_AVP_DEVICE_MAGIC) ||
	    avp_dev_version_check(host_info->version)) {
		PMD_DRV_LOG(ERR, "Invalid AVP PCI device, magic 0x%08x version 0x%08x > 0x%08x\n",
			    host_info->magic, host_info->version,
			    AVP_DPDK_DRIVER_VERSION);
		return -EINVAL;
	}

	PMD_DRV_LOG(DEBUG, "AVP host device is v%u.%u.%u\n",
		    RTE_AVP_GET_RELEASE_VERSION(host_info->version),
		    RTE_AVP_GET_MAJOR_VERSION(host_info->version),
		    RTE_AVP_GET_MINOR_VERSION(host_info->version));

	PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u TX queue(s)\n",
		    host_info->min_tx_queues, host_info->max_tx_queues);
	PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u RX queue(s)\n",
		    host_info->min_rx_queues, host_info->max_rx_queues);
	PMD_DRV_LOG(DEBUG, "AVP host supports features 0x%08x\n",
		    host_info->features);

	if (avp->magic != AVP_ETHDEV_MAGIC) {
		/*
		 * First time initialization (i.e., not during a VM
		 * migration)
		 */
		memset(avp, 0, sizeof(*avp));
		avp->magic = AVP_ETHDEV_MAGIC;
		avp->dev_data = eth_dev->data;
		avp->port_id = eth_dev->data->port_id;
		avp->host_mbuf_size = host_info->mbuf_size;
		avp->host_features = host_info->features;
		rte_spinlock_init(&avp->lock);
		memcpy(&avp->ethaddr.addr_bytes[0],
		       host_info->ethaddr, ETHER_ADDR_LEN);
		/* adjust max values to not exceed our max */
		avp->max_tx_queues =
			RTE_MIN(host_info->max_tx_queues, RTE_AVP_MAX_QUEUES);
		avp->max_rx_queues =
			RTE_MIN(host_info->max_rx_queues, RTE_AVP_MAX_QUEUES);
	} else {
		/* Re-attaching during migration */

		/* TODO... requires validation of host values */
		if ((host_info->features & avp->features) != avp->features) {
			PMD_DRV_LOG(ERR, "AVP host features mismatched; 0x%08x, host=0x%08x\n",
				    avp->features, host_info->features);
			/* this should not be possible; continue for now */
		}
	}

	/* the device id is allowed to change over migrations */
	avp->device_id = host_info->device_id;

	/* translate incoming host addresses to guest address space */
	PMD_DRV_LOG(DEBUG, "AVP first host tx queue at 0x%" PRIx64 "\n",
		    host_info->tx_phys);
	PMD_DRV_LOG(DEBUG, "AVP first host alloc queue at 0x%" PRIx64 "\n",
		    host_info->alloc_phys);
	for (i = 0; i < avp->max_tx_queues; i++) {
		avp->tx_q[i] = avp_dev_translate_address(eth_dev,
			host_info->tx_phys + (i * host_info->tx_size));

		avp->alloc_q[i] = avp_dev_translate_address(eth_dev,
			host_info->alloc_phys + (i * host_info->alloc_size));
	}

	PMD_DRV_LOG(DEBUG, "AVP first host rx queue at 0x%" PRIx64 "\n",
		    host_info->rx_phys);
	PMD_DRV_LOG(DEBUG, "AVP first host free queue at 0x%" PRIx64 "\n",
		    host_info->free_phys);
	for (i = 0; i < avp->max_rx_queues; i++) {
		avp->rx_q[i] = avp_dev_translate_address(eth_dev,
			host_info->rx_phys + (i * host_info->rx_size));
		avp->free_q[i] = avp_dev_translate_address(eth_dev,
			host_info->free_phys + (i * host_info->free_size));
	}

	PMD_DRV_LOG(DEBUG, "AVP host request queue at 0x%" PRIx64 "\n",
		    host_info->req_phys);
	PMD_DRV_LOG(DEBUG, "AVP host response queue at 0x%" PRIx64 "\n",
		    host_info->resp_phys);
	PMD_DRV_LOG(DEBUG, "AVP host sync address at 0x%" PRIx64 "\n",
		    host_info->sync_phys);
	PMD_DRV_LOG(DEBUG, "AVP host mbuf address at 0x%" PRIx64 "\n",
		    host_info->mbuf_phys);
	avp->req_q = avp_dev_translate_address(eth_dev, host_info->req_phys);
	avp->resp_q = avp_dev_translate_address(eth_dev, host_info->resp_phys);
	avp->sync_addr =
		avp_dev_translate_address(eth_dev, host_info->sync_phys);
	avp->mbuf_addr =
		avp_dev_translate_address(eth_dev, host_info->mbuf_phys);

	/*
	 * store the host mbuf virtual address so that we can calculate
	 * relative offsets for each mbuf as they are processed
	 */
	avp->host_mbuf_addr = host_info->mbuf_va;
	avp->host_sync_addr = host_info->sync_va;

	/*
	 * store the maximum packet length that is supported by the host.
	 */
	avp->max_rx_pkt_len = host_info->max_rx_pkt_len;
	PMD_DRV_LOG(DEBUG, "AVP host max receive packet length is %u\n",
		    host_info->max_rx_pkt_len);

	return 0;
}

/*
 * This function is based on the probe() function in avp_pci.c.
 * It returns 0 on success.
 */
static int
eth_avp_dev_init(struct rte_eth_dev *eth_dev)
{
	struct avp_dev *avp =
		AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_pci_device *pci_dev;
	int ret;

	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	eth_dev->dev_ops = &avp_eth_dev_ops;
	eth_dev->rx_pkt_burst = &avp_recv_pkts;
	eth_dev->tx_pkt_burst = &avp_xmit_pkts;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/*
		 * no setup required on secondary processes.  All data is
		 * saved in dev_private by the primary process.  All resources
		 * should be mapped to the same virtual address so all
		 * pointers should be valid.
		 */
		if (eth_dev->data->scattered_rx) {
			PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
			eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
			eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
		}
		return 0;
	}

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Check current migration status */
	if (avp_dev_migration_pending(eth_dev)) {
		PMD_DRV_LOG(ERR, "VM live migration operation in progress\n");
		return -EBUSY;
	}

	/* Check BAR resources */
	ret = avp_dev_check_regions(eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to validate BAR resources, ret=%d\n",
			    ret);
		return ret;
	}

	/* Enable interrupts */
	ret = avp_dev_setup_interrupts(eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to enable interrupts, ret=%d\n", ret);
		return ret;
	}

	/* Handle each subtype */
	ret = avp_dev_create(pci_dev, eth_dev);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to create device, ret=%d\n", ret);
		return ret;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate %d bytes needed to store MAC addresses\n",
			    ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	/* Get a mac from device config */
	ether_addr_copy(&avp->ethaddr, &eth_dev->data->mac_addrs[0]);

	return 0;
}

static int
eth_avp_dev_uninit(struct rte_eth_dev *eth_dev)
{
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	if (eth_dev->data == NULL)
		return 0;

	ret = avp_dev_disable_interrupts(eth_dev);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Failed to disable interrupts, ret=%d\n", ret);
		return ret;
	}

	return 0;
}

static int
eth_avp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		  struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct avp_adapter),
					     eth_avp_dev_init);
}

static int
eth_avp_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev,
					      eth_avp_dev_uninit);
}

static struct rte_pci_driver rte_avp_pmd = {
	.id_table = pci_id_avp_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_avp_pci_probe,
	.remove = eth_avp_pci_remove,
};

static int
avp_dev_enable_scattered(struct rte_eth_dev *eth_dev,
			 struct avp_dev *avp)
{
	unsigned int max_rx_pkt_len;

	max_rx_pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;

	if ((max_rx_pkt_len > avp->guest_mbuf_size) ||
	    (max_rx_pkt_len > avp->host_mbuf_size)) {
		/*
		 * If the guest MTU is greater than either the host or guest
		 * buffers then chained mbufs have to be enabled in the TX
		 * direction.  It is assumed that the application will not need
		 * to send packets larger than their max_rx_pkt_len (MRU).
		 */
		return 1;
	}

	if ((avp->max_rx_pkt_len > avp->guest_mbuf_size) ||
	    (avp->max_rx_pkt_len > avp->host_mbuf_size)) {
		/*
		 * If the host MRU is greater than its own mbuf size or the
		 * guest mbuf size then chained mbufs have to be enabled in the
		 * RX direction.
		 */
		return 1;
	}

	return 0;
}

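/*
 * Worked example for the test above (illustrative sizes only): with
 * host_mbuf_size = guest_mbuf_size = 2048 bytes and a configured
 * max_rx_pkt_len of 9216 bytes, a single packet cannot fit in one buffer,
 * so the scattered (chained mbuf) receive and transmit handlers are
 * selected in avp_dev_rx_queue_setup() below and a 9216-byte packet is
 * carried in ceil(9216 / 2048) = 5 mbufs.
 */
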
static int
avp_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		       uint16_t rx_queue_id,
		       uint16_t nb_rx_desc,
		       unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *pool)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct rte_pktmbuf_pool_private *mbp_priv;
	struct avp_queue *rxq;

	if (rx_queue_id >= eth_dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "RX queue id is out of range: rx_queue_id=%u, nb_rx_queues=%u\n",
			    rx_queue_id, eth_dev->data->nb_rx_queues);
		return -EINVAL;
	}

	/* Save mbuf pool pointer */
	avp->pool = pool;

	/* Save the local mbuf size */
	mbp_priv = rte_mempool_get_priv(pool);
	avp->guest_mbuf_size = (uint16_t)(mbp_priv->mbuf_data_room_size);
	avp->guest_mbuf_size -= RTE_PKTMBUF_HEADROOM;

	if (avp_dev_enable_scattered(eth_dev, avp)) {
		if (!eth_dev->data->scattered_rx) {
			PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
			eth_dev->data->scattered_rx = 1;
			eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
			eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
		}
	}

	PMD_DRV_LOG(DEBUG, "AVP max_rx_pkt_len=(%u,%u) mbuf_size=(%u,%u)\n",
		    avp->max_rx_pkt_len,
		    eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
		    avp->host_mbuf_size,
		    avp->guest_mbuf_size);

	/* allocate a queue object */
	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct avp_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate new Rx queue object\n");
		return -ENOMEM;
	}

	/* save back pointers to AVP and Ethernet devices */
	rxq->avp = avp;
	rxq->dev_data = eth_dev->data;
	eth_dev->data->rx_queues[rx_queue_id] = (void *)rxq;

	/* setup the queue receive mapping for the current queue. */
	_avp_set_rx_queue_mappings(eth_dev, rx_queue_id);

	PMD_DRV_LOG(DEBUG, "Rx queue %u setup at %p\n", rx_queue_id, rxq);

	(void)nb_rx_desc;
	(void)rx_conf;
	return 0;
}

static int
avp_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
		       uint16_t tx_queue_id,
		       uint16_t nb_tx_desc,
		       unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct avp_queue *txq;

	if (tx_queue_id >= eth_dev->data->nb_tx_queues) {
		PMD_DRV_LOG(ERR, "TX queue id is out of range: tx_queue_id=%u, nb_tx_queues=%u\n",
			    tx_queue_id, eth_dev->data->nb_tx_queues);
		return -EINVAL;
	}

	/* allocate a queue object */
	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct avp_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate new Tx queue object\n");
		return -ENOMEM;
	}

	/* only the configured set of transmit queues are used */
	txq->queue_id = tx_queue_id;
	txq->queue_base = tx_queue_id;
	txq->queue_limit = tx_queue_id;

	/* save back pointers to AVP and Ethernet devices */
	txq->avp = avp;
	txq->dev_data = eth_dev->data;
	eth_dev->data->tx_queues[tx_queue_id] = (void *)txq;

	PMD_DRV_LOG(DEBUG, "Tx queue %u setup at %p\n", tx_queue_id, txq);

	(void)nb_tx_desc;
	(void)tx_conf;
	return 0;
}

static inline int
_avp_cmp_ether_addr(struct ether_addr *a, struct ether_addr *b)
{
	uint16_t *_a = (uint16_t *)&a->addr_bytes[0];
	uint16_t *_b = (uint16_t *)&b->addr_bytes[0];
	return (_a[0] ^ _b[0]) | (_a[1] ^ _b[1]) | (_a[2] ^ _b[2]);
}

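/*
 * Illustrative note (not driver code): the comparison above treats the
 * 6-byte MAC address as three 16-bit words and ORs together the XOR of
 * each pair, so the result is zero if and only if all 48 bits match,
 * without any branches.  For example, 00:11:22:33:44:55 vs
 * 00:11:22:33:44:66 differs only in the third word, which XORs to a
 * non-zero value.
 */
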
1209 | static inline int | |
1210 | _avp_mac_filter(struct avp_dev *avp, struct rte_mbuf *m) | |
1211 | { | |
1212 | struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *); | |
1213 | ||
1214 | if (likely(_avp_cmp_ether_addr(&avp->ethaddr, ð->d_addr) == 0)) { | |
1215 | /* allow all packets destined to our address */ | |
1216 | return 0; | |
1217 | } | |
1218 | ||
1219 | if (likely(is_broadcast_ether_addr(ð->d_addr))) { | |
1220 | /* allow all broadcast packets */ | |
1221 | return 0; | |
1222 | } | |
1223 | ||
1224 | if (likely(is_multicast_ether_addr(ð->d_addr))) { | |
1225 | /* allow all multicast packets */ | |
1226 | return 0; | |
1227 | } | |
1228 | ||
1229 | if (avp->flags & AVP_F_PROMISC) { | |
1230 | /* allow all packets when in promiscuous mode */ | |
1231 | return 0; | |
1232 | } | |
1233 | ||
1234 | return -1; | |
1235 | } | |
1236 | ||
1237 | #ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS | |
1238 | static inline void | |
1239 | __avp_dev_buffer_sanity_check(struct avp_dev *avp, struct rte_avp_desc *buf) | |
1240 | { | |
1241 | struct rte_avp_desc *first_buf; | |
1242 | struct rte_avp_desc *pkt_buf; | |
1243 | unsigned int pkt_len; | |
1244 | unsigned int nb_segs; | |
1245 | void *pkt_data; | |
1246 | unsigned int i; | |
1247 | ||
1248 | first_buf = avp_dev_translate_buffer(avp, buf); | |
1249 | ||
1250 | i = 0; | |
1251 | pkt_len = 0; | |
1252 | nb_segs = first_buf->nb_segs; | |
1253 | do { | |
1254 | /* Adjust pointers for guest addressing */ | |
1255 | pkt_buf = avp_dev_translate_buffer(avp, buf); | |
1256 | if (pkt_buf == NULL) | |
1257 | rte_panic("bad buffer: segment %u has an invalid address %p\n", | |
1258 | i, buf); | |
1259 | pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data); | |
1260 | if (pkt_data == NULL) | |
1261 | rte_panic("bad buffer: segment %u has a NULL data pointer\n", | |
1262 | i); | |
1263 | if (pkt_buf->data_len == 0) | |
1264 | rte_panic("bad buffer: segment %u has 0 data length\n", | |
1265 | i); | |
1266 | pkt_len += pkt_buf->data_len; | |
1267 | nb_segs--; | |
1268 | i++; | |
1269 | ||
1270 | } while (nb_segs && (buf = pkt_buf->next) != NULL); | |
1271 | ||
1272 | if (nb_segs != 0) | |
1273 | rte_panic("bad buffer: expected %u segments found %u\n", | |
1274 | first_buf->nb_segs, (first_buf->nb_segs - nb_segs)); | |
1275 | if (pkt_len != first_buf->pkt_len) | |
1276 | rte_panic("bad buffer: expected length %u found %u\n", | |
1277 | first_buf->pkt_len, pkt_len); | |
1278 | } | |
1279 | ||
1280 | #define avp_dev_buffer_sanity_check(a, b) \ | |
1281 | __avp_dev_buffer_sanity_check((a), (b)) | |
1282 | ||
1283 | #else /* RTE_LIBRTE_AVP_DEBUG_BUFFERS */ | |
1284 | ||
1285 | #define avp_dev_buffer_sanity_check(a, b) do {} while (0) | |
1286 | ||
1287 | #endif | |
1288 | ||
1289 | /* | |
1290 | * Copy a host buffer chain to a set of mbufs. This function assumes that | |
1291 | * there exactly the required number of mbufs to copy all source bytes. | |
1292 | */ | |
1293 | static inline struct rte_mbuf * | |
1294 | avp_dev_copy_from_buffers(struct avp_dev *avp, | |
1295 | struct rte_avp_desc *buf, | |
1296 | struct rte_mbuf **mbufs, | |
1297 | unsigned int count) | |
1298 | { | |
1299 | struct rte_mbuf *m_previous = NULL; | |
1300 | struct rte_avp_desc *pkt_buf; | |
1301 | unsigned int total_length = 0; | |
1302 | unsigned int copy_length; | |
1303 | unsigned int src_offset; | |
1304 | struct rte_mbuf *m; | |
1305 | uint16_t ol_flags; | |
1306 | uint16_t vlan_tci; | |
1307 | void *pkt_data; | |
1308 | unsigned int i; | |
1309 | ||
1310 | avp_dev_buffer_sanity_check(avp, buf); | |
1311 | ||
1312 | /* setup the first source buffer */ | |
1313 | pkt_buf = avp_dev_translate_buffer(avp, buf); | |
1314 | pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data); | |
1315 | total_length = pkt_buf->pkt_len; | |
1316 | src_offset = 0; | |
1317 | ||
1318 | if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) { | |
9f95a23c | 1319 | ol_flags = PKT_RX_VLAN; |
11fdf7f2 TL |
1320 | vlan_tci = pkt_buf->vlan_tci; |
1321 | } else { | |
1322 | ol_flags = 0; | |
1323 | vlan_tci = 0; | |
1324 | } | |
1325 | ||
1326 | for (i = 0; (i < count) && (buf != NULL); i++) { | |
1327 | /* fill each destination buffer */ | |
1328 | m = mbufs[i]; | |
1329 | ||
1330 | if (m_previous != NULL) | |
1331 | m_previous->next = m; | |
1332 | ||
1333 | m_previous = m; | |
1334 | ||
1335 | do { | |
1336 | /* | |
1337 | * Copy as many source buffers as will fit in the | |
1338 | * destination buffer. | |
1339 | */ | |
1340 | copy_length = RTE_MIN((avp->guest_mbuf_size - | |
1341 | rte_pktmbuf_data_len(m)), | |
1342 | (pkt_buf->data_len - | |
1343 | src_offset)); | |
1344 | rte_memcpy(RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *), | |
1345 | rte_pktmbuf_data_len(m)), | |
1346 | RTE_PTR_ADD(pkt_data, src_offset), | |
1347 | copy_length); | |
1348 | rte_pktmbuf_data_len(m) += copy_length; | |
1349 | src_offset += copy_length; | |
1350 | ||
1351 | if (likely(src_offset == pkt_buf->data_len)) { | |
1352 | /* need a new source buffer */ | |
1353 | buf = pkt_buf->next; | |
1354 | if (buf != NULL) { | |
1355 | pkt_buf = avp_dev_translate_buffer( | |
1356 | avp, buf); | |
1357 | pkt_data = avp_dev_translate_buffer( | |
1358 | avp, pkt_buf->data); | |
1359 | src_offset = 0; | |
1360 | } | |
1361 | } | |
1362 | ||
1363 | if (unlikely(rte_pktmbuf_data_len(m) == | |
1364 | avp->guest_mbuf_size)) { | |
1365 | /* need a new destination mbuf */ | |
1366 | break; | |
1367 | } | |
1368 | ||
1369 | } while (buf != NULL); | |
1370 | } | |
1371 | ||
1372 | m = mbufs[0]; | |
1373 | m->ol_flags = ol_flags; | |
1374 | m->nb_segs = count; | |
1375 | rte_pktmbuf_pkt_len(m) = total_length; | |
1376 | m->vlan_tci = vlan_tci; | |
1377 | ||
1378 | __rte_mbuf_sanity_check(m, 1); | |
1379 | ||
1380 | return m; | |
1381 | } | |
1382 | ||
1383 | static uint16_t | |
1384 | avp_recv_scattered_pkts(void *rx_queue, | |
1385 | struct rte_mbuf **rx_pkts, | |
1386 | uint16_t nb_pkts) | |
1387 | { | |
1388 | struct avp_queue *rxq = (struct avp_queue *)rx_queue; | |
1389 | struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST]; | |
1390 | struct rte_mbuf *mbufs[RTE_AVP_MAX_MBUF_SEGMENTS]; | |
1391 | struct avp_dev *avp = rxq->avp; | |
1392 | struct rte_avp_desc *pkt_buf; | |
1393 | struct rte_avp_fifo *free_q; | |
1394 | struct rte_avp_fifo *rx_q; | |
1395 | struct rte_avp_desc *buf; | |
1396 | unsigned int count, avail, n; | |
1397 | unsigned int guest_mbuf_size; | |
1398 | struct rte_mbuf *m; | |
1399 | unsigned int required; | |
1400 | unsigned int buf_len; | |
1401 | unsigned int port_id; | |
1402 | unsigned int i; | |
1403 | ||
1404 | if (unlikely(avp->flags & AVP_F_DETACHED)) { | |
1405 | /* VM live migration in progress */ | |
1406 | return 0; | |
1407 | } | |
1408 | ||
1409 | guest_mbuf_size = avp->guest_mbuf_size; | |
1410 | port_id = avp->port_id; | |
1411 | rx_q = avp->rx_q[rxq->queue_id]; | |
1412 | free_q = avp->free_q[rxq->queue_id]; | |
1413 | ||
1414 | /* setup next queue to service */ | |
1415 | rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ? | |
1416 | (rxq->queue_id + 1) : rxq->queue_base; | |
1417 | ||
1418 | /* determine how many slots are available in the free queue */ | |
1419 | count = avp_fifo_free_count(free_q); | |
1420 | ||
1421 | /* determine how many packets are available in the rx queue */ | |
1422 | avail = avp_fifo_count(rx_q); | |
1423 | ||
1424 | /* determine how many packets can be received */ | |
1425 | count = RTE_MIN(count, avail); | |
1426 | count = RTE_MIN(count, nb_pkts); | |
1427 | count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST); | |
1428 | ||
1429 | if (unlikely(count == 0)) { | |
1430 | /* no free buffers, or no buffers on the rx queue */ | |
1431 | return 0; | |
1432 | } | |
1433 | ||
1434 | /* retrieve pending packets */ | |
1435 | n = avp_fifo_get(rx_q, (void **)&avp_bufs, count); | |
1436 | PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n", | |
1437 | count, rx_q); | |
1438 | ||
1439 | count = 0; | |
1440 | for (i = 0; i < n; i++) { | |
1441 | /* prefetch next entry while processing current one */ | |
1442 | if (i + 1 < n) { | |
1443 | pkt_buf = avp_dev_translate_buffer(avp, | |
1444 | avp_bufs[i + 1]); | |
1445 | rte_prefetch0(pkt_buf); | |
1446 | } | |
1447 | buf = avp_bufs[i]; | |
1448 | ||
1449 | /* Peek into the first buffer to determine the total length */ | |
1450 | pkt_buf = avp_dev_translate_buffer(avp, buf); | |
1451 | buf_len = pkt_buf->pkt_len; | |
1452 | ||
1453 | /* Allocate enough mbufs to receive the entire packet */ | |
1454 | required = (buf_len + guest_mbuf_size - 1) / guest_mbuf_size; | |
1455 | if (rte_pktmbuf_alloc_bulk(avp->pool, mbufs, required)) { | |
1456 | rxq->dev_data->rx_mbuf_alloc_failed++; | |
1457 | continue; | |
1458 | } | |
1459 | ||
1460 | /* Copy the data from the buffers to our mbufs */ | |
1461 | m = avp_dev_copy_from_buffers(avp, buf, mbufs, required); | |
1462 | ||
1463 | /* finalize mbuf */ | |
1464 | m->port = port_id; | |
1465 | ||
1466 | if (_avp_mac_filter(avp, m) != 0) { | |
1467 | /* silently discard packets not destined to our MAC */ | |
1468 | rte_pktmbuf_free(m); | |
1469 | continue; | |
1470 | } | |
1471 | ||
1472 | /* return new mbuf to caller */ | |
1473 | rx_pkts[count++] = m; | |
1474 | rxq->bytes += buf_len; | |
1475 | } | |
1476 | ||
1477 | rxq->packets += count; | |
1478 | ||
1479 | /* return the buffers to the free queue */ | |
1480 | avp_fifo_put(free_q, (void **)&avp_bufs[0], n); | |
1481 | ||
1482 | return count; | |
1483 | } | |
1484 | ||
1485 | ||
1486 | static uint16_t | |
1487 | avp_recv_pkts(void *rx_queue, | |
1488 | struct rte_mbuf **rx_pkts, | |
1489 | uint16_t nb_pkts) | |
1490 | { | |
1491 | struct avp_queue *rxq = (struct avp_queue *)rx_queue; | |
1492 | struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST]; | |
1493 | struct avp_dev *avp = rxq->avp; | |
1494 | struct rte_avp_desc *pkt_buf; | |
1495 | struct rte_avp_fifo *free_q; | |
1496 | struct rte_avp_fifo *rx_q; | |
1497 | unsigned int count, avail, n; | |
1498 | unsigned int pkt_len; | |
1499 | struct rte_mbuf *m; | |
1500 | char *pkt_data; | |
1501 | unsigned int i; | |
1502 | ||
1503 | if (unlikely(avp->flags & AVP_F_DETACHED)) { | |
1504 | /* VM live migration in progress */ | |
1505 | return 0; | |
1506 | } | |
1507 | ||
1508 | rx_q = avp->rx_q[rxq->queue_id]; | |
1509 | free_q = avp->free_q[rxq->queue_id]; | |
1510 | ||
1511 | /* setup next queue to service */ | |
1512 | rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ? | |
1513 | (rxq->queue_id + 1) : rxq->queue_base; | |
1514 | ||
1515 | /* determine how many slots are available in the free queue */ | |
1516 | count = avp_fifo_free_count(free_q); | |
1517 | ||
1518 | /* determine how many packets are available in the rx queue */ | |
1519 | avail = avp_fifo_count(rx_q); | |
1520 | ||
1521 | /* determine how many packets can be received */ | |
1522 | count = RTE_MIN(count, avail); | |
1523 | count = RTE_MIN(count, nb_pkts); | |
1524 | count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST); | |
1525 | ||
1526 | if (unlikely(count == 0)) { | |
1527 | /* no free buffers, or no buffers on the rx queue */ | |
1528 | return 0; | |
1529 | } | |
1530 | ||
1531 | /* retrieve pending packets */ | |
1532 | n = avp_fifo_get(rx_q, (void **)&avp_bufs, count); | |
1533 | PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n", | |
1534 | n, rx_q); | |
1535 | ||
1536 | count = 0; | |
1537 | for (i = 0; i < n; i++) { | |
1538 | /* prefetch next entry while processing current one */ | |
1539 | if (i < n - 1) { | |
1540 | pkt_buf = avp_dev_translate_buffer(avp, | |
1541 | avp_bufs[i + 1]); | |
1542 | rte_prefetch0(pkt_buf); | |
1543 | } | |
1544 | ||
1545 | /* Adjust host pointers for guest addressing */ | |
1546 | pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]); | |
1547 | pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data); | |
1548 | pkt_len = pkt_buf->pkt_len; | |
1549 | ||
1550 | if (unlikely((pkt_len > avp->guest_mbuf_size) || | |
1551 | (pkt_buf->nb_segs > 1))) { | |
1552 | /* | |
1553 | * application should be using the scattered receive | |
1554 | * function | |
1555 | */ | |
1556 | rxq->errors++; | |
1557 | continue; | |
1558 | } | |
1559 | ||
1560 | /* allocate a new mbuf to hold the received packet */ | |
1561 | m = rte_pktmbuf_alloc(avp->pool); | |
1562 | if (unlikely(m == NULL)) { | |
1563 | rxq->dev_data->rx_mbuf_alloc_failed++; | |
1564 | continue; | |
1565 | } | |
1566 | ||
1567 | /* copy data out of the host buffer to our buffer */ | |
1568 | m->data_off = RTE_PKTMBUF_HEADROOM; | |
1569 | rte_memcpy(rte_pktmbuf_mtod(m, void *), pkt_data, pkt_len); | |
1570 | ||
1571 | /* initialize the local mbuf */ | |
1572 | rte_pktmbuf_data_len(m) = pkt_len; | |
1573 | rte_pktmbuf_pkt_len(m) = pkt_len; | |
1574 | m->port = avp->port_id; | |
1575 | ||
1576 | if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) { | |
9f95a23c | 1577 | m->ol_flags = PKT_RX_VLAN; |
11fdf7f2 TL |
1578 | m->vlan_tci = pkt_buf->vlan_tci; |
1579 | } | |
1580 | ||
1581 | if (_avp_mac_filter(avp, m) != 0) { | |
1582 | /* silently discard packets not destined to our MAC */ | |
1583 | rte_pktmbuf_free(m); | |
1584 | continue; | |
1585 | } | |
1586 | ||
1587 | /* return new mbuf to caller */ | |
1588 | rx_pkts[count++] = m; | |
1589 | rxq->bytes += pkt_len; | |
1590 | } | |
1591 | ||
1592 | rxq->packets += count; | |
1593 | ||
1594 | /* return the buffers to the free queue */ | |
1595 | avp_fifo_put(free_q, (void **)&avp_bufs[0], n); | |
1596 | ||
1597 | return count; | |
1598 | } | |
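/*
 * avp_recv_pkts() (and its scattered variant) service one host queue per
 * call and rotate rxq->queue_id round-robin across the range assigned to
 * this receive queue.  A minimal sketch of the rotation, assuming
 * hypothetical base/limit values:
 */
static inline unsigned int
next_queue_id(unsigned int id, unsigned int base, unsigned int limit)
{
	/* advance until the last queue was serviced, then wrap to base */
	return (id < limit) ? (id + 1) : base;
}
/* with base == 0 and limit == 2 the sequence is 0, 1, 2, 0, 1, 2, ... */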
1599 | ||
1600 | /* | |
1601 | * Copy a chained mbuf to a set of host buffers. This function assumes that | |
1602 | * there are sufficient destination buffers to contain the entire source | |
1603 | * packet. | |
1604 | */ | |
1605 | static inline uint16_t | |
1606 | avp_dev_copy_to_buffers(struct avp_dev *avp, | |
1607 | struct rte_mbuf *mbuf, | |
1608 | struct rte_avp_desc **buffers, | |
1609 | unsigned int count) | |
1610 | { | |
1611 | struct rte_avp_desc *previous_buf = NULL; | |
1612 | struct rte_avp_desc *first_buf = NULL; | |
1613 | struct rte_avp_desc *pkt_buf; | |
1614 | struct rte_avp_desc *buf; | |
1615 | size_t total_length; | |
1616 | struct rte_mbuf *m; | |
1617 | size_t copy_length; | |
1618 | size_t src_offset; | |
1619 | char *pkt_data; | |
1620 | unsigned int i; | |
1621 | ||
1622 | __rte_mbuf_sanity_check(mbuf, 1); | |
1623 | ||
1624 | m = mbuf; | |
1625 | src_offset = 0; | |
1626 | total_length = rte_pktmbuf_pkt_len(m); | |
1627 | for (i = 0; (i < count) && (m != NULL); i++) { | |
1628 | /* fill each destination buffer */ | |
1629 | buf = buffers[i]; | |
1630 | ||
1631 | if (i < count - 1) { | |
1632 | /* prefetch next entry while processing this one */ | |
1633 | pkt_buf = avp_dev_translate_buffer(avp, buffers[i + 1]); | |
1634 | rte_prefetch0(pkt_buf); | |
1635 | } | |
1636 | ||
1637 | /* Adjust pointers for guest addressing */ | |
1638 | pkt_buf = avp_dev_translate_buffer(avp, buf); | |
1639 | pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data); | |
1640 | ||
1641 | /* setup the buffer chain */ | |
1642 | if (previous_buf != NULL) | |
1643 | previous_buf->next = buf; | |
1644 | else | |
1645 | first_buf = pkt_buf; | |
1646 | ||
1647 | previous_buf = pkt_buf; | |
1648 | ||
1649 | do { | |
1650 | /* | |
1651 | * copy as many source mbuf segments as will fit in the | |
1652 | * destination buffer. | |
1653 | */ | |
1654 | copy_length = RTE_MIN((avp->host_mbuf_size - | |
1655 | pkt_buf->data_len), | |
1656 | (rte_pktmbuf_data_len(m) - | |
1657 | src_offset)); | |
1658 | rte_memcpy(RTE_PTR_ADD(pkt_data, pkt_buf->data_len), | |
1659 | RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *), | |
1660 | src_offset), | |
1661 | copy_length); | |
1662 | pkt_buf->data_len += copy_length; | |
1663 | src_offset += copy_length; | |
1664 | ||
1665 | if (likely(src_offset == rte_pktmbuf_data_len(m))) { | |
1666 | /* need a new source buffer */ | |
1667 | m = m->next; | |
1668 | src_offset = 0; | |
1669 | } | |
1670 | ||
1671 | if (unlikely(pkt_buf->data_len == | |
1672 | avp->host_mbuf_size)) { | |
1673 | /* need a new destination buffer */ | |
1674 | break; | |
1675 | } | |
1676 | ||
1677 | } while (m != NULL); | |
1678 | } | |
1679 | ||
1680 | first_buf->nb_segs = count; | |
1681 | first_buf->pkt_len = total_length; | |
1682 | ||
1683 | if (mbuf->ol_flags & PKT_TX_VLAN_PKT) { | |
1684 | first_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT; | |
1685 | first_buf->vlan_tci = mbuf->vlan_tci; | |
1686 | } | |
1687 | ||
1688 | avp_dev_buffer_sanity_check(avp, buffers[0]); | |
1689 | ||
1690 | return total_length; | |
1691 | } | |
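/*
 * avp_dev_copy_to_buffers() walks two cursors at once: src_offset within
 * the current source segment and data_len within the current destination
 * buffer, advancing whichever side is exhausted first.  A self-contained
 * sketch of the same two-cursor copy over plain byte arrays (names and
 * sizes are illustrative; the caller must supply enough zero-initialized
 * destination buffers, as the real function assumes):
 */
static void
copy_segments(const char *const *segs, const size_t *seg_len,
	      unsigned int nb_segs, char **bufs, size_t *buf_used,
	      size_t buf_size)
{
	unsigned int s = 0, d = 0;
	size_t src_off = 0;

	while (s < nb_segs) {
		/* copy as much of the current segment as fits */
		size_t n = seg_len[s] - src_off;

		if (n > buf_size - buf_used[d])
			n = buf_size - buf_used[d];
		memcpy(bufs[d] + buf_used[d], segs[s] + src_off, n);
		buf_used[d] += n;
		src_off += n;

		if (src_off == seg_len[s]) {
			/* source segment drained; move to the next one */
			s++;
			src_off = 0;
		}
		if (buf_used[d] == buf_size)
			d++;	/* destination buffer full */
	}
}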
1692 | ||
1693 | ||
1694 | static uint16_t | |
1695 | avp_xmit_scattered_pkts(void *tx_queue, | |
1696 | struct rte_mbuf **tx_pkts, | |
1697 | uint16_t nb_pkts) | |
1698 | { | |
1699 | struct rte_avp_desc *avp_bufs[(AVP_MAX_TX_BURST * | |
1700 | RTE_AVP_MAX_MBUF_SEGMENTS)]; | |
1701 | struct avp_queue *txq = (struct avp_queue *)tx_queue; | |
1702 | struct rte_avp_desc *tx_bufs[AVP_MAX_TX_BURST]; | |
1703 | struct avp_dev *avp = txq->avp; | |
1704 | struct rte_avp_fifo *alloc_q; | |
1705 | struct rte_avp_fifo *tx_q; | |
1706 | unsigned int count, avail, n; | |
1707 | unsigned int orig_nb_pkts; | |
1708 | struct rte_mbuf *m; | |
1709 | unsigned int required; | |
1710 | unsigned int segments; | |
1711 | unsigned int tx_bytes; | |
1712 | unsigned int i; | |
1713 | ||
1714 | orig_nb_pkts = nb_pkts; | |
1715 | if (unlikely(avp->flags & AVP_F_DETACHED)) { | |
1716 | /* VM live migration in progress */ | |
1717 | /* TODO ... buffer for X packets then drop? */ | |
1718 | txq->errors += nb_pkts; | |
1719 | return 0; | |
1720 | } | |
1721 | ||
1722 | tx_q = avp->tx_q[txq->queue_id]; | |
1723 | alloc_q = avp->alloc_q[txq->queue_id]; | |
1724 | ||
1725 | /* limit the number of transmitted packets to the max burst size */ | |
1726 | if (unlikely(nb_pkts > AVP_MAX_TX_BURST)) | |
1727 | nb_pkts = AVP_MAX_TX_BURST; | |
1728 | ||
1729 | /* determine how many buffers are available to copy into */ | |
1730 | avail = avp_fifo_count(alloc_q); | |
1731 | if (unlikely(avail > (AVP_MAX_TX_BURST * | |
1732 | RTE_AVP_MAX_MBUF_SEGMENTS))) | |
1733 | avail = AVP_MAX_TX_BURST * RTE_AVP_MAX_MBUF_SEGMENTS; | |
1734 | ||
1735 | /* determine how many slots are available in the transmit queue */ | |
1736 | count = avp_fifo_free_count(tx_q); | |
1737 | ||
1738 | /* determine how many packets can be sent */ | |
1739 | nb_pkts = RTE_MIN(count, nb_pkts); | |
1740 | ||
1741 | /* determine how many packets will fit in the available buffers */ | |
1742 | count = 0; | |
1743 | segments = 0; | |
1744 | for (i = 0; i < nb_pkts; i++) { | |
1745 | m = tx_pkts[i]; | |
1746 | if (likely(i < (unsigned int)nb_pkts - 1)) { | |
1747 | /* prefetch next entry while processing this one */ | |
1748 | rte_prefetch0(tx_pkts[i + 1]); | |
1749 | } | |
1750 | required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) / | |
1751 | avp->host_mbuf_size; | |
1752 | ||
1753 | if (unlikely((required == 0) || | |
1754 | (required > RTE_AVP_MAX_MBUF_SEGMENTS))) | |
1755 | break; | |
1756 | else if (unlikely(required + segments > avail)) | |
1757 | break; | |
1758 | segments += required; | |
1759 | count++; | |
1760 | } | |
1761 | nb_pkts = count; | |
1762 | ||
1763 | if (unlikely(nb_pkts == 0)) { | |
1764 | /* no available buffers, or no space on the tx queue */ | |
1765 | txq->errors += orig_nb_pkts; | |
1766 | return 0; | |
1767 | } | |
1768 | ||
1769 | PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n", | |
1770 | nb_pkts, tx_q); | |
1771 | ||
1772 | /* retrieve sufficient send buffers */ | |
1773 | n = avp_fifo_get(alloc_q, (void **)&avp_bufs, segments); | |
1774 | if (unlikely(n != segments)) { | |
1775 | PMD_TX_LOG(DEBUG, "Failed to allocate buffers " | |
1776 | "n=%u, segments=%u, orig=%u\n", | |
1777 | n, segments, orig_nb_pkts); | |
1778 | txq->errors += orig_nb_pkts; | |
1779 | return 0; | |
1780 | } | |
1781 | ||
1782 | tx_bytes = 0; | |
1783 | count = 0; | |
1784 | for (i = 0; i < nb_pkts; i++) { | |
1785 | /* process each packet to be transmitted */ | |
1786 | m = tx_pkts[i]; | |
1787 | ||
1788 | /* determine how many buffers are required for this packet */ | |
1789 | required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) / | |
1790 | avp->host_mbuf_size; | |
1791 | ||
1792 | tx_bytes += avp_dev_copy_to_buffers(avp, m, | |
1793 | &avp_bufs[count], required); | |
1794 | tx_bufs[i] = avp_bufs[count]; | |
1795 | count += required; | |
1796 | ||
1797 | /* free the original mbuf */ | |
1798 | rte_pktmbuf_free(m); | |
1799 | } | |
1800 | ||
1801 | txq->packets += nb_pkts; | |
1802 | txq->bytes += tx_bytes; | |
1803 | ||
1804 | #ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS | |
1805 | for (i = 0; i < nb_pkts; i++) | |
1806 | avp_dev_buffer_sanity_check(avp, tx_bufs[i]); | |
1807 | #endif | |
1808 | ||
1809 | /* send the packets */ | |
1810 | n = avp_fifo_put(tx_q, (void **)&tx_bufs[0], nb_pkts); | |
1811 | if (unlikely(n != orig_nb_pkts)) | |
1812 | txq->errors += (orig_nb_pkts - n); | |
1813 | ||
1814 | return n; | |
1815 | } | |
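/*
 * Before copying anything, avp_xmit_scattered_pkts() pre-computes how
 * many of the burst's packets fit in the host buffers actually
 * available, charging each packet the ceiling of its length over the
 * host buffer size.  A standalone sketch of that admission pass (the
 * lengths and limits fed in are hypothetical):
 */
static unsigned int
admit_packets(const unsigned int *pkt_len, unsigned int nb_pkts,
	      unsigned int buf_size, unsigned int avail_bufs,
	      unsigned int max_segs)
{
	unsigned int count = 0, segments = 0, i;

	for (i = 0; i < nb_pkts; i++) {
		unsigned int required =
			(pkt_len[i] + buf_size - 1) / buf_size;

		/* stop at the first packet that cannot be represented or
		 * that no longer fits in the remaining buffers */
		if (required == 0 || required > max_segs)
			break;
		if (segments + required > avail_bufs)
			break;
		segments += required;
		count++;
	}
	return count;
}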
1816 | ||
1817 | ||
1818 | static uint16_t | |
1819 | avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) | |
1820 | { | |
1821 | struct avp_queue *txq = (struct avp_queue *)tx_queue; | |
1822 | struct rte_avp_desc *avp_bufs[AVP_MAX_TX_BURST]; | |
1823 | struct avp_dev *avp = txq->avp; | |
1824 | struct rte_avp_desc *pkt_buf; | |
1825 | struct rte_avp_fifo *alloc_q; | |
1826 | struct rte_avp_fifo *tx_q; | |
1827 | unsigned int count, avail, n; | |
1828 | struct rte_mbuf *m; | |
1829 | unsigned int pkt_len; | |
1830 | unsigned int tx_bytes; | |
1831 | char *pkt_data; | |
1832 | unsigned int i; | |
1833 | ||
1834 | if (unlikely(avp->flags & AVP_F_DETACHED)) { | |
1835 | /* VM live migration in progress */ | |
1836 | /* TODO ... buffer for X packets then drop?! */ | |
1837 | txq->errors++; | |
1838 | return 0; | |
1839 | } | |
1840 | ||
1841 | tx_q = avp->tx_q[txq->queue_id]; | |
1842 | alloc_q = avp->alloc_q[txq->queue_id]; | |
1843 | ||
1844 | /* limit the number of transmitted packets to the max burst size */ | |
1845 | if (unlikely(nb_pkts > AVP_MAX_TX_BURST)) | |
1846 | nb_pkts = AVP_MAX_TX_BURST; | |
1847 | ||
1848 | /* determine how many buffers are available to copy into */ | |
1849 | avail = avp_fifo_count(alloc_q); | |
1850 | ||
1851 | /* determine how many slots are available in the transmit queue */ | |
1852 | count = avp_fifo_free_count(tx_q); | |
1853 | ||
1854 | /* determine how many packets can be sent */ | |
1855 | count = RTE_MIN(count, avail); | |
1856 | count = RTE_MIN(count, nb_pkts); | |
1857 | ||
1858 | if (unlikely(count == 0)) { | |
1859 | /* no available buffers, or no space on the tx queue */ | |
1860 | txq->errors += nb_pkts; | |
1861 | return 0; | |
1862 | } | |
1863 | ||
1864 | PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n", | |
1865 | count, tx_q); | |
1866 | ||
1867 | /* retrieve sufficient send buffers */ | |
1868 | n = avp_fifo_get(alloc_q, (void **)&avp_bufs, count); | |
1869 | if (unlikely(n != count)) { | |
1870 | txq->errors++; | |
1871 | return 0; | |
1872 | } | |
1873 | ||
1874 | tx_bytes = 0; | |
1875 | for (i = 0; i < count; i++) { | |
1876 | /* prefetch next entry while processing the current one */ | |
1877 | if (i < count - 1) { | |
1878 | pkt_buf = avp_dev_translate_buffer(avp, | |
1879 | avp_bufs[i + 1]); | |
1880 | rte_prefetch0(pkt_buf); | |
1881 | } | |
1882 | ||
1883 | /* process each packet to be transmitted */ | |
1884 | m = tx_pkts[i]; | |
1885 | ||
1886 | /* Adjust pointers for guest addressing */ | |
1887 | pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]); | |
1888 | pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data); | |
1889 | pkt_len = rte_pktmbuf_pkt_len(m); | |
1890 | ||
1891 | if (unlikely((pkt_len > avp->guest_mbuf_size) || | |
1892 | (pkt_len > avp->host_mbuf_size))) { | |
1893 | /* | |
1894 | * application should be using the scattered transmit | |
1895 | * function; send it truncated to avoid the performance | |
1896 | * hit of having to manage returning the already | |
1897 | * allocated buffer to the free list. This should not | |
1898 | * happen since the application should have set the | |
1899 | * max_rx_pkt_len based on its MTU and it should be | |
1900 | * policing its own packet sizes. | |
1901 | */ | |
1902 | txq->errors++; | |
1903 | pkt_len = RTE_MIN(avp->guest_mbuf_size, | |
1904 | avp->host_mbuf_size); | |
1905 | } | |
1906 | ||
1907 | /* copy data out of our mbuf and into the AVP buffer */ | |
1908 | rte_memcpy(pkt_data, rte_pktmbuf_mtod(m, void *), pkt_len); | |
1909 | pkt_buf->pkt_len = pkt_len; | |
1910 | pkt_buf->data_len = pkt_len; | |
1911 | pkt_buf->nb_segs = 1; | |
1912 | pkt_buf->next = NULL; | |
1913 | ||
1914 | if (m->ol_flags & PKT_TX_VLAN_PKT) { | |
1915 | pkt_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT; | |
1916 | pkt_buf->vlan_tci = m->vlan_tci; | |
1917 | } | |
1918 | ||
1919 | tx_bytes += pkt_len; | |
1920 | ||
1921 | /* free the original mbuf */ | |
1922 | rte_pktmbuf_free(m); | |
1923 | } | |
1924 | ||
1925 | txq->packets += count; | |
1926 | txq->bytes += tx_bytes; | |
1927 | ||
1928 | /* send the packets */ | |
1929 | n = avp_fifo_put(tx_q, (void **)&avp_bufs[0], count); | |
1930 | ||
1931 | return n; | |
1932 | } | |
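/*
 * The single-segment transmit budget above is just the minimum of three
 * limits: free slots on the tx queue, buffers waiting on the alloc
 * queue, and the caller's burst (itself capped at AVP_MAX_TX_BURST).
 * The receive path computes its budget the same way.  A minimal sketch:
 */
static inline unsigned int
tx_budget(unsigned int tx_free, unsigned int alloc_avail,
	  unsigned int nb_pkts, unsigned int max_burst)
{
	unsigned int count = (nb_pkts < max_burst) ? nb_pkts : max_burst;

	if (count > tx_free)
		count = tx_free;
	if (count > alloc_avail)
		count = alloc_avail;
	return count;	/* 0 means nothing can be sent on this call */
}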
1933 | ||
1934 | static void | |
1935 | avp_dev_rx_queue_release(void *rx_queue) | |
1936 | { | |
1937 | struct avp_queue *rxq = (struct avp_queue *)rx_queue; | |
1938 | struct avp_dev *avp = rxq->avp; | |
1939 | struct rte_eth_dev_data *data = avp->dev_data; | |
1940 | unsigned int i; | |
1941 | ||
1942 | for (i = 0; i < avp->num_rx_queues; i++) { | |
1943 | if (data->rx_queues[i] == rxq) | |
1944 | data->rx_queues[i] = NULL; | |
1945 | } | |
1946 | } | |
1947 | ||
1948 | static void | |
1949 | avp_dev_tx_queue_release(void *tx_queue) | |
1950 | { | |
1951 | struct avp_queue *txq = (struct avp_queue *)tx_queue; | |
1952 | struct avp_dev *avp = txq->avp; | |
1953 | struct rte_eth_dev_data *data = avp->dev_data; | |
1954 | unsigned int i; | |
1955 | ||
1956 | for (i = 0; i < avp->num_tx_queues; i++) { | |
1957 | if (data->tx_queues[i] == txq) | |
1958 | data->tx_queues[i] = NULL; | |
1959 | } | |
1960 | } | |
1961 | ||
1962 | static int | |
1963 | avp_dev_configure(struct rte_eth_dev *eth_dev) | |
1964 | { | |
9f95a23c | 1965 | struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev); |
11fdf7f2 TL |
1966 | struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); |
1967 | struct rte_avp_device_info *host_info; | |
1968 | struct rte_avp_device_config config; | |
1969 | int mask = 0; | |
1970 | void *addr; | |
1971 | int ret; | |
1972 | ||
1973 | rte_spinlock_lock(&avp->lock); | |
1974 | if (avp->flags & AVP_F_DETACHED) { | |
1975 | PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n"); | |
1976 | ret = -ENOTSUP; | |
1977 | goto unlock; | |
1978 | } | |
1979 | ||
1980 | addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr; | |
1981 | host_info = (struct rte_avp_device_info *)addr; | |
1982 | ||
1983 | /* Setup required number of queues */ | |
1984 | _avp_set_queue_counts(eth_dev); | |
1985 | ||
1986 | mask = (ETH_VLAN_STRIP_MASK | | |
1987 | ETH_VLAN_FILTER_MASK | | |
1988 | ETH_VLAN_EXTEND_MASK); | |
9f95a23c TL |
1989 | ret = avp_vlan_offload_set(eth_dev, mask); |
1990 | if (ret < 0) { | |
1991 | PMD_DRV_LOG(ERR, "VLAN offload set failed, ret=%d\n", | |
1992 | ret); | |
1993 | goto unlock; | |
1994 | } | |
11fdf7f2 TL |
1995 | |
1996 | /* update device config */ | |
1997 | memset(&config, 0, sizeof(config)); | |
1998 | config.device_id = host_info->device_id; | |
1999 | config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK; | |
2000 | config.driver_version = AVP_DPDK_DRIVER_VERSION; | |
2001 | config.features = avp->features; | |
2002 | config.num_tx_queues = avp->num_tx_queues; | |
2003 | config.num_rx_queues = avp->num_rx_queues; | |
2004 | ||
2005 | ret = avp_dev_ctrl_set_config(eth_dev, &config); | |
2006 | if (ret < 0) { | |
2007 | PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n", | |
2008 | ret); | |
2009 | goto unlock; | |
2010 | } | |
2011 | ||
2012 | avp->flags |= AVP_F_CONFIGURED; | |
2013 | ret = 0; | |
2014 | ||
2015 | unlock: | |
2016 | rte_spinlock_unlock(&avp->lock); | |
2017 | return ret; | |
2018 | } | |
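/*
 * avp_dev_configure() above, and the start/stop/close handlers that
 * follow, all share one locking discipline: take avp->lock, refuse to
 * touch the device while AVP_F_DETACHED is set (VM live migration in
 * progress), and funnel every exit through a single unlock label.  A
 * skeletal sketch of that pattern; avp_ctrl_op_sketch() and its empty
 * body are hypothetical stand-ins:
 */
static int
avp_ctrl_op_sketch(struct avp_dev *avp)
{
	int ret;

	rte_spinlock_lock(&avp->lock);
	if (avp->flags & AVP_F_DETACHED) {
		ret = -ENOTSUP;	/* device unavailable during migration */
		goto unlock;
	}

	ret = 0;	/* the actual control operation would go here */

unlock:
	rte_spinlock_unlock(&avp->lock);
	return ret;
}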
2019 | ||
2020 | static int | |
2021 | avp_dev_start(struct rte_eth_dev *eth_dev) | |
2022 | { | |
2023 | struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); | |
2024 | int ret; | |
2025 | ||
2026 | rte_spinlock_lock(&avp->lock); | |
2027 | if (avp->flags & AVP_F_DETACHED) { | |
2028 | PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n"); | |
2029 | ret = -ENOTSUP; | |
2030 | goto unlock; | |
2031 | } | |
2032 | ||
11fdf7f2 TL |
2033 | /* update link state */ |
2034 | ret = avp_dev_ctrl_set_link_state(eth_dev, 1); | |
2035 | if (ret < 0) { | |
2036 | PMD_DRV_LOG(ERR, "Link state change failed by host, ret=%d\n", | |
2037 | ret); | |
2038 | goto unlock; | |
2039 | } | |
2040 | ||
2041 | /* remember current link state */ | |
2042 | avp->flags |= AVP_F_LINKUP; | |
2043 | ||
2044 | ret = 0; | |
2045 | ||
2046 | unlock: | |
2047 | rte_spinlock_unlock(&avp->lock); | |
2048 | return ret; | |
2049 | } | |
2050 | ||
2051 | static void | |
2052 | avp_dev_stop(struct rte_eth_dev *eth_dev) | |
2053 | { | |
2054 | struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); | |
2055 | int ret; | |
2056 | ||
2057 | rte_spinlock_lock(&avp->lock); | |
2058 | if (avp->flags & AVP_F_DETACHED) { | |
2059 | PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n"); | |
2060 | goto unlock; | |
2061 | } | |
2062 | ||
2063 | /* remember current link state */ | |
2064 | avp->flags &= ~AVP_F_LINKUP; | |
2065 | ||
2066 | /* update link state */ | |
2067 | ret = avp_dev_ctrl_set_link_state(eth_dev, 0); | |
2068 | if (ret < 0) { | |
2069 | PMD_DRV_LOG(ERR, "Link state change failed by host, ret=%d\n", | |
2070 | ret); | |
2071 | } | |
2072 | ||
2073 | unlock: | |
2074 | rte_spinlock_unlock(&avp->lock); | |
2075 | } | |
2076 | ||
2077 | static void | |
2078 | avp_dev_close(struct rte_eth_dev *eth_dev) | |
2079 | { | |
2080 | struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); | |
2081 | int ret; | |
2082 | ||
2083 | rte_spinlock_lock(&avp->lock); | |
2084 | if (avp->flags & AVP_F_DETACHED) { | |
2085 | PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n"); | |
2086 | goto unlock; | |
2087 | } | |
2088 | ||
2089 | /* remember current link state */ | |
2090 | avp->flags &= ~AVP_F_LINKUP; | |
2091 | avp->flags &= ~AVP_F_CONFIGURED; | |
2092 | ||
2093 | ret = avp_dev_disable_interrupts(eth_dev); | |
2094 | if (ret < 0) { | |
2095 | PMD_DRV_LOG(ERR, "Failed to disable interrupts\n"); | |
2096 | /* continue */ | |
2097 | } | |
2098 | ||
2099 | /* update device state */ | |
2100 | ret = avp_dev_ctrl_shutdown(eth_dev); | |
2101 | if (ret < 0) { | |
2102 | PMD_DRV_LOG(ERR, "Device shutdown failed by host, ret=%d\n", | |
2103 | ret); | |
2104 | /* continue */ | |
2105 | } | |
2106 | ||
2107 | unlock: | |
2108 | rte_spinlock_unlock(&avp->lock); | |
2109 | } | |
2110 | ||
2111 | static int | |
2112 | avp_dev_link_update(struct rte_eth_dev *eth_dev, | |
2113 | __rte_unused int wait_to_complete) | |
2114 | { | |
2115 | struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); | |
2116 | struct rte_eth_link *link = ð_dev->data->dev_link; | |
2117 | ||
2118 | link->link_speed = ETH_SPEED_NUM_10G; | |
2119 | link->link_duplex = ETH_LINK_FULL_DUPLEX; | |
2120 | link->link_status = !!(avp->flags & AVP_F_LINKUP); | |
2121 | ||
2122 | return -1; | |
2123 | } | |
2124 | ||
2125 | static void | |
2126 | avp_dev_promiscuous_enable(struct rte_eth_dev *eth_dev) | |
2127 | { | |
2128 | struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); | |
2129 | ||
2130 | rte_spinlock_lock(&avp->lock); | |
2131 | if ((avp->flags & AVP_F_PROMISC) == 0) { | |
2132 | avp->flags |= AVP_F_PROMISC; | |
2133 | PMD_DRV_LOG(DEBUG, "Promiscuous mode enabled on %u\n", | |
2134 | eth_dev->data->port_id); | |
2135 | } | |
2136 | rte_spinlock_unlock(&avp->lock); | |
2137 | } | |
2138 | ||
2139 | static void | |
2140 | avp_dev_promiscuous_disable(struct rte_eth_dev *eth_dev) | |
2141 | { | |
2142 | struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); | |
2143 | ||
2144 | rte_spinlock_lock(&avp->lock); | |
2145 | if ((avp->flags & AVP_F_PROMISC) != 0) { | |
2146 | avp->flags &= ~AVP_F_PROMISC; | |
2147 | PMD_DRV_LOG(DEBUG, "Promiscuous mode disabled on %u\n", | |
2148 | eth_dev->data->port_id); | |
2149 | } | |
2150 | rte_spinlock_unlock(&avp->lock); | |
2151 | } | |
2152 | ||
2153 | static void | |
2154 | avp_dev_info_get(struct rte_eth_dev *eth_dev, | |
2155 | struct rte_eth_dev_info *dev_info) | |
2156 | { | |
2157 | struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); | |
2158 | ||
11fdf7f2 TL |
2159 | dev_info->max_rx_queues = avp->max_rx_queues; |
2160 | dev_info->max_tx_queues = avp->max_tx_queues; | |
2161 | dev_info->min_rx_bufsize = AVP_MIN_RX_BUFSIZE; | |
2162 | dev_info->max_rx_pktlen = avp->max_rx_pkt_len; | |
2163 | dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS; | |
2164 | if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) { | |
2165 | dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP; | |
2166 | dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT; | |
2167 | } | |
2168 | } | |
2169 | ||
9f95a23c | 2170 | static int |
11fdf7f2 TL |
2171 | avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) |
2172 | { | |
2173 | struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); | |
9f95a23c TL |
2174 | struct rte_eth_conf *dev_conf = ð_dev->data->dev_conf; |
2175 | uint64_t offloads = dev_conf->rxmode.offloads; | |
11fdf7f2 TL |
2176 | |
2177 | if (mask & ETH_VLAN_STRIP_MASK) { | |
2178 | if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) { | |
9f95a23c | 2179 | if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP) |
11fdf7f2 TL |
2180 | avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD; |
2181 | else | |
2182 | avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD; | |
2183 | } else { | |
2184 | PMD_DRV_LOG(ERR, "VLAN strip offload not supported\n"); | |
2185 | } | |
2186 | } | |
2187 | ||
2188 | if (mask & ETH_VLAN_FILTER_MASK) { | |
9f95a23c | 2189 | if (offloads & DEV_RX_OFFLOAD_VLAN_FILTER) |
11fdf7f2 TL |
2190 | PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n"); |
2191 | } | |
2192 | ||
2193 | if (mask & ETH_VLAN_EXTEND_MASK) { | |
9f95a23c | 2194 | if (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) |
11fdf7f2 TL |
2195 | PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n"); |
2196 | } | |
9f95a23c TL |
2197 | |
2198 | return 0; | |
11fdf7f2 TL |
2199 | } |
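/*
 * avp_vlan_offload_set() simply mirrors the DEV_RX_OFFLOAD_VLAN_STRIP
 * request into the driver's negotiated feature word when the host
 * supports it.  The same flag mirroring in isolation (HYPO_VLAN_STRIP is
 * a hypothetical flag value, not a real RTE_AVP_* constant):
 */
#define HYPO_VLAN_STRIP 0x1u

static inline uint32_t
apply_vlan_strip(uint32_t features, int requested)
{
	/* set or clear the feature bit to match the requested offload */
	return requested ? (features | HYPO_VLAN_STRIP)
			 : (features & ~HYPO_VLAN_STRIP);
}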
2200 | ||
9f95a23c | 2201 | static int |
11fdf7f2 TL |
2202 | avp_dev_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats) |
2203 | { | |
2204 | struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); | |
2205 | unsigned int i; | |
2206 | ||
2207 | for (i = 0; i < avp->num_rx_queues; i++) { | |
2208 | struct avp_queue *rxq = avp->dev_data->rx_queues[i]; | |
2209 | ||
2210 | if (rxq) { | |
2211 | stats->ipackets += rxq->packets; | |
2212 | stats->ibytes += rxq->bytes; | |
2213 | stats->ierrors += rxq->errors; | |
2214 | ||
2215 | stats->q_ipackets[i] += rxq->packets; | |
2216 | stats->q_ibytes[i] += rxq->bytes; | |
2217 | stats->q_errors[i] += rxq->errors; | |
2218 | } | |
2219 | } | |
2220 | ||
2221 | for (i = 0; i < avp->num_tx_queues; i++) { | |
2222 | struct avp_queue *txq = avp->dev_data->tx_queues[i]; | |
2223 | ||
2224 | if (txq) { | |
2225 | stats->opackets += txq->packets; | |
2226 | stats->obytes += txq->bytes; | |
2227 | stats->oerrors += txq->errors; | |
2228 | ||
2229 | stats->q_opackets[i] += txq->packets; | |
2230 | stats->q_obytes[i] += txq->bytes; | |
2231 | stats->q_errors[i] += txq->errors; | |
2232 | } | |
2233 | } | |
9f95a23c TL |
2234 | |
2235 | return 0; | |
11fdf7f2 TL |
2236 | } |
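/*
 * avp_dev_stats_get() derives the device totals purely by summing the
 * per-queue software counters; nothing is read back from the host.  The
 * same aggregation in miniature (struct and field names here are
 * illustrative; the real counters live in struct avp_queue):
 */
struct q_stats {
	uint64_t packets;
	uint64_t bytes;
	uint64_t errors;
};

static void
sum_queue_stats(const struct q_stats *q, unsigned int nb_queues,
		struct q_stats *total)
{
	unsigned int i;

	for (i = 0; i < nb_queues; i++) {
		total->packets += q[i].packets;
		total->bytes += q[i].bytes;
		total->errors += q[i].errors;
	}
}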
2237 | ||
2238 | static void | |
2239 | avp_dev_stats_reset(struct rte_eth_dev *eth_dev) | |
2240 | { | |
2241 | struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); | |
2242 | unsigned int i; | |
2243 | ||
2244 | for (i = 0; i < avp->num_rx_queues; i++) { | |
2245 | struct avp_queue *rxq = avp->dev_data->rx_queues[i]; | |
2246 | ||
2247 | if (rxq) { | |
2248 | rxq->bytes = 0; | |
2249 | rxq->packets = 0; | |
2250 | rxq->errors = 0; | |
2251 | } | |
2252 | } | |
2253 | ||
2254 | for (i = 0; i < avp->num_tx_queues; i++) { | |
2255 | struct avp_queue *txq = avp->dev_data->tx_queues[i]; | |
2256 | ||
2257 | if (txq) { | |
2258 | txq->bytes = 0; | |
2259 | txq->packets = 0; | |
2260 | txq->errors = 0; | |
2261 | } | |
2262 | } | |
2263 | } | |
2264 | ||
2265 | RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd); | |
2266 | RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map); | |
9f95a23c TL |
2267 | |
2268 | RTE_INIT(avp_init_log) | |
2269 | { | |
2270 | avp_logtype_driver = rte_log_register("pmd.net.avp.driver"); | |
2271 | if (avp_logtype_driver >= 0) | |
2272 | rte_log_set_level(avp_logtype_driver, RTE_LOG_NOTICE); | |
2273 | } |
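/*
 * The PMD_DRV_LOG()/PMD_RX_LOG()/PMD_TX_LOG() macros used throughout
 * this file come from avp_logs.h and are built on the logtype registered
 * above.  A representative definition, following the usual DPDK dynamic
 * logging pattern (a sketch, not necessarily the exact macro):
 *
 * #define PMD_DRV_LOG(level, fmt, args...)			\
 *	rte_log(RTE_LOG_ ## level, avp_logtype_driver,		\
 *		"%s(): " fmt, __func__, ## args)
 */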