/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */
#ifndef _VHOST_NET_CDEV_H_
#define _VHOST_NET_CDEV_H_
#include <stdint.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <sys/socket.h>
#include <linux/if.h>

#include <rte_log.h>
#include <rte_ether.h>
#include <rte_rwlock.h>
#include <rte_malloc.h>

#include "rte_vhost.h"
/* Used to indicate that the device is running on a data core */
#define VIRTIO_DEV_RUNNING 1
/* Used to indicate that the device is ready to operate */
#define VIRTIO_DEV_READY 2
/* Used to indicate that the built-in vhost net device backend is enabled */
#define VIRTIO_DEV_BUILTIN_VIRTIO_NET 4
/* Used to indicate that the device has its own data path and is configured */
#define VIRTIO_DEV_VDPA_CONFIGURED 8

/* Backend value set by guest. */
#define VIRTIO_DEV_STOPPED -1

#define BUF_VECTOR_MAX 256

#define VHOST_LOG_CACHE_NR 32
/**
 * Structure that contains a buffer address, length and descriptor index
 * from the vring, used for scatter RX.
 */
/**
 * A structure to hold some fields needed in the zero copy code path,
 * mainly for associating an mbuf with the right desc_idx.
 */
struct zcopy_mbuf {
	struct rte_mbuf *mbuf;

	TAILQ_ENTRY(zcopy_mbuf) next;
};
TAILQ_HEAD(zcopy_mbuf_list, zcopy_mbuf);
/**
 * Structure that contains the info for each batched memory copy.
 */
struct batch_copy_elem {

/**
 * Structure that contains the info for batched dirty logging.
 */
struct log_cache_entry {
	uint32_t offset;
	unsigned long val;
};
struct vring_used_elem_packed {
/**
 * Structure that contains variables relevant to RX/TX virtqueues.
 */
struct vhost_virtqueue {
	struct vring_desc	*desc;
	struct vring_packed_desc	*desc_packed;
	struct vring_avail	*avail;
	struct vring_packed_desc_event	*driver_event;
	struct vring_used	*used;
	struct vring_packed_desc_event	*device_event;

	uint16_t	last_avail_idx;
	uint16_t	last_used_idx;
	/* Last used index we notified to the front end. */
	uint16_t	signalled_used;
	bool		signalled_used_valid;
#define VIRTIO_INVALID_EVENTFD		(-1)
#define VIRTIO_UNINITIALIZED_EVENTFD	(-2)

	/* Backend value to determine whether the device should be started or stopped */

	rte_spinlock_t	access_lock;

	/* Used to notify the guest (trigger interrupt) */
	int		callfd;
	/* Currently unused as polling mode is enabled */

	/* Physical address of the used ring, for logging */
	uint64_t	log_guest_addr;

	uint16_t	last_zmbuf_idx;
	struct zcopy_mbuf	*zmbufs;
	struct zcopy_mbuf_list	zmbuf_list;

	struct vring_used_elem	*shadow_used_split;
	struct vring_used_elem_packed	*shadow_used_packed;
	uint16_t	shadow_used_idx;
	struct vhost_vring_addr	ring_addrs;

	struct batch_copy_elem	*batch_copy_elems;
	uint16_t	batch_copy_nb_elems;
	bool		used_wrap_counter;
	bool		avail_wrap_counter;

	struct log_cache_entry	log_cache[VHOST_LOG_CACHE_NR];
	uint16_t	log_cache_nb_elem;

	rte_rwlock_t	iotlb_lock;
	rte_rwlock_t	iotlb_pending_lock;
	struct rte_mempool	*iotlb_pool;
	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
} __rte_cache_aligned;
/* Old kernels have no such macros defined */
#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
#define VIRTIO_NET_F_GUEST_ANNOUNCE 21
#endif

#ifndef VIRTIO_NET_F_MQ
#define VIRTIO_NET_F_MQ 22
#endif

#define VHOST_MAX_VRING		0x100
#define VHOST_MAX_QUEUE_PAIRS	0x80

#ifndef VIRTIO_NET_F_MTU
#define VIRTIO_NET_F_MTU 3
#endif

#ifndef VIRTIO_F_ANY_LAYOUT
#define VIRTIO_F_ANY_LAYOUT 27
#endif
/* Declare IOMMU related bits for older kernels */
#ifndef VIRTIO_F_IOMMU_PLATFORM

#define VIRTIO_F_IOMMU_PLATFORM 33

struct vhost_iotlb_msg {
#define VHOST_ACCESS_RO      0x1
#define VHOST_ACCESS_WO      0x2
#define VHOST_ACCESS_RW      0x3
#define VHOST_IOTLB_MISS           1
#define VHOST_IOTLB_UPDATE         2
#define VHOST_IOTLB_INVALIDATE     3
#define VHOST_IOTLB_ACCESS_FAIL    4

#define VHOST_IOTLB_MSG 0x1

	struct vhost_iotlb_msg iotlb;
/*
 * Define virtio 1.0 for older kernels
 */
#ifndef VIRTIO_F_VERSION_1
#define VIRTIO_F_VERSION_1 32
#endif

/* Declare packed ring related bits for older kernels */
#ifndef VIRTIO_F_RING_PACKED

#define VIRTIO_F_RING_PACKED 34

struct vring_packed_desc {

struct vring_packed_desc_event {
	uint16_t off_wrap;
	uint16_t flags;
};
/*
 * Declare the packed ring defines below unconditionally,
 * as the kernel header might use different names.
 */
#define VRING_DESC_F_AVAIL	(1ULL << 7)
#define VRING_DESC_F_USED	(1ULL << 15)

#define VRING_EVENT_F_ENABLE 0x0
#define VRING_EVENT_F_DISABLE 0x1
#define VRING_EVENT_F_DESC 0x2

/*
 * Available and used descriptors are in the same order.
 */
#ifndef VIRTIO_F_IN_ORDER
#define VIRTIO_F_IN_ORDER 35
#endif
/* Features supported by this builtin vhost-user net driver. */
#define VIRTIO_NET_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
				(1ULL << VIRTIO_F_ANY_LAYOUT) | \
				(1ULL << VIRTIO_NET_F_CTRL_VQ) | \
				(1ULL << VIRTIO_NET_F_CTRL_RX) | \
				(1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
				(1ULL << VIRTIO_NET_F_MQ) | \
				(1ULL << VIRTIO_F_VERSION_1) | \
				(1ULL << VHOST_F_LOG_ALL) | \
				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
				(1ULL << VIRTIO_NET_F_GSO) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO4) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO6) | \
				(1ULL << VIRTIO_NET_F_HOST_UFO) | \
				(1ULL << VIRTIO_NET_F_HOST_ECN) | \
				(1ULL << VIRTIO_NET_F_CSUM) | \
				(1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				(1ULL << VIRTIO_NET_F_GUEST_UFO) | \
				(1ULL << VIRTIO_NET_F_GUEST_ECN) | \
				(1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
				(1ULL << VIRTIO_RING_F_EVENT_IDX) | \
				(1ULL << VIRTIO_NET_F_MTU) | \
				(1ULL << VIRTIO_F_IN_ORDER) | \
				(1ULL << VIRTIO_F_IOMMU_PLATFORM) | \
				(1ULL << VIRTIO_F_RING_PACKED))
struct guest_page {
	uint64_t guest_phys_addr;
	uint64_t host_phys_addr;
	uint64_t size;
};
/**
 * Device structure contains all configuration information relating
 * to the device.
 */
struct virtio_net {
	/* Frontend (QEMU) memory and memory region information */
	struct rte_vhost_memory	*mem;
	uint64_t		protocol_features;

	/* Used to tell whether a broadcast RARP packet needs to be sent */
	rte_atomic16_t		broadcast_rarp;
	int			dequeue_zero_copy;
	struct vhost_virtqueue	*virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
	char			ifname[IF_NAME_SZ];
	struct ether_addr	mac;

	struct vhost_device_ops const *notify_ops;

	uint32_t		nr_guest_pages;
	uint32_t		max_guest_pages;
	struct guest_page	*guest_pages;

	rte_spinlock_t		slave_req_lock;

	int			postcopy_listening;

	/*
	 * Device id to identify a specific backend device.
	 * It's set to -1 for the default software implementation.
	 */

	/* Context data for the external message handlers */

	/* Pre and post vhost-user message handlers for the device */
	struct rte_vhost_user_extern_ops extern_ops;
} __rte_cache_aligned;
static __rte_always_inline bool
vq_is_packed(struct virtio_net *dev)
{
	return dev->features & (1ull << VIRTIO_F_RING_PACKED);
}
static inline bool
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
	uint16_t flags = *((volatile uint16_t *) &desc->flags);

	return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
		wrap_counter != !!(flags & VRING_DESC_F_USED);
}
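
/*
 * Illustrative example: with wrap_counter == 1, a descriptor is available to
 * the device when the driver has set F_AVAIL but F_USED is still clear
 * (AVAIL = 1, USED = 0). After the ring wraps, wrap_counter flips to 0 and
 * the same "available, not yet used" state is encoded as AVAIL = 0, USED = 1.
 */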
#define VHOST_LOG_PAGE	4096

/*
 * Atomically set a bit in memory.
 */
static __rte_always_inline void
vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
{
#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
	/*
	 * The '__sync_' builtins are deprecated, but the '__atomic_' ones
	 * are not well optimized in older GCC versions.
	 */
	__sync_fetch_and_or_1(addr, (1U << nr));
#else
	__atomic_fetch_or(addr, (1U << nr), __ATOMIC_RELAXED);
#endif
}
static __rte_always_inline void
vhost_log_page(uint8_t *log_base, uint64_t page)
{
	vhost_set_bit(page % 8, &log_base[page / 8]);
}
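
/*
 * Worked example (illustrative): guest physical address 0x12345678 falls in
 * dirty page 0x12345678 / 4096 = 0x12345; that page is recorded by setting
 * bit 0x12345 % 8 = 5 of byte 0x12345 / 8 = 0x2468 in the log bitmap.
 */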
static __rte_always_inline void
vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
{
	uint64_t page;

	if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
		   !dev->log_base || !len))
		return;

	if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
		return;

	/* To make sure guest memory updates are committed before logging */
	rte_smp_wmb();

	page = addr / VHOST_LOG_PAGE;
	while (page * VHOST_LOG_PAGE < addr + len) {
		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
		page += 1;
	}
}
static __rte_always_inline void
vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	unsigned long *log_base;
	int i;

	if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
		   !dev->log_base))
		return;

	log_base = (unsigned long *)(uintptr_t)dev->log_base;

	for (i = 0; i < vq->log_cache_nb_elem; i++) {
		struct log_cache_entry *elem = vq->log_cache + i;

#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
		/*
		 * The '__sync_' builtins are deprecated, but the '__atomic_'
		 * ones are not well optimized in older GCC versions.
		 */
		__sync_fetch_and_or(log_base + elem->offset, elem->val);
#else
		__atomic_fetch_or(log_base + elem->offset, elem->val,
				__ATOMIC_RELAXED);
#endif
	}

	vq->log_cache_nb_elem = 0;
}
static __rte_always_inline void
vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t page)
{
	uint32_t bit_nr = page % (sizeof(unsigned long) << 3);
	uint32_t offset = page / (sizeof(unsigned long) << 3);
	int i;

	for (i = 0; i < vq->log_cache_nb_elem; i++) {
		struct log_cache_entry *elem = vq->log_cache + i;

		if (elem->offset == offset) {
			elem->val |= (1UL << bit_nr);
			return;
		}
	}

	if (unlikely(i >= VHOST_LOG_CACHE_NR)) {
		/*
		 * No more room for a new log cache entry,
		 * so write the dirty log map directly.
		 */
		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);

		return;
	}

	vq->log_cache[i].offset = offset;
	vq->log_cache[i].val = (1UL << bit_nr);
	vq->log_cache_nb_elem++;
}
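
/*
 * Worked example (illustrative): with 4 KiB pages and 64-bit longs, dirty
 * page number 0x12345 is cached as offset 0x12345 / 64 = 0x48d with
 * val = 1UL << (0x12345 % 64) = 1UL << 5; later writes to the same group of
 * 64 pages only OR more bits into that cached entry until the cache is
 * flushed by vhost_log_cache_sync().
 */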
static __rte_always_inline void
vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t addr, uint64_t len)
{
	uint64_t page;

	if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
		   !dev->log_base || !len))
		return;

	if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
		return;

	page = addr / VHOST_LOG_PAGE;
	while (page * VHOST_LOG_PAGE < addr + len) {
		vhost_log_cache_page(dev, vq, page);
		page += 1;
	}
}
static __rte_always_inline void
vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t offset, uint64_t len)
{
	vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset, len);
}

static __rte_always_inline void
vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t offset, uint64_t len)
{
	vhost_log_write(dev, vq->log_guest_addr + offset, len);
}
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_VHOST_DATA   RTE_LOGTYPE_USER1

#ifdef RTE_LIBRTE_VHOST_DEBUG
#define VHOST_MAX_PRINT_BUFF 6072
#define VHOST_LOG_DEBUG(log_type, fmt, args...) \
	RTE_LOG(DEBUG, log_type, fmt, ##args)
#define PRINT_PACKET(device, addr, size, header) do { \
	char *pkt_addr = (char *)(addr); \
	unsigned int index; \
	char packet[VHOST_MAX_PRINT_BUFF]; \
	\
	if ((header)) \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Header size %d: ", (device->vid), (size)); \
	else \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Packet size %d: ", (device->vid), (size)); \
	for (index = 0; index < (size); index++) { \
		snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), \
			"%02hhx ", pkt_addr[index]); \
	} \
	snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \
	\
	VHOST_LOG_DEBUG(VHOST_DATA, "%s", packet); \
} while (0)
#else
#define VHOST_LOG_DEBUG(log_type, fmt, args...) do {} while (0)
#define PRINT_PACKET(device, addr, size, header) do {} while (0)
#endif
extern uint64_t VHOST_FEATURES;
#define MAX_VHOST_DEVICE	1024
extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
/* Convert guest physical address to host physical address */
static __rte_always_inline rte_iova_t
gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
{
	uint32_t i;
	struct guest_page *page;

	for (i = 0; i < dev->nr_guest_pages; i++) {
		page = &dev->guest_pages[i];

		if (gpa >= page->guest_phys_addr &&
		    gpa + size < page->guest_phys_addr + page->size) {
			return gpa - page->guest_phys_addr +
			       page->host_phys_addr;
		}
	}

	return 0;
}
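
/*
 * Illustrative example: for a guest page with guest_phys_addr = 0x100000,
 * size = 0x200000 and host_phys_addr = 0x40000000, looking up gpa = 0x180000
 * returns 0x180000 - 0x100000 + 0x40000000 = 0x40080000. A gpa that is not
 * covered by any registered guest page yields 0.
 */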
static __rte_always_inline struct virtio_net *
get_device(int vid)
{
	struct virtio_net *dev = vhost_devices[vid];

	if (unlikely(!dev)) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%d) device not found.\n", vid);
	}

	return dev;
}
int vhost_new_device(void);
void cleanup_device(struct virtio_net *dev, int destroy);
void reset_device(struct virtio_net *dev);
void vhost_destroy_device(int);
void vhost_destroy_device_notify(struct virtio_net *dev);

void cleanup_vq(struct vhost_virtqueue *vq, int destroy);
void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq);

int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);

void vhost_attach_vdpa_device(int vid, int did);

void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
void vhost_enable_dequeue_zero_copy(int vid);
void vhost_set_builtin_virtio_net(int vid, bool enable);

struct vhost_device_ops const *vhost_driver_callback_get(const char *path);

/*
 * Backend-specific cleanup.
 *
 * TODO: fix it; we have one backend now
 */
void vhost_backend_cleanup(struct virtio_net *dev);

uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t iova, uint64_t *len, uint8_t perm);
int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);
static __rte_always_inline uint64_t
vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t iova, uint64_t *len, uint8_t perm)
{
	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
		return rte_vhost_va_from_guest_pa(dev->mem, iova, len);

	return __vhost_iova_to_vva(dev, vq, iova, len, perm);
}
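
/*
 * Note: when VIRTIO_F_IOMMU_PLATFORM has not been negotiated, the addresses
 * found in the ring are guest physical addresses and can be translated
 * directly through the guest memory table; otherwise they are I/O virtual
 * addresses and must be resolved through the per-virtqueue IOTLB.
 */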
#define vhost_avail_event(vr) \
	(*(volatile uint16_t*)&(vr)->used->ring[(vr)->size])
#define vhost_used_event(vr) \
	(*(volatile uint16_t*)&(vr)->avail->ring[(vr)->size])
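
/*
 * With VIRTIO_RING_F_EVENT_IDX, the slot just past the last element of the
 * used ring holds avail_event (written by the device) and the slot just past
 * the last element of the avail ring holds used_event (written by the
 * driver); those are the two fields the macros above dereference.
 */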
/*
 * The following is used with VIRTIO_RING_F_EVENT_IDX.
 * Assuming a given event_idx value from the other side, if we have
 * just incremented index from old to new_idx, should we trigger an
 * event?
 */
static __rte_always_inline int
vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}
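
/*
 * Worked example (illustrative): with old = 10 and new_idx = 15, the
 * condition holds exactly for event_idx values 10..14, i.e. an event is
 * triggered only when this update crossed the index the other side asked to
 * be notified about. The uint16_t casts keep the comparison correct across
 * index wrap-around.
 */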
static __rte_always_inline void
vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	/* Flush used->idx update before we read avail->flags. */
	rte_smp_mb();

	/* Don't kick the guest if we haven't reached the index specified by the guest. */
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
		uint16_t old = vq->signalled_used;
		uint16_t new = vq->last_used_idx;
		bool signalled_used_valid = vq->signalled_used_valid;

		vq->signalled_used = new;
		vq->signalled_used_valid = true;

		VHOST_LOG_DEBUG(VHOST_DATA, "%s: used_event_idx=%d, old=%d, new=%d\n",
			__func__,
			vhost_used_event(vq),
			old, new);

		if ((vhost_need_event(vhost_used_event(vq), new, old) &&
					(vq->callfd >= 0)) ||
				unlikely(!signalled_used_valid))
			eventfd_write(vq->callfd, (eventfd_t) 1);
	} else {
		/* Kick the guest if necessary. */
		if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
				&& (vq->callfd >= 0))
			eventfd_write(vq->callfd, (eventfd_t)1);
	}
}
static __rte_always_inline void
vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint16_t old, new, off, off_wrap;
	bool signalled_used_valid, kick = false;

	/* Flush used desc update. */
	rte_smp_mb();

	if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
		if (vq->driver_event->flags !=
				VRING_EVENT_F_DISABLE)
			kick = true;
		goto kick;
	}

	old = vq->signalled_used;
	new = vq->last_used_idx;
	vq->signalled_used = new;
	signalled_used_valid = vq->signalled_used_valid;
	vq->signalled_used_valid = true;

	if (vq->driver_event->flags != VRING_EVENT_F_DESC) {
		if (vq->driver_event->flags != VRING_EVENT_F_DISABLE)
			kick = true;
		goto kick;
	}

	if (unlikely(!signalled_used_valid)) {
		kick = true;
		goto kick;
	}

	off_wrap = vq->driver_event->off_wrap;
	off = off_wrap & ~(1 << 15);

	if (vq->used_wrap_counter != off_wrap >> 15)
		off -= vq->size;

	if (vhost_need_event(off, new, old))
		kick = true;
kick:
	if (kick)
		eventfd_write(vq->callfd, (eventfd_t)1);
}
static __rte_always_inline void *
alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t desc_addr, uint64_t desc_len)
{
	void *idesc;
	uint64_t src, dst;
	uint64_t len, remain = desc_len;

	idesc = rte_malloc(__func__, desc_len, 0);
	if (unlikely(!idesc))
		return NULL;

	dst = (uint64_t)(uintptr_t)idesc;

	while (remain) {
		len = remain;
		src = vhost_iova_to_vva(dev, vq, desc_addr, &len,
				VHOST_ACCESS_RO);
		if (unlikely(!src || !len)) {
			rte_free(idesc);
			return NULL;
		}

		rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, len);

		remain -= len;
		dst += len;
		desc_addr += len;
	}

	return idesc;
}
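
/*
 * Rationale (illustrative): an indirect descriptor table referenced by the
 * guest may span host regions that are not virtually contiguous, so it is
 * pulled in chunk by chunk (each chunk bounded by how much
 * vhost_iova_to_vva() can translate contiguously) into one malloc'd buffer
 * before it is parsed.
 */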
static __rte_always_inline void
free_ind_table(void *idesc)
{
	rte_free(idesc);
}
static __rte_always_inline void
restore_mbuf(struct rte_mbuf *m)
{
	uint32_t mbuf_size, priv_size;

	priv_size = rte_pktmbuf_priv_size(m->pool);
	mbuf_size = sizeof(struct rte_mbuf) + priv_size;

	/* The start of the buffer is right after the mbuf structure and priv data */
	m->buf_addr = (char *)m + mbuf_size;
	m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
}
static __rte_always_inline bool
mbuf_is_consumed(struct rte_mbuf *m)
{
	if (rte_mbuf_refcnt_read(m) > 1)
		return false;

	return true;
}
static __rte_always_inline void
put_zmbuf(struct zcopy_mbuf *zmbuf)

#endif /* _VHOST_NET_CDEV_H_ */