/*
 * Helpers for the host side of a virtio ring.
 *
 * Since these may be in userspace, we use (inline) accessors.
 */
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/kernel.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/export.h>
static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
{
        static DEFINE_RATELIMIT_STATE(vringh_rs,
                                      DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);
        if (__ratelimit(&vringh_rs)) {
                va_list ap;
                va_start(ap, fmt);
                printk(KERN_NOTICE "vringh:");
                vprintk(fmt, ap);
                va_end(ap);
        }
}
/* Returns vring->num if empty, -ve on error. */
static inline int __vringh_get_head(const struct vringh *vrh,
                                    int (*getu16)(u16 *val, const u16 *p),
                                    u16 *last_avail_idx)
{
        u16 avail_idx, i, head;
        int err;

        err = getu16(&avail_idx, &vrh->vring.avail->idx);
        if (err) {
                vringh_bad("Failed to access avail idx at %p",
                           &vrh->vring.avail->idx);
                return err;
        }

        if (*last_avail_idx == avail_idx)
                return vrh->vring.num;

        /* Only get avail ring entries after they have been exposed by guest. */
        virtio_rmb(vrh->weak_barriers);

        i = *last_avail_idx & (vrh->vring.num - 1);

        err = getu16(&head, &vrh->vring.avail->ring[i]);
        if (err) {
                vringh_bad("Failed to read head: idx %d address %p",
                           *last_avail_idx, &vrh->vring.avail->ring[i]);
                return err;
        }

        if (head >= vrh->vring.num) {
                vringh_bad("Guest says index %u > %u is available",
                           head, vrh->vring.num);
                return -EINVAL;
        }

        (*last_avail_idx)++;
        return head;
}
/* Copy some bytes to/from the iovec.  Returns num copied. */
static inline ssize_t vringh_iov_xfer(struct vringh_kiov *iov,
                                      void *ptr, size_t len,
                                      int (*xfer)(void *addr, void *ptr,
                                                  size_t len))
{
        int err, done = 0;

        while (len && iov->i < iov->used) {
                size_t partlen;

                partlen = min(iov->iov[iov->i].iov_len, len);
                err = xfer(iov->iov[iov->i].iov_base, ptr, partlen);
                if (err)
                        return err;
                done += partlen;
                len -= partlen;
                ptr += partlen;
                iov->consumed += partlen;
                iov->iov[iov->i].iov_len -= partlen;
                iov->iov[iov->i].iov_base += partlen;

                if (!iov->iov[iov->i].iov_len) {
                        /* Fix up old iov element then increment. */
                        iov->iov[iov->i].iov_len = iov->consumed;
                        iov->iov[iov->i].iov_base -= iov->consumed;

                        iov->consumed = 0;
                        iov->i++;
                }
        }
        return done;
}
/* May reduce *len if range is shorter. */
static inline bool range_check(struct vringh *vrh, u64 addr, size_t *len,
                               struct vringh_range *range,
                               bool (*getrange)(struct vringh *,
                                                u64, struct vringh_range *))
{
        if (addr < range->start || addr > range->end_incl) {
                if (!getrange(vrh, addr, range))
                        return false;
        }
        BUG_ON(addr < range->start || addr > range->end_incl);

        /* To end of memory? */
        if (unlikely(addr + *len == 0)) {
                if (range->end_incl == -1ULL)
                        return true;
                goto truncate;
        }

        /* Otherwise, don't wrap. */
        if (addr + *len < addr) {
                vringh_bad("Wrapping descriptor %zu@0x%llx",
                           *len, (unsigned long long)addr);
                return false;
        }

        if (unlikely(addr + *len - 1 > range->end_incl))
                goto truncate;

        return true;

truncate:
        *len = range->end_incl + 1 - addr;
        return true;
}
static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len,
                                  struct vringh_range *range,
                                  bool (*getrange)(struct vringh *,
                                                   u64, struct vringh_range *))
{
        return true;
}
/* No reason for this code to be inline. */
static int move_to_indirect(int *up_next, u16 *i, void *addr,
                            const struct vring_desc *desc,
                            struct vring_desc **descs, int *desc_max)
{
        /* Indirect tables can't have indirect. */
        if (*up_next != -1) {
                vringh_bad("Multilevel indirect %u->%u", *up_next, *i);
                return -EINVAL;
        }

        if (unlikely(desc->len % sizeof(struct vring_desc))) {
                vringh_bad("Strange indirect len %u", desc->len);
                return -EINVAL;
        }

        /* We will check this when we follow it! */
        if (desc->flags & VRING_DESC_F_NEXT)
                *up_next = desc->next;
        else
                *up_next = -2;
        *descs = addr;
        *desc_max = desc->len / sizeof(struct vring_desc);

        /* Now, start at the first indirect. */
        *i = 0;
        return 0;
}
static int resize_iovec(struct vringh_kiov *iov, gfp_t gfp)
{
        struct kvec *new;
        unsigned int flag, new_num = (iov->max_num & ~VRINGH_IOV_ALLOCATED) * 2;

        if (new_num < 8)
                new_num = 8;

        flag = (iov->max_num & VRINGH_IOV_ALLOCATED);
        if (flag)
                new = krealloc(iov->iov, new_num * sizeof(struct iovec), gfp);
        else {
                new = kmalloc(new_num * sizeof(struct iovec), gfp);
                if (new) {
                        memcpy(new, iov->iov,
                               iov->max_num * sizeof(struct iovec));
                        flag = VRINGH_IOV_ALLOCATED;
                }
        }
        if (!new)
                return -ENOMEM;
        iov->iov = new;
        iov->max_num = (new_num | flag);
        return 0;
}
static u16 __cold return_from_indirect(const struct vringh *vrh, int *up_next,
                                       struct vring_desc **descs, int *desc_max)
{
        u16 i = *up_next;

        *up_next = -1;
        *descs = vrh->vring.desc;
        *desc_max = vrh->vring.num;
        return i;
}
static int slow_copy(struct vringh *vrh, void *dst, const void *src,
                     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
                                    struct vringh_range *range,
                                    bool (*getrange)(struct vringh *vrh,
                                                     u64,
                                                     struct vringh_range *)),
                     bool (*getrange)(struct vringh *vrh,
                                      u64 addr,
                                      struct vringh_range *r),
                     struct vringh_range *range,
                     int (*copy)(void *dst, const void *src, size_t len))
{
        size_t part, len = sizeof(struct vring_desc);

        do {
                u64 addr;
                int err;

                part = len;
                addr = (u64)(unsigned long)src - range->offset;

                if (!rcheck(vrh, addr, &part, range, getrange))
                        return -EINVAL;

                err = copy(dst, src, part);
                if (err)
                        return err;

                dst += part;
                src += part;
                len -= part;
        } while (len);
        return 0;
}
static inline int
__vringh_iov(struct vringh *vrh, u16 i,
             struct vringh_kiov *riov,
             struct vringh_kiov *wiov,
             bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
                            struct vringh_range *range,
                            bool (*getrange)(struct vringh *, u64,
                                             struct vringh_range *)),
             bool (*getrange)(struct vringh *, u64, struct vringh_range *),
             gfp_t gfp,
             int (*copy)(void *dst, const void *src, size_t len))
{
        int err, count = 0, up_next, desc_max;
        struct vring_desc desc, *descs;
        struct vringh_range range = { -1ULL, 0 }, slowrange;
        bool slow = false;

        /* We start traversing vring's descriptor table. */
        descs = vrh->vring.desc;
        desc_max = vrh->vring.num;
        up_next = -1;

        if (riov)
                riov->i = riov->used = 0;
        else if (wiov)
                wiov->i = wiov->used = 0;
        else
                /* You must want something! */
                BUG();

        for (;;) {
                void *addr;
                struct vringh_kiov *iov;
                size_t len;

                if (unlikely(slow))
                        err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange,
                                        &slowrange, copy);
                else
                        err = copy(&desc, &descs[i], sizeof(desc));
                if (unlikely(err))
                        goto fail;

                if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
                        /* Make sure it's OK, and get offset. */
                        len = desc.len;
                        if (!rcheck(vrh, desc.addr, &len, &range, getrange)) {
                                err = -EINVAL;
                                goto fail;
                        }

                        if (unlikely(len != desc.len)) {
                                slow = true;
                                /* We need to save this range to use offset */
                                slowrange = range;
                        }

                        addr = (void *)(long)(desc.addr + range.offset);
                        err = move_to_indirect(&up_next, &i, addr, &desc,
                                               &descs, &desc_max);
                        if (err)
                                goto fail;
                        continue;
                }

                if (count++ == vrh->vring.num) {
                        vringh_bad("Descriptor loop in %p", descs);
                        err = -ELOOP;
                        goto fail;
                }

                if (desc.flags & VRING_DESC_F_WRITE)
                        iov = wiov;
                else {
                        iov = riov;
                        if (unlikely(wiov && wiov->i)) {
                                vringh_bad("Readable desc %p after writable",
                                           &descs[i]);
                                err = -EINVAL;
                                goto fail;
                        }
                }

                if (!iov) {
                        vringh_bad("Unexpected %s desc",
                                   !wiov ? "writable" : "readable");
                        err = -EPROTO;
                        goto fail;
                }

        again:
                /* Make sure it's OK, and get offset. */
                len = desc.len;
                if (!rcheck(vrh, desc.addr, &len, &range, getrange)) {
                        err = -EINVAL;
                        goto fail;
                }
                addr = (void *)(unsigned long)(desc.addr + range.offset);

                if (unlikely(iov->used == (iov->max_num & ~VRINGH_IOV_ALLOCATED))) {
                        err = resize_iovec(iov, gfp);
                        if (err)
                                goto fail;
                }

                iov->iov[iov->used].iov_base = addr;
                iov->iov[iov->used].iov_len = len;
                iov->used++;

                if (unlikely(len != desc.len)) {
                        desc.len -= len;
                        desc.addr += len;
                        goto again;
                }

                if (desc.flags & VRING_DESC_F_NEXT) {
                        i = desc.next;
                } else {
                        /* Just in case we need to finish traversing above. */
                        if (unlikely(up_next > 0)) {
                                i = return_from_indirect(vrh, &up_next,
                                                         &descs, &desc_max);
                                slow = false;
                        } else
                                break;
                }

                if (i >= desc_max) {
                        vringh_bad("Chained index %u > %u", i, desc_max);
                        err = -EINVAL;
                        goto fail;
                }
        }

        return 0;

fail:
        return err;
}
static inline int __vringh_complete(struct vringh *vrh,
                                    const struct vring_used_elem *used,
                                    unsigned int num_used,
                                    int (*putu16)(u16 *p, u16 val),
                                    int (*putused)(struct vring_used_elem *dst,
                                                   const struct vring_used_elem
                                                   *src, unsigned num))
{
        struct vring_used *used_ring;
        int err;
        u16 used_idx, off;

        used_ring = vrh->vring.used;
        used_idx = vrh->last_used_idx + vrh->completed;

        off = used_idx % vrh->vring.num;

        /* Compiler knows num_used == 1 sometimes, hence extra check */
        if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) {
                u16 part = vrh->vring.num - off;
                err = putused(&used_ring->ring[off], used, part);
                if (!err)
                        err = putused(&used_ring->ring[0], used + part,
                                      num_used - part);
        } else
                err = putused(&used_ring->ring[off], used, num_used);

        if (err) {
                vringh_bad("Failed to write %u used entries %u at %p",
                           num_used, off, &used_ring->ring[off]);
                return err;
        }

        /* Make sure buffer is written before we update index. */
        virtio_wmb(vrh->weak_barriers);

        err = putu16(&vrh->vring.used->idx, used_idx + num_used);
        if (err) {
                vringh_bad("Failed to update used index at %p",
                           &vrh->vring.used->idx);
                return err;
        }

        vrh->completed += num_used;
        return 0;
}
static inline int __vringh_need_notify(struct vringh *vrh,
                                       int (*getu16)(u16 *val, const u16 *p))
{
        bool notify;
        u16 used_event;
        int err;

        /* Flush out used index update. This is paired with the
         * barrier that the Guest executes when enabling
         * interrupts. */
        virtio_mb(vrh->weak_barriers);

        /* Old-style, without event indices. */
        if (!vrh->event_indices) {
                u16 flags;
                err = getu16(&flags, &vrh->vring.avail->flags);
                if (err) {
                        vringh_bad("Failed to get flags at %p",
                                   &vrh->vring.avail->flags);
                        return err;
                }
                return (!(flags & VRING_AVAIL_F_NO_INTERRUPT));
        }

        /* Modern: we know when other side wants to know. */
        err = getu16(&used_event, &vring_used_event(&vrh->vring));
        if (err) {
                vringh_bad("Failed to get used event idx at %p",
                           &vring_used_event(&vrh->vring));
                return err;
        }

        /* Just in case we added so many that we wrap. */
        if (unlikely(vrh->completed > 0xffff))
                notify = true;
        else
                notify = vring_need_event(used_event,
                                          vrh->last_used_idx + vrh->completed,
                                          vrh->last_used_idx);

        vrh->last_used_idx += vrh->completed;
        vrh->completed = 0;
        return notify;
}
static inline bool __vringh_notify_enable(struct vringh *vrh,
                                          int (*getu16)(u16 *val, const u16 *p),
                                          int (*putu16)(u16 *p, u16 val))
{
        u16 avail;

        if (!vrh->event_indices) {
                /* Old-school; update flags. */
                if (putu16(&vrh->vring.used->flags, 0) != 0) {
                        vringh_bad("Clearing used flags %p",
                                   &vrh->vring.used->flags);
                        return true;
                }
        } else {
                if (putu16(&vring_avail_event(&vrh->vring),
                           vrh->last_avail_idx) != 0) {
                        vringh_bad("Updating avail event index %p",
                                   &vring_avail_event(&vrh->vring));
                        return true;
                }
        }

        /* They could have slipped one in as we were doing that: make
         * sure it's written, then check again. */
        virtio_mb(vrh->weak_barriers);

        if (getu16(&avail, &vrh->vring.avail->idx) != 0) {
                vringh_bad("Failed to check avail idx at %p",
                           &vrh->vring.avail->idx);
                return true;
        }

        /* This is unlikely, so we just leave notifications enabled
         * (if we're using event_indices, we'll only get one
         * notification anyway). */
        return avail == vrh->last_avail_idx;
}
static inline void __vringh_notify_disable(struct vringh *vrh,
                                           int (*putu16)(u16 *p, u16 val))
{
        if (!vrh->event_indices) {
                /* Old-school; update flags. */
                if (putu16(&vrh->vring.used->flags, VRING_USED_F_NO_NOTIFY)) {
                        vringh_bad("Setting used flags %p",
                                   &vrh->vring.used->flags);
                }
        }
}
/* Userspace access helpers: in this case, addresses are really userspace. */
static inline int getu16_user(u16 *val, const u16 *p)
{
        return get_user(*val, (__force u16 __user *)p);
}

static inline int putu16_user(u16 *p, u16 val)
{
        return put_user(val, (__force u16 __user *)p);
}

static inline int copydesc_user(void *dst, const void *src, size_t len)
{
        return copy_from_user(dst, (__force void __user *)src, len) ?
                -EFAULT : 0;
}

static inline int putused_user(struct vring_used_elem *dst,
                               const struct vring_used_elem *src,
                               unsigned int num)
{
        return copy_to_user((__force void __user *)dst, src,
                            sizeof(*dst) * num) ? -EFAULT : 0;
}

static inline int xfer_from_user(void *src, void *dst, size_t len)
{
        return copy_from_user(dst, (__force void __user *)src, len) ?
                -EFAULT : 0;
}

static inline int xfer_to_user(void *dst, void *src, size_t len)
{
        return copy_to_user((__force void __user *)dst, src, len) ?
                -EFAULT : 0;
}
/**
 * vringh_init_user - initialize a vringh for a userspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the userspace descriptor pointer.
 * @avail: the userspace avail pointer.
 * @used: the userspace used pointer.
 *
 * Returns an error if num is invalid: you should check pointers
 * yourself!
 */
int vringh_init_user(struct vringh *vrh, u32 features,
                     unsigned int num, bool weak_barriers,
                     struct vring_desc __user *desc,
                     struct vring_avail __user *avail,
                     struct vring_used __user *used)
{
        /* Sane power of 2 please! */
        if (!num || num > 0xffff || (num & (num - 1))) {
                vringh_bad("Bad ring size %u", num);
                return -EINVAL;
        }

        vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
        vrh->weak_barriers = weak_barriers;
        vrh->completed = 0;
        vrh->last_avail_idx = 0;
        vrh->last_used_idx = 0;
        vrh->vring.num = num;
        /* vring expects kernel addresses, but only used via accessors. */
        vrh->vring.desc = (__force struct vring_desc *)desc;
        vrh->vring.avail = (__force struct vring_avail *)avail;
        vrh->vring.used = (__force struct vring_used *)used;
        return 0;
}
EXPORT_SYMBOL(vringh_init_user);
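
/*
 * Example (illustrative sketch, not part of the original file): a device
 * backend that has been handed a guest-visible ring might set up its
 * vringh like this.  The names my_dev, ring_size, desc_ptr, avail_ptr
 * and used_ptr are hypothetical.
 *
 *	err = vringh_init_user(&my_dev->vrh, my_dev->features,
 *			       ring_size, true,
 *			       (struct vring_desc __user *)desc_ptr,
 *			       (struct vring_avail __user *)avail_ptr,
 *			       (struct vring_used __user *)used_ptr);
 *	if (err)
 *		return err;
 *
 * Only num is validated here; the pointers must be checked by the
 * caller (typically via the getrange callback at getdesc time).
 */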
/**
 * vringh_getdesc_user - get next available descriptor from userspace ring.
 * @vrh: the userspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @getrange: function to call to check ranges.
 * @head: head index we received, for passing to vringh_complete_user().
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
 */
int vringh_getdesc_user(struct vringh *vrh,
                        struct vringh_iov *riov,
                        struct vringh_iov *wiov,
                        bool (*getrange)(struct vringh *vrh,
                                         u64 addr, struct vringh_range *r),
                        u16 *head)
{
        int err;

        *head = vrh->vring.num;
        err = __vringh_get_head(vrh, getu16_user, &vrh->last_avail_idx);
        if (err < 0)
                return err;

        /* Empty... */
        if (err == vrh->vring.num)
                return 0;

        /* We need the layouts to be the identical for this to work */
        BUILD_BUG_ON(sizeof(struct vringh_kiov) != sizeof(struct vringh_iov));
        BUILD_BUG_ON(offsetof(struct vringh_kiov, iov) !=
                     offsetof(struct vringh_iov, iov));
        BUILD_BUG_ON(offsetof(struct vringh_kiov, i) !=
                     offsetof(struct vringh_iov, i));
        BUILD_BUG_ON(offsetof(struct vringh_kiov, used) !=
                     offsetof(struct vringh_iov, used));
        BUILD_BUG_ON(offsetof(struct vringh_kiov, max_num) !=
                     offsetof(struct vringh_iov, max_num));
        BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
        BUILD_BUG_ON(offsetof(struct iovec, iov_base) !=
                     offsetof(struct kvec, iov_base));
        BUILD_BUG_ON(offsetof(struct iovec, iov_len) !=
                     offsetof(struct kvec, iov_len));
        BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_base)
                     != sizeof(((struct kvec *)NULL)->iov_base));
        BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_len)
                     != sizeof(((struct kvec *)NULL)->iov_len));

        *head = err;
        err = __vringh_iov(vrh, *head, (struct vringh_kiov *)riov,
                           (struct vringh_kiov *)wiov,
                           range_check, getrange, GFP_KERNEL, copydesc_user);
        if (err)
                return err;

        return 1;
}
EXPORT_SYMBOL(vringh_getdesc_user);
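
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * request-service loop built on the calls above.  my_getrange and
 * handle_request() are hypothetical, and this assumes the vringh_iov_init()
 * helper from linux/vringh.h for wrapping a stack-allocated iovec array.
 *
 *	struct iovec rstack[8], wstack[8];
 *	struct vringh_iov riov, wiov;
 *	u16 head;
 *	int err;
 *
 *	vringh_iov_init(&riov, rstack, ARRAY_SIZE(rstack));
 *	vringh_iov_init(&wiov, wstack, ARRAY_SIZE(wstack));
 *
 *	for (;;) {
 *		err = vringh_getdesc_user(&vrh, &riov, &wiov, my_getrange,
 *					  &head);
 *		if (err <= 0)
 *			break;	(0: ring empty, <0: error)
 *		err = handle_request(&riov, &wiov, head);
 *	}
 *
 * Each handled request would be published with vringh_complete_user(),
 * and riov/wiov may need cleanup even when an error is returned.
 */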
/**
 * vringh_iov_pull_user - copy bytes from vring_iov.
 * @riov: the riov as passed to vringh_getdesc_user() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len)
{
        return vringh_iov_xfer((struct vringh_kiov *)riov,
                               dst, len, xfer_from_user);
}
EXPORT_SYMBOL(vringh_iov_pull_user);
/**
 * vringh_iov_push_user - copy bytes into vring_iov.
 * @wiov: the wiov as passed to vringh_getdesc_user() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
                             const void *src, size_t len)
{
        return vringh_iov_xfer((struct vringh_kiov *)wiov,
                               (void *)src, len, xfer_to_user);
}
EXPORT_SYMBOL(vringh_iov_push_user);
/**
 * vringh_abandon_user - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (ie. num
 *	 vringh_getdesc_user() calls to undo).
 *
 * The next vringh_getdesc_user() will return the old descriptor(s) again.
 */
void vringh_abandon_user(struct vringh *vrh, unsigned int num)
{
        /* We only update vring_avail_event(vr) when we want to be notified,
         * so we haven't changed that yet. */
        vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_user);
/**
 * vringh_complete_user - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_user.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */
int vringh_complete_user(struct vringh *vrh, u16 head, u32 len)
{
        struct vring_used_elem used;

        used.id = head;
        used.len = len;
        return __vringh_complete(vrh, &used, 1, putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_user);
/**
 * vringh_complete_multi_user - we've finished with many descriptors.
 * @vrh: the vring.
 * @used: the head, length pairs.
 * @num_used: the number of used elements.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */
int vringh_complete_multi_user(struct vringh *vrh,
                               const struct vring_used_elem used[],
                               unsigned num_used)
{
        return __vringh_complete(vrh, used, num_used,
                                 putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_multi_user);
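
/*
 * Example (illustrative sketch, not part of the original file): batching
 * several completions into one used-ring update.  head0/len0 and
 * head1/len1 are hypothetical values saved from earlier
 * vringh_getdesc_user() calls.
 *
 *	struct vring_used_elem batch[2] = {
 *		{ .id = head0, .len = len0 },
 *		{ .id = head1, .len = len1 },
 *	};
 *
 *	err = vringh_complete_multi_user(&vrh, batch, ARRAY_SIZE(batch));
 *
 * This writes the used entries in at most two copies (one if they don't
 * wrap) and bumps used->idx once, which is cheaper than two separate
 * vringh_complete_user() calls.
 */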
/**
 * vringh_notify_enable_user - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_user(struct vringh *vrh)
{
        return __vringh_notify_enable(vrh, getu16_user, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_enable_user);
/**
 * vringh_notify_disable_user - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_user(struct vringh *vrh)
{
        __vringh_notify_disable(vrh, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_disable_user);
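
/*
 * Example (illustrative sketch, not part of the original file): the usual
 * run/sleep pattern.  Notifications stay disabled while we actively poll;
 * we re-enable them just before sleeping, and a false return tells us
 * buffers slipped in meanwhile, so we service the ring again instead of
 * sleeping.  service_ring() and wait_for_guest_kick() are hypothetical.
 *
 *	for (;;) {
 *		vringh_notify_disable_user(&vrh);
 *		service_ring(&vrh);
 *		if (vringh_notify_enable_user(&vrh))
 *			wait_for_guest_kick();
 *	}
 */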
/**
 * vringh_need_notify_user - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_user() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_user(struct vringh *vrh)
{
        return __vringh_need_notify(vrh, getu16_user);
}
EXPORT_SYMBOL(vringh_need_notify_user);
/* Kernelspace access helpers. */
static inline int getu16_kern(u16 *val, const u16 *p)
{
        *val = ACCESS_ONCE(*p);
        return 0;
}

static inline int putu16_kern(u16 *p, u16 val)
{
        ACCESS_ONCE(*p) = val;
        return 0;
}

static inline int copydesc_kern(void *dst, const void *src, size_t len)
{
        memcpy(dst, src, len);
        return 0;
}

static inline int putused_kern(struct vring_used_elem *dst,
                               const struct vring_used_elem *src,
                               unsigned int num)
{
        memcpy(dst, src, num * sizeof(*dst));
        return 0;
}

static inline int xfer_kern(void *src, void *dst, size_t len)
{
        memcpy(dst, src, len);
        return 0;
}
/**
 * vringh_init_kern - initialize a vringh for a kernelspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the kernelspace descriptor pointer.
 * @avail: the kernelspace avail pointer.
 * @used: the kernelspace used pointer.
 *
 * Returns an error if num is invalid.
 */
int vringh_init_kern(struct vringh *vrh, u32 features,
                     unsigned int num, bool weak_barriers,
                     struct vring_desc *desc,
                     struct vring_avail *avail,
                     struct vring_used *used)
{
        /* Sane power of 2 please! */
        if (!num || num > 0xffff || (num & (num - 1))) {
                vringh_bad("Bad ring size %u", num);
                return -EINVAL;
        }

        vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
        vrh->weak_barriers = weak_barriers;
        vrh->completed = 0;
        vrh->last_avail_idx = 0;
        vrh->last_used_idx = 0;
        vrh->vring.num = num;
        vrh->vring.desc = desc;
        vrh->vring.avail = avail;
        vrh->vring.used = used;
        return 0;
}
EXPORT_SYMBOL(vringh_init_kern);
/**
 * vringh_getdesc_kern - get next available descriptor from kernelspace ring.
 * @vrh: the kernelspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @head: head index we received, for passing to vringh_complete_kern().
 * @gfp: flags for allocating larger riov/wiov.
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
 */
int vringh_getdesc_kern(struct vringh *vrh,
                        struct vringh_kiov *riov,
                        struct vringh_kiov *wiov,
                        u16 *head,
                        gfp_t gfp)
{
        int err;

        err = __vringh_get_head(vrh, getu16_kern, &vrh->last_avail_idx);
        if (err < 0)
                return err;

        /* Empty... */
        if (err == vrh->vring.num)
                return 0;

        *head = err;
        err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
                           gfp, copydesc_kern);
        if (err)
                return err;

        return 1;
}
EXPORT_SYMBOL(vringh_getdesc_kern);
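
/*
 * Example (illustrative sketch, not part of the original file): kernelspace
 * usage.  No getrange callback is needed here since descriptor addresses
 * are directly dereferenceable.  This assumes the vringh_kiov_init()
 * helper from linux/vringh.h; buf and reply_len are hypothetical.
 *
 *	struct kvec rstack[4], wstack[4];
 *	struct vringh_kiov riov, wiov;
 *	u16 head;
 *	int err;
 *
 *	vringh_kiov_init(&riov, rstack, ARRAY_SIZE(rstack));
 *	vringh_kiov_init(&wiov, wstack, ARRAY_SIZE(wstack));
 *
 *	err = vringh_getdesc_kern(&vrh, &riov, &wiov, &head, GFP_KERNEL);
 *	if (err == 1) {
 *		vringh_iov_pull_kern(&riov, buf, sizeof(buf));
 *		... process buf, write reply via vringh_iov_push_kern() ...
 *		vringh_complete_kern(&vrh, head, reply_len);
 *	}
 */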
/**
 * vringh_iov_pull_kern - copy bytes from vring_iov.
 * @riov: the riov as passed to vringh_getdesc_kern() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len)
{
        return vringh_iov_xfer(riov, dst, len, xfer_kern);
}
EXPORT_SYMBOL(vringh_iov_pull_kern);
/**
 * vringh_iov_push_kern - copy bytes into vring_iov.
 * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
                             const void *src, size_t len)
{
        return vringh_iov_xfer(wiov, (void *)src, len, xfer_kern);
}
EXPORT_SYMBOL(vringh_iov_push_kern);
/**
 * vringh_abandon_kern - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (ie. num
 *	 vringh_getdesc_kern() calls to undo).
 *
 * The next vringh_getdesc_kern() will return the old descriptor(s) again.
 */
void vringh_abandon_kern(struct vringh *vrh, unsigned int num)
{
        /* We only update vring_avail_event(vr) when we want to be notified,
         * so we haven't changed that yet. */
        vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_kern);
/**
 * vringh_complete_kern - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_kern.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_kern() after one or more calls
 * to this function.
 */
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)
{
        struct vring_used_elem used;

        used.id = head;
        used.len = len;
        return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern);
}
EXPORT_SYMBOL(vringh_complete_kern);
/**
 * vringh_notify_enable_kern - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_kern(struct vringh *vrh)
{
        return __vringh_notify_enable(vrh, getu16_kern, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_enable_kern);
/**
 * vringh_notify_disable_kern - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_kern(struct vringh *vrh)
{
        __vringh_notify_disable(vrh, putu16_kern);
}
EXPORT_SYMBOL(vringh_notify_disable_kern);
/**
 * vringh_need_notify_kern - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_kern() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_kern(struct vringh *vrh)
{
        return __vringh_need_notify(vrh, getu16_kern);
}
EXPORT_SYMBOL(vringh_need_notify_kern);