/*
 * liburing.h - library interface to io_uring
 */
1 | #ifndef LIB_URING_H |
2 | #define LIB_URING_H | |
3 | ||
004d564f JA |
4 | #ifdef __cplusplus |
5 | extern "C" { | |
6 | #endif | |
7 | ||
21b4aa5d JA |
8 | #include <sys/uio.h> |
9 | #include <signal.h> | |
10 | #include <string.h> | |
11 | #include "../../include/uapi/linux/io_uring.h" | |
004d564f JA |
12 | #include <inttypes.h> |
13 | #include "barrier.h" | |
21b4aa5d JA |
14 | |
15 | /* | |
16 | * Library interface to io_uring | |
17 | */ | |
/*
 * Application-side view of the submission queue (SQ) ring.
 * The k-prefixed members point into the ring memory mapped from the
 * kernel (see io_uring_queue_mmap()); the values behind them are
 * shared with the kernel.
 */
struct io_uring_sq {
	unsigned *khead;		/* shared ring head index */
	unsigned *ktail;		/* shared ring tail index */
	unsigned *kring_mask;		/* mask for wrapping ring indices */
	unsigned *kring_entries;	/* number of entries in the ring */
	unsigned *kflags;		/* shared ring flags */
	unsigned *kdropped;		/* shared dropped-submission count */
	unsigned *array;		/* indirection array into sqes[] */
	struct io_uring_sqe *sqes;	/* the sqe array itself */

	/* app-local head/tail tracking sqes handed out vs submitted */
	unsigned sqe_head;
	unsigned sqe_tail;

	size_t ring_sz;			/* size of the mmap'ed ring region */
};
33 | ||
/*
 * Application-side view of the completion queue (CQ) ring.
 * Like io_uring_sq, the k-prefixed members point into kernel-shared
 * ring memory.
 */
struct io_uring_cq {
	unsigned *khead;		/* shared ring head index */
	unsigned *ktail;		/* shared ring tail index */
	unsigned *kring_mask;		/* mask for wrapping ring indices */
	unsigned *kring_entries;	/* number of entries in the ring */
	unsigned *koverflow;		/* shared overflow count */
	struct io_uring_cqe *cqes;	/* the cqe array */

	size_t ring_sz;			/* size of the mmap'ed ring region */
};
44 | ||
/*
 * One io_uring instance: the SQ and CQ ring views plus the fd returned
 * by io_uring_setup(), which all subsequent calls operate on.
 */
struct io_uring {
	struct io_uring_sq sq;
	struct io_uring_cq cq;
	int ring_fd;
};
50 | ||
/*
 * System calls - thin wrappers around the raw io_uring syscalls.
 */
extern int io_uring_setup(unsigned entries, struct io_uring_params *p);
extern int io_uring_enter(int fd, unsigned to_submit,
	unsigned min_complete, unsigned flags, sigset_t *sig);
extern int io_uring_register(int fd, unsigned int opcode, void *arg,
	unsigned int nr_args);

/*
 * Library interface - higher-level helpers for setting up a ring,
 * submitting requests, and reaping completions.
 */
/* set up a ring of 'entries' entries; fills in 'ring' on success */
extern int io_uring_queue_init(unsigned entries, struct io_uring *ring,
	unsigned flags);
/* mmap an already-set-up ring fd into 'ring' */
extern int io_uring_queue_mmap(int fd, struct io_uring_params *p,
	struct io_uring *ring);
extern void io_uring_queue_exit(struct io_uring *ring);
/* non-blocking check for a completion */
extern int io_uring_peek_cqe(struct io_uring *ring,
	struct io_uring_cqe **cqe_ptr);
/* blocking wait for a completion */
extern int io_uring_wait_cqe(struct io_uring *ring,
	struct io_uring_cqe **cqe_ptr);
extern int io_uring_submit(struct io_uring *ring);
extern struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring);
74 | ||
/*
 * Must be called after io_uring_{peek,wait}_cqe() after the cqe has
 * been processed by the application. Advancing the CQ head hands the
 * cqe slot back to the kernel for reuse.
 *
 * A NULL cqe is a no-op, so this is safe to call unconditionally after
 * a peek that found nothing.
 */
static inline void io_uring_cqe_seen(struct io_uring *ring,
				     struct io_uring_cqe *cqe)
{
	if (cqe) {
		struct io_uring_cq *cq = &ring->cq;

		(*cq->khead)++;
		/*
		 * Ensure that the kernel sees our new head, the kernel has
		 * the matching read barrier.
		 */
		write_barrier();
	}
}
93 | ||
21b4aa5d JA |
94 | /* |
95 | * Command prep helpers | |
96 | */ | |
97 | static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data) | |
98 | { | |
99 | sqe->user_data = (unsigned long) data; | |
100 | } | |
101 | ||
004d564f JA |
102 | static inline void *io_uring_cqe_get_data(struct io_uring_cqe *cqe) |
103 | { | |
104 | return (void *) (uintptr_t) cqe->user_data; | |
105 | } | |
106 | ||
21b4aa5d | 107 | static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd, |
004d564f JA |
108 | const void *addr, unsigned len, |
109 | off_t offset) | |
21b4aa5d JA |
110 | { |
111 | memset(sqe, 0, sizeof(*sqe)); | |
112 | sqe->opcode = op; | |
113 | sqe->fd = fd; | |
114 | sqe->off = offset; | |
115 | sqe->addr = (unsigned long) addr; | |
116 | sqe->len = len; | |
117 | } | |
118 | ||
/*
 * Prepare a vectored read of nr_vecs iovecs from fd at the given file
 * offset.
 */
static inline void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd,
				       const struct iovec *iovecs,
				       unsigned nr_vecs, off_t offset)
{
	io_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs, nr_vecs, offset);
}
125 | ||
/*
 * Prepare a read into a pre-registered buffer (see io_uring_register())
 * from fd at the given file offset.
 */
static inline void io_uring_prep_read_fixed(struct io_uring_sqe *sqe, int fd,
					    void *buf, unsigned nbytes,
					    off_t offset)
{
	io_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset);
}
132 | ||
/*
 * Prepare a vectored write of nr_vecs iovecs to fd at the given file
 * offset.
 */
static inline void io_uring_prep_writev(struct io_uring_sqe *sqe, int fd,
					const struct iovec *iovecs,
					unsigned nr_vecs, off_t offset)
{
	io_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs, nr_vecs, offset);
}
139 | ||
/*
 * Prepare a write from a pre-registered buffer (see io_uring_register())
 * to fd at the given file offset.
 */
static inline void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd,
					     const void *buf, unsigned nbytes,
					     off_t offset)
{
	io_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset);
}
146 | ||
147 | static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd, | |
148 | short poll_mask) | |
149 | { | |
150 | memset(sqe, 0, sizeof(*sqe)); | |
151 | sqe->opcode = IORING_OP_POLL_ADD; | |
152 | sqe->fd = fd; | |
153 | sqe->poll_events = poll_mask; | |
154 | } | |
155 | ||
156 | static inline void io_uring_prep_poll_remove(struct io_uring_sqe *sqe, | |
157 | void *user_data) | |
158 | { | |
159 | memset(sqe, 0, sizeof(*sqe)); | |
160 | sqe->opcode = IORING_OP_POLL_REMOVE; | |
161 | sqe->addr = (unsigned long) user_data; | |
162 | } | |
163 | ||
164 | static inline void io_uring_prep_fsync(struct io_uring_sqe *sqe, int fd, | |
004d564f | 165 | unsigned fsync_flags) |
21b4aa5d JA |
166 | { |
167 | memset(sqe, 0, sizeof(*sqe)); | |
168 | sqe->opcode = IORING_OP_FSYNC; | |
169 | sqe->fd = fd; | |
004d564f JA |
170 | sqe->fsync_flags = fsync_flags; |
171 | } | |
172 | ||
/*
 * Prepare a no-op request; completes without doing any I/O. Useful for
 * testing the ring itself.
 */
static inline void io_uring_prep_nop(struct io_uring_sqe *sqe)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_NOP;
}
178 | ||
179 | #ifdef __cplusplus | |
21b4aa5d | 180 | } |
004d564f | 181 | #endif |
21b4aa5d JA |
182 | |
183 | #endif |