/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#include "xsk.h"

struct xdp_ring {
        u32 producer ____cacheline_aligned_in_smp;
        /* Hinder the adjacent cache prefetcher from prefetching the consumer
         * pointer when the producer pointer is touched, and vice versa.
         */
        u32 pad1 ____cacheline_aligned_in_smp;
        u32 consumer ____cacheline_aligned_in_smp;
        u32 pad2 ____cacheline_aligned_in_smp;
        u32 flags;
        u32 pad3 ____cacheline_aligned_in_smp;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
        struct xdp_ring ptrs;
        struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
        struct xdp_ring ptrs;
        u64 desc[] ____cacheline_aligned_in_smp;
};

struct xsk_queue {
        u32 ring_mask;
        u32 nentries;
        u32 cached_prod;
        u32 cached_cons;
        struct xdp_ring *ring;
        u64 invalid_descs;
        u64 queue_empty_descs;
};
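
/* nentries is a power of two and ring_mask is expected to be nentries - 1
 * (set up when the queue is created), so a free-running 32-bit
 * producer/consumer counter is mapped onto a ring slot with a plain mask,
 * e.g. with nentries == 8 the counters 7, 8 and 9 land in slots 7, 0 and 1:
 *
 *      idx = counter & q->ring_mask;
 */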

/* The structure of the shared state of the rings is a simple
 * circular buffer, as outlined in
 * Documentation/core-api/circular-buffers.rst. For the Rx and
 * completion ring, the kernel is the producer and user space is the
 * consumer. For the Tx and fill rings, the kernel is the consumer and
 * user space is the producer.
 *
 * producer                                    consumer
 *
 * if (LOAD ->consumer) {  (A)                 LOAD.acq ->producer  (C)
 *     STORE $data                             LOAD $data
 *     STORE.rel ->producer  (B)               STORE.rel ->consumer  (D)
 * }
 *
 * (A) pairs with (D), and (B) pairs with (C).
 *
 * Starting with (B), it protects the data from being written after
 * the producer pointer. If this barrier was missing, the consumer
 * could observe the producer pointer being set and thus load the data
 * before the producer has written the new data. The consumer would in
 * this case load the old data.
 *
 * (C) protects the consumer from speculatively loading the data before
 * the producer pointer actually has been read. If we do not have this
 * barrier, some architectures could load old data as speculative loads
 * are not discarded as the CPU does not know there is a dependency
 * between ->producer and data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data. If ->consumer indicates there is no room
 * in the buffer to store $data, the stores are not performed. The
 * dependency orders both of the stores after the loads, so no barrier
 * is needed.
 *
 * (D) prevents the load of the data from being observed to happen
 * after the store of the consumer pointer. If we did not have this
 * memory barrier, the producer could observe the consumer pointer
 * being set and overwrite the data with a new value before the
 * consumer got the chance to read the old value. The consumer would
 * thus miss reading the old entry and very likely read the new entry
 * twice, once right now and again after circling through the ring.
 */
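
/* In the helpers below, these operations show up as follows: the consumer
 * pointer is read via READ_ONCE() in xskq_prod_nb_free() before a slot is
 * reserved (A), the producer pointer is published with smp_store_release()
 * in __xskq_prod_submit() (B), read with smp_load_acquire() in
 * __xskq_cons_peek() (C), and the consumer pointer is published with
 * smp_store_release() in __xskq_cons_release() (D).
 */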

/* The operations on the rings are the following:
 *
 * producer                           consumer
 *
 * RESERVE entries                    PEEK in the ring for entries
 * WRITE data into the ring           READ data from the ring
 * SUBMIT entries                     RELEASE entries
 *
 * The producer reserves one or more entries in the ring. It can then
 * fill in these entries and finally submit them so that they can be
 * seen and read by the consumer.
 *
 * The consumer peeks into the ring to see if the producer has written
 * any new entries. If so, the consumer can then read these entries
 * and when it is done reading them release them back to the producer
 * so that the producer can use these slots to fill in new entries.
 *
 * The function names below reflect these operations; illustrative usage
 * sketches follow xskq_cons_release_n() and xskq_prod_submit() below.
 */

/* Functions that read and validate content from consumer rings. */

static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

        if (q->cached_cons != q->cached_prod) {
                u32 idx = q->cached_cons & q->ring_mask;

                *addr = ring->desc[idx];
                return true;
        }

        return false;
}

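/* Two validation flavors follow, selected by pool->unaligned in
 * xp_validate_desc(). In aligned mode, desc->addr is an offset into the UMEM
 * and the descriptor must not cross a chunk boundary. In unaligned mode,
 * desc->addr additionally carries an offset in its upper bits, so the base
 * address and the offset-adjusted address are extracted separately and both
 * are checked against the pool.
 */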
static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
                                            struct xdp_desc *desc)
{
        u64 chunk, chunk_end;

        chunk = xp_aligned_extract_addr(pool, desc->addr);
        if (likely(desc->len)) {
                chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len - 1);
                if (chunk != chunk_end)
                        return false;
        }

        if (chunk >= pool->addrs_cnt)
                return false;

        if (desc->options)
                return false;
        return true;
}

static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
                                              struct xdp_desc *desc)
{
        u64 addr, base_addr;

        base_addr = xp_unaligned_extract_addr(desc->addr);
        addr = xp_unaligned_add_offset_to_addr(desc->addr);

        if (desc->len > pool->chunk_size)
                return false;

        if (base_addr >= pool->addrs_cnt || addr >= pool->addrs_cnt ||
            xp_desc_crosses_non_contig_pg(pool, addr, desc->len))
                return false;

        if (desc->options)
                return false;
        return true;
}

static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
                                    struct xdp_desc *desc)
{
        return pool->unaligned ? xp_unaligned_validate_desc(pool, desc) :
                xp_aligned_validate_desc(pool, desc);
}

static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
                                           struct xdp_desc *d,
                                           struct xsk_buff_pool *pool)
{
        if (!xp_validate_desc(pool, d)) {
                q->invalid_descs++;
                return false;
        }
        return true;
}

static inline bool xskq_cons_read_desc(struct xsk_queue *q,
                                       struct xdp_desc *desc,
                                       struct xsk_buff_pool *pool)
{
        while (q->cached_cons != q->cached_prod) {
                struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
                u32 idx = q->cached_cons & q->ring_mask;

                *desc = ring->desc[idx];
                if (xskq_cons_is_valid_desc(q, desc, pool))
                        return true;

                q->cached_cons++;
        }

        return false;
}

static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q,
                                            struct xdp_desc *descs,
                                            struct xsk_buff_pool *pool, u32 max)
{
        u32 cached_cons = q->cached_cons, nb_entries = 0;

        while (cached_cons != q->cached_prod && nb_entries < max) {
                struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
                u32 idx = cached_cons & q->ring_mask;

                descs[nb_entries] = ring->desc[idx];
                if (unlikely(!xskq_cons_is_valid_desc(q, &descs[nb_entries], pool))) {
                        /* Skip the entry */
                        cached_cons++;
                        continue;
                }

                nb_entries++;
                cached_cons++;
        }

        return nb_entries;
}

/* Functions for consumers */

static inline void __xskq_cons_release(struct xsk_queue *q)
{
        smp_store_release(&q->ring->consumer, q->cached_cons); /* D, matches A */
}

static inline void __xskq_cons_peek(struct xsk_queue *q)
{
        /* Refresh the local pointer */
        q->cached_prod = smp_load_acquire(&q->ring->producer); /* C, matches B */
}

static inline void xskq_cons_get_entries(struct xsk_queue *q)
{
        __xskq_cons_release(q);
        __xskq_cons_peek(q);
}

static inline u32 xskq_cons_nb_entries(struct xsk_queue *q, u32 max)
{
        u32 entries = q->cached_prod - q->cached_cons;

        if (entries >= max)
                return max;

        __xskq_cons_peek(q);
        entries = q->cached_prod - q->cached_cons;

        return entries >= max ? max : entries;
}

static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
{
        return xskq_cons_nb_entries(q, cnt) >= cnt;
}

static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
{
        if (q->cached_prod == q->cached_cons)
                xskq_cons_get_entries(q);
        return xskq_cons_read_addr_unchecked(q, addr);
}

static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
                                       struct xdp_desc *desc,
                                       struct xsk_buff_pool *pool)
{
        if (q->cached_prod == q->cached_cons)
                xskq_cons_get_entries(q);
        return xskq_cons_read_desc(q, desc, pool);
}

static inline u32 xskq_cons_peek_desc_batch(struct xsk_queue *q, struct xdp_desc *descs,
                                            struct xsk_buff_pool *pool, u32 max)
{
        u32 entries = xskq_cons_nb_entries(q, max);

        return xskq_cons_read_desc_batch(q, descs, pool, entries);
}

/* To improve performance in the xskq_cons_release functions, only update local state here.
 * Reflect this to global state when we get new entries from the ring in
 * xskq_cons_get_entries() and whenever Rx or Tx processing is completed in the NAPI loop.
 */
static inline void xskq_cons_release(struct xsk_queue *q)
{
        q->cached_cons++;
}

static inline void xskq_cons_release_n(struct xsk_queue *q, u32 cnt)
{
        q->cached_cons += cnt;
}
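
/* Illustrative sketch (not part of the original file): a Tx-style consumer
 * loop that peeks descriptors, processes them and finally publishes the
 * consumer pointer back to the producer. The function name and the @budget
 * parameter are made up for this example; the real Tx path lives in
 * net/xdp/xsk.c.
 */
static inline u32 xskq_example_cons_drain(struct xsk_queue *q,
                                          struct xsk_buff_pool *pool,
                                          u32 budget)
{
        struct xdp_desc desc;
        u32 done = 0;

        /* PEEK + READ: xskq_cons_peek_desc() refreshes cached_prod when the
         * local view is empty and copies out the next valid descriptor.
         */
        while (done < budget && xskq_cons_peek_desc(q, &desc, pool)) {
                /* ... use desc.addr and desc.len here ... */

                /* RELEASE: only the local cached_cons is advanced. */
                xskq_cons_release(q);
                done++;
        }

        /* Make all released entries visible to the producer (D). */
        __xskq_cons_release(q);

        return done;
}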

static inline bool xskq_cons_is_full(struct xsk_queue *q)
{
        /* No barriers needed since data is not accessed */
        return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer) ==
                q->nentries;
}

static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
{
        /* No barriers needed since data is not accessed */
        return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
}

/* Functions for producers */

static inline u32 xskq_prod_nb_free(struct xsk_queue *q, u32 max)
{
        u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

        if (free_entries >= max)
                return max;

        /* Refresh the local tail pointer */
        q->cached_cons = READ_ONCE(q->ring->consumer);
        free_entries = q->nentries - (q->cached_prod - q->cached_cons);

        return free_entries >= max ? max : free_entries;
}

static inline bool xskq_prod_is_full(struct xsk_queue *q)
{
        return !xskq_prod_nb_free(q, 1);
}

static inline void xskq_prod_cancel(struct xsk_queue *q)
{
        q->cached_prod--;
}

static inline int xskq_prod_reserve(struct xsk_queue *q)
{
        if (xskq_prod_is_full(q))
                return -ENOSPC;

        /* A, matches D */
        q->cached_prod++;
        return 0;
}

static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

        if (xskq_prod_is_full(q))
                return -ENOSPC;

        /* A, matches D */
        ring->desc[q->cached_prod++ & q->ring_mask] = addr;
        return 0;
}

static inline u32 xskq_prod_reserve_addr_batch(struct xsk_queue *q, struct xdp_desc *descs,
                                               u32 max)
{
        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
        u32 nb_entries, i, cached_prod;

        nb_entries = xskq_prod_nb_free(q, max);

        /* A, matches D */
        cached_prod = q->cached_prod;
        for (i = 0; i < nb_entries; i++)
                ring->desc[cached_prod++ & q->ring_mask] = descs[i].addr;
        q->cached_prod = cached_prod;

        return nb_entries;
}

static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
                                         u64 addr, u32 len)
{
        struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
        u32 idx;

        if (xskq_prod_is_full(q))
                return -ENOSPC;

        /* A, matches D */
        idx = q->cached_prod++ & q->ring_mask;
        ring->desc[idx].addr = addr;
        ring->desc[idx].len = len;

        return 0;
}

static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
        smp_store_release(&q->ring->producer, idx); /* B, matches C */
}

static inline void xskq_prod_submit(struct xsk_queue *q)
{
        __xskq_prod_submit(q, q->cached_prod);
}
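
/* Illustrative sketch (not part of the original file): an Rx-style producer
 * that reserves a slot, writes one descriptor and submits it to user space.
 * The function name is made up for this example; the real Rx path lives in
 * net/xdp/xsk.c.
 */
static inline int xskq_example_prod_publish(struct xsk_queue *q,
                                            u64 addr, u32 len)
{
        int err;

        /* RESERVE + WRITE: claim the next slot and fill in the descriptor.
         * Only the local cached_prod is advanced at this point.
         */
        err = xskq_prod_reserve_desc(q, addr, len);
        if (err)
                return err;

        /* SUBMIT: publish the new producer pointer so the consumer can see
         * the entry (B).
         */
        xskq_prod_submit(q);

        return 0;
}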

static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
{
        struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
        u32 idx = q->ring->producer;

        ring->desc[idx++ & q->ring_mask] = addr;

        __xskq_prod_submit(q, idx);
}

static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{
        __xskq_prod_submit(q, q->ring->producer + nb_entries);
}

static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
        /* No barriers needed since data is not accessed */
        return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}

/* For both producers and consumers */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
        return q ? q->invalid_descs : 0;
}

static inline u64 xskq_nb_queue_empty_descs(struct xsk_queue *q)
{
        return q ? q->queue_empty_descs : 0;
}

struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

#endif /* _LINUX_XSK_QUEUE_H */