/* SPDX-License-Identifier: GPL-2.0 */
/* XDP user-space ring structure
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XSK_QUEUE_H
#define _LINUX_XSK_QUEUE_H

#include <linux/types.h>
#include <linux/if_xdp.h>
#include <net/xdp_sock.h>

struct xdp_ring {
	u32 producer ____cacheline_aligned_in_smp;
	u32 consumer ____cacheline_aligned_in_smp;
	u32 flags;
};

/* Used for the RX and TX queues for packets */
struct xdp_rxtx_ring {
	struct xdp_ring ptrs;
	struct xdp_desc desc[] ____cacheline_aligned_in_smp;
};

/* Used for the fill and completion queues for buffers */
struct xdp_umem_ring {
	struct xdp_ring ptrs;
	u64 desc[] ____cacheline_aligned_in_smp;
};

struct xsk_queue {
	u64 chunk_mask;
	u64 size;
	u32 ring_mask;
	u32 nentries;
	u32 cached_prod;
	u32 cached_cons;
	struct xdp_ring *ring;
	u64 invalid_descs;
};

/* The structure of the shared state of the rings is the same as the
 * ring buffer in kernel/events/ring_buffer.c. For the Rx and completion
 * ring, the kernel is the producer and user space is the consumer. For
 * the Tx and fill rings, the kernel is the consumer and user space is
 * the producer.
 *
 * producer                         consumer
 *
 * if (LOAD ->consumer) {           LOAD ->producer
 *                    (A)           smp_rmb()       (C)
 *    STORE $data                   LOAD $data
 *    smp_wmb()       (B)           smp_mb()        (D)
 *    STORE ->producer              STORE ->consumer
 * }
 *
 * (A) pairs with (D), and (B) pairs with (C).
 *
 * Starting with (B), it prevents the data from being written after
 * the producer pointer. If this barrier were missing, the consumer
 * could observe the producer pointer being set and thus load the data
 * before the producer has written the new data. The consumer would in
 * this case load the old data.
 *
 * (C) protects the consumer from speculatively loading the data before
 * the producer pointer has actually been read. Without this barrier,
 * some architectures could load old data, since speculative loads are
 * not discarded and the CPU does not know there is a dependency
 * between ->producer and the data.
 *
 * (A) is a control dependency that separates the load of ->consumer
 * from the stores of $data. If ->consumer indicates there is no room
 * in the buffer, $data is not stored at all, so no barrier is needed.
 *
 * (D) ensures that the load of the data is observed to happen before
 * the store of the consumer pointer. Without this memory barrier, the
 * producer could observe the consumer pointer being set and overwrite
 * the data with a new value before the consumer got the chance to read
 * the old value. The consumer would thus miss the old entry and very
 * likely read the new entry twice, once right now and again after
 * circling through the ring.
 */
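
/* A minimal sketch of the scheme above in code form. This is purely
 * illustrative and not part of this header: the helper names are made
 * up, and a fill/completion-style ring of u64 entries is assumed.
 */
#if 0	/* example only */
static void example_produce(struct xdp_umem_ring *r, u32 mask, u64 val)
{
	u32 prod = r->ptrs.producer;

	if (prod - READ_ONCE(r->ptrs.consumer) > mask)
		return;				/* full; (A) control dependency */
	r->desc[prod & mask] = val;		/* STORE $data */
	smp_wmb();				/* B, matches C */
	WRITE_ONCE(r->ptrs.producer, prod + 1);	/* STORE ->producer */
}

static bool example_consume(struct xdp_umem_ring *r, u32 mask, u64 *val)
{
	u32 cons = r->ptrs.consumer;

	if (cons == READ_ONCE(r->ptrs.producer))	/* LOAD ->producer */
		return false;
	smp_rmb();				/* C, matches B */
	*val = r->desc[cons & mask];		/* LOAD $data */
	smp_mb();				/* D, matches A */
	WRITE_ONCE(r->ptrs.consumer, cons + 1);	/* STORE ->consumer */
	return true;
}
#endif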

/* The operations on the rings are the following:
 *
 * producer                         consumer
 *
 * RESERVE entries                  PEEK in the ring for entries
 * WRITE data into the ring         READ data from the ring
 * SUBMIT entries                   RELEASE entries
 *
 * The producer reserves one or more entries in the ring. It can then
 * fill in these entries and finally submit them so that they can be
 * seen and read by the consumer.
 *
 * The consumer peeks into the ring to see if the producer has written
 * any new entries. If so, the consumer can then read these entries
 * and, when it is done reading them, release them back to the producer
 * so that the producer can use these slots to fill in new entries.
 *
 * The function names below reflect these operations.
 */
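
/* A sketch of that call sequence using the helpers defined further down
 * in this file, assuming a Tx-style descriptor queue "q" and that addr,
 * len, desc and umem are in scope (illustration only, hence the #if 0
 * guard; the functions are defined below this point).
 */
#if 0	/* example only */
	/* producer: reserve a slot, write a descriptor, then publish it */
	if (!xskq_prod_reserve_desc(q, addr, len))
		xskq_prod_submit(q);

	/* consumer: peek at a descriptor, process it, then release it */
	if (xskq_cons_peek_desc(q, &desc, umem)) {
		/* ... use desc.addr and desc.len ... */
		xskq_cons_release(q);
	}
#endif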

/* Functions that read and validate content from consumer rings. */

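/* In the unaligned-chunk mode, a buffer may straddle a page boundary. Such
 * a buffer is only usable if the next page is marked as contiguous (the
 * XSK_NEXT_PG_CONTIG_MASK bit stashed in the page's addr field).
 */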
static inline bool xskq_cons_crosses_non_contig_pg(struct xdp_umem *umem,
						   u64 addr,
						   u64 length)
{
	bool cross_pg = (addr & (PAGE_SIZE - 1)) + length > PAGE_SIZE;
	bool next_pg_contig =
		(unsigned long)umem->pages[(addr >> PAGE_SHIFT)].addr &
			XSK_NEXT_PG_CONTIG_MASK;

	return cross_pg && !next_pg_contig;
}

static inline bool xskq_cons_is_valid_unaligned(struct xsk_queue *q,
						u64 addr,
						u64 length,
						struct xdp_umem *umem)
{
	u64 base_addr = xsk_umem_extract_addr(addr);

	addr = xsk_umem_add_offset_to_addr(addr);
	if (base_addr >= q->size || addr >= q->size ||
	    xskq_cons_crosses_non_contig_pg(umem, addr, length)) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

static inline bool xskq_cons_is_valid_addr(struct xsk_queue *q, u64 addr)
{
	if (addr >= q->size) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

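/* Scans forward from the cached consumer index for the first valid address,
 * counting and skipping invalid entries. Only the local (cached) index is
 * advanced; the global consumer pointer is updated later on release.
 */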
static inline bool xskq_cons_read_addr(struct xsk_queue *q, u64 *addr,
				       struct xdp_umem *umem)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	while (q->cached_cons != q->cached_prod) {
		u32 idx = q->cached_cons & q->ring_mask;

		*addr = ring->desc[idx] & q->chunk_mask;

		if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) {
			if (xskq_cons_is_valid_unaligned(q, *addr,
							 umem->chunk_size_nohr,
							 umem))
				return true;
			goto out;
		}

		if (xskq_cons_is_valid_addr(q, *addr))
			return true;

out:
		q->cached_cons++;
	}

	return false;
}

static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
					   struct xdp_desc *d,
					   struct xdp_umem *umem)
{
	if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) {
		if (!xskq_cons_is_valid_unaligned(q, d->addr, d->len, umem))
			return false;

		if (d->len > umem->chunk_size_nohr || d->options) {
			q->invalid_descs++;
			return false;
		}

		return true;
	}

	if (!xskq_cons_is_valid_addr(q, d->addr))
		return false;

	if (((d->addr + d->len) & q->chunk_mask) != (d->addr & q->chunk_mask) ||
	    d->options) {
		q->invalid_descs++;
		return false;
	}

	return true;
}

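/* Like xskq_cons_read_addr() above, but for full Rx/Tx descriptors:
 * skips and counts invalid descriptors until a valid one is found.
 */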
static inline bool xskq_cons_read_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xdp_umem *umem)
{
	while (q->cached_cons != q->cached_prod) {
		struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
		u32 idx = q->cached_cons & q->ring_mask;

		*desc = ring->desc[idx];
		if (xskq_cons_is_valid_desc(q, desc, umem))
			return true;

		q->cached_cons++;
	}

	return false;
}

/* Functions for consumers */

static inline void __xskq_cons_release(struct xsk_queue *q)
{
	smp_mb(); /* D, matches A */
	WRITE_ONCE(q->ring->consumer, q->cached_cons);
}

static inline void __xskq_cons_peek(struct xsk_queue *q)
{
	/* Refresh the local pointer */
	q->cached_prod = READ_ONCE(q->ring->producer);
	smp_rmb(); /* C, matches B */
}

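/* Release everything consumed so far before peeking: publishing the cached
 * consumer index first hands the freed slots back to the producer, which in
 * turn may let the refreshed producer index show new entries.
 */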
static inline void xskq_cons_get_entries(struct xsk_queue *q)
{
	__xskq_cons_release(q);
	__xskq_cons_peek(q);
}

static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries >= cnt)
		return true;

	__xskq_cons_peek(q);
	entries = q->cached_prod - q->cached_cons;

	return entries >= cnt;
}

static inline bool xskq_cons_peek_addr(struct xsk_queue *q, u64 *addr,
				       struct xdp_umem *umem)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_addr(q, addr, umem);
}

static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
				       struct xdp_desc *desc,
				       struct xdp_umem *umem)
{
	if (q->cached_prod == q->cached_cons)
		xskq_cons_get_entries(q);
	return xskq_cons_read_desc(q, desc, umem);
}

static inline void xskq_cons_release(struct xsk_queue *q)
{
	/* To improve performance, only update local state here.
	 * Reflect this to global state when we get new entries
	 * from the ring in xskq_cons_get_entries() and whenever
	 * Rx or Tx processing is completed in the NAPI loop.
	 */
	q->cached_cons++;
}

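/* The producer and consumer indices are free-running u32 counters, so the
 * number of used entries is always (producer - consumer), even across a
 * wraparound: e.g. producer == 2, consumer == 0xfffffffe yields 4.
 */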
static inline bool xskq_cons_is_full(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer) ==
		q->nentries;
}

/* Functions for producers */

static inline bool xskq_prod_is_full(struct xsk_queue *q)
{
	u32 free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	if (free_entries)
		return false;

	/* Refresh the local tail pointer */
	q->cached_cons = READ_ONCE(q->ring->consumer);
	free_entries = q->nentries - (q->cached_prod - q->cached_cons);

	return !free_entries;
}

static inline int xskq_prod_reserve(struct xsk_queue *q)
{
	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	q->cached_prod++;
	return 0;
}

static inline int xskq_prod_reserve_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	ring->desc[q->cached_prod++ & q->ring_mask] = addr;
	return 0;
}

static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
					 u64 addr, u32 len)
{
	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
	u32 idx;

	if (xskq_prod_is_full(q))
		return -ENOSPC;

	/* A, matches D */
	idx = q->cached_prod++ & q->ring_mask;
	ring->desc[idx].addr = addr;
	ring->desc[idx].len = len;

	return 0;
}

static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
{
	smp_wmb(); /* B, matches C */

	WRITE_ONCE(q->ring->producer, idx);
}

static inline void xskq_prod_submit(struct xsk_queue *q)
{
	__xskq_prod_submit(q, q->cached_prod);
}

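/* Writes one address directly at the global producer index and publishes it
 * immediately, bypassing the cached producer index used by the reserve/submit
 * pairs above.
 */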
static inline void xskq_prod_submit_addr(struct xsk_queue *q, u64 addr)
{
	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
	u32 idx = q->ring->producer;

	ring->desc[idx++ & q->ring_mask] = addr;

	__xskq_prod_submit(q, idx);
}

static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
{
	__xskq_prod_submit(q, q->ring->producer + nb_entries);
}

static inline bool xskq_prod_is_empty(struct xsk_queue *q)
{
	/* No barriers needed since data is not accessed */
	return READ_ONCE(q->ring->consumer) == READ_ONCE(q->ring->producer);
}

/* For both producers and consumers */

static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
{
	return q ? q->invalid_descs : 0;
}

void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask);
struct xsk_queue *xskq_create(u32 nentries, bool umem_queue);
void xskq_destroy(struct xsk_queue *q_ops);

/* Executed by the core when the entire UMEM gets freed */
void xsk_reuseq_destroy(struct xdp_umem *umem);

#endif /* _LINUX_XSK_QUEUE_H */