]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - net/xdp/xsk_queue.c
Merge tag 'hwlock-v5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/andersson...
[mirror_ubuntu-hirsute-kernel.git] / net / xdp / xsk_queue.c
CommitLineData
423f3832
MK
1// SPDX-License-Identifier: GPL-2.0
2/* XDP user-space ring structure
3 * Copyright(c) 2018 Intel Corporation.
423f3832
MK
4 */
5
f5bd9138 6#include <linux/log2.h>
423f3832 7#include <linux/slab.h>
f5bd9138 8#include <linux/overflow.h>
423f3832
MK
9
10#include "xsk_queue.h"
11
93ee30f3 12void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask)
965a9909
MK
13{
14 if (!q)
15 return;
16
93ee30f3
MK
17 q->size = size;
18 q->chunk_mask = chunk_mask;
965a9909
MK
19}
20
1d9cb1f3 21static size_t xskq_get_ring_size(struct xsk_queue *q, bool umem_queue)
423f3832 22{
1d9cb1f3
MK
23 struct xdp_umem_ring *umem_ring;
24 struct xdp_rxtx_ring *rxtx_ring;
423f3832 25
1d9cb1f3
MK
26 if (umem_queue)
27 return struct_size(umem_ring, desc, q->nentries);
28 return struct_size(rxtx_ring, desc, q->nentries);
b9b6b68e
BT
29}
30
31struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
423f3832
MK
32{
33 struct xsk_queue *q;
34 gfp_t gfp_flags;
35 size_t size;
36
37 q = kzalloc(sizeof(*q), GFP_KERNEL);
38 if (!q)
39 return NULL;
40
41 q->nentries = nentries;
42 q->ring_mask = nentries - 1;
43
44 gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN |
45 __GFP_COMP | __GFP_NORETRY;
1d9cb1f3 46 size = xskq_get_ring_size(q, umem_queue);
423f3832
MK
47
48 q->ring = (struct xdp_ring *)__get_free_pages(gfp_flags,
49 get_order(size));
50 if (!q->ring) {
51 kfree(q);
52 return NULL;
53 }
54
55 return q;
56}
57
58void xskq_destroy(struct xsk_queue *q)
59{
60 if (!q)
61 return;
62
63 page_frag_free(q->ring);
64 kfree(q);
65}
f5bd9138
JK
66
67struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
68{
69 struct xdp_umem_fq_reuse *newq;
70
71 /* Check for overflow */
72 if (nentries > (u32)roundup_pow_of_two(nentries))
73 return NULL;
74 nentries = roundup_pow_of_two(nentries);
75
76 newq = kvmalloc(struct_size(newq, handles, nentries), GFP_KERNEL);
77 if (!newq)
78 return NULL;
79 memset(newq, 0, offsetof(typeof(*newq), handles));
80
81 newq->nentries = nentries;
82 return newq;
83}
84EXPORT_SYMBOL_GPL(xsk_reuseq_prepare);
85
86struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
87 struct xdp_umem_fq_reuse *newq)
88{
89 struct xdp_umem_fq_reuse *oldq = umem->fq_reuse;
90
91 if (!oldq) {
92 umem->fq_reuse = newq;
93 return NULL;
94 }
95
96 if (newq->nentries < oldq->length)
97 return newq;
98
99 memcpy(newq->handles, oldq->handles,
100 array_size(oldq->length, sizeof(u64)));
101 newq->length = oldq->length;
102
103 umem->fq_reuse = newq;
104 return oldq;
105}
106EXPORT_SYMBOL_GPL(xsk_reuseq_swap);
107
/* Release a reuse cache obtained from xsk_reuseq_prepare() or returned
 * by xsk_reuseq_swap(). kvfree() handles both kmalloc- and
 * vmalloc-backed allocations, and NULL is a no-op.
 */
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
	kvfree(rq);
}
EXPORT_SYMBOL_GPL(xsk_reuseq_free);
113
114void xsk_reuseq_destroy(struct xdp_umem *umem)
115{
116 xsk_reuseq_free(umem->fq_reuse);
117 umem->fq_reuse = NULL;
118}