/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;

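/* Kernel mapping of one page of a UMEM: its kernel virtual address and,
 * for zero-copy bindings, the DMA address the device accesses it by.
 */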
struct xdp_umem_page {
	void *addr;
	dma_addr_t dma;
};

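/* LIFO stack of FILL-ring addresses a driver can stash and reuse (e.g.
 * across a queue restart) instead of losing them. handles[] is a
 * flexible array of nentries slots; length is the current stack depth.
 */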
struct xdp_umem_fq_reuse {
	u32 nentries;
	u32 length;
	u64 handles[];
};

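/* A UMEM: the user memory region shared between an AF_XDP socket and the
 * kernel. fq/cq are the user-visible FILL and COMPLETION rings, pgs/npgs
 * pin the region, and dev/queue_id record the netdev queue it is bound
 * to (zc marks a zero-copy binding).
 */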
struct xdp_umem {
	struct xsk_queue *fq;
	struct xsk_queue *cq;
	struct xdp_umem_page *pages;
	u64 chunk_mask;
	u64 size;
	u32 headroom;
	u32 chunk_size_nohr;
	struct user_struct *user;
	unsigned long address;
	refcount_t users;
	struct work_struct work;
	struct page **pgs;
	u32 npgs;
	int id;
	struct net_device *dev;
	struct xdp_umem_fq_reuse *fq_reuse;
	u16 queue_id;
	bool zc;
	spinlock_t xsk_list_lock;
	struct list_head xsk_list;
};

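/* An AF_XDP socket. The embedded struct sock must come first so generic
 * socket code can cast between the two; rx/tx are the user-visible RX
 * and TX rings, and umem supplies the backing packet buffers.
 */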
struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	u16 queue_id;
	bool zc;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head list;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	/* Protects generic receive. */
	spinlock_t rx_lock;
	u64 rx_dropped;
};

struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
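/* Receive path: xsk_rcv() handles packets redirected from a driver's
 * native XDP path, while xsk_generic_rcv() serves the generic (SKB)
 * XDP path and serializes concurrent producers via xs->rx_lock.
 */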
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
void xsk_flush(struct xdp_sock *xs);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_discard_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
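
/* Illustrative sketch (not part of this header's API): how a zero-copy
 * driver's TX path typically drives the helpers above. mydrv_post_tx()
 * is a hypothetical HW-submit routine.
 *
 *	struct xdp_desc desc;
 *
 *	// Move descriptors from the AF_XDP TX ring to the HW ring:
 *	while (xsk_umem_consume_tx(umem, &desc))
 *		mydrv_post_tx(umem, &desc);
 *	xsk_umem_consume_tx_done(umem);
 *
 *	// Later, from the TX completion handler, release nb completed
 *	// buffers to the COMPLETION ring for user space to reclaim:
 *	xsk_umem_complete_tx(umem, nb);
 */
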
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq);
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
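
/* Illustrative lifecycle sketch (hypothetical driver context): size a
 * reuse queue to the HW ring, install it, and free whichever queue it
 * replaced:
 *
 *	struct xdp_umem_fq_reuse *rq = xsk_reuseq_prepare(ring_size);
 *
 *	if (!rq)
 *		return -ENOMEM;
 *	xsk_reuseq_free(xsk_reuseq_swap(umem, rq));
 */
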
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);

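/* A UMEM address is a plain offset into the UMEM region: the page index
 * and the in-page offset fall out of it by shifting and masking.
 */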
static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1));
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1));
}

/* Reuse-queue aware version of FILL queue helpers */
static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (rq->length >= cnt)
		return true;

	return xsk_umem_has_addrs(umem, cnt - rq->length);
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		return xsk_umem_peek_addr(umem, addr);

	*addr = rq->handles[rq->length - 1];
	return addr;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		xsk_umem_discard_addr(umem);
	else
		rq->length--;
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	rq->handles[rq->length++] = addr;
}
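
/* Illustrative RX-fill sketch (hypothetical driver context): addresses
 * come off the reuse stack first and only then from the user-filled
 * FILL ring; the peek is committed with a discard once the buffer is
 * posted to hardware.
 *
 *	u64 addr;
 *
 *	if (!xsk_umem_peek_addr_rq(umem, &addr))
 *		return false;	// no buffers available
 *	mydrv_post_rx_buf(xdp_umem_get_dma(umem, addr));
 *	xsk_umem_discard_addr_rq(umem);
 *
 * On teardown, addresses the driver still holds go back on the stack
 * with xsk_umem_fq_reuse() so they are not lost to user space.
 */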
#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline void xsk_flush(struct xdp_sock *xs)
{
}

static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return false;
}

static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr(struct xdp_umem *umem)
{
}

static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
}

static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
				       struct xdp_desc *desc)
{
	return false;
}

static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	return NULL;
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
	struct xdp_umem *umem,
	struct xdp_umem_fq_reuse *newq)
{
	return NULL;
}

static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
}

static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
						     u16 queue_id)
{
	return NULL;
}

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}

static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */