/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>
struct net_device;
struct xsk_queue;

/* One page of a UMEM, mapped for both the CPU and the device */
struct xdp_umem_page {
	void *addr;	/* kernel virtual address of the page */
	dma_addr_t dma;	/* DMA address, used in zero-copy mode */
};

/* Stash of FILL queue addresses a driver has taken out but not yet
 * given to hardware, kept so they can be handed out again later
 * instead of being lost (e.g. across a ring restart).
 */
struct xdp_umem_fq_reuse {
	u32 nentries;	/* capacity of handles[] */
	u32 length;	/* number of addresses currently stashed */
	u64 handles[];	/* the saved UMEM addresses */
};

struct xdp_umem {
	struct xsk_queue *fq;		/* FILL ring */
	struct xsk_queue *cq;		/* COMPLETION ring */
	struct xdp_umem_page *pages;	/* per-page kernel/DMA addresses */
	u64 chunk_mask;			/* ~(chunk_size - 1) */
	u64 size;			/* total size in bytes */
	u32 headroom;
	u32 chunk_size_nohr;		/* chunk size minus headroom */
	struct user_struct *user;	/* for locked-memory accounting */
	unsigned long address;		/* userspace start address */
	refcount_t users;
	struct work_struct work;	/* deferred destruction */
	struct page **pgs;		/* the pinned user pages */
	u32 npgs;
	int id;
	struct net_device *dev;		/* device/queue this umem is bound to */
	struct xdp_umem_fq_reuse *fq_reuse;
	u16 queue_id;
	bool zc;			/* zero-copy mode enabled */
	spinlock_t xsk_list_lock;	/* protects xsk_list */
	struct list_head xsk_list;	/* sockets sharing this umem */
};
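
/* Illustrative sketch (not part of this header's API; the name is
 * hypothetical): because chunk_mask is ~(chunk_size - 1), masking a
 * 64-bit UMEM address with it yields the base of the chunk the address
 * falls in, with the low bits selecting the byte inside the chunk.
 */
static inline u64 example_umem_chunk_base(struct xdp_umem *umem, u64 addr)
{
	return addr & umem->chunk_mask;	/* strip the in-chunk offset */
}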

struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx;		/* RX ring */
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	u16 queue_id;
	struct xsk_queue *tx ____cacheline_aligned_in_smp;	/* TX ring */
	struct list_head list;
	bool zc;			/* zero-copy mode enabled */
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	u64 rx_dropped;
};
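
/* Why "struct sock must be the first member": the socket core hands
 * back struct sock pointers, and the first-member rule makes a plain
 * cast valid. net/xdp/xsk.c carries an equivalent xdp_sk() helper; the
 * name below is hypothetical, for illustration only.
 */
static inline struct xdp_sock *example_xdp_sk(struct sock *sk)
{
	return (struct xdp_sock *)sk;	/* valid only because sk is first */
}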

struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
void xsk_flush(struct xdp_sock *xs);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_discard_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
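
/* Sketch of how a zero-copy driver might drive the TX helpers above
 * (modelled on drivers such as i40e; hw_post_tx_desc() is hypothetical).
 * Frames are pulled off the socket TX rings, handed to hardware, and
 * the batch is closed with xsk_umem_consume_tx_done(). Once hardware
 * has actually sent n frames, the driver acknowledges them to user
 * space with xsk_umem_complete_tx(umem, n) on the COMPLETION ring.
 */
static inline u32 example_xsk_xmit(struct xdp_umem *umem, u32 budget)
{
	dma_addr_t dma;
	u32 len, sent = 0;

	while (sent < budget && xsk_umem_consume_tx(umem, &dma, &len)) {
		/* hw_post_tx_desc(dma, len); -- queue the frame to the NIC */
		sent++;
	}
	xsk_umem_consume_tx_done(umem);	/* kick once per batch */
	return sent;
}
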
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq);
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);

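/* Sketch of the reuse-queue lifecycle (modelled on zero-copy drivers,
 * which size it to their RX ring; the helper name is hypothetical):
 * allocate a queue with room for every in-flight address, install it,
 * and free whichever queue was installed before.
 */
static inline int example_xsk_setup_reuseq(struct xdp_umem *umem,
					   u32 rx_ring_size)
{
	struct xdp_umem_fq_reuse *rq = xsk_reuseq_prepare(rx_ring_size);

	if (!rq)
		return -ENOMEM;
	xsk_reuseq_free(xsk_reuseq_swap(umem, rq));	/* drop the old queue */
	return 0;
}
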
static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1));
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1));
}
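
/* Sketch (hypothetical helper): the two lookups above give the CPU and
 * device views of the same buffer, both derived from the pages[] array
 * built when the UMEM was registered.
 */
static inline void example_umem_views(struct xdp_umem *umem, u64 addr,
				      char **va, dma_addr_t *dma)
{
	*va = xdp_umem_get_data(umem, addr);	/* CPU view, e.g. for copies */
	*dma = xdp_umem_get_dma(umem, addr);	/* device view, for descriptors */
}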

/* Reuse-queue aware version of FILL queue helpers */
static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		return xsk_umem_peek_addr(umem, addr);

	*addr = rq->handles[rq->length - 1];
	return addr;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	if (!rq->length)
		xsk_umem_discard_addr(umem);
	else
		rq->length--;
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;

	/* Caller must ensure there is room: length < nentries */
	rq->handles[rq->length++] = addr;
}
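
/* Sketch of the allocation pattern the helpers above enable in a
 * zero-copy RX path (the name is hypothetical): addresses come from the
 * reuse queue first and the FILL queue second; anything still unused at
 * ring teardown goes back via xsk_umem_fq_reuse() so it is not lost.
 */
static inline bool example_xsk_alloc_rx_addr(struct xdp_umem *umem, u64 *addr)
{
	if (!xsk_umem_peek_addr_rq(umem, addr))
		return false;		/* both queues empty; retry later */
	xsk_umem_discard_addr_rq(umem);	/* commit the entry we peeked */
	return true;
}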
#else
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline void xsk_flush(struct xdp_sock *xs)
{
}

static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr(struct xdp_umem *umem)
{
}

static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
}

static inline bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma,
				       u32 *len)
{
	return false;
}

static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	return NULL;
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
	struct xdp_umem *umem,
	struct xdp_umem_fq_reuse *newq)
{
	return NULL;
}

static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
}

static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
						     u16 queue_id)
{
	return NULL;
}

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */