// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock_drv.h>
#include <net/busy_poll.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 32

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

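/* The need_wakeup helpers below implement an optional protocol between
 * the driver and the application: when a ring runs dry, the driver sets
 * XDP_RING_NEED_WAKEUP on it so the application knows it must kick the
 * kernel (via poll()/sendmsg()) instead of the kernel busy-spinning.
 * cached_need_wakeup mirrors the ring flags so the shared ring memory
 * is only written when the state actually changes.
 */
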
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);

struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}

/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}

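/* Buffer handle plumbing: xp_release() returns a zero-copy buffer to the
 * pool's free list, and xp_get_handle() reconstructs the umem address the
 * application sees. In aligned mode the offset into the chunk is simply
 * added to the base address; in unaligned mode the offset is carried in
 * the upper bits of the 64-bit handle (XSK_UNALIGNED_BUF_OFFSET_SHIFT) so
 * the base address in the lower bits stays intact.
 */
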
void xp_release(struct xdp_buff_xsk *xskb)
{
	xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

static u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

	offset += xskb->pool->headroom;
	if (!xskb->pool->unaligned)
		return xskb->orig_addr + offset;
	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}

static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}

static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
{
	void *from_buf, *to_buf;
	u32 metalen;

	if (unlikely(xdp_data_meta_unsupported(from))) {
		from_buf = from->data;
		to_buf = to->data;
		metalen = 0;
	} else {
		from_buf = from->data_meta;
		metalen = from->data - from->data_meta;
		to_buf = to->data - metalen;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct xdp_buff *xsk_xdp;
	int err;
	u32 len;

	len = xdp->data_end - xdp->data;
	if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_xdp = xsk_buff_alloc(xs->pool);
	if (!xsk_xdp) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_copy_xdp(xsk_xdp, xdp, len);
	err = __xsk_rcv_zc(xs, xsk_xdp, len);
	if (err) {
		xsk_buff_free(xsk_xdp);
		return err;
	}
	return 0;
}

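/* The TX ring is reported writeable only while it is at most half full,
 * so the application gets a batch of free slots instead of being woken
 * for every single completion.
 */
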
static bool xsk_tx_writeable(struct xdp_sock *xs)
{
	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
		return false;

	return true;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	if (!xsk_is_bound(xs))
		return -EINVAL;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	sk_mark_napi_id_once_xdp(&xs->sk, xdp);
	return 0;
}

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv_check(xs, xdp);
	if (!err) {
		err = __xsk_rcv(xs, xdp);
		xsk_flush(xs);
	}
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;
	u32 len;

	err = xsk_rcv_check(xs, xdp);
	if (err)
		return err;

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
		len = xdp->data_end - xdp->data;
		return __xsk_rcv_zc(xs, xdp, len);
	}

	err = __xsk_rcv(xs, xdp);
	if (!err)
		xdp_return_buff(xdp);
	return err;
}

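/* Redirects from an XDP program are batched: each successfully received
 * buffer only queues its socket on a per-CPU flush list here, and
 * __xsk_map_flush() later publishes all completed RX entries and wakes
 * the sockets once per flush (typically at the end of a NAPI poll)
 * instead of once per packet.
 */
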
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

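/* Driver-facing TX helpers: xsk_tx_completed() publishes completions to
 * the completion ring, xsk_tx_release() hands back consumed TX ring
 * entries and wakes writers, and xsk_tx_peek_desc() iterates over all
 * sockets sharing the pool to find the next descriptor to send.
 */
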
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		if (xsk_tx_writeable(xs))
			xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);

bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			xs->tx->queue_empty_descs++;
			continue;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);

static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, struct xdp_desc *descs,
					u32 max_entries)
{
	u32 nb_pkts = 0;

	while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
		nb_pkts++;

	xsk_tx_release(pool);
	return nb_pkts;
}

u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, struct xdp_desc *descs,
				   u32 max_entries)
{
	struct xdp_sock *xs;
	u32 nb_pkts;

	rcu_read_lock();
	if (!list_is_singular(&pool->xsk_tx_list)) {
		/* Fall back to the non-batched version */
		rcu_read_unlock();
		return xsk_tx_peek_release_fallback(pool, descs, max_entries);
	}

	xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
	if (!xs) {
		nb_pkts = 0;
		goto out;
	}

	nb_pkts = xskq_cons_peek_desc_batch(xs->tx, descs, pool, max_entries);
	if (!nb_pkts) {
		xs->tx->queue_empty_descs++;
		goto out;
	}

	/* This is the backpressure mechanism for the Tx path. Try to
	 * reserve space in the completion queue for all packets, but
	 * if there are fewer slots available, just process that many
	 * packets. This avoids having to implement any buffering in
	 * the Tx path.
	 */
	nb_pkts = xskq_prod_reserve_addr_batch(pool->cq, descs, nb_pkts);
	if (!nb_pkts)
		goto out;

	xskq_cons_release_n(xs->tx, nb_pkts);
	__xskq_cons_release(xs->tx);
	xs->sk.sk_write_space(&xs->sk);

out:
	rcu_read_unlock();
	return nb_pkts;
}
EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);

static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->pool->cq_lock, flags);
	xskq_prod_submit_addr(xs->pool->cq, addr);
	spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

	sock_wfree(skb);
}

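/* Build an skb around the umem buffer without copying the payload: the
 * umem pages are attached as page fragments, with a page reference taken
 * per fragment. Only used when the device advertises IFF_TX_SKB_NO_LINEAR,
 * since the payload lives entirely in frags and the linear area stays
 * empty.
 */
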
static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
					      struct xdp_desc *desc)
{
	struct xsk_buff_pool *pool = xs->pool;
	u32 hr, len, ts, offset, copy, copied;
	struct sk_buff *skb;
	struct page *page;
	void *buffer;
	int err, i;
	u64 addr;

	hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));

	skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
	if (unlikely(!skb))
		return ERR_PTR(err);

	skb_reserve(skb, hr);

	addr = desc->addr;
	len = desc->len;
	ts = pool->unaligned ? len : pool->chunk_size;

	buffer = xsk_buff_raw_get_data(pool, addr);
	offset = offset_in_page(buffer);
	addr = buffer - pool->addrs;

	for (copied = 0, i = 0; copied < len; i++) {
		page = pool->umem->pgs[addr >> PAGE_SHIFT];
		get_page(page);

		copy = min_t(u32, PAGE_SIZE - offset, len - copied);
		skb_fill_page_desc(skb, i, page, offset, copy);

		copied += copy;
		addr += copy;
		offset = 0;
	}

	skb->len += len;
	skb->data_len += len;
	skb->truesize += ts;

	refcount_add(ts, &xs->sk.sk_wmem_alloc);

	return skb;
}

static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
				     struct xdp_desc *desc)
{
	struct net_device *dev = xs->dev;
	struct sk_buff *skb;

	if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
		skb = xsk_build_skb_zerocopy(xs, desc);
		if (IS_ERR(skb))
			return skb;
	} else {
		u32 hr, tr, len;
		void *buffer;
		int err;

		hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
		tr = dev->needed_tailroom;
		len = desc->len;

		skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
		if (unlikely(!skb))
			return ERR_PTR(err);

		skb_reserve(skb, hr);
		skb_put(skb, len);

		buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
		err = skb_store_bits(skb, 0, buffer, len);
		if (unlikely(err)) {
			kfree_skb(skb);
			return ERR_PTR(err);
		}
	}

	skb->dev = dev;
	skb->priority = xs->sk.sk_priority;
	skb->mark = xs->sk.sk_mark;
	skb_shinfo(skb)->destructor_arg = (void *)(long)desc->addr;
	skb->destructor = xsk_destruct_skb;

	return skb;
}

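/* Copy-mode TX: drain up to TX_BATCH_SIZE descriptors from the TX ring,
 * turn each into an skb and hand it to the driver queue directly. A
 * completion queue slot is reserved under cq_lock before transmission, so
 * the completion in xsk_destruct_skb() can never fail; on NETDEV_TX_BUSY
 * the reservation is cancelled and userspace is told to retry.
 */
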
static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	unsigned long flags;
	int err = 0;

	mutex_lock(&xs->mutex);

	/* Since we dropped the RCU read lock, the socket state might have changed. */
	if (unlikely(!xsk_is_bound(xs))) {
		err = -ENXIO;
		goto out;
	}

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		skb = xsk_build_skb(xs, &desc);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto out;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		spin_lock_irqsave(&xs->pool->cq_lock, flags);
		if (xskq_prod_reserve(xs->pool->cq)) {
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			kfree_skb(skb);
			goto out;
		}
		spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

		err = __dev_direct_xmit(skb, xs->queue_id);
		if (err == NETDEV_TX_BUSY) {
			/* Tell user-space to retry the send */
			skb->destructor = sock_wfree;
			spin_lock_irqsave(&xs->pool->cq_lock, flags);
			xskq_prod_cancel(xs->pool->cq);
			spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
			/* Free skb without triggering the perf drop trace */
			consume_skb(skb);
			err = -EAGAIN;
			goto out;
		}

		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

	xs->tx->queue_empty_descs++;

out:
	if (sent_frame)
		if (xsk_tx_writeable(xs))
			sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int xsk_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	int ret;

	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	if (xs->zc)
		return xsk_wakeup(xs, XDP_WAKEUP_TX);

	/* Drop the RCU lock since the SKB path might sleep. */
	rcu_read_unlock();
	ret = xsk_generic_xmit(sk);
	/* Reacquire RCU lock before going into common code. */
	rcu_read_lock();

	return ret;
}

static bool xsk_no_wakeup(struct sock *sk)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
	/* Prefer busy-polling, skip the wakeup. */
	return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
	       READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
#else
	return false;
#endif
}

static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xsk_no_wakeup(sk))
		return 0;

	pool = xs->pool;
	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return xsk_xmit(sk);
	return 0;
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_sendmsg(sock, m, total_len);
	rcu_read_unlock();

	return ret;
}

static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	bool need_wait = !(flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->rx))
		return -ENOBUFS;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	if (sk_can_busy_loop(sk))
		sk_busy_loop(sk, 1); /* only support non-blocking sockets */

	if (xsk_no_wakeup(sk))
		return 0;

	if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
		return xsk_wakeup(xs, XDP_WAKEUP_RX);
	return 0;
}

static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
	int ret;

	rcu_read_lock();
	ret = __xsk_recvmsg(sock, m, len, flags);
	rcu_read_unlock();

	return ret;
}

static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = 0;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	sock_poll_wait(file, sock, wait);

	rcu_read_lock();
	if (unlikely(!xsk_is_bound(xs))) {
		rcu_read_unlock();
		return mask;
	}

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else
			/* Poll needs to drive Tx also in copy mode */
			xsk_xmit(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && xsk_tx_writeable(xs))
		mask |= EPOLLOUT | EPOLLWRNORM;

	rcu_read_unlock();
	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock __rcu ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		bpf_map_inc(&node->map->map);
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock __rcu **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		bpf_map_put(&map->map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}

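/* Bind an XSK socket to a device and queue id. Three cases are handled
 * below: sharing another socket's umem on a different dev/queue (a new
 * pool is created and assigned), sharing both umem and pool (same
 * dev/queue), and a socket that registered its own umem (a new pool is
 * created from it).
 */
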
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}

		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
			/* Share the umem with another socket on another qid
			 * and/or device.
			 */
			xs->pool = xp_create_and_assign_umem(xs,
							     umem_xs->umem);
			if (!xs->pool) {
				err = -ENOMEM;
				sockfd_put(sock);
				goto out_unlock;
			}

			err = xp_assign_dev_shared(xs->pool, umem_xs->umem,
						   dev, qid);
			if (err) {
				xp_destroy(xs->pool);
				xs->pool = NULL;
				sockfd_put(sock);
				goto out_unlock;
			}
		} else {
			/* Share the buffer pool with the other socket. */
			if (xs->fq_tmp || xs->cq_tmp) {
				/* Do not allow setting your own fq or cq. */
				err = -EINVAL;
				sockfd_put(sock);
				goto out_unlock;
			}

			xp_get_pool(umem_xs->pool);
			xs->pool = umem_xs->pool;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	/* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
	xs->fq_tmp = NULL;
	xs->cq_tmp = NULL;

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

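/* Fill in ring offsets using the layout that predates the flags field;
 * xsk_getsockopt() below picks this or the current layout based on the
 * optlen the application passed in.
 */
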
static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

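/* mmap() maps one of the four rings into the application, selected by the
 * page offset (XDP_PGOFF_RX_RING, XDP_PGOFF_TX_RING, and the umem fill and
 * completion ring offsets). The ring must have been created with
 * setsockopt() first, and the socket must still be in XSK_READY state,
 * i.e. not yet bound.
 */
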
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(xs->fq_tmp);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(xs->cq_tmp);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

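/* Netdev notifier: on NETDEV_UNREGISTER, mark every socket bound to the
 * disappearing device with ENETDOWN, unbind it, and drop the pool's
 * device references so the device can go away.
 */
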
static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= xsk_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	if (!xp_put_pool(xs->pool))
		xdp_put_umem(xs->umem, !xs->pool);

	sk_refcnt_debug_dec(sk);
}

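/* Socket creation: AF_XDP sockets are SOCK_RAW with protocol 0 and
 * require CAP_NET_RAW in the owning user namespace. The socket starts
 * in XSK_READY state and is tracked on a per-netns list so that
 * xsk_notifier() can find it on device unregister.
 */
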
static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call = xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);