// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 16

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);

struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}

/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}

void xp_release(struct xdp_buff_xsk *xskb)
{
	xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

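/* Convert an xskb back to the 64-bit address format used on the rx ring. In
 * aligned mode this is the base address plus the offset; in unaligned mode
 * the offset within the chunk is carried in the upper bits of the address
 * (XSK_UNALIGNED_BUF_OFFSET_SHIFT) so that the original base address is
 * preserved in the lower bits.
 */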
static u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

	offset += xskb->pool->headroom;
	if (!xskb->pool->unaligned)
		return xskb->orig_addr + offset;
	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}

static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}

static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
{
	void *from_buf, *to_buf;
	u32 metalen;

	if (unlikely(xdp_data_meta_unsupported(from))) {
		from_buf = from->data;
		to_buf = to->data;
		metalen = 0;
	} else {
		from_buf = from->data_meta;
		metalen = from->data - from->data_meta;
		to_buf = to->data - metalen;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
		     bool explicit_free)
{
	struct xdp_buff *xsk_xdp;
	int err;

	if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_xdp = xsk_buff_alloc(xs->pool);
	if (!xsk_xdp) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_copy_xdp(xsk_xdp, xdp, len);
	err = __xsk_rcv_zc(xs, xsk_xdp, len);
	if (err) {
		xsk_buff_free(xsk_xdp);
		return err;
	}
	if (explicit_free)
		xdp_return_buff(xdp);
	return 0;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp,
		   bool explicit_free)
{
	u32 len;

	if (!xsk_is_bound(xs))
		return -EINVAL;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL ?
		__xsk_rcv_zc(xs, xdp, len) :
		__xsk_rcv(xs, xdp, len, explicit_free);
}

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}

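/* Receive path used by generic XDP (copy mode). The caller may run in
 * process context, so the rx ring is protected by rx_lock and the ring is
 * flushed immediately instead of being batched per NAPI cycle.
 */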
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv(xs, xdp, false);
	xsk_flush(xs);
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp, true);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

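/* Typically invoked via xdp_do_flush() at the end of a NAPI poll: submit the
 * rx rings and wake up userspace for all sockets that received buffers in
 * this batch.
 */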
void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);

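/* Called by zero-copy drivers to fetch the next descriptor to send. All
 * sockets sharing this pool are scanned; a descriptor is only handed out
 * once space for its completion entry has been reserved in the completion
 * queue.
 */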
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			xs->tx->queue_empty_descs++;
			continue;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);

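/* Ask the driver to process the rings for this socket's queue via
 * ndo_xsk_wakeup(). flags is a combination of XDP_WAKEUP_RX and
 * XDP_WAKEUP_TX.
 */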
static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;
	int err;

	rcu_read_lock();
	err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
	rcu_read_unlock();

	return err;
}

static int xsk_zc_xmit(struct xdp_sock *xs)
{
	return xsk_wakeup(xs, XDP_WAKEUP_TX);
}

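/* skb destructor for copy-mode Tx: when the driver is done with the skb, the
 * corresponding umem address is published on the completion queue so that
 * userspace can reuse the buffer.
 */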
static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	xskq_prod_submit_addr(xs->pool->cq, addr);
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}

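/* Copy-mode Tx: for each descriptor on the tx ring, allocate an skb, copy
 * the payload out of the umem and send it with dev_direct_xmit() on the
 * bound queue. At most TX_BATCH_SIZE descriptors are handled per call.
 */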
static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb))
			goto out;

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xsk_buff_raw_get_data(xs->pool, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
		skb->destructor = xsk_destruct_skb;

		err = dev_direct_xmit(skb, xs->queue_id);
		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

	xs->tx->queue_empty_descs++;

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int __xsk_sendmsg(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	return __xsk_sendmsg(sk);
}

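/* poll() also drives the Tx path: if the driver has asked to be woken up,
 * either kick it (zero-copy) or send pending descriptors directly (copy
 * mode), so that a purely poll()-driven application makes progress.
 */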
static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	if (unlikely(!xsk_is_bound(xs)))
		return mask;

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else
			/* Poll needs to drive Tx also in copy mode */
			__xsk_sendmsg(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && !xskq_cons_is_full(xs->tx))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		WARN_ON(xsk_map_inc(node->map));
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		xsk_map_put(map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

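/* A socket that registers its own umem must also have created both a fill
 * ring and a completion ring before it can be bound.
 */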
static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}

static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}

		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
			/* Share the umem with another socket on another qid
			 * and/or device.
			 */
			xs->pool = xp_create_and_assign_umem(xs,
							     umem_xs->umem);
			if (!xs->pool) {
				err = -ENOMEM;
				sockfd_put(sock);
				goto out_unlock;
			}

			err = xp_assign_dev_shared(xs->pool, umem_xs->umem,
						   dev, qid);
			if (err) {
				xp_destroy(xs->pool);
				sockfd_put(sock);
				goto out_unlock;
			}
		} else {
			/* Share the buffer pool with the other socket. */
			if (xs->fq_tmp || xs->cq_tmp) {
				/* Do not allow setting your own fq or cq. */
				err = -EINVAL;
				sockfd_put(sock);
				goto out_unlock;
			}

			xp_get_pool(umem_xs->pool);
			xs->pool = umem_xs->pool;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

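/* Layout of struct xdp_umem_reg before the flags field was added to the
 * uAPI. XDP_UMEM_REG keeps accepting this shorter struct so that
 * applications built against the older header continue to work.
 */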
struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

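/* Original layout of struct xdp_statistics. Later kernels append ring-full
 * and empty-descriptor counters; XDP_STATISTICS copies back only as much as
 * the caller's buffer can hold.
 */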
struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

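/* mmap() of one of the four rings. The page offset selects the ring
 * (XDP_PGOFF_RX_RING, XDP_PGOFF_TX_RING, XDP_UMEM_PGOFF_FILL_RING or
 * XDP_UMEM_PGOFF_COMPLETION_RING); the corresponding queue must have been
 * created with setsockopt() first.
 */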
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(xs->fq_tmp);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(xs->cq_tmp);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

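/* Netdevice notifier: when a device is unregistered, unbind every socket
 * bound to it, report ENETDOWN to userspace and drop the buffer pool's
 * device references.
 */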
static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	xp_put_pool(xs->pool);

	sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);
	spin_lock_init(&xs->tx_completion_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call = xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);