// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"
#define TX_BATCH_SIZE 16

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
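
/* need_wakeup support: when a driver enables it, user space must check the
 * XDP_RING_NEED_WAKEUP flag on the fill and Tx rings and, if it is set, kick
 * the kernel before new entries get processed. Roughly, on the application
 * side:
 *
 *	if (ring->flags & XDP_RING_NEED_WAKEUP)
 *		sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 *
 * The helpers below set and clear this flag on behalf of drivers. The state
 * is cached in pool->cached_need_wakeup so that the shared rings are only
 * written when the state actually changes.
 */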
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);
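
/* The helpers below tie a buffer pool to a specific netdev queue id so that
 * drivers can find the pool from their data path. Rx and Tx queue vectors are
 * handled separately since a queue id may only be valid for one of the two
 * directions.
 */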
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);
void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}
/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int, dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}
void xp_release(struct xdp_buff_xsk *xskb)
{
	xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}
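
/* Translate an xdp_buff_xsk back into the umem address (handle) that is put
 * on the rx ring. In aligned mode the offset within the chunk is simply added
 * to the original address; in unaligned mode the offset is encoded in the
 * upper bits of the handle via XSK_UNALIGNED_BUF_OFFSET_SHIFT.
 */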
static u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

	offset += xskb->pool->headroom;
	if (!xskb->pool->unaligned)
		return xskb->orig_addr + offset;
	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}
static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
{
	void *from_buf, *to_buf;
	u32 metalen;

	if (unlikely(xdp_data_meta_unsupported(from))) {
		from_buf = from->data;
		to_buf = to->data;
		metalen = 0;
	} else {
		from_buf = from->data_meta;
		metalen = from->data - from->data_meta;
		to_buf = to->data - metalen;
	}

	memcpy(to_buf, from_buf, len + metalen);
}
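
/* Copy-mode receive: the incoming xdp_buff does not belong to this socket's
 * buffer pool, so allocate a buffer from the pool, copy packet data (and any
 * metadata) into it, and publish it on the rx ring via the zero-copy helper.
 */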
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
		     bool explicit_free)
{
	struct xdp_buff *xsk_xdp;
	int err;

	if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_xdp = xsk_buff_alloc(xs->pool);
	if (!xsk_xdp) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_copy_xdp(xsk_xdp, xdp, len);
	err = __xsk_rcv_zc(xs, xsk_xdp, len);
	if (err) {
		xsk_buff_free(xsk_xdp);
		return err;
	}
	if (explicit_free)
		xdp_return_buff(xdp);
	return 0;
}
static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}
static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp,
		   bool explicit_free)
{
	u32 len;

	if (!xsk_is_bound(xs))
		return -EINVAL;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL ?
		__xsk_rcv_zc(xs, xdp, len) :
		__xsk_rcv(xs, xdp, len, explicit_free);
}
static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv(xs, xdp, false);
	xsk_flush(xs);
	spin_unlock_bh(&xs->rx_lock);
	return err;
}
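
/* Called from the XDP redirect path. Received buffers are not made visible to
 * user space right away; the socket is instead put on a per-cpu flush list
 * and all queued sockets are flushed in one batch from __xsk_map_flush() at
 * the end of the NAPI poll.
 */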
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp, true);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}
void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);
void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);
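
/* Used by zero-copy drivers to fetch the next descriptor to transmit. The
 * sockets sharing this pool are walked via the RCU-protected Tx list, and a
 * descriptor is only handed out if space for its completion entry could be
 * reserved in the completion ring.
 */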
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			xs->tx->queue_empty_descs++;
			continue;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);
static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;
	int err;

	rcu_read_lock();
	err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
	rcu_read_unlock();

	return err;
}
static int xsk_zc_xmit(struct xdp_sock *xs)
{
	return xsk_wakeup(xs, XDP_WAKEUP_TX);
}
static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	xskq_prod_submit_addr(xs->pool->cq, addr);
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}
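
/* Copy-mode transmit: each Tx descriptor is copied into a freshly allocated
 * skb and sent with dev_direct_xmit() on the bound queue. The completion
 * entry is written back from the skb destructor once the skb is consumed.
 */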
static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb))
			goto out;

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xsk_buff_raw_get_data(xs->pool, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
		skb->destructor = xsk_destruct_skb;

		err = dev_direct_xmit(skb, xs->queue_id);
		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

	xs->tx->queue_empty_descs++;

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}
static int __xsk_sendmsg(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
}
static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	return __xsk_sendmsg(sk);
}
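
/* poll() reports EPOLLIN when the rx ring has entries to consume and EPOLLOUT
 * when the Tx ring still has room. If the driver requested wakeups, poll()
 * also acts as the kick that drives the Rx and Tx paths.
 */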
static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	if (unlikely(!xsk_is_bound(xs)))
		return mask;

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else
			/* Poll needs to drive Tx also in copy mode */
			__xsk_sendmsg(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && !xskq_cons_is_full(xs->tx))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}
static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}
static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}
static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		WARN_ON(xsk_map_inc(node->map));
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}
static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		xsk_map_put(map);
	}
}
static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}
static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}
static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}
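
/* bind() attaches the socket to a <netdev, queue_id> pair. With
 * XDP_SHARED_UMEM the umem of an already bound socket is reused, and if that
 * socket sits on the same device and queue, its buffer pool is shared as well
 * instead of using the caller's own fill and completion rings.
 */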
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We have already our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}

		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
			/* Share the umem with another socket on another qid
			 * and/or device.
			 */
			xs->pool = xp_create_and_assign_umem(xs,
							     umem_xs->umem);
			if (!xs->pool) {
				err = -ENOMEM;
				sockfd_put(sock);
				goto out_unlock;
			}

			err = xp_assign_dev_shared(xs->pool, umem_xs->umem,
						   dev, qid);
			if (err) {
				xp_destroy(xs->pool);
				xs->pool = NULL;
				sockfd_put(sock);
				goto out_unlock;
			}
		} else {
			/* Share the buffer pool with the other socket. */
			if (xs->fq_tmp || xs->cq_tmp) {
				/* Do not allow setting your own fq or cq. */
				err = -EINVAL;
				sockfd_put(sock);
				goto out_unlock;
			}

			xp_get_pool(umem_xs->pool);
			xs->pool = umem_xs->pool;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	return err;
}
struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};
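
/* setsockopt() creates the rx/tx rings, registers the umem and creates the
 * fill and completion rings. Ring sizes must be powers of two, and all of
 * this is only allowed while the socket is still in the XSK_READY state.
 */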
static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}
static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}
struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};
static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(xs->fq_tmp);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(xs->cq_tmp);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}
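
/* Netdev notifier: when the bound device is unregistered, flag the socket
 * with ENETDOWN, unbind it and drop the buffer pool's device references so
 * that the netdev can actually go away.
 */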
static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}
static struct proto xsk_proto = {
	.name = "XDP",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct xdp_sock),
};
static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};
static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	xp_put_pool(xs->pool);

	sk_refcnt_debug_dec(sk);
}
static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);
	spin_lock_init(&xs->tx_completion_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}
static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call	= xsk_notifier,
};
static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};
static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));

	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);