// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *            Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock_drv.h>
#include <net/busy_poll.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 32

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
        if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
                return;

        pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
        pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
        struct xdp_sock *xs;

        if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
                return;

        rcu_read_lock();
        list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
                xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
        }
        rcu_read_unlock();

        pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
        if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
                return;

        pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
        pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
        struct xdp_sock *xs;

        if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
                return;

        rcu_read_lock();
        list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
                xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
        }
        rcu_read_unlock();

        pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
        return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);

struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
                                            u16 queue_id)
{
        if (queue_id < dev->real_num_rx_queues)
                return dev->_rx[queue_id].pool;
        if (queue_id < dev->real_num_tx_queues)
                return dev->_tx[queue_id].pool;

        return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
        if (queue_id < dev->num_rx_queues)
                dev->_rx[queue_id].pool = NULL;
        if (queue_id < dev->num_tx_queues)
                dev->_tx[queue_id].pool = NULL;
}

/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
                        u16 queue_id)
{
        if (queue_id >= max_t(unsigned int,
                              dev->real_num_rx_queues,
                              dev->real_num_tx_queues))
                return -EINVAL;

        if (queue_id < dev->real_num_rx_queues)
                dev->_rx[queue_id].pool = pool;
        if (queue_id < dev->real_num_tx_queues)
                dev->_tx[queue_id].pool = pool;

        return 0;
}

void xp_release(struct xdp_buff_xsk *xskb)
{
        xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

static u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
        u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

        offset += xskb->pool->headroom;
        if (!xskb->pool->unaligned)
                return xskb->orig_addr + offset;
        return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}

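/* Worked example of the unaligned handle encoding above (illustrative):
 * with XSK_UNALIGNED_BUF_OFFSET_SHIFT == 48, orig_addr == 0x1000 and a
 * headroom-adjusted offset of 0x100, the handle becomes
 * 0x1000 + (0x100ULL << 48) == 0x0100000000001000, so the Rx descriptor
 * carries the chunk start in the low 48 bits and the data offset in the
 * upper 16 bits.
 */
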
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
        struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
        u64 addr;
        int err;

        addr = xp_get_handle(xskb);
        err = xskq_prod_reserve_desc(xs->rx, addr, len);
        if (err) {
                xs->rx_queue_full++;
                return err;
        }

        xp_release(xskb);
        return 0;
}

static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
{
        void *from_buf, *to_buf;
        u32 metalen;

        if (unlikely(xdp_data_meta_unsupported(from))) {
                from_buf = from->data;
                to_buf = to->data;
                metalen = 0;
        } else {
                from_buf = from->data_meta;
                metalen = from->data - from->data_meta;
                to_buf = to->data - metalen;
        }

        memcpy(to_buf, from_buf, len + metalen);
}

static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
        struct xdp_buff *xsk_xdp;
        int err;
        u32 len;

        len = xdp->data_end - xdp->data;
        if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
                xs->rx_dropped++;
                return -ENOSPC;
        }

        xsk_xdp = xsk_buff_alloc(xs->pool);
        if (!xsk_xdp) {
                xs->rx_dropped++;
                return -ENOMEM;
        }

        xsk_copy_xdp(xsk_xdp, xdp, len);
        err = __xsk_rcv_zc(xs, xsk_xdp, len);
        if (err) {
                xsk_buff_free(xsk_xdp);
                return err;
        }
        return 0;
}

static bool xsk_tx_writeable(struct xdp_sock *xs)
{
        if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
                return false;

        return true;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
        if (READ_ONCE(xs->state) == XSK_BOUND) {
                /* Matches smp_wmb() in bind(). */
                smp_rmb();
                return true;
        }
        return false;
}

static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp)
{
        if (!xsk_is_bound(xs))
                return -ENXIO;

        if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
                return -EINVAL;

        sk_mark_napi_id_once_xdp(&xs->sk, xdp);
        return 0;
}

static void xsk_flush(struct xdp_sock *xs)
{
        xskq_prod_submit(xs->rx);
        __xskq_cons_release(xs->pool->fq);
        sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
        int err;

        spin_lock_bh(&xs->rx_lock);
        err = xsk_rcv_check(xs, xdp);
        if (!err) {
                err = __xsk_rcv(xs, xdp);
                xsk_flush(xs);
        }
        spin_unlock_bh(&xs->rx_lock);
        return err;
}

static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
        int err;
        u32 len;

        err = xsk_rcv_check(xs, xdp);
        if (err)
                return err;

        if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
                len = xdp->data_end - xdp->data;
                return __xsk_rcv_zc(xs, xdp, len);
        }

        err = __xsk_rcv(xs, xdp);
        if (!err)
                xdp_return_buff(xdp);
        return err;
}

int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
        struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
        int err;

        err = xsk_rcv(xs, xdp);
        if (err)
                return err;

        if (!xs->flush_node.prev)
                list_add(&xs->flush_node, flush_list);

        return 0;
}

void __xsk_map_flush(void)
{
        struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
        struct xdp_sock *xs, *tmp;

        list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
                xsk_flush(xs);
                __list_del_clearprev(&xs->flush_node);
        }
}

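/* Example: how the flush list above is driven. A driver's NAPI poll
 * ends with xdp_do_flush(), which calls __xsk_map_flush() for every
 * socket queued by __xsk_map_redirect(). A minimal sketch of a
 * hypothetical driver poll loop (abbreviated, not from a real driver):
 *
 *	while (budget--) {
 *		// ... build an xdp_buff from the hw descriptor ...
 *		if (bpf_prog_run_xdp(prog, &xdp) == XDP_REDIRECT)
 *			xdp_do_redirect(dev, &xdp, prog); // may queue via __xsk_map_redirect()
 *	}
 *	xdp_do_flush(); // drains the per-cpu xskmap_flush_list
 */
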
void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
        xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

void xsk_tx_release(struct xsk_buff_pool *pool)
{
        struct xdp_sock *xs;

        rcu_read_lock();
        list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
                __xskq_cons_release(xs->tx);
                if (xsk_tx_writeable(xs))
                        xs->sk.sk_write_space(&xs->sk);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);

bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
        struct xdp_sock *xs;

        rcu_read_lock();
        list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
                if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
                        xs->tx->queue_empty_descs++;
                        continue;
                }

                /* This is the backpressure mechanism for the Tx path.
                 * Reserve space in the completion queue and only proceed
                 * if there is space in it. This avoids having to implement
                 * any buffering in the Tx path.
                 */
                if (xskq_prod_reserve_addr(pool->cq, desc->addr))
                        goto out;

                xskq_cons_release(xs->tx);
                rcu_read_unlock();
                return true;
        }

out:
        rcu_read_unlock();
        return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);

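/* Example: a zero-copy driver pairs the peek above with
 * xsk_tx_completed() once the hardware is done with the frame. A
 * minimal sketch, assuming hypothetical free_hw_slots and
 * completed_frames counters; not taken from any real driver:
 *
 *	struct xdp_desc desc;
 *
 *	while (free_hw_slots-- && xsk_tx_peek_desc(pool, &desc)) {
 *		dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);
 *		// post dma and desc.len to the hardware Tx ring
 *	}
 *	// later, from the Tx completion path:
 *	xsk_tx_completed(pool, completed_frames);
 */
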
static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
{
        struct xdp_desc *descs = pool->tx_descs;
        u32 nb_pkts = 0;

        while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
                nb_pkts++;

        xsk_tx_release(pool);
        return nb_pkts;
}

u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries)
{
        struct xdp_sock *xs;
        u32 nb_pkts;

        rcu_read_lock();
        if (!list_is_singular(&pool->xsk_tx_list)) {
                /* Fall back to the non-batched version */
                rcu_read_unlock();
                return xsk_tx_peek_release_fallback(pool, max_entries);
        }

        xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
        if (!xs) {
                nb_pkts = 0;
                goto out;
        }

        max_entries = xskq_cons_nb_entries(xs->tx, max_entries);
        nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, max_entries);
        if (!nb_pkts) {
                xs->tx->queue_empty_descs++;
                goto out;
        }

        /* This is the backpressure mechanism for the Tx path. Try to
         * reserve space in the completion queue for all packets, but
         * if there are fewer slots available, just process that many
         * packets. This avoids having to implement any buffering in
         * the Tx path.
         */
        nb_pkts = xskq_prod_reserve_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
        if (!nb_pkts)
                goto out;

        xskq_cons_release_n(xs->tx, max_entries);
        __xskq_cons_release(xs->tx);
        xs->sk.sk_write_space(&xs->sk);

out:
        rcu_read_unlock();
        return nb_pkts;
}
EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);

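/* Example: the batch API above suits drivers that can post several
 * descriptors per iteration. A minimal sketch, assuming a hypothetical
 * budget; illustrative only:
 *
 *	u32 i, nb_pkts = xsk_tx_peek_release_desc_batch(pool, budget);
 *
 *	for (i = 0; i < nb_pkts; i++)
 *		; // post pool->tx_descs[i] to the hardware Tx ring
 */
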
static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
        struct net_device *dev = xs->dev;

        return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
        u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
        struct xdp_sock *xs = xdp_sk(skb->sk);
        unsigned long flags;

        spin_lock_irqsave(&xs->pool->cq_lock, flags);
        xskq_prod_submit_addr(xs->pool->cq, addr);
        spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

        sock_wfree(skb);
}

static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
                                              struct xdp_desc *desc)
{
        struct xsk_buff_pool *pool = xs->pool;
        u32 hr, len, ts, offset, copy, copied;
        struct sk_buff *skb;
        struct page *page;
        void *buffer;
        int err, i;
        u64 addr;

        hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));

        skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
        if (unlikely(!skb))
                return ERR_PTR(err);

        skb_reserve(skb, hr);

        addr = desc->addr;
        len = desc->len;
        ts = pool->unaligned ? len : pool->chunk_size;

        buffer = xsk_buff_raw_get_data(pool, addr);
        offset = offset_in_page(buffer);
        addr = buffer - pool->addrs;

        for (copied = 0, i = 0; copied < len; i++) {
                page = pool->umem->pgs[addr >> PAGE_SHIFT];
                get_page(page);

                copy = min_t(u32, PAGE_SIZE - offset, len - copied);
                skb_fill_page_desc(skb, i, page, offset, copy);

                copied += copy;
                addr += copy;
                offset = 0;
        }

        skb->len += len;
        skb->data_len += len;
        skb->truesize += ts;

        refcount_add(ts, &xs->sk.sk_wmem_alloc);

        return skb;
}

static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
                                     struct xdp_desc *desc)
{
        struct net_device *dev = xs->dev;
        struct sk_buff *skb;

        if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
                skb = xsk_build_skb_zerocopy(xs, desc);
                if (IS_ERR(skb))
                        return skb;
        } else {
                u32 hr, tr, len;
                void *buffer;
                int err;

                hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
                tr = dev->needed_tailroom;
                len = desc->len;

                skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
                if (unlikely(!skb))
                        return ERR_PTR(err);

                skb_reserve(skb, hr);
                skb_put(skb, len);

                buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
                err = skb_store_bits(skb, 0, buffer, len);
                if (unlikely(err)) {
                        kfree_skb(skb);
                        return ERR_PTR(err);
                }
        }

        skb->dev = dev;
        skb->priority = xs->sk.sk_priority;
        skb->mark = xs->sk.sk_mark;
        skb_shinfo(skb)->destructor_arg = (void *)(long)desc->addr;
        skb->destructor = xsk_destruct_skb;

        return skb;
}

static int xsk_generic_xmit(struct sock *sk)
{
        struct xdp_sock *xs = xdp_sk(sk);
        u32 max_batch = TX_BATCH_SIZE;
        bool sent_frame = false;
        struct xdp_desc desc;
        struct sk_buff *skb;
        unsigned long flags;
        int err = 0;

        mutex_lock(&xs->mutex);

        /* Since we dropped the RCU read lock, the socket state might have changed. */
        if (unlikely(!xsk_is_bound(xs))) {
                err = -ENXIO;
                goto out;
        }

        if (xs->queue_id >= xs->dev->real_num_tx_queues)
                goto out;

        while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
                if (max_batch-- == 0) {
                        err = -EAGAIN;
                        goto out;
                }

                /* This is the backpressure mechanism for the Tx path.
                 * Reserve space in the completion queue and only proceed
                 * if there is space in it. This avoids having to implement
                 * any buffering in the Tx path.
                 */
                spin_lock_irqsave(&xs->pool->cq_lock, flags);
                if (xskq_prod_reserve(xs->pool->cq)) {
                        spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
                        goto out;
                }
                spin_unlock_irqrestore(&xs->pool->cq_lock, flags);

                skb = xsk_build_skb(xs, &desc);
                if (IS_ERR(skb)) {
                        err = PTR_ERR(skb);
                        spin_lock_irqsave(&xs->pool->cq_lock, flags);
                        xskq_prod_cancel(xs->pool->cq);
                        spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
                        goto out;
                }

                err = __dev_direct_xmit(skb, xs->queue_id);
                if (err == NETDEV_TX_BUSY) {
                        /* Tell user-space to retry the send */
                        skb->destructor = sock_wfree;
                        spin_lock_irqsave(&xs->pool->cq_lock, flags);
                        xskq_prod_cancel(xs->pool->cq);
                        spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
                        /* Free skb without triggering the perf drop trace */
                        consume_skb(skb);
                        err = -EAGAIN;
                        goto out;
                }

                xskq_cons_release(xs->tx);
                /* Ignore NET_XMIT_CN as packet might have been sent */
                if (err == NET_XMIT_DROP) {
                        /* SKB completed but not sent */
                        err = -EBUSY;
                        goto out;
                }

                sent_frame = true;
        }

        xs->tx->queue_empty_descs++;

out:
        if (sent_frame)
                if (xsk_tx_writeable(xs))
                        sk->sk_write_space(sk);

        mutex_unlock(&xs->mutex);
        return err;
}

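/* Note on the completion queue lifecycle in the copy path above: one cq
 * slot is reserved (under cq_lock) before the skb is built, cancelled
 * again if skb allocation or transmission fails, and finally submitted
 * from xsk_destruct_skb() once the skb has been consumed, at which
 * point userspace sees the address in the completion ring.
 */
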
static int xsk_xmit(struct sock *sk)
{
        struct xdp_sock *xs = xdp_sk(sk);
        int ret;

        if (unlikely(!(xs->dev->flags & IFF_UP)))
                return -ENETDOWN;
        if (unlikely(!xs->tx))
                return -ENOBUFS;

        if (xs->zc)
                return xsk_wakeup(xs, XDP_WAKEUP_TX);

        /* Drop the RCU lock since the SKB path might sleep. */
        rcu_read_unlock();
        ret = xsk_generic_xmit(sk);
        /* Reacquire RCU lock before going into common code. */
        rcu_read_lock();

        return ret;
}

static bool xsk_no_wakeup(struct sock *sk)
{
#ifdef CONFIG_NET_RX_BUSY_POLL
        /* Prefer busy-polling, skip the wakeup. */
        return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
               READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
#else
        return false;
#endif
}

static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
        bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        struct xsk_buff_pool *pool;

        if (unlikely(!xsk_is_bound(xs)))
                return -ENXIO;
        if (unlikely(need_wait))
                return -EOPNOTSUPP;

        if (sk_can_busy_loop(sk))
                sk_busy_loop(sk, 1); /* only support non-blocking sockets */

        if (xs->zc && xsk_no_wakeup(sk))
                return 0;

        pool = xs->pool;
        if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
                return xsk_xmit(sk);
        return 0;
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
        int ret;

        rcu_read_lock();
        ret = __xsk_sendmsg(sock, m, total_len);
        rcu_read_unlock();

        return ret;
}

static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
        bool need_wait = !(flags & MSG_DONTWAIT);
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);

        if (unlikely(!xsk_is_bound(xs)))
                return -ENXIO;
        if (unlikely(!(xs->dev->flags & IFF_UP)))
                return -ENETDOWN;
        if (unlikely(!xs->rx))
                return -ENOBUFS;
        if (unlikely(need_wait))
                return -EOPNOTSUPP;

        if (sk_can_busy_loop(sk))
                sk_busy_loop(sk, 1); /* only support non-blocking sockets */

        if (xsk_no_wakeup(sk))
                return 0;

        if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
                return xsk_wakeup(xs, XDP_WAKEUP_RX);
        return 0;
}

static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
{
        int ret;

        rcu_read_lock();
        ret = __xsk_recvmsg(sock, m, len, flags);
        rcu_read_unlock();

        return ret;
}

static __poll_t xsk_poll(struct file *file, struct socket *sock,
                         struct poll_table_struct *wait)
{
        __poll_t mask = 0;
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        struct xsk_buff_pool *pool;

        sock_poll_wait(file, sock, wait);

        rcu_read_lock();
        if (unlikely(!xsk_is_bound(xs))) {
                rcu_read_unlock();
                return mask;
        }

        pool = xs->pool;

        if (pool->cached_need_wakeup) {
                if (xs->zc)
                        xsk_wakeup(xs, pool->cached_need_wakeup);
                else
                        /* Poll needs to drive Tx also in copy mode */
                        xsk_xmit(sk);
        }

        if (xs->rx && !xskq_prod_is_empty(xs->rx))
                mask |= EPOLLIN | EPOLLRDNORM;
        if (xs->tx && xsk_tx_writeable(xs))
                mask |= EPOLLOUT | EPOLLWRNORM;

        rcu_read_unlock();
        return mask;
}

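/* Example: driving both directions from userspace with poll(). A
 * minimal sketch, assuming hypothetical xsk_fd and timeout_ms;
 * illustrative only:
 *
 *	struct pollfd pfd = { .fd = xsk_fd, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, timeout_ms) > 0) {
 *		if (pfd.revents & POLLIN)
 *			; // consume Rx ring entries
 *		if (pfd.revents & POLLOUT)
 *			; // produce more Tx descriptors
 *	}
 */
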
static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
                          bool umem_queue)
{
        struct xsk_queue *q;

        if (entries == 0 || *queue || !is_power_of_2(entries))
                return -EINVAL;

        q = xskq_create(entries, umem_queue);
        if (!q)
                return -ENOMEM;

        /* Make sure queue is ready before it can be seen by others */
        smp_wmb();
        WRITE_ONCE(*queue, q);
        return 0;
}

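/* Ring sizes are validated above: e.g. requesting 2048 entries for a
 * ring succeeds, while 2000 fails with -EINVAL because it is not a
 * power of two. (Illustrative example; see xsk_setsockopt() below for
 * the callers.)
 */
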
static void xsk_unbind_dev(struct xdp_sock *xs)
{
        struct net_device *dev = xs->dev;

        if (xs->state != XSK_BOUND)
                return;
        WRITE_ONCE(xs->state, XSK_UNBOUND);

        /* Wait for driver to stop using the xdp socket. */
        xp_del_xsk(xs->pool, xs);
        synchronize_net();
        dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
                                              struct xdp_sock __rcu ***map_entry)
{
        struct xsk_map *map = NULL;
        struct xsk_map_node *node;

        *map_entry = NULL;

        spin_lock_bh(&xs->map_list_lock);
        node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
                                        node);
        if (node) {
                bpf_map_inc(&node->map->map);
                map = node->map;
                *map_entry = node->map_entry;
        }
        spin_unlock_bh(&xs->map_list_lock);
        return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
        /* This function removes the current XDP socket from all the
         * maps it resides in. We need to take extra care here, due to
         * the two locks involved. Each map has a lock synchronizing
         * updates to the entries, and each socket has a lock that
         * synchronizes access to the list of maps (map_list). For
         * deadlock avoidance the locks need to be taken in the order
         * "map lock"->"socket map list lock". We start off by
         * accessing the socket map list, and take a reference to the
         * map to guarantee existence between the
         * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
         * calls. Then we ask the map to remove the socket, which
         * tries to remove the socket from the map. Note that there
         * might be updates to the map between
         * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
         */
        struct xdp_sock __rcu **map_entry = NULL;
        struct xsk_map *map;

        while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
                xsk_map_try_sock_delete(map, xs, map_entry);
                bpf_map_put(&map->map);
        }
}

static int xsk_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        struct net *net;

        if (!sk)
                return 0;

        net = sock_net(sk);

        mutex_lock(&net->xdp.lock);
        sk_del_node_init_rcu(sk);
        mutex_unlock(&net->xdp.lock);

        sock_prot_inuse_add(net, sk->sk_prot, -1);

        xsk_delete_from_maps(xs);
        mutex_lock(&xs->mutex);
        xsk_unbind_dev(xs);
        mutex_unlock(&xs->mutex);

        xskq_destroy(xs->rx);
        xskq_destroy(xs->tx);
        xskq_destroy(xs->fq_tmp);
        xskq_destroy(xs->cq_tmp);

        sock_orphan(sk);
        sock->sk = NULL;

        sk_refcnt_debug_release(sk);
        sock_put(sk);

        return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
        struct socket *sock;
        int err;

        sock = sockfd_lookup(fd, &err);
        if (!sock)
                return ERR_PTR(-ENOTSOCK);

        if (sock->sk->sk_family != PF_XDP) {
                sockfd_put(sock);
                return ERR_PTR(-ENOPROTOOPT);
        }

        return sock;
}

static bool xsk_validate_queues(struct xdp_sock *xs)
{
        return xs->fq_tmp && xs->cq_tmp;
}

static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
        struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        struct net_device *dev;
        u32 flags, qid;
        int err = 0;

        if (addr_len < sizeof(struct sockaddr_xdp))
                return -EINVAL;
        if (sxdp->sxdp_family != AF_XDP)
                return -EINVAL;

        flags = sxdp->sxdp_flags;
        if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
                      XDP_USE_NEED_WAKEUP))
                return -EINVAL;

        rtnl_lock();
        mutex_lock(&xs->mutex);
        if (xs->state != XSK_READY) {
                err = -EBUSY;
                goto out_release;
        }

        dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
        if (!dev) {
                err = -ENODEV;
                goto out_release;
        }

        if (!xs->rx && !xs->tx) {
                err = -EINVAL;
                goto out_unlock;
        }

        qid = sxdp->sxdp_queue_id;

        if (flags & XDP_SHARED_UMEM) {
                struct xdp_sock *umem_xs;
                struct socket *sock;

                if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
                    (flags & XDP_USE_NEED_WAKEUP)) {
                        /* Cannot specify flags for shared sockets. */
                        err = -EINVAL;
                        goto out_unlock;
                }

                if (xs->umem) {
                        /* We already have our own. */
                        err = -EINVAL;
                        goto out_unlock;
                }

                sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
                if (IS_ERR(sock)) {
                        err = PTR_ERR(sock);
                        goto out_unlock;
                }

                umem_xs = xdp_sk(sock->sk);
                if (!xsk_is_bound(umem_xs)) {
                        err = -EBADF;
                        sockfd_put(sock);
                        goto out_unlock;
                }

                if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
                        /* Share the umem with another socket on another qid
                         * and/or device.
                         */
                        xs->pool = xp_create_and_assign_umem(xs,
                                                             umem_xs->umem);
                        if (!xs->pool) {
                                err = -ENOMEM;
                                sockfd_put(sock);
                                goto out_unlock;
                        }

                        err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
                                                   qid);
                        if (err) {
                                xp_destroy(xs->pool);
                                xs->pool = NULL;
                                sockfd_put(sock);
                                goto out_unlock;
                        }
                } else {
                        /* Share the buffer pool with the other socket. */
                        if (xs->fq_tmp || xs->cq_tmp) {
                                /* Do not allow setting your own fq or cq. */
                                err = -EINVAL;
                                sockfd_put(sock);
                                goto out_unlock;
                        }

                        xp_get_pool(umem_xs->pool);
                        xs->pool = umem_xs->pool;

                        /* If the underlying shared umem was created without a
                         * Tx ring, allocate the Tx descs array that the Tx
                         * batching API utilizes.
                         */
                        if (xs->tx && !xs->pool->tx_descs) {
                                err = xp_alloc_tx_descs(xs->pool, xs);
                                if (err) {
                                        xp_put_pool(xs->pool);
                                        sockfd_put(sock);
                                        goto out_unlock;
                                }
                        }
                }

                xdp_get_umem(umem_xs->umem);
                WRITE_ONCE(xs->umem, umem_xs->umem);
                sockfd_put(sock);
        } else if (!xs->umem || !xsk_validate_queues(xs)) {
                err = -EINVAL;
                goto out_unlock;
        } else {
                /* This xsk has its own umem. */
                xs->pool = xp_create_and_assign_umem(xs, xs->umem);
                if (!xs->pool) {
                        err = -ENOMEM;
                        goto out_unlock;
                }

                err = xp_assign_dev(xs->pool, dev, qid, flags);
                if (err) {
                        xp_destroy(xs->pool);
                        xs->pool = NULL;
                        goto out_unlock;
                }
        }

        /* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
        xs->fq_tmp = NULL;
        xs->cq_tmp = NULL;

        xs->dev = dev;
        xs->zc = xs->umem->zc;
        xs->queue_id = qid;
        xp_add_xsk(xs->pool, xs);

out_unlock:
        if (err) {
                dev_put(dev);
        } else {
                /* Matches smp_rmb() in bind() for shared umem
                 * sockets, and xsk_is_bound().
                 */
                smp_wmb();
                WRITE_ONCE(xs->state, XSK_BOUND);
        }
out_release:
        mutex_unlock(&xs->mutex);
        rtnl_unlock();
        return err;
}

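/* Example: the userspace side of the bind path above. A minimal
 * sketch, assuming hypothetical ifindex, queue and xsk_fd values;
 * illustrative only:
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family = AF_XDP,
 *		.sxdp_ifindex = ifindex,
 *		.sxdp_queue_id = queue,
 *		.sxdp_flags = XDP_USE_NEED_WAKEUP,
 *	};
 *
 *	if (bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp)))
 *		err(1, "bind"); // errno reflects the errors returned above
 */
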
struct xdp_umem_reg_v1 {
        __u64 addr; /* Start of packet data area */
        __u64 len; /* Length of packet data area */
        __u32 chunk_size;
        __u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
                          sockptr_t optval, unsigned int optlen)
{
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        int err;

        if (level != SOL_XDP)
                return -ENOPROTOOPT;

        switch (optname) {
        case XDP_RX_RING:
        case XDP_TX_RING:
        {
                struct xsk_queue **q;
                int entries;

                if (optlen < sizeof(entries))
                        return -EINVAL;
                if (copy_from_sockptr(&entries, optval, sizeof(entries)))
                        return -EFAULT;

                mutex_lock(&xs->mutex);
                if (xs->state != XSK_READY) {
                        mutex_unlock(&xs->mutex);
                        return -EBUSY;
                }
                q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
                err = xsk_init_queue(entries, q, false);
                if (!err && optname == XDP_TX_RING)
                        /* Tx needs to be explicitly woken up the first time */
                        xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
                mutex_unlock(&xs->mutex);
                return err;
        }
        case XDP_UMEM_REG:
        {
                size_t mr_size = sizeof(struct xdp_umem_reg);
                struct xdp_umem_reg mr = {};
                struct xdp_umem *umem;

                if (optlen < sizeof(struct xdp_umem_reg_v1))
                        return -EINVAL;
                else if (optlen < sizeof(mr))
                        mr_size = sizeof(struct xdp_umem_reg_v1);

                if (copy_from_sockptr(&mr, optval, mr_size))
                        return -EFAULT;

                mutex_lock(&xs->mutex);
                if (xs->state != XSK_READY || xs->umem) {
                        mutex_unlock(&xs->mutex);
                        return -EBUSY;
                }

                umem = xdp_umem_create(&mr);
                if (IS_ERR(umem)) {
                        mutex_unlock(&xs->mutex);
                        return PTR_ERR(umem);
                }

                /* Make sure umem is ready before it can be seen by others */
                smp_wmb();
                WRITE_ONCE(xs->umem, umem);
                mutex_unlock(&xs->mutex);
                return 0;
        }
        case XDP_UMEM_FILL_RING:
        case XDP_UMEM_COMPLETION_RING:
        {
                struct xsk_queue **q;
                int entries;

                if (copy_from_sockptr(&entries, optval, sizeof(entries)))
                        return -EFAULT;

                mutex_lock(&xs->mutex);
                if (xs->state != XSK_READY) {
                        mutex_unlock(&xs->mutex);
                        return -EBUSY;
                }

                q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
                        &xs->cq_tmp;
                err = xsk_init_queue(entries, q, true);
                mutex_unlock(&xs->mutex);
                return err;
        }
        default:
                break;
        }

        return -ENOPROTOOPT;
}

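/* Example: one workable userspace setup sequence that this handler
 * serves (umem registered first, then the four rings). A minimal
 * sketch, assuming a hypothetical page-aligned umem_area of umem_len
 * bytes and socket fd; error handling elided; illustrative only:
 *
 *	struct xdp_umem_reg mr = {
 *		.addr = (__u64)(uintptr_t)umem_area,
 *		.len = umem_len,
 *		.chunk_size = 2048,
 *		.headroom = 0,
 *	};
 *	int ring_sz = 1024;
 *
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &ring_sz, sizeof(ring_sz));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &ring_sz, sizeof(ring_sz));
 *	setsockopt(fd, SOL_XDP, XDP_RX_RING, &ring_sz, sizeof(ring_sz));
 *	setsockopt(fd, SOL_XDP, XDP_TX_RING, &ring_sz, sizeof(ring_sz));
 */
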
static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
        ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
        ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
        ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
        ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
        ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
        ring->desc = offsetof(struct xdp_umem_ring, desc);
}

struct xdp_statistics_v1 {
        __u64 rx_dropped;
        __u64 rx_invalid_descs;
        __u64 tx_invalid_descs;
};

static int xsk_getsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        int len;

        if (level != SOL_XDP)
                return -ENOPROTOOPT;

        if (get_user(len, optlen))
                return -EFAULT;
        if (len < 0)
                return -EINVAL;

        switch (optname) {
        case XDP_STATISTICS:
        {
                struct xdp_statistics stats = {};
                bool extra_stats = true;
                size_t stats_size;

                if (len < sizeof(struct xdp_statistics_v1)) {
                        return -EINVAL;
                } else if (len < sizeof(stats)) {
                        extra_stats = false;
                        stats_size = sizeof(struct xdp_statistics_v1);
                } else {
                        stats_size = sizeof(stats);
                }

                mutex_lock(&xs->mutex);
                stats.rx_dropped = xs->rx_dropped;
                if (extra_stats) {
                        stats.rx_ring_full = xs->rx_queue_full;
                        stats.rx_fill_ring_empty_descs =
                                xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
                        stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
                } else {
                        stats.rx_dropped += xs->rx_queue_full;
                }
                stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
                stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
                mutex_unlock(&xs->mutex);

                if (copy_to_user(optval, &stats, stats_size))
                        return -EFAULT;
                if (put_user(stats_size, optlen))
                        return -EFAULT;

                return 0;
        }
        case XDP_MMAP_OFFSETS:
        {
                struct xdp_mmap_offsets off;
                struct xdp_mmap_offsets_v1 off_v1;
                bool flags_supported = true;
                void *to_copy;

                if (len < sizeof(off_v1))
                        return -EINVAL;
                else if (len < sizeof(off))
                        flags_supported = false;

                if (flags_supported) {
                        /* xdp_ring_offset is identical to xdp_ring_offset_v1
                         * except for the flags field added to the end.
                         */
                        xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
                                               &off.rx);
                        xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
                                               &off.tx);
                        xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
                                               &off.fr);
                        xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
                                               &off.cr);
                        off.rx.flags = offsetof(struct xdp_rxtx_ring,
                                                ptrs.flags);
                        off.tx.flags = offsetof(struct xdp_rxtx_ring,
                                                ptrs.flags);
                        off.fr.flags = offsetof(struct xdp_umem_ring,
                                                ptrs.flags);
                        off.cr.flags = offsetof(struct xdp_umem_ring,
                                                ptrs.flags);

                        len = sizeof(off);
                        to_copy = &off;
                } else {
                        xsk_enter_rxtx_offsets(&off_v1.rx);
                        xsk_enter_rxtx_offsets(&off_v1.tx);
                        xsk_enter_umem_offsets(&off_v1.fr);
                        xsk_enter_umem_offsets(&off_v1.cr);

                        len = sizeof(off_v1);
                        to_copy = &off_v1;
                }

                if (copy_to_user(optval, to_copy, len))
                        return -EFAULT;
                if (put_user(len, optlen))
                        return -EFAULT;

                return 0;
        }
        case XDP_OPTIONS:
        {
                struct xdp_options opts = {};

                if (len < sizeof(opts))
                        return -EINVAL;

                mutex_lock(&xs->mutex);
                if (xs->zc)
                        opts.flags |= XDP_OPTIONS_ZEROCOPY;
                mutex_unlock(&xs->mutex);

                len = sizeof(opts);
                if (copy_to_user(optval, &opts, len))
                        return -EFAULT;
                if (put_user(len, optlen))
                        return -EFAULT;

                return 0;
        }
        default:
                break;
        }

        return -EOPNOTSUPP;
}

static int xsk_mmap(struct file *file, struct socket *sock,
                    struct vm_area_struct *vma)
{
        loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
        unsigned long size = vma->vm_end - vma->vm_start;
        struct xdp_sock *xs = xdp_sk(sock->sk);
        struct xsk_queue *q = NULL;
        unsigned long pfn;
        struct page *qpg;

        if (READ_ONCE(xs->state) != XSK_READY)
                return -EBUSY;

        if (offset == XDP_PGOFF_RX_RING) {
                q = READ_ONCE(xs->rx);
        } else if (offset == XDP_PGOFF_TX_RING) {
                q = READ_ONCE(xs->tx);
        } else {
                /* Matches the smp_wmb() in XDP_UMEM_REG */
                smp_rmb();
                if (offset == XDP_UMEM_PGOFF_FILL_RING)
                        q = READ_ONCE(xs->fq_tmp);
                else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
                        q = READ_ONCE(xs->cq_tmp);
        }

        if (!q)
                return -EINVAL;

        /* Matches the smp_wmb() in xsk_init_queue */
        smp_rmb();
        qpg = virt_to_head_page(q->ring);
        if (size > page_size(qpg))
                return -EINVAL;

        pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
        return remap_pfn_range(vma, vma->vm_start, pfn,
                               size, vma->vm_page_prot);
}

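/* Example: mapping the Rx ring that this handler serves, using the
 * offsets from XDP_MMAP_OFFSETS in xsk_getsockopt() above. A minimal
 * sketch, assuming a hypothetical ring_sz; illustrative only:
 *
 *	struct xdp_mmap_offsets off;
 *	socklen_t optlen = sizeof(off);
 *
 *	getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
 *	void *rx = mmap(NULL, off.rx.desc + ring_sz * sizeof(struct xdp_desc),
 *			PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			fd, XDP_PGOFF_RX_RING);
 */
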
static int xsk_notifier(struct notifier_block *this,
                        unsigned long msg, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct net *net = dev_net(dev);
        struct sock *sk;

        switch (msg) {
        case NETDEV_UNREGISTER:
                mutex_lock(&net->xdp.lock);
                sk_for_each(sk, &net->xdp.list) {
                        struct xdp_sock *xs = xdp_sk(sk);

                        mutex_lock(&xs->mutex);
                        if (xs->dev == dev) {
                                sk->sk_err = ENETDOWN;
                                if (!sock_flag(sk, SOCK_DEAD))
                                        sk_error_report(sk);

                                xsk_unbind_dev(xs);

                                /* Clear device references. */
                                xp_clear_dev(xs->pool);
                        }
                        mutex_unlock(&xs->mutex);
                }
                mutex_unlock(&net->xdp.lock);
                break;
        }
        return NOTIFY_DONE;
}

static struct proto xsk_proto = {
        .name =         "XDP",
        .owner =        THIS_MODULE,
        .obj_size =     sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
        .family         = PF_XDP,
        .owner          = THIS_MODULE,
        .release        = xsk_release,
        .bind           = xsk_bind,
        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = sock_no_getname,
        .poll           = xsk_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = xsk_setsockopt,
        .getsockopt     = xsk_getsockopt,
        .sendmsg        = xsk_sendmsg,
        .recvmsg        = xsk_recvmsg,
        .mmap           = xsk_mmap,
        .sendpage       = sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
        struct xdp_sock *xs = xdp_sk(sk);

        if (!sock_flag(sk, SOCK_DEAD))
                return;

        if (!xp_put_pool(xs->pool))
                xdp_put_umem(xs->umem, !xs->pool);

        sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
                      int kern)
{
        struct xdp_sock *xs;
        struct sock *sk;

        if (!ns_capable(net->user_ns, CAP_NET_RAW))
                return -EPERM;
        if (sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;

        if (protocol)
                return -EPROTONOSUPPORT;

        sock->state = SS_UNCONNECTED;

        sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
        if (!sk)
                return -ENOBUFS;

        sock->ops = &xsk_proto_ops;

        sock_init_data(sock, sk);

        sk->sk_family = PF_XDP;

        sk->sk_destruct = xsk_destruct;
        sk_refcnt_debug_inc(sk);

        sock_set_flag(sk, SOCK_RCU_FREE);

        xs = xdp_sk(sk);
        xs->state = XSK_READY;
        mutex_init(&xs->mutex);
        spin_lock_init(&xs->rx_lock);

        INIT_LIST_HEAD(&xs->map_list);
        spin_lock_init(&xs->map_list_lock);

        mutex_lock(&net->xdp.lock);
        sk_add_node_rcu(sk, &net->xdp.list);
        mutex_unlock(&net->xdp.lock);

        sock_prot_inuse_add(net, &xsk_proto, 1);

        return 0;
}

static const struct net_proto_family xsk_family_ops = {
        .family = PF_XDP,
        .create = xsk_create,
        .owner  = THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
        .notifier_call  = xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
        mutex_init(&net->xdp.lock);
        INIT_HLIST_HEAD(&net->xdp.list);
        return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
        WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
        .init = xsk_net_init,
        .exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
        int err, cpu;

        err = proto_register(&xsk_proto, 0 /* no slab */);
        if (err)
                goto out;

        err = sock_register(&xsk_family_ops);
        if (err)
                goto out_proto;

        err = register_pernet_subsys(&xsk_net_ops);
        if (err)
                goto out_sk;

        err = register_netdevice_notifier(&xsk_netdev_notifier);
        if (err)
                goto out_pernet;

        for_each_possible_cpu(cpu)
                INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
        return 0;

out_pernet:
        unregister_pernet_subsys(&xsk_net_ops);
out_sk:
        sock_unregister(PF_XDP);
out_proto:
        proto_unregister(&xsk_proto);
out:
        return err;
}

fs_initcall(xsk_init);