/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;

19 | struct xdp_umem_props { | |
20 | u64 chunk_mask; | |
21 | u64 size; | |
22 | }; | |
23 | ||
8aef7340 BT |
24 | struct xdp_umem_page { |
25 | void *addr; | |
173d3adb | 26 | dma_addr_t dma; |
8aef7340 BT |
27 | }; |
28 | ||
e61e62b9 BT |
29 | struct xdp_umem { |
30 | struct xsk_queue *fq; | |
31 | struct xsk_queue *cq; | |
8aef7340 | 32 | struct xdp_umem_page *pages; |
e61e62b9 BT |
33 | struct xdp_umem_props props; |
34 | u32 headroom; | |
35 | u32 chunk_size_nohr; | |
36 | struct user_struct *user; | |
37 | struct pid *pid; | |
38 | unsigned long address; | |
39 | refcount_t users; | |
40 | struct work_struct work; | |
8aef7340 | 41 | struct page **pgs; |
e61e62b9 | 42 | u32 npgs; |
173d3adb BT |
43 | struct net_device *dev; |
44 | u16 queue_id; | |
45 | bool zc; | |
ac98d8aa MK |
46 | spinlock_t xsk_list_lock; |
47 | struct list_head xsk_list; | |
e61e62b9 | 48 | }; |
c0c77d8f BT |
49 | |
50 | struct xdp_sock { | |
51 | /* struct sock must be the first member of struct xdp_sock */ | |
52 | struct sock sk; | |
b9b6b68e BT |
53 | struct xsk_queue *rx; |
54 | struct net_device *dev; | |
c0c77d8f | 55 | struct xdp_umem *umem; |
fbfc504a | 56 | struct list_head flush_node; |
965a9909 | 57 | u16 queue_id; |
f6145903 | 58 | struct xsk_queue *tx ____cacheline_aligned_in_smp; |
ac98d8aa MK |
59 | struct list_head list; |
60 | bool zc; | |
c0c77d8f BT |
61 | /* Protects multiple processes in the control path */ |
62 | struct mutex mutex; | |
a9744f7c MK |
63 | /* Mutual exclusion of NAPI TX thread and sendmsg error paths |
64 | * in the SKB destructor callback. | |
65 | */ | |
66 | spinlock_t tx_completion_lock; | |
c497176c | 67 | u64 rx_dropped; |
c0c77d8f BT |
68 | }; |
69 | ||
c497176c BT |
70 | struct xdp_buff; |
71 | #ifdef CONFIG_XDP_SOCKETS | |
72 | int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp); | |
73 | int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp); | |
74 | void xsk_flush(struct xdp_sock *xs); | |
fbfc504a | 75 | bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs); |
ac98d8aa | 76 | /* Used from netdev driver */ |
173d3adb BT |
77 | u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr); |
78 | void xsk_umem_discard_addr(struct xdp_umem *umem); | |
ac98d8aa MK |
79 | void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries); |
80 | bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len); | |
81 | void xsk_umem_consume_tx_done(struct xdp_umem *umem); | |
c497176c BT |
82 | #else |
83 | static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) | |
84 | { | |
85 | return -ENOTSUPP; | |
86 | } | |
87 | ||
88 | static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) | |
89 | { | |
90 | return -ENOTSUPP; | |
91 | } | |
92 | ||
93 | static inline void xsk_flush(struct xdp_sock *xs) | |
94 | { | |
95 | } | |
fbfc504a BT |
96 | |
97 | static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs) | |
98 | { | |
99 | return false; | |
100 | } | |
c497176c BT |
101 | #endif /* CONFIG_XDP_SOCKETS */ |

#endif /* _LINUX_XDP_SOCK_H */