/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;

/* Per-page bookkeeping for a umem: the kernel virtual address of the page
 * and, when the device runs in zero-copy mode, the DMA address mapped for it.
 */
struct xdp_umem_page {
	void *addr;	/* kernel virtual address of the pinned page */
	dma_addr_t dma;	/* DMA address; only valid when umem->zc is set */
};

/* LIFO stack of FILL-queue addresses handed back by a driver for later
 * reuse (see xsk_umem_fq_reuse()/xsk_umem_peek_addr_rq() below).
 */
struct xdp_umem_fq_reuse {
	u32 nentries;	/* capacity of handles[] */
	u32 length;	/* current number of stacked entries */
	u64 handles[];	/* flexible array of umem addresses */
};

/* A userspace-registered memory area (umem) shared by AF_XDP sockets,
 * together with its FILL (fq) and COMPLETION (cq) rings and the pinned
 * pages that back it.
 */
struct xdp_umem {
	struct xsk_queue *fq;		/* FILL ring */
	struct xsk_queue *cq;		/* COMPLETION ring */
	struct xdp_umem_page *pages;	/* per-page addr/dma table */
	u64 chunk_mask;
	u64 size;			/* total size of the area in bytes */
	u32 headroom;
	u32 chunk_size_nohr;		/* chunk size minus headroom */
	struct user_struct *user;
	struct pid *pid;
	unsigned long address;		/* userspace start address */
	refcount_t users;
	struct work_struct work;	/* deferred teardown */
	struct page **pgs;		/* pinned pages */
	u32 npgs;			/* number of pinned pages */
	struct net_device *dev;		/* bound device (zero-copy) */
	struct xdp_umem_fq_reuse *fq_reuse;
	u16 queue_id;
	bool zc;			/* zero-copy mode enabled */
	spinlock_t xsk_list_lock;	/* protects xsk_list */
	struct list_head xsk_list;	/* sockets sharing this umem */
};

/* An AF_XDP socket; embeds struct sock so the two can be cast freely. */
struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx;		/* RX ring */
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	u16 queue_id;
	/* TX ring on its own cacheline to avoid false sharing with RX */
	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head list;		/* entry in umem->xsk_list */
	bool zc;			/* zero-copy mode enabled */
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	u64 rx_dropped;			/* packets dropped on RX */
};

struct xdp_buff;	/* forward declaration; only referenced by pointer */
#ifdef CONFIG_XDP_SOCKETS
/* Used from the XDP receive path / BPF redirect */
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
void xsk_flush(struct xdp_sock *xs);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_discard_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
/* FILL-queue reuse-stack management */
struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
					  struct xdp_umem_fq_reuse *newq);
void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
90254034 BT |
89 | |
90 | static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr) | |
91 | { | |
92 | return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1)); | |
93 | } | |
94 | ||
95 | static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr) | |
96 | { | |
97 | return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1)); | |
98 | } | |
f5bd9138 JK |
99 | |
100 | /* Reuse-queue aware version of FILL queue helpers */ | |
101 | static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr) | |
102 | { | |
103 | struct xdp_umem_fq_reuse *rq = umem->fq_reuse; | |
104 | ||
105 | if (!rq->length) | |
106 | return xsk_umem_peek_addr(umem, addr); | |
107 | ||
108 | *addr = rq->handles[rq->length - 1]; | |
109 | return addr; | |
110 | } | |
111 | ||
112 | static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem) | |
113 | { | |
114 | struct xdp_umem_fq_reuse *rq = umem->fq_reuse; | |
115 | ||
116 | if (!rq->length) | |
117 | xsk_umem_discard_addr(umem); | |
118 | else | |
119 | rq->length--; | |
120 | } | |
121 | ||
122 | static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr) | |
123 | { | |
124 | struct xdp_umem_fq_reuse *rq = umem->fq_reuse; | |
125 | ||
126 | rq->handles[rq->length++] = addr; | |
127 | } | |
#else
/* Stubs used when AF_XDP sockets are compiled out (CONFIG_XDP_SOCKETS=n)
 * so that callers need no #ifdefs of their own: rcv paths report
 * -ENOTSUPP, predicates return false, lookups return NULL/0, and the
 * remaining helpers are no-ops.
 */
static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline void xsk_flush(struct xdp_sock *xs)
{
}

static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return false;
}

static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr(struct xdp_umem *umem)
{
}

static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
}

static inline bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma,
				       u32 *len)
{
	return false;
}

static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
{
	return NULL;
}

static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
	struct xdp_umem *umem,
	struct xdp_umem_fq_reuse *newq)
{
	return NULL;
}
static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
{
}

static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
{
	return NULL;
}

static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
{
	return 0;
}

static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
	return NULL;
}

static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
{
}

static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */