/* SPDX-License-Identifier: GPL-2.0 */
/* Interface for implementing AF_XDP zero-copy support in drivers.
 * Copyright(c) 2020 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_DRV_H
#define _LINUX_XDP_SOCK_DRV_H

#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#ifdef CONFIG_XDP_SOCKETS

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, struct xdp_desc *desc, u32 max);
void xsk_tx_release(struct xsk_buff_pool *pool);
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id);
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);
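
/*
 * A minimal sketch, not part of this header, of how a driver NAPI poll
 * loop might drive the need_wakeup machinery declared above. struct
 * my_ring, my_process_rx() and my_refill_rx() are hypothetical driver
 * names. When the driver cannot refill its RX ring from the fill queue,
 * it sets the flag so user space knows it must wake the kernel up; once
 * refill succeeds again, it clears the flag.
 *
 *	static int my_poll_rx(struct my_ring *ring, int budget)
 *	{
 *		int done = my_process_rx(ring, budget);
 *
 *		if (xsk_uses_need_wakeup(ring->pool)) {
 *			if (!my_refill_rx(ring))
 *				xsk_set_rx_need_wakeup(ring->pool);
 *			else
 *				xsk_clear_rx_need_wakeup(ring->pool);
 *		}
 *		return done;
 *	}
 */
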
static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return XDP_PACKET_HEADROOM + pool->headroom;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return pool->chunk_size;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
}
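
/*
 * Worked example (illustrative numbers, not from this header): with a
 * 4096-byte chunk and 256 bytes of pool headroom, and XDP_PACKET_HEADROOM
 * being 256 bytes, xsk_pool_get_rx_frame_size() yields
 * 4096 - (256 + 256) = 3584 bytes of usable frame data. A driver would
 * typically program this value as its HW RX buffer length when a pool is
 * attached to a queue.
 */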

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
	xp_set_rxq_info(pool, rxq);
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
	xp_dma_unmap(pool, attrs);
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	struct xdp_umem *umem = pool->umem;

	return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
}
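
/*
 * A hedged setup/teardown sketch: when a pool is attached to queue qid
 * (e.g. from a driver's XDP_SETUP_XSK_POOL handler), map it for DMA and
 * hand it the queue's rxq info; on detach, look the pool up by qid and
 * unmap it. struct my_dev and its members are hypothetical.
 *
 *	static int my_enable_pool(struct my_dev *md,
 *				  struct xsk_buff_pool *pool, u16 qid)
 *	{
 *		int err;
 *
 *		err = xsk_pool_dma_map(pool, md->dma_dev, 0);
 *		if (err)
 *			return err;
 *		xsk_pool_set_rxq_info(pool, &md->rx_ring[qid].xdp_rxq);
 *		return 0;
 *	}
 *
 *	static void my_disable_pool(struct my_dev *md, u16 qid)
 *	{
 *		struct xsk_buff_pool *pool;
 *
 *		pool = xsk_get_pool_from_qid(md->netdev, qid);
 *		if (pool)
 *			xsk_pool_dma_unmap(pool, 0);
 *	}
 */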

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_dma(xskb);
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_frame_dma(xskb);
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return xp_alloc(pool);
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return xp_can_alloc(pool, count);
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_free(xskb);
}
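
/*
 * RX refill sketch under assumptions: struct my_rx_ring and its HW
 * descriptor layout are hypothetical. A buffer allocated here is owned
 * by the driver until it is handed to the XDP program or released with
 * xsk_buff_free(); the DMA address programmed into the NIC descriptor
 * comes from xsk_buff_xdp_get_dma().
 *
 *	static bool my_refill_one(struct my_rx_ring *ring, u32 idx)
 *	{
 *		struct xdp_buff *xdp;
 *
 *		xdp = xsk_buff_alloc(ring->pool);
 *		if (!xdp)
 *			return false;
 *		ring->hw_desc[idx].addr = xsk_buff_xdp_get_dma(xdp);
 *		ring->xdp[idx] = xdp;
 *		return true;
 *	}
 */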

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return xp_raw_get_dma(pool, addr);
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_raw_get_data(pool, addr);
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	if (!pool->dma_need_sync)
		return;

	xp_dma_sync_for_cpu(xskb);
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
	xp_dma_sync_for_device(pool, dma, size);
}
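
/*
 * Sync usage sketch (my_post_tx() and the ring bookkeeping are
 * hypothetical): on RX completion the buffer is synced for the CPU
 * before the XDP program touches it; on TX, descriptors peeked from the
 * TX ring are synced for the device before the HW is kicked.
 *
 *	// RX completion
 *	xdp = ring->xdp[idx];
 *	xsk_buff_dma_sync_for_cpu(xdp, ring->pool);
 *	// ... run the XDP program on xdp ...
 *
 *	// TX path
 *	while (xsk_tx_peek_desc(pool, &desc)) {
 *		dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);
 *
 *		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
 *		my_post_tx(ring, dma, desc.len);
 *	}
 *	xsk_tx_release(pool);
 */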

#else

static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
}

static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, struct xdp_desc *desc,
						 u32 max)
{
	return 0;
}

static inline void xsk_tx_release(struct xsk_buff_pool *pool)
{
}

static inline struct xsk_buff_pool *
xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
{
	return NULL;
}

static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return false;
}

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return NULL;
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return false;
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return 0;
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_DRV_H */