/* include/trace/events/xdp.h — tracepoint definitions for the XDP fast path */
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
a67edbf4
DB
2#undef TRACE_SYSTEM
3#define TRACE_SYSTEM xdp
4
5#if !defined(_TRACE_XDP_H) || defined(TRACE_HEADER_MULTI_READ)
6#define _TRACE_XDP_H
7
8#include <linux/netdevice.h>
9#include <linux/filter.h>
10#include <linux/tracepoint.h>
23721a75 11#include <linux/bpf.h>
/* X-macro over the XDP action codes; FN is applied to each action name.
 * Used below to generate both TRACE_DEFINE_ENUM() entries and the symbolic
 * table consumed by __print_symbolic() in the TP_printk() callbacks.
 */
#define __XDP_ACT_MAP(FN)	\
	FN(ABORTED)		\
	FN(DROP)		\
	FN(PASS)		\
	FN(TX)			\
	FN(REDIRECT)

/* Emit TRACE_DEFINE_ENUM() so enum values resolve in trace output. */
#define __XDP_ACT_TP_FN(x)	\
	TRACE_DEFINE_ENUM(XDP_##x);
/* One { value, "NAME" } pair for __print_symbolic(). */
#define __XDP_ACT_SYM_FN(x)	\
	{ XDP_##x, #x },
/* Sentinel-terminated symbol table mapping action codes to names. */
#define __XDP_ACT_SYM_TAB	\
	__XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, NULL }
__XDP_ACT_MAP(__XDP_ACT_TP_FN)

28TRACE_EVENT(xdp_exception,
29
30 TP_PROTO(const struct net_device *dev,
31 const struct bpf_prog *xdp, u32 act),
32
33 TP_ARGS(dev, xdp, act),
34
35 TP_STRUCT__entry(
b06337df 36 __field(int, prog_id)
a67edbf4 37 __field(u32, act)
315ec399 38 __field(int, ifindex)
a67edbf4
DB
39 ),
40
41 TP_fast_assign(
b06337df 42 __entry->prog_id = xdp->aux->id;
315ec399
JDB
43 __entry->act = act;
44 __entry->ifindex = dev->ifindex;
a67edbf4
DB
45 ),
46
b06337df
JDB
47 TP_printk("prog_id=%d action=%s ifindex=%d",
48 __entry->prog_id,
315ec399
JDB
49 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
50 __entry->ifindex)
a67edbf4
DB
51);
52
e7d47989
TM
53TRACE_EVENT(xdp_bulk_tx,
54
55 TP_PROTO(const struct net_device *dev,
56 int sent, int drops, int err),
57
58 TP_ARGS(dev, sent, drops, err),
59
60 TP_STRUCT__entry(
61 __field(int, ifindex)
62 __field(u32, act)
63 __field(int, drops)
64 __field(int, sent)
65 __field(int, err)
66 ),
67
68 TP_fast_assign(
69 __entry->ifindex = dev->ifindex;
70 __entry->act = XDP_TX;
71 __entry->drops = drops;
72 __entry->sent = sent;
73 __entry->err = err;
74 ),
75
76 TP_printk("ifindex=%d action=%s sent=%d drops=%d err=%d",
77 __entry->ifindex,
78 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
79 __entry->sent, __entry->drops, __entry->err)
80);
81
/* Mirror of the devmap value layout (first member only) so this header can
 * read the target net_device without including the devmap internals.
 */
#ifndef __DEVMAP_OBJ_TYPE
#define __DEVMAP_OBJ_TYPE
struct _bpf_dtab_netdev {
	struct net_device *dev;
};
#endif /* __DEVMAP_OBJ_TYPE */

/* Resolve the egress ifindex from an opaque devmap target pointer; yields 0
 * for non-devmap map types where @tgt cannot be interpreted this way.
 */
#define devmap_ifindex(tgt, map)				\
	(((map->map_type == BPF_MAP_TYPE_DEVMAP ||		\
	   map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)) ?	\
	 ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex : 0)

8d3b778f 94DECLARE_EVENT_CLASS(xdp_redirect_template,
5acaee0a 95
a8735855 96 TP_PROTO(const struct net_device *dev,
c31e5a48 97 const struct bpf_prog *xdp,
1d233886
THJ
98 const void *tgt, int err,
99 const struct bpf_map *map, u32 index),
5acaee0a 100
1d233886 101 TP_ARGS(dev, xdp, tgt, err, map, index),
5acaee0a
JF
102
103 TP_STRUCT__entry(
b06337df 104 __field(int, prog_id)
5acaee0a 105 __field(u32, act)
a8735855 106 __field(int, ifindex)
4c03bdd7 107 __field(int, err)
8d3b778f
JDB
108 __field(int, to_ifindex)
109 __field(u32, map_id)
110 __field(int, map_index)
5acaee0a
JF
111 ),
112
113 TP_fast_assign(
b06337df 114 __entry->prog_id = xdp->aux->id;
c31e5a48 115 __entry->act = XDP_REDIRECT;
a8735855 116 __entry->ifindex = dev->ifindex;
a8735855 117 __entry->err = err;
1d233886
THJ
118 __entry->to_ifindex = map ? devmap_ifindex(tgt, map) :
119 index;
8d3b778f 120 __entry->map_id = map ? map->id : 0;
1d233886 121 __entry->map_index = map ? index : 0;
5acaee0a
JF
122 ),
123
1d233886
THJ
124 TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
125 " map_id=%d map_index=%d",
b06337df 126 __entry->prog_id,
4c03bdd7 127 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
8d3b778f 128 __entry->ifindex, __entry->to_ifindex,
1d233886 129 __entry->err, __entry->map_id, __entry->map_index)
5acaee0a 130);
8d3b778f
JDB
131
132DEFINE_EVENT(xdp_redirect_template, xdp_redirect,
133 TP_PROTO(const struct net_device *dev,
134 const struct bpf_prog *xdp,
1d233886
THJ
135 const void *tgt, int err,
136 const struct bpf_map *map, u32 index),
137 TP_ARGS(dev, xdp, tgt, err, map, index)
8d3b778f
JDB
138);
139
f5836ca5
JDB
140DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
141 TP_PROTO(const struct net_device *dev,
142 const struct bpf_prog *xdp,
1d233886
THJ
143 const void *tgt, int err,
144 const struct bpf_map *map, u32 index),
145 TP_ARGS(dev, xdp, tgt, err, map, index)
f5836ca5
JDB
146);
147
/* Convenience wrappers used by the core: the non-map variants pass NULL for
 * the target and map and report @to as the destination ifindex directly.
 */
#define _trace_xdp_redirect(dev, xdp, to)				\
	 trace_xdp_redirect(dev, xdp, NULL, 0, NULL, to);

#define _trace_xdp_redirect_err(dev, xdp, to, err)			\
	 trace_xdp_redirect_err(dev, xdp, NULL, err, NULL, to);

#define _trace_xdp_redirect_map(dev, xdp, to, map, index)		\
	 trace_xdp_redirect(dev, xdp, to, 0, map, index);

#define _trace_xdp_redirect_map_err(dev, xdp, to, map, index, err)	\
	 trace_xdp_redirect_err(dev, xdp, to, err, map, index);

1d233886
THJ
160/* not used anymore, but kept around so as not to break old programs */
161DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map,
59a30896
JDB
162 TP_PROTO(const struct net_device *dev,
163 const struct bpf_prog *xdp,
1d233886
THJ
164 const void *tgt, int err,
165 const struct bpf_map *map, u32 index),
166 TP_ARGS(dev, xdp, tgt, err, map, index)
59a30896
JDB
167);
168
1d233886 169DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err,
59a30896
JDB
170 TP_PROTO(const struct net_device *dev,
171 const struct bpf_prog *xdp,
1d233886
THJ
172 const void *tgt, int err,
173 const struct bpf_map *map, u32 index),
174 TP_ARGS(dev, xdp, tgt, err, map, index)
59a30896
JDB
175);
176
f9419f7b
JDB
177TRACE_EVENT(xdp_cpumap_kthread,
178
179 TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
180 int sched),
181
182 TP_ARGS(map_id, processed, drops, sched),
183
184 TP_STRUCT__entry(
185 __field(int, map_id)
186 __field(u32, act)
187 __field(int, cpu)
188 __field(unsigned int, drops)
189 __field(unsigned int, processed)
190 __field(int, sched)
191 ),
192
193 TP_fast_assign(
194 __entry->map_id = map_id;
195 __entry->act = XDP_REDIRECT;
196 __entry->cpu = smp_processor_id();
197 __entry->drops = drops;
198 __entry->processed = processed;
199 __entry->sched = sched;
200 ),
201
202 TP_printk("kthread"
203 " cpu=%d map_id=%d action=%s"
204 " processed=%u drops=%u"
205 " sched=%d",
206 __entry->cpu, __entry->map_id,
207 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
208 __entry->processed, __entry->drops,
209 __entry->sched)
210);
211
212TRACE_EVENT(xdp_cpumap_enqueue,
213
214 TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
215 int to_cpu),
216
217 TP_ARGS(map_id, processed, drops, to_cpu),
218
219 TP_STRUCT__entry(
220 __field(int, map_id)
221 __field(u32, act)
222 __field(int, cpu)
223 __field(unsigned int, drops)
224 __field(unsigned int, processed)
225 __field(int, to_cpu)
226 ),
227
228 TP_fast_assign(
229 __entry->map_id = map_id;
230 __entry->act = XDP_REDIRECT;
231 __entry->cpu = smp_processor_id();
232 __entry->drops = drops;
233 __entry->processed = processed;
234 __entry->to_cpu = to_cpu;
235 ),
236
237 TP_printk("enqueue"
238 " cpu=%d map_id=%d action=%s"
239 " processed=%u drops=%u"
240 " to_cpu=%d",
241 __entry->cpu, __entry->map_id,
242 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
243 __entry->processed, __entry->drops,
244 __entry->to_cpu)
245);
246
38edddb8
JDB
247TRACE_EVENT(xdp_devmap_xmit,
248
249 TP_PROTO(const struct bpf_map *map, u32 map_index,
250 int sent, int drops,
251 const struct net_device *from_dev,
e74de52e 252 const struct net_device *to_dev, int err),
38edddb8 253
e74de52e 254 TP_ARGS(map, map_index, sent, drops, from_dev, to_dev, err),
38edddb8
JDB
255
256 TP_STRUCT__entry(
257 __field(int, map_id)
258 __field(u32, act)
259 __field(u32, map_index)
260 __field(int, drops)
261 __field(int, sent)
262 __field(int, from_ifindex)
263 __field(int, to_ifindex)
e74de52e 264 __field(int, err)
38edddb8
JDB
265 ),
266
267 TP_fast_assign(
75ccae62 268 __entry->map_id = map ? map->id : 0;
38edddb8
JDB
269 __entry->act = XDP_REDIRECT;
270 __entry->map_index = map_index;
271 __entry->drops = drops;
272 __entry->sent = sent;
273 __entry->from_ifindex = from_dev->ifindex;
274 __entry->to_ifindex = to_dev->ifindex;
e74de52e 275 __entry->err = err;
38edddb8
JDB
276 ),
277
278 TP_printk("ndo_xdp_xmit"
279 " map_id=%d map_index=%d action=%s"
280 " sent=%d drops=%d"
e74de52e 281 " from_ifindex=%d to_ifindex=%d err=%d",
38edddb8
JDB
282 __entry->map_id, __entry->map_index,
283 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
284 __entry->sent, __entry->drops,
e74de52e 285 __entry->from_ifindex, __entry->to_ifindex, __entry->err)
38edddb8
JDB
286);
287
f033b688
JDB
288/* Expect users already include <net/xdp.h>, but not xdp_priv.h */
289#include <net/xdp_priv.h>
290
291#define __MEM_TYPE_MAP(FN) \
292 FN(PAGE_SHARED) \
293 FN(PAGE_ORDER0) \
294 FN(PAGE_POOL) \
295 FN(ZERO_COPY)
296
297#define __MEM_TYPE_TP_FN(x) \
298 TRACE_DEFINE_ENUM(MEM_TYPE_##x);
299#define __MEM_TYPE_SYM_FN(x) \
300 { MEM_TYPE_##x, #x },
301#define __MEM_TYPE_SYM_TAB \
302 __MEM_TYPE_MAP(__MEM_TYPE_SYM_FN) { -1, 0 }
303__MEM_TYPE_MAP(__MEM_TYPE_TP_FN)
304
305TRACE_EVENT(mem_disconnect,
306
c3f812ce 307 TP_PROTO(const struct xdp_mem_allocator *xa),
f033b688 308
c3f812ce 309 TP_ARGS(xa),
f033b688
JDB
310
311 TP_STRUCT__entry(
312 __field(const struct xdp_mem_allocator *, xa)
313 __field(u32, mem_id)
314 __field(u32, mem_type)
315 __field(const void *, allocator)
f033b688
JDB
316 ),
317
318 TP_fast_assign(
319 __entry->xa = xa;
320 __entry->mem_id = xa->mem.id;
321 __entry->mem_type = xa->mem.type;
322 __entry->allocator = xa->allocator;
f033b688
JDB
323 ),
324
c3f812ce 325 TP_printk("mem_id=%d mem_type=%s allocator=%p",
f033b688
JDB
326 __entry->mem_id,
327 __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
c3f812ce 328 __entry->allocator
f033b688
JDB
329 )
330);
331
332TRACE_EVENT(mem_connect,
333
334 TP_PROTO(const struct xdp_mem_allocator *xa,
335 const struct xdp_rxq_info *rxq),
336
337 TP_ARGS(xa, rxq),
338
339 TP_STRUCT__entry(
340 __field(const struct xdp_mem_allocator *, xa)
341 __field(u32, mem_id)
342 __field(u32, mem_type)
343 __field(const void *, allocator)
344 __field(const struct xdp_rxq_info *, rxq)
345 __field(int, ifindex)
346 ),
347
348 TP_fast_assign(
349 __entry->xa = xa;
350 __entry->mem_id = xa->mem.id;
351 __entry->mem_type = xa->mem.type;
352 __entry->allocator = xa->allocator;
353 __entry->rxq = rxq;
354 __entry->ifindex = rxq->dev->ifindex;
355 ),
356
357 TP_printk("mem_id=%d mem_type=%s allocator=%p"
358 " ifindex=%d",
359 __entry->mem_id,
360 __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
361 __entry->allocator,
362 __entry->ifindex
363 )
364);
365
366TRACE_EVENT(mem_return_failed,
367
368 TP_PROTO(const struct xdp_mem_info *mem,
369 const struct page *page),
370
371 TP_ARGS(mem, page),
372
373 TP_STRUCT__entry(
374 __field(const struct page *, page)
375 __field(u32, mem_id)
376 __field(u32, mem_type)
377 ),
378
379 TP_fast_assign(
380 __entry->page = page;
381 __entry->mem_id = mem->id;
382 __entry->mem_type = mem->type;
383 ),
384
385 TP_printk("mem_id=%d mem_type=%s page=%p",
386 __entry->mem_id,
387 __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
388 __entry->page
389 )
390);
391
a67edbf4
DB
392#endif /* _TRACE_XDP_H */
393
394#include <trace/define_trace.h>