Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0 */
2#undef TRACE_SYSTEM
3#define TRACE_SYSTEM xdp
4
5#if !defined(_TRACE_XDP_H) || defined(TRACE_HEADER_MULTI_READ)
6#define _TRACE_XDP_H
7
8#include <linux/netdevice.h>
9#include <linux/filter.h>
10#include <linux/tracepoint.h>
11#include <linux/bpf.h>
12
/* List of all XDP action verdict codes (enum xdp_action).  FN is applied
 * to each action name; used below to generate both the enum exports and
 * the symbolic print table from a single source of truth.
 */
#define __XDP_ACT_MAP(FN)	\
	FN(ABORTED)		\
	FN(DROP)		\
	FN(PASS)		\
	FN(TX)			\
	FN(REDIRECT)

/* Export each XDP_* enum value so user-space trace tooling can decode it. */
#define __XDP_ACT_TP_FN(x)	\
	TRACE_DEFINE_ENUM(XDP_##x);
/* Emit one { value, "name" } pair for use with __print_symbolic(). */
#define __XDP_ACT_SYM_FN(x)	\
	{ XDP_##x, #x },
/* Complete action symbol table, terminated by the { -1, NULL } sentinel. */
#define __XDP_ACT_SYM_TAB	\
	__XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, NULL }
__XDP_ACT_MAP(__XDP_ACT_TP_FN)
27
/* Tracepoint for XDP error paths: records the program id, the action the
 * program returned, and the receiving device ifindex.  Presumably fired
 * by drivers when a verdict cannot be honored — confirm against
 * trace_xdp_exception() call sites.
 */
TRACE_EVENT(xdp_exception,

	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp, u32 act),

	TP_ARGS(dev, xdp, act),

	TP_STRUCT__entry(
		__field(int, prog_id)
		__field(u32, act)
		__field(int, ifindex)
	),

	TP_fast_assign(
		__entry->prog_id = xdp->aux->id;
		__entry->act = act;
		__entry->ifindex = dev->ifindex;
	),

	TP_printk("prog_id=%d action=%s ifindex=%d",
		  __entry->prog_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->ifindex)
);
52
/* Tracepoint for bulk XDP_TX transmission on a device: records how many
 * frames were sent vs. dropped and an error code.  The act field is
 * hard-coded to XDP_TX so the action= output stays uniform across events.
 */
TRACE_EVENT(xdp_bulk_tx,

	TP_PROTO(const struct net_device *dev,
		 int sent, int drops, int err),

	TP_ARGS(dev, sent, drops, err),

	TP_STRUCT__entry(
		__field(int, ifindex)
		__field(u32, act)
		__field(int, drops)
		__field(int, sent)
		__field(int, err)
	),

	TP_fast_assign(
		__entry->ifindex = dev->ifindex;
		__entry->act = XDP_TX;	/* constant: this event is always TX */
		__entry->drops = drops;
		__entry->sent = sent;
		__entry->err = err;
	),

	TP_printk("ifindex=%d action=%s sent=%d drops=%d err=%d",
		  __entry->ifindex,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->sent, __entry->drops, __entry->err)
);
81
#ifndef __DEVMAP_OBJ_TYPE
#define __DEVMAP_OBJ_TYPE
/* Mirror of the leading member of the devmap's private netdev wrapper,
 * so this header can reach ->dev without including the devmap internals.
 * NOTE(review): layout must stay in sync with the real struct — the cast
 * below relies on ->dev being the first member.
 */
struct _bpf_dtab_netdev {
	struct net_device *dev;
};
#endif /* __DEVMAP_OBJ_TYPE */

/* Resolve the destination ifindex of a redirect target: for DEVMAP /
 * DEVMAP_HASH maps, tgt is a devmap entry wrapping a net_device; for any
 * other map type the ifindex is unknown and 0 is reported.
 * Macro arguments are parenthesized to keep the expansion safe for
 * non-trivial argument expressions.
 */
#define devmap_ifindex(tgt, map)				\
	((((map)->map_type == BPF_MAP_TYPE_DEVMAP ||		\
	   (map)->map_type == BPF_MAP_TYPE_DEVMAP_HASH)) ?	\
	 ((struct _bpf_dtab_netdev *)(tgt))->dev->ifindex : 0)
93
/* Common event class for all xdp_redirect* tracepoints.  Covers both the
 * map-based and map-less redirect variants: when map is NULL, index is
 * treated as the destination ifindex directly and the map fields are
 * reported as 0.
 */
DECLARE_EVENT_CLASS(xdp_redirect_template,

	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 const void *tgt, int err,
		 const struct bpf_map *map, u32 index),

	TP_ARGS(dev, xdp, tgt, err, map, index),

	TP_STRUCT__entry(
		__field(int, prog_id)
		__field(u32, act)
		__field(int, ifindex)
		__field(int, err)
		__field(int, to_ifindex)
		__field(u32, map_id)
		__field(int, map_index)
	),

	TP_fast_assign(
		__entry->prog_id = xdp->aux->id;
		__entry->act = XDP_REDIRECT;	/* constant for this class */
		__entry->ifindex = dev->ifindex;
		__entry->err = err;
		/* With a map, resolve the target's ifindex (0 if not a
		 * devmap); without one, index already is the ifindex.
		 */
		__entry->to_ifindex = map ? devmap_ifindex(tgt, map) :
						index;
		__entry->map_id = map ? map->id : 0;
		__entry->map_index = map ? index : 0;
	),

	TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
		  " map_id=%d map_index=%d",
		  __entry->prog_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->ifindex, __entry->to_ifindex,
		  __entry->err, __entry->map_id, __entry->map_index)
);
131
/* Successful redirect (err == 0 expected at call sites). */
DEFINE_EVENT(xdp_redirect_template, xdp_redirect,
	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 const void *tgt, int err,
		 const struct bpf_map *map, u32 index),
	TP_ARGS(dev, xdp, tgt, err, map, index)
);
139
/* Failed redirect; err carries the (negative) error code. */
DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 const void *tgt, int err,
		 const struct bpf_map *map, u32 index),
	TP_ARGS(dev, xdp, tgt, err, map, index)
);
147
/* Convenience wrappers filling in the unused arguments for the map-less
 * and map-based redirect variants.  The trailing semicolons that used to
 * live inside these expansions are dropped: callers already supply one,
 * and a semicolon baked into the macro breaks unbraced if/else chains
 * and produces empty statements.
 */
#define _trace_xdp_redirect(dev, xdp, to)				\
	 trace_xdp_redirect(dev, xdp, NULL, 0, NULL, to)

#define _trace_xdp_redirect_err(dev, xdp, to, err)			\
	 trace_xdp_redirect_err(dev, xdp, NULL, err, NULL, to)

#define _trace_xdp_redirect_map(dev, xdp, to, map, index)		\
	 trace_xdp_redirect(dev, xdp, to, 0, map, index)

#define _trace_xdp_redirect_map_err(dev, xdp, to, map, index, err)	\
	 trace_xdp_redirect_err(dev, xdp, to, err, map, index)
159
/* not used anymore, but kept around so as not to break old programs */
DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map,
	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 const void *tgt, int err,
		 const struct bpf_map *map, u32 index),
	TP_ARGS(dev, xdp, tgt, err, map, index)
);
168
/* Legacy map-redirect failure event; retained for tracing-ABI stability. */
DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err,
	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 const void *tgt, int err,
		 const struct bpf_map *map, u32 index),
	TP_ARGS(dev, xdp, tgt, err, map, index)
);
176
/* Per-batch stats from a cpumap kthread draining its queue: frames
 * processed vs. dropped on the current CPU, plus whether the kthread
 * (re)scheduled.  The act field is fixed to XDP_REDIRECT since cpumap
 * frames arrive via redirect.
 */
TRACE_EVENT(xdp_cpumap_kthread,

	TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
		 int sched),

	TP_ARGS(map_id, processed, drops, sched),

	TP_STRUCT__entry(
		__field(int, map_id)
		__field(u32, act)
		__field(int, cpu)
		__field(unsigned int, drops)
		__field(unsigned int, processed)
		__field(int, sched)
	),

	TP_fast_assign(
		__entry->map_id = map_id;
		__entry->act = XDP_REDIRECT;
		__entry->cpu = smp_processor_id();
		__entry->drops = drops;
		__entry->processed = processed;
		__entry->sched = sched;
	),

	TP_printk("kthread"
		  " cpu=%d map_id=%d action=%s"
		  " processed=%u drops=%u"
		  " sched=%d",
		  __entry->cpu, __entry->map_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->processed, __entry->drops,
		  __entry->sched)
);
211
/* Per-batch stats from the enqueue side of a cpumap redirect: frames
 * enqueued (processed) vs. dropped from the current CPU toward to_cpu.
 * Mirrors xdp_cpumap_kthread with act fixed to XDP_REDIRECT.
 */
TRACE_EVENT(xdp_cpumap_enqueue,

	TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
		 int to_cpu),

	TP_ARGS(map_id, processed, drops, to_cpu),

	TP_STRUCT__entry(
		__field(int, map_id)
		__field(u32, act)
		__field(int, cpu)
		__field(unsigned int, drops)
		__field(unsigned int, processed)
		__field(int, to_cpu)
	),

	TP_fast_assign(
		__entry->map_id = map_id;
		__entry->act = XDP_REDIRECT;
		__entry->cpu = smp_processor_id();
		__entry->drops = drops;
		__entry->processed = processed;
		__entry->to_cpu = to_cpu;
	),

	TP_printk("enqueue"
		  " cpu=%d map_id=%d action=%s"
		  " processed=%u drops=%u"
		  " to_cpu=%d",
		  __entry->cpu, __entry->map_id,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->processed, __entry->drops,
		  __entry->to_cpu)
);
246
/* Stats for a devmap ndo_xdp_xmit flush: frames sent vs. dropped from
 * from_dev toward to_dev, plus the driver's error code.  act is fixed to
 * XDP_REDIRECT since devmap transmission happens via redirect.
 */
TRACE_EVENT(xdp_devmap_xmit,

	TP_PROTO(const struct net_device *from_dev,
		 const struct net_device *to_dev,
		 int sent, int drops, int err),

	TP_ARGS(from_dev, to_dev, sent, drops, err),

	TP_STRUCT__entry(
		__field(int, from_ifindex)
		__field(u32, act)
		__field(int, to_ifindex)
		__field(int, drops)
		__field(int, sent)
		__field(int, err)
	),

	TP_fast_assign(
		__entry->from_ifindex = from_dev->ifindex;
		__entry->act = XDP_REDIRECT;
		__entry->to_ifindex = to_dev->ifindex;
		__entry->drops = drops;
		__entry->sent = sent;
		__entry->err = err;
	),

	TP_printk("ndo_xdp_xmit"
		  " from_ifindex=%d to_ifindex=%d action=%s"
		  " sent=%d drops=%d"
		  " err=%d",
		  __entry->from_ifindex, __entry->to_ifindex,
		  __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
		  __entry->sent, __entry->drops,
		  __entry->err)
);
282
283/* Expect users already include <net/xdp.h>, but not xdp_priv.h */
284#include <net/xdp_priv.h>
285
/* List of xdp_mem_type values, used to generate both the enum exports
 * and the symbolic print table (same pattern as __XDP_ACT_MAP above).
 */
#define __MEM_TYPE_MAP(FN)	\
	FN(PAGE_SHARED)		\
	FN(PAGE_ORDER0)		\
	FN(PAGE_POOL)		\
	FN(ZERO_COPY)

/* Export each MEM_TYPE_* enum value for user-space decoding. */
#define __MEM_TYPE_TP_FN(x)	\
	TRACE_DEFINE_ENUM(MEM_TYPE_##x);
/* Emit one { value, "name" } pair for __print_symbolic(). */
#define __MEM_TYPE_SYM_FN(x)	\
	{ MEM_TYPE_##x, #x },
/* Sentinel spelled { -1, NULL } (the second member is a string pointer)
 * for consistency with __XDP_ACT_SYM_TAB; previously written { -1, 0 }.
 */
#define __MEM_TYPE_SYM_TAB	\
	__MEM_TYPE_MAP(__MEM_TYPE_SYM_FN) { -1, NULL }
__MEM_TYPE_MAP(__MEM_TYPE_TP_FN)
299
/* Records an XDP memory allocator being disconnected: its mem id/type
 * and the backing allocator pointer.  The xa pointer itself is stored in
 * the ring but not printed.
 */
TRACE_EVENT(mem_disconnect,

	TP_PROTO(const struct xdp_mem_allocator *xa),

	TP_ARGS(xa),

	TP_STRUCT__entry(
		__field(const struct xdp_mem_allocator *, xa)
		__field(u32, mem_id)
		__field(u32, mem_type)
		__field(const void *, allocator)
	),

	TP_fast_assign(
		__entry->xa = xa;
		__entry->mem_id = xa->mem.id;
		__entry->mem_type = xa->mem.type;
		__entry->allocator = xa->allocator;
	),

	TP_printk("mem_id=%d mem_type=%s allocator=%p",
		  __entry->mem_id,
		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
		  __entry->allocator
	)
);
326
/* Records an XDP memory allocator being connected to an RX queue:
 * allocator identity plus the ifindex of the queue's device.
 */
TRACE_EVENT(mem_connect,

	TP_PROTO(const struct xdp_mem_allocator *xa,
		 const struct xdp_rxq_info *rxq),

	TP_ARGS(xa, rxq),

	TP_STRUCT__entry(
		__field(const struct xdp_mem_allocator *, xa)
		__field(u32, mem_id)
		__field(u32, mem_type)
		__field(const void *, allocator)
		__field(const struct xdp_rxq_info *, rxq)
		__field(int, ifindex)
	),

	TP_fast_assign(
		__entry->xa = xa;
		__entry->mem_id = xa->mem.id;
		__entry->mem_type = xa->mem.type;
		__entry->allocator = xa->allocator;
		__entry->rxq = rxq;
		__entry->ifindex = rxq->dev->ifindex;
	),

	TP_printk("mem_id=%d mem_type=%s allocator=%p"
		  " ifindex=%d",
		  __entry->mem_id,
		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
		  __entry->allocator,
		  __entry->ifindex
	)
);
360
/* Records a failure to return a page to its XDP memory allocator:
 * the mem id/type the page belonged to, and the page pointer itself.
 */
TRACE_EVENT(mem_return_failed,

	TP_PROTO(const struct xdp_mem_info *mem,
		 const struct page *page),

	TP_ARGS(mem, page),

	TP_STRUCT__entry(
		__field(const struct page *, page)
		__field(u32, mem_id)
		__field(u32, mem_type)
	),

	TP_fast_assign(
		__entry->page = page;
		__entry->mem_id = mem->id;
		__entry->mem_type = mem->type;
	),

	TP_printk("mem_id=%d mem_type=%s page=%p",
		  __entry->mem_id,
		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
		  __entry->page
	)
);
386
387#endif /* _TRACE_XDP_H */
388
389#include <trace/define_trace.h>