/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM io_uring

#if !defined(_TRACE_IO_URING_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IO_URING_H

#include <linux/tracepoint.h>
#include <uapi/linux/io_uring.h>

struct io_wq_work;

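/*
 * All events in this file are exposed through tracefs when the kernel's
 * tracing support is enabled. A minimal sketch of enabling them from
 * userspace (assuming tracefs is mounted at /sys/kernel/tracing; the mount
 * point may differ on your system):
 *
 *	# enable a single event
 *	echo 1 > /sys/kernel/tracing/events/io_uring/io_uring_create/enable
 *	# or enable the whole io_uring subsystem
 *	echo 1 > /sys/kernel/tracing/events/io_uring/enable
 *	# read the emitted trace lines
 *	cat /sys/kernel/tracing/trace_pipe
 */
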
/**
 * io_uring_create - called after a new io_uring context was prepared
 *
 * @fd: corresponding file descriptor
 * @ctx: pointer to a ring context structure
 * @sq_entries: actual SQ size
 * @cq_entries: actual CQ size
 * @flags: SQ ring flags, provided to io_uring_setup(2)
 *
 * Allows tracing of io_uring creation and provides the context pointer,
 * which can be used later to find correlated events.
 */
TRACE_EVENT(io_uring_create,

	TP_PROTO(int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags),

	TP_ARGS(fd, ctx, sq_entries, cq_entries, flags),

	TP_STRUCT__entry (
		__field( int, fd )
		__field( void *, ctx )
		__field( u32, sq_entries )
		__field( u32, cq_entries )
		__field( u32, flags )
	),

	TP_fast_assign(
		__entry->fd = fd;
		__entry->ctx = ctx;
		__entry->sq_entries = sq_entries;
		__entry->cq_entries = cq_entries;
		__entry->flags = flags;
	),

	TP_printk("ring %p, fd %d sq size %d, cq size %d, flags 0x%x",
		  __entry->ctx, __entry->fd, __entry->sq_entries,
		  __entry->cq_entries, __entry->flags)
);

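/*
 * A sketch of how this tracepoint is typically invoked from the ring setup
 * path (the names "ret", "ctx" and "p" are illustrative, not part of this
 * header):
 *
 *	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
 *
 * The emitted ring pointer is what lets later events (submission,
 * completion, etc.) be correlated back to this particular ring.
 */
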
/**
 * io_uring_register - called after a buffer/file/eventfd was successfully
 * registered for a ring
 *
 * @ctx: pointer to a ring context structure
 * @opcode: describes which operation to perform
 * @nr_files: number of registered files
 * @nr_bufs: number of registered buffers
 * @ret: return code
 *
 * Allows tracing of fixed files/buffers, which can be registered to avoid
 * the overhead of taking references to them for every operation. This event,
 * together with io_uring_file_get, can provide a full picture of how much
 * overhead can be saved by using fixed resources.
 */
TRACE_EVENT(io_uring_register,

	TP_PROTO(void *ctx, unsigned opcode, unsigned nr_files,
		 unsigned nr_bufs, long ret),

	TP_ARGS(ctx, opcode, nr_files, nr_bufs, ret),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( unsigned, opcode )
		__field( unsigned, nr_files )
		__field( unsigned, nr_bufs )
		__field( long, ret )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->opcode = opcode;
		__entry->nr_files = nr_files;
		__entry->nr_bufs = nr_bufs;
		__entry->ret = ret;
	),

	TP_printk("ring %p, opcode %d, nr_user_files %d, nr_user_bufs %d, "
		  "ret %ld",
		  __entry->ctx, __entry->opcode, __entry->nr_files,
		  __entry->nr_bufs, __entry->ret)
);

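/*
 * One way to act on the comment above: count how often file references are
 * taken for a workload and compare before/after switching to fixed files.
 * A sketch using perf (tracepoint events use the "io_uring:" prefix;
 * "./my_workload" is a placeholder for the application under test):
 *
 *	perf stat -e io_uring:io_uring_register \
 *		  -e io_uring:io_uring_file_get -- ./my_workload
 */
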
/**
 * io_uring_file_get - called before getting references to an SQE file
 *
 * @ctx: pointer to a ring context structure
 * @req: pointer to a submitted request
 * @user_data: user data associated with the request
 * @fd: SQE file descriptor
 *
 * Allows tracing how often an SQE file reference is obtained, which can
 * help figure out whether it makes sense to use fixed files, or to check
 * that fixed files are used correctly.
 */
TRACE_EVENT(io_uring_file_get,

	TP_PROTO(void *ctx, void *req, unsigned long long user_data, int fd),

	TP_ARGS(ctx, req, user_data, fd),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( void *, req )
		__field( u64, user_data )
		__field( int, fd )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->req = req;
		__entry->user_data = user_data;
		__entry->fd = fd;
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, fd %d",
		  __entry->ctx, __entry->req, __entry->user_data, __entry->fd)
);

/**
 * io_uring_queue_async_work - called before submitting a new async work
 *
 * @ctx: pointer to a ring context structure
 * @req: pointer to a submitted request
 * @user_data: user data associated with the request
 * @opcode: opcode of request
 * @flags: request flags
 * @work: pointer to a submitted io_wq_work
 * @rw: type of workqueue, hashed or normal
 *
 * Allows tracing of asynchronous work submission.
 */
TRACE_EVENT(io_uring_queue_async_work,

	TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode,
		 unsigned int flags, struct io_wq_work *work, int rw),

	TP_ARGS(ctx, req, user_data, opcode, flags, work, rw),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( void *, req )
		__field( u64, user_data )
		__field( u8, opcode )
		__field( unsigned int, flags )
		__field( struct io_wq_work *, work )
		__field( int, rw )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->req = req;
		__entry->user_data = user_data;
		__entry->flags = flags;
		__entry->opcode = opcode;
		__entry->work = work;
		__entry->rw = rw;
	),

	TP_printk("ring %p, request %p, user_data 0x%llx, opcode %d, flags 0x%x, %s queue, work %p",
		  __entry->ctx, __entry->req, __entry->user_data, __entry->opcode,
		  __entry->flags, __entry->rw ? "hashed" : "normal", __entry->work)
);

/**
 * io_uring_defer - called when an io_uring request is deferred
 *
 * @ctx: pointer to a ring context structure
 * @req: pointer to a deferred request
 * @user_data: user data associated with the request
 * @opcode: opcode of request
 *
 * Allows tracking of deferred requests, to get insight into which requests
 * are not started immediately.
 */
TRACE_EVENT(io_uring_defer,

	TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode),

	TP_ARGS(ctx, req, user_data, opcode),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( void *, req )
		__field( unsigned long long, data )
		__field( u8, opcode )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->req = req;
		__entry->data = user_data;
		__entry->opcode = opcode;
	),

	TP_printk("ring %p, request %p, user_data 0x%llx, opcode %d",
		  __entry->ctx, __entry->req, __entry->data, __entry->opcode)
);

/**
 * io_uring_link - called before the io_uring request is added to the
 * link_list of another request
 *
 * @ctx: pointer to a ring context structure
 * @req: pointer to a linked request
 * @target_req: pointer to a previous request, whose link list will contain @req
 *
 * Allows tracking of linked requests, to understand dependencies between
 * requests and how they influence the execution flow.
 */
TRACE_EVENT(io_uring_link,

	TP_PROTO(void *ctx, void *req, void *target_req),

	TP_ARGS(ctx, req, target_req),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( void *, req )
		__field( void *, target_req )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->req = req;
		__entry->target_req = target_req;
	),

	TP_printk("ring %p, request %p linked after %p",
		  __entry->ctx, __entry->req, __entry->target_req)
);

/**
 * io_uring_cqring_wait - called before starting to wait for an available CQE
 *
 * @ctx: pointer to a ring context structure
 * @min_events: minimal number of events to wait for
 *
 * Allows tracking of CQE waits, so that we can e.g. troubleshoot situations
 * where an application waits for an event that never arrives.
 */
TRACE_EVENT(io_uring_cqring_wait,

	TP_PROTO(void *ctx, int min_events),

	TP_ARGS(ctx, min_events),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( int, min_events )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->min_events = min_events;
	),

	TP_printk("ring %p, min_events %d", __entry->ctx, __entry->min_events)
);

/**
 * io_uring_fail_link - called before failing a linked request
 *
 * @ctx: pointer to a ring context structure
 * @req: request whose links were cancelled
 * @user_data: user data associated with the request
 * @opcode: opcode of request
 * @link: cancelled link
 *
 * Allows tracking of linked request cancellation, to see not only that some
 * work was cancelled, but also which request was the cause.
 */
TRACE_EVENT(io_uring_fail_link,

	TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode, void *link),

	TP_ARGS(ctx, req, user_data, opcode, link),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( void *, req )
		__field( unsigned long long, user_data )
		__field( u8, opcode )
		__field( void *, link )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->req = req;
		__entry->user_data = user_data;
		__entry->opcode = opcode;
		__entry->link = link;
	),

	TP_printk("ring %p, request %p, user_data 0x%llx, opcode %d, link %p",
		  __entry->ctx, __entry->req, __entry->user_data, __entry->opcode,
		  __entry->link)
);

/**
 * io_uring_complete - called when completing an SQE
 *
 * @ctx: pointer to a ring context structure
 * @req: pointer to a submitted request
 * @user_data: user data associated with the request
 * @res: result of the request
 * @cflags: completion flags
 *
 */
TRACE_EVENT(io_uring_complete,

	TP_PROTO(void *ctx, void *req, u64 user_data, int res, unsigned cflags),

	TP_ARGS(ctx, req, user_data, res, cflags),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( void *, req )
		__field( u64, user_data )
		__field( int, res )
		__field( unsigned, cflags )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->req = req;
		__entry->user_data = user_data;
		__entry->res = res;
		__entry->cflags = cflags;
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, result %d, cflags 0x%x",
		  __entry->ctx, __entry->req, __entry->user_data,
		  __entry->res, __entry->cflags)
);

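/*
 * io_uring_submit_sqe (below) and io_uring_complete both record the ring
 * pointer and user_data, so a submission can be paired with its completion
 * by matching those two fields in the trace. A sketch of capturing both
 * (tracefs mount point assumed to be /sys/kernel/tracing):
 *
 *	echo 1 > /sys/kernel/tracing/events/io_uring/io_uring_submit_sqe/enable
 *	echo 1 > /sys/kernel/tracing/events/io_uring/io_uring_complete/enable
 */
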
/**
 * io_uring_submit_sqe - called before submitting one SQE
 *
 * @ctx: pointer to a ring context structure
 * @req: pointer to a submitted request
 * @user_data: user data associated with the request
 * @opcode: opcode of request
 * @flags: request flags
 * @force_nonblock: whether the context is blocking or not
 * @sq_thread: true if sq_thread has submitted this SQE
 *
 * Allows tracking of SQE submission, to understand its source: the SQ thread
 * or an io_uring_enter call.
 */
TRACE_EVENT(io_uring_submit_sqe,

	TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode, u32 flags,
		 bool force_nonblock, bool sq_thread),

	TP_ARGS(ctx, req, user_data, opcode, flags, force_nonblock, sq_thread),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( void *, req )
		__field( unsigned long long, user_data )
		__field( u8, opcode )
		__field( u32, flags )
		__field( bool, force_nonblock )
		__field( bool, sq_thread )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->req = req;
		__entry->user_data = user_data;
		__entry->opcode = opcode;
		__entry->flags = flags;
		__entry->force_nonblock = force_nonblock;
		__entry->sq_thread = sq_thread;
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, opcode %d, flags 0x%x, "
		  "non block %d, sq_thread %d", __entry->ctx, __entry->req,
		  __entry->user_data, __entry->opcode,
		  __entry->flags, __entry->force_nonblock, __entry->sq_thread)
);

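/*
 * The sq_thread field makes it possible to separate SQPOLL submissions from
 * io_uring_enter() submissions with an ftrace event filter. A sketch
 * (tracefs mount point assumed to be /sys/kernel/tracing):
 *
 *	echo 'sq_thread == 1' > \
 *		/sys/kernel/tracing/events/io_uring/io_uring_submit_sqe/filter
 */
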
/*
 * io_uring_poll_arm - called after arming a poll wait if successful
 *
 * @ctx: pointer to a ring context structure
 * @req: pointer to the armed request
 * @user_data: user data associated with the request
 * @opcode: opcode of request
 * @mask: request poll events mask
 * @events: registered events of interest
 *
 * Allows tracking which files are being waited on and which events are of
 * interest.
 */
TRACE_EVENT(io_uring_poll_arm,

	TP_PROTO(void *ctx, void *req, u64 user_data, u8 opcode,
		 int mask, int events),

	TP_ARGS(ctx, req, user_data, opcode, mask, events),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( void *, req )
		__field( unsigned long long, user_data )
		__field( u8, opcode )
		__field( int, mask )
		__field( int, events )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->req = req;
		__entry->user_data = user_data;
		__entry->opcode = opcode;
		__entry->mask = mask;
		__entry->events = events;
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, opcode %d, mask 0x%x, events 0x%x",
		  __entry->ctx, __entry->req, __entry->user_data, __entry->opcode,
		  __entry->mask, __entry->events)
);

/*
 * io_uring_task_add - called after adding a task
 *
 * @ctx: pointer to a ring context structure
 * @req: pointer to request
 * @user_data: user data associated with the request
 * @opcode: opcode of request
 * @mask: request poll events mask
 *
 */
TRACE_EVENT(io_uring_task_add,

	TP_PROTO(void *ctx, void *req, unsigned long long user_data, u8 opcode, int mask),

	TP_ARGS(ctx, req, user_data, opcode, mask),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( void *, req )
		__field( unsigned long long, user_data )
		__field( u8, opcode )
		__field( int, mask )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->req = req;
		__entry->user_data = user_data;
		__entry->opcode = opcode;
		__entry->mask = mask;
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, opcode %d, mask %x",
		  __entry->ctx, __entry->req, __entry->user_data, __entry->opcode,
		  __entry->mask)
);

/*
 * io_uring_req_failed - called when an sqe is errored during submission
 *
 * @sqe: pointer to the io_uring_sqe that failed
 * @ctx: pointer to a ring context structure
 * @req: pointer to request
 * @error: error it failed with
 *
 * Allows easier diagnosing of malformed requests in production systems.
 */
TRACE_EVENT(io_uring_req_failed,

	TP_PROTO(const struct io_uring_sqe *sqe, void *ctx, void *req, int error),

	TP_ARGS(sqe, ctx, req, error),

	TP_STRUCT__entry (
		__field( void *, ctx )
		__field( void *, req )
		__field( unsigned long long, user_data )
		__field( u8, opcode )
		__field( u8, flags )
		__field( u8, ioprio )
		__field( u64, off )
		__field( u64, addr )
		__field( u32, len )
		__field( u32, op_flags )
		__field( u16, buf_index )
		__field( u16, personality )
		__field( u32, file_index )
		__field( u64, pad1 )
		__field( u64, pad2 )
		__field( int, error )
	),

	TP_fast_assign(
		__entry->ctx = ctx;
		__entry->req = req;
		__entry->user_data = sqe->user_data;
		__entry->opcode = sqe->opcode;
		__entry->flags = sqe->flags;
		__entry->ioprio = sqe->ioprio;
		__entry->off = sqe->off;
		__entry->addr = sqe->addr;
		__entry->len = sqe->len;
		__entry->op_flags = sqe->rw_flags;
		__entry->buf_index = sqe->buf_index;
		__entry->personality = sqe->personality;
		__entry->file_index = sqe->file_index;
		__entry->pad1 = sqe->__pad2[0];
		__entry->pad2 = sqe->__pad2[1];
		__entry->error = error;
	),

	TP_printk("ring %p, req %p, user_data 0x%llx, "
		  "op %d, flags 0x%x, prio=%d, off=%llu, addr=%llu, "
		  "len=%u, rw_flags=0x%x, buf_index=%d, "
		  "personality=%d, file_index=%d, pad=0x%llx/%llx, error=%d",
		  __entry->ctx, __entry->req, __entry->user_data,
		  __entry->opcode, __entry->flags, __entry->ioprio,
		  (unsigned long long)__entry->off,
		  (unsigned long long)__entry->addr, __entry->len,
		  __entry->op_flags,
		  __entry->buf_index, __entry->personality, __entry->file_index,
		  (unsigned long long)__entry->pad1,
		  (unsigned long long)__entry->pad2, __entry->error)
);

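/*
 * A sketch of narrowing this event to one specific failure, e.g. everything
 * that failed with -EINVAL (event filters take the numeric errno value,
 * here -22; tracefs mount point assumed to be /sys/kernel/tracing):
 *
 *	echo 'error == -22' > \
 *		/sys/kernel/tracing/events/io_uring/io_uring_req_failed/filter
 */
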
#endif /* _TRACE_IO_URING_H */

/* This part must be outside protection */
#include <trace/define_trace.h>