Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ftrace: Rework event_create_dir()

Rework event_create_dir() to use an array of static data instead of
function pointers where possible.

The problem is that it would call the function pointer on module load
before parse_args(), possibly even before jump_labels were initialized.
Luckily the generated functions don't use jump_labels but it still seems
fragile. It also gets in the way of changing when we make the module map
executable.

The generated functions are basically calling trace_define_field() with a
bunch of static arguments. So instead of a function, capture these
arguments in a static array, avoiding the function call.

Now there are a number of cases where the fields are dynamic (syscall
arguments, kprobes and uprobes), in which case a static array does not
work; for these we preserve the function call. Luckily none of these
cases are related to modules, so we can safely retain the function call
for them.

Also fix up all broken tracepoint definitions that now generate a
compile error.

Tested-by: Alexei Starovoitov <ast@kernel.org>
Tested-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20191111132458.342979914@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

authored by

Peter Zijlstra and committed by
Ingo Molnar
04ae87a5 958de668

+213 -249
+4 -4
drivers/infiniband/hw/hfi1/trace_tid.h
··· 138 138 TP_ARGS(dd, index, type, pa, order), 139 139 TP_STRUCT__entry(/* entry */ 140 140 DD_DEV_ENTRY(dd) 141 - __field(unsigned long, pa); 142 - __field(u32, index); 143 - __field(u32, type); 144 - __field(u16, order); 141 + __field(unsigned long, pa) 142 + __field(u32, index) 143 + __field(u32, type) 144 + __field(u16, order) 145 145 ), 146 146 TP_fast_assign(/* assign */ 147 147 DD_DEV_ASSIGN(dd);
+1 -1
drivers/infiniband/hw/hfi1/trace_tx.h
··· 588 588 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i), 589 589 TP_ARGS(dd, ctxt, subctxt, i), 590 590 TP_STRUCT__entry( 591 - DD_DEV_ENTRY(dd); 591 + DD_DEV_ENTRY(dd) 592 592 __field(u16, ctxt) 593 593 __field(u8, subctxt) 594 594 __field(u8, ver_opcode)
+4 -4
drivers/lightnvm/pblk-trace.h
··· 46 46 TP_STRUCT__entry( 47 47 __string(name, name) 48 48 __field(u64, ppa) 49 - __field(int, state); 49 + __field(int, state) 50 50 ), 51 51 52 52 TP_fast_assign( ··· 72 72 TP_STRUCT__entry( 73 73 __string(name, name) 74 74 __field(u64, ppa) 75 - __field(int, state); 75 + __field(int, state) 76 76 ), 77 77 78 78 TP_fast_assign( ··· 98 98 TP_STRUCT__entry( 99 99 __string(name, name) 100 100 __field(int, line) 101 - __field(int, state); 101 + __field(int, state) 102 102 ), 103 103 104 104 TP_fast_assign( ··· 121 121 122 122 TP_STRUCT__entry( 123 123 __string(name, name) 124 - __field(int, state); 124 + __field(int, state) 125 125 ), 126 126 127 127 TP_fast_assign(
+1 -1
drivers/net/fjes/fjes_trace.h
··· 28 28 __field(u8, cs_busy) 29 29 __field(u8, cs_complete) 30 30 __field(int, timeout) 31 - __field(int, ret); 31 + __field(int, ret) 32 32 ), 33 33 TP_fast_assign( 34 34 __entry->cr_req = cr->bits.req_code;
+3 -3
drivers/net/wireless/ath/ath10k/trace.h
··· 239 239 TP_STRUCT__entry( 240 240 __string(device, dev_name(ar->dev)) 241 241 __string(driver, dev_driver_string(ar->dev)) 242 - __field(u8, hw_type); 242 + __field(u8, hw_type) 243 243 __field(size_t, buf_len) 244 244 __dynamic_array(u8, buf, buf_len) 245 245 ), ··· 269 269 TP_STRUCT__entry( 270 270 __string(device, dev_name(ar->dev)) 271 271 __string(driver, dev_driver_string(ar->dev)) 272 - __field(u8, hw_type); 272 + __field(u8, hw_type) 273 273 __field(u16, buf_len) 274 274 __dynamic_array(u8, pktlog, buf_len) 275 275 ), ··· 435 435 TP_STRUCT__entry( 436 436 __string(device, dev_name(ar->dev)) 437 437 __string(driver, dev_driver_string(ar->dev)) 438 - __field(u8, hw_type); 438 + __field(u8, hw_type) 439 439 __field(u16, len) 440 440 __dynamic_array(u8, rxdesc, len) 441 441 ),
+3 -3
fs/xfs/scrub/trace.h
··· 329 329 __field(int, level) 330 330 __field(xfs_agnumber_t, agno) 331 331 __field(xfs_agblock_t, bno) 332 - __field(int, ptr); 332 + __field(int, ptr) 333 333 __field(int, error) 334 334 __field(void *, ret_ip) 335 335 ), ··· 414 414 __field(int, level) 415 415 __field(xfs_agnumber_t, agno) 416 416 __field(xfs_agblock_t, bno) 417 - __field(int, ptr); 417 + __field(int, ptr) 418 418 __field(void *, ret_ip) 419 419 ), 420 420 TP_fast_assign( ··· 452 452 __field(int, level) 453 453 __field(xfs_agnumber_t, agno) 454 454 __field(xfs_agblock_t, bno) 455 - __field(int, ptr); 455 + __field(int, ptr) 456 456 __field(void *, ret_ip) 457 457 ), 458 458 TP_fast_assign(
+2 -2
fs/xfs/xfs_trace.h
··· 218 218 TP_STRUCT__entry( 219 219 __field(dev_t, dev) 220 220 __field(xfs_ino_t, ino) 221 - __field(void *, leaf); 222 - __field(int, pos); 221 + __field(void *, leaf) 222 + __field(int, pos) 223 223 __field(xfs_fileoff_t, startoff) 224 224 __field(xfs_fsblock_t, startblock) 225 225 __field(xfs_filblks_t, blockcount)
+17 -1
include/linux/trace_events.h
··· 187 187 188 188 struct trace_event_call; 189 189 190 + #define TRACE_FUNCTION_TYPE ((const char *)~0UL) 191 + 192 + struct trace_event_fields { 193 + const char *type; 194 + union { 195 + struct { 196 + const char *name; 197 + const int size; 198 + const int align; 199 + const int is_signed; 200 + const int filter_type; 201 + }; 202 + int (*define_fields)(struct trace_event_call *); 203 + }; 204 + }; 205 + 190 206 struct trace_event_class { 191 207 const char *system; 192 208 void *probe; ··· 211 195 #endif 212 196 int (*reg)(struct trace_event_call *event, 213 197 enum trace_reg type, void *data); 214 - int (*define_fields)(struct trace_event_call *); 198 + struct trace_event_fields *fields_array; 215 199 struct list_head *(*get_fields)(struct trace_event_call *); 216 200 struct list_head fields; 217 201 int (*raw_init)(struct trace_event_call *);
+1 -1
include/trace/events/filemap.h
··· 85 85 TP_ARGS(file, old), 86 86 87 87 TP_STRUCT__entry( 88 - __field(struct file *, file); 88 + __field(struct file *, file) 89 89 __field(unsigned long, i_ino) 90 90 __field(dev_t, s_dev) 91 91 __field(errseq_t, old)
+1 -1
include/trace/events/rpcrdma.h
··· 1507 1507 TP_ARGS(rdma, page), 1508 1508 1509 1509 TP_STRUCT__entry( 1510 - __field(const void *, page); 1510 + __field(const void *, page) 1511 1511 __string(device, rdma->sc_cm_id->device->name) 1512 1512 __string(addr, rdma->sc_xprt.xpt_remotebuf) 1513 1513 ),
+21 -43
include/trace/trace_events.h
··· 394 394 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) 395 395 396 396 #undef __field_ext 397 - #define __field_ext(type, item, filter_type) \ 398 - ret = trace_define_field(event_call, #type, #item, \ 399 - offsetof(typeof(field), item), \ 400 - sizeof(field.item), \ 401 - is_signed_type(type), filter_type); \ 402 - if (ret) \ 403 - return ret; 397 + #define __field_ext(_type, _item, _filter_type) { \ 398 + .type = #_type, .name = #_item, \ 399 + .size = sizeof(_type), .align = __alignof__(_type), \ 400 + .is_signed = is_signed_type(_type), .filter_type = _filter_type }, 404 401 405 402 #undef __field_struct_ext 406 - #define __field_struct_ext(type, item, filter_type) \ 407 - ret = trace_define_field(event_call, #type, #item, \ 408 - offsetof(typeof(field), item), \ 409 - sizeof(field.item), \ 410 - 0, filter_type); \ 411 - if (ret) \ 412 - return ret; 403 + #define __field_struct_ext(_type, _item, _filter_type) { \ 404 + .type = #_type, .name = #_item, \ 405 + .size = sizeof(_type), .align = __alignof__(_type), \ 406 + 0, .filter_type = _filter_type }, 413 407 414 408 #undef __field 415 409 #define __field(type, item) __field_ext(type, item, FILTER_OTHER) ··· 412 418 #define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER) 413 419 414 420 #undef __array 415 - #define __array(type, item, len) \ 416 - do { \ 417 - char *type_str = #type"["__stringify(len)"]"; \ 418 - BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ 419 - BUILD_BUG_ON(len <= 0); \ 420 - ret = trace_define_field(event_call, type_str, #item, \ 421 - offsetof(typeof(field), item), \ 422 - sizeof(field.item), \ 423 - is_signed_type(type), FILTER_OTHER); \ 424 - if (ret) \ 425 - return ret; \ 426 - } while (0); 421 + #define __array(_type, _item, _len) { \ 422 + .type = #_type"["__stringify(_len)"]", .name = #_item, \ 423 + .size = sizeof(_type[_len]), .align = __alignof__(_type), \ 424 + .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER }, 427 425 428 426 #undef 
__dynamic_array 429 - #define __dynamic_array(type, item, len) \ 430 - ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \ 431 - offsetof(typeof(field), __data_loc_##item), \ 432 - sizeof(field.__data_loc_##item), \ 433 - is_signed_type(type), FILTER_OTHER); 427 + #define __dynamic_array(_type, _item, _len) { \ 428 + .type = "__data_loc " #_type "[]", .name = #_item, \ 429 + .size = 4, .align = 4, \ 430 + .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER }, 434 431 435 432 #undef __string 436 433 #define __string(item, src) __dynamic_array(char, item, -1) ··· 431 446 432 447 #undef DECLARE_EVENT_CLASS 433 448 #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \ 434 - static int notrace __init \ 435 - trace_event_define_fields_##call(struct trace_event_call *event_call) \ 436 - { \ 437 - struct trace_event_raw_##call field; \ 438 - int ret; \ 439 - \ 440 - tstruct; \ 441 - \ 442 - return ret; \ 443 - } 449 + static struct trace_event_fields trace_event_fields_##call[] = { \ 450 + tstruct \ 451 + {} }; 444 452 445 453 #undef DEFINE_EVENT 446 454 #define DEFINE_EVENT(template, name, proto, args) ··· 591 613 * 592 614 * static struct trace_event_class __used event_class_<template> = { 593 615 * .system = "<system>", 594 - * .define_fields = trace_event_define_fields_<call>, 616 + * .fields_array = trace_event_fields_<call>, 595 617 * .fields = LIST_HEAD_INIT(event_class_##call.fields), 596 618 * .raw_init = trace_event_raw_init, 597 619 * .probe = trace_event_raw_event_##call, ··· 739 761 static char print_fmt_##call[] = print; \ 740 762 static struct trace_event_class __used __refdata event_class_##call = { \ 741 763 .system = TRACE_SYSTEM_STRING, \ 742 - .define_fields = trace_event_define_fields_##call, \ 764 + .fields_array = trace_event_fields_##call, \ 743 765 .fields = LIST_HEAD_INIT(event_class_##call.fields),\ 744 766 .raw_init = trace_event_raw_init, \ 745 767 .probe = trace_event_raw_event_##call, \
+14 -17
kernel/trace/trace.h
··· 49 49 #undef __field 50 50 #define __field(type, item) type item; 51 51 52 + #undef __field_fn 53 + #define __field_fn(type, item) type item; 54 + 52 55 #undef __field_struct 53 56 #define __field_struct(type, item) __field(type, item) 54 57 ··· 71 68 #define F_STRUCT(args...) args 72 69 73 70 #undef FTRACE_ENTRY 74 - #define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \ 71 + #define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \ 75 72 struct struct_name { \ 76 73 struct trace_entry ent; \ 77 74 tstruct \ 78 75 } 79 76 80 77 #undef FTRACE_ENTRY_DUP 81 - #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter) 78 + #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk) 82 79 83 80 #undef FTRACE_ENTRY_REG 84 - #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, \ 85 - filter, regfn) \ 86 - FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \ 87 - filter) 81 + #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn) \ 82 + FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) 88 83 89 84 #undef FTRACE_ENTRY_PACKED 90 - #define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print, \ 91 - filter) \ 92 - FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \ 93 - filter) __packed 85 + #define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print) \ 86 + FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed 94 87 95 88 #include "trace_entries.h" 96 89 ··· 1898 1899 #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str)) 1899 1900 1900 1901 #undef FTRACE_ENTRY 1901 - #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ 1902 + #define FTRACE_ENTRY(call, struct_name, id, tstruct, print) \ 1902 1903 extern struct trace_event_call \ 1903 1904 __aligned(4) event_##call; 1904 1905 #undef FTRACE_ENTRY_DUP 1905 - #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \ 1906 
- FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \ 1907 - filter) 1906 + #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print) \ 1907 + FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print)) 1908 1908 #undef FTRACE_ENTRY_PACKED 1909 - #define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print, filter) \ 1910 - FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \ 1911 - filter) 1909 + #define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print) \ 1910 + FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print)) 1912 1911 1913 1912 #include "trace_entries.h" 1914 1913
+18 -48
kernel/trace/trace_entries.h
··· 61 61 TRACE_FN, 62 62 63 63 F_STRUCT( 64 - __field( unsigned long, ip ) 65 - __field( unsigned long, parent_ip ) 64 + __field_fn( unsigned long, ip ) 65 + __field_fn( unsigned long, parent_ip ) 66 66 ), 67 67 68 68 F_printk(" %ps <-- %ps", 69 69 (void *)__entry->ip, (void *)__entry->parent_ip), 70 - 71 - FILTER_TRACE_FN, 72 70 73 71 perf_ftrace_event_register 74 72 ); ··· 82 84 __field_desc( int, graph_ent, depth ) 83 85 ), 84 86 85 - F_printk("--> %ps (%d)", (void *)__entry->func, __entry->depth), 86 - 87 - FILTER_OTHER 87 + F_printk("--> %ps (%d)", (void *)__entry->func, __entry->depth) 88 88 ); 89 89 90 90 /* Function return entry */ ··· 93 97 F_STRUCT( 94 98 __field_struct( struct ftrace_graph_ret, ret ) 95 99 __field_desc( unsigned long, ret, func ) 100 + __field_desc( unsigned long, ret, overrun ) 96 101 __field_desc( unsigned long long, ret, calltime) 97 102 __field_desc( unsigned long long, ret, rettime ) 98 - __field_desc( unsigned long, ret, overrun ) 99 103 __field_desc( int, ret, depth ) 100 104 ), 101 105 102 106 F_printk("<-- %ps (%d) (start: %llx end: %llx) over: %d", 103 107 (void *)__entry->func, __entry->depth, 104 108 __entry->calltime, __entry->rettime, 105 - __entry->depth), 106 - 107 - FILTER_OTHER 109 + __entry->depth) 108 110 ); 109 111 110 112 /* ··· 131 137 F_printk("%u:%u:%u ==> %u:%u:%u [%03u]", 132 138 __entry->prev_pid, __entry->prev_prio, __entry->prev_state, 133 139 __entry->next_pid, __entry->next_prio, __entry->next_state, 134 - __entry->next_cpu), 135 - 136 - FILTER_OTHER 140 + __entry->next_cpu) 137 141 ); 138 142 139 143 /* ··· 149 157 F_printk("%u:%u:%u ==+ %u:%u:%u [%03u]", 150 158 __entry->prev_pid, __entry->prev_prio, __entry->prev_state, 151 159 __entry->next_pid, __entry->next_prio, __entry->next_state, 152 - __entry->next_cpu), 153 - 154 - FILTER_OTHER 160 + __entry->next_cpu) 155 161 ); 156 162 157 163 /* ··· 173 183 (void *)__entry->caller[0], (void *)__entry->caller[1], 174 184 (void *)__entry->caller[2], (void 
*)__entry->caller[3], 175 185 (void *)__entry->caller[4], (void *)__entry->caller[5], 176 - (void *)__entry->caller[6], (void *)__entry->caller[7]), 177 - 178 - FILTER_OTHER 186 + (void *)__entry->caller[6], (void *)__entry->caller[7]) 179 187 ); 180 188 181 189 FTRACE_ENTRY(user_stack, userstack_entry, ··· 191 203 (void *)__entry->caller[0], (void *)__entry->caller[1], 192 204 (void *)__entry->caller[2], (void *)__entry->caller[3], 193 205 (void *)__entry->caller[4], (void *)__entry->caller[5], 194 - (void *)__entry->caller[6], (void *)__entry->caller[7]), 195 - 196 - FILTER_OTHER 206 + (void *)__entry->caller[6], (void *)__entry->caller[7]) 197 207 ); 198 208 199 209 /* ··· 208 222 ), 209 223 210 224 F_printk("%ps: %s", 211 - (void *)__entry->ip, __entry->fmt), 212 - 213 - FILTER_OTHER 225 + (void *)__entry->ip, __entry->fmt) 214 226 ); 215 227 216 228 FTRACE_ENTRY_REG(print, print_entry, ··· 223 239 F_printk("%ps: %s", 224 240 (void *)__entry->ip, __entry->buf), 225 241 226 - FILTER_OTHER, 227 - 228 242 ftrace_event_register 229 243 ); 230 244 ··· 236 254 ), 237 255 238 256 F_printk("id:%04x %08x", 239 - __entry->id, (int)__entry->buf[0]), 240 - 241 - FILTER_OTHER 257 + __entry->id, (int)__entry->buf[0]) 242 258 ); 243 259 244 260 FTRACE_ENTRY(bputs, bputs_entry, ··· 249 269 ), 250 270 251 271 F_printk("%ps: %s", 252 - (void *)__entry->ip, __entry->str), 253 - 254 - FILTER_OTHER 272 + (void *)__entry->ip, __entry->str) 255 273 ); 256 274 257 275 FTRACE_ENTRY(mmiotrace_rw, trace_mmiotrace_rw, ··· 261 283 __field_desc( resource_size_t, rw, phys ) 262 284 __field_desc( unsigned long, rw, value ) 263 285 __field_desc( unsigned long, rw, pc ) 264 - __field_desc( int, rw, map_id ) 286 + __field_desc( int, rw, map_id ) 265 287 __field_desc( unsigned char, rw, opcode ) 266 288 __field_desc( unsigned char, rw, width ) 267 289 ), 268 290 269 291 F_printk("%lx %lx %lx %d %x %x", 270 292 (unsigned long)__entry->phys, __entry->value, __entry->pc, 271 - __entry->map_id, 
__entry->opcode, __entry->width), 272 - 273 - FILTER_OTHER 293 + __entry->map_id, __entry->opcode, __entry->width) 274 294 ); 275 295 276 296 FTRACE_ENTRY(mmiotrace_map, trace_mmiotrace_map, ··· 280 304 __field_desc( resource_size_t, map, phys ) 281 305 __field_desc( unsigned long, map, virt ) 282 306 __field_desc( unsigned long, map, len ) 283 - __field_desc( int, map, map_id ) 307 + __field_desc( int, map, map_id ) 284 308 __field_desc( unsigned char, map, opcode ) 285 309 ), 286 310 287 311 F_printk("%lx %lx %lx %d %x", 288 312 (unsigned long)__entry->phys, __entry->virt, __entry->len, 289 - __entry->map_id, __entry->opcode), 290 - 291 - FILTER_OTHER 313 + __entry->map_id, __entry->opcode) 292 314 ); 293 315 294 316 ··· 308 334 F_printk("%u:%s:%s (%u)%s", 309 335 __entry->line, 310 336 __entry->func, __entry->file, __entry->correct, 311 - __entry->constant ? " CONSTANT" : ""), 312 - 313 - FILTER_OTHER 337 + __entry->constant ? " CONSTANT" : "") 314 338 ); 315 339 316 340 ··· 334 362 __entry->duration, 335 363 __entry->outer_duration, 336 364 __entry->nmi_total_ts, 337 - __entry->nmi_count), 338 - 339 - FILTER_OTHER 365 + __entry->nmi_count) 340 366 );
+19 -1
kernel/trace/trace_events.c
··· 24 24 #include <linux/delay.h> 25 25 26 26 #include <trace/events/sched.h> 27 + #include <trace/syscall.h> 27 28 28 29 #include <asm/setup.h> 29 30 ··· 1991 1990 */ 1992 1991 head = trace_get_fields(call); 1993 1992 if (list_empty(head)) { 1994 - ret = call->class->define_fields(call); 1993 + struct trace_event_fields *field = call->class->fields_array; 1994 + unsigned int offset = sizeof(struct trace_entry); 1995 + 1996 + for (; field->type; field++) { 1997 + if (field->type == TRACE_FUNCTION_TYPE) { 1998 + ret = field->define_fields(call); 1999 + break; 2000 + } 2001 + 2002 + offset = ALIGN(offset, field->align); 2003 + ret = trace_define_field(call, field->type, field->name, 2004 + offset, field->size, 2005 + field->is_signed, field->filter_type); 2006 + if (ret) 2007 + break; 2008 + 2009 + offset += field->size; 2010 + } 1995 2011 if (ret < 0) { 1996 2012 pr_warn("Could not initialize trace point events/%s\n", 1997 2013 name);
+7 -1
kernel/trace/trace_events_hist.c
··· 1135 1135 return NULL; 1136 1136 } 1137 1137 1138 + static struct trace_event_fields synth_event_fields_array[] = { 1139 + { .type = TRACE_FUNCTION_TYPE, 1140 + .define_fields = synth_event_define_fields }, 1141 + {} 1142 + }; 1143 + 1138 1144 static int register_synth_event(struct synth_event *event) 1139 1145 { 1140 1146 struct trace_event_call *call = &event->call; ··· 1162 1156 1163 1157 INIT_LIST_HEAD(&call->class->fields); 1164 1158 call->event.funcs = &synth_event_funcs; 1165 - call->class->define_fields = synth_event_define_fields; 1159 + call->class->fields_array = synth_event_fields_array; 1166 1160 1167 1161 ret = register_trace_event(&call->event); 1168 1162 if (!ret) {
+39 -67
kernel/trace/trace_export.c
··· 29 29 * function and thus become accesible via perf. 30 30 */ 31 31 #undef FTRACE_ENTRY_REG 32 - #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, \ 33 - filter, regfn) \ 34 - FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \ 35 - filter) 32 + #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn) \ 33 + FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) 36 34 37 35 /* not needed for this file */ 38 36 #undef __field_struct ··· 38 40 39 41 #undef __field 40 42 #define __field(type, item) type item; 43 + 44 + #undef __field_fn 45 + #define __field_fn(type, item) type item; 41 46 42 47 #undef __field_desc 43 48 #define __field_desc(type, container, item) type item; ··· 61 60 #define F_printk(fmt, args...) fmt, args 62 61 63 62 #undef FTRACE_ENTRY 64 - #define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \ 63 + #define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \ 65 64 struct ____ftrace_##name { \ 66 65 tstruct \ 67 66 }; \ ··· 74 73 } 75 74 76 75 #undef FTRACE_ENTRY_DUP 77 - #define FTRACE_ENTRY_DUP(name, struct_name, id, tstruct, print, filter) \ 78 - FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \ 79 - filter) 76 + #define FTRACE_ENTRY_DUP(name, struct_name, id, tstruct, print) \ 77 + FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) 80 78 81 79 #include "trace_entries.h" 82 80 81 + #undef __field_ext 82 + #define __field_ext(_type, _item, _filter_type) { \ 83 + .type = #_type, .name = #_item, \ 84 + .size = sizeof(_type), .align = __alignof__(_type), \ 85 + is_signed_type(_type), .filter_type = _filter_type }, 86 + 83 87 #undef __field 84 - #define __field(type, item) \ 85 - ret = trace_define_field(event_call, #type, #item, \ 86 - offsetof(typeof(field), item), \ 87 - sizeof(field.item), \ 88 - is_signed_type(type), filter_type); \ 89 - if (ret) \ 90 - return ret; 88 + #define __field(_type, _item) __field_ext(_type, _item, 
FILTER_OTHER) 89 + 90 + #undef __field_fn 91 + #define __field_fn(_type, _item) __field_ext(_type, _item, FILTER_TRACE_FN) 91 92 92 93 #undef __field_desc 93 - #define __field_desc(type, container, item) \ 94 - ret = trace_define_field(event_call, #type, #item, \ 95 - offsetof(typeof(field), \ 96 - container.item), \ 97 - sizeof(field.container.item), \ 98 - is_signed_type(type), filter_type); \ 99 - if (ret) \ 100 - return ret; 94 + #define __field_desc(_type, _container, _item) __field_ext(_type, _item, FILTER_OTHER) 101 95 102 96 #undef __array 103 - #define __array(type, item, len) \ 104 - do { \ 105 - char *type_str = #type"["__stringify(len)"]"; \ 106 - BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ 107 - ret = trace_define_field(event_call, type_str, #item, \ 108 - offsetof(typeof(field), item), \ 109 - sizeof(field.item), \ 110 - is_signed_type(type), filter_type); \ 111 - if (ret) \ 112 - return ret; \ 113 - } while (0); 97 + #define __array(_type, _item, _len) { \ 98 + .type = #_type"["__stringify(_len)"]", .name = #_item, \ 99 + .size = sizeof(_type[_len]), .align = __alignof__(_type), \ 100 + is_signed_type(_type), .filter_type = FILTER_OTHER }, 114 101 115 102 #undef __array_desc 116 - #define __array_desc(type, container, item, len) \ 117 - BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ 118 - ret = trace_define_field(event_call, #type "[" #len "]", #item, \ 119 - offsetof(typeof(field), \ 120 - container.item), \ 121 - sizeof(field.container.item), \ 122 - is_signed_type(type), filter_type); \ 123 - if (ret) \ 124 - return ret; 103 + #define __array_desc(_type, _container, _item, _len) __array(_type, _item, _len) 125 104 126 105 #undef __dynamic_array 127 - #define __dynamic_array(type, item) \ 128 - ret = trace_define_field(event_call, #type "[]", #item, \ 129 - offsetof(typeof(field), item), \ 130 - 0, is_signed_type(type), filter_type);\ 131 - if (ret) \ 132 - return ret; 106 + #define __dynamic_array(_type, _item) { \ 107 + .type = #_type "[]", .name = 
#_item, \ 108 + .size = 0, .align = __alignof__(_type), \ 109 + is_signed_type(_type), .filter_type = FILTER_OTHER }, 133 110 134 111 #undef FTRACE_ENTRY 135 - #define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \ 136 - static int __init \ 137 - ftrace_define_fields_##name(struct trace_event_call *event_call) \ 138 - { \ 139 - struct struct_name field; \ 140 - int ret; \ 141 - int filter_type = filter; \ 142 - \ 143 - tstruct; \ 144 - \ 145 - return ret; \ 146 - } 112 + #define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \ 113 + static struct trace_event_fields ftrace_event_fields_##name[] = { \ 114 + tstruct \ 115 + {} }; 147 116 148 117 #include "trace_entries.h" 149 118 ··· 122 151 123 152 #undef __field 124 153 #define __field(type, item) 154 + 155 + #undef __field_fn 156 + #define __field_fn(type, item) 125 157 126 158 #undef __field_desc 127 159 #define __field_desc(type, container, item) ··· 142 168 #define F_printk(fmt, args...) __stringify(fmt) ", " __stringify(args) 143 169 144 170 #undef FTRACE_ENTRY_REG 145 - #define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, filter,\ 146 - regfn) \ 147 - \ 171 + #define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, regfn) \ 148 172 struct trace_event_class __refdata event_class_ftrace_##call = { \ 149 173 .system = __stringify(TRACE_SYSTEM), \ 150 - .define_fields = ftrace_define_fields_##call, \ 174 + .fields_array = ftrace_event_fields_##call, \ 151 175 .fields = LIST_HEAD_INIT(event_class_ftrace_##call.fields),\ 152 176 .reg = regfn, \ 153 177 }; \ ··· 163 191 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call; 164 192 165 193 #undef FTRACE_ENTRY 166 - #define FTRACE_ENTRY(call, struct_name, etype, tstruct, print, filter) \ 194 + #define FTRACE_ENTRY(call, struct_name, etype, tstruct, print) \ 167 195 FTRACE_ENTRY_REG(call, struct_name, etype, \ 168 - PARAMS(tstruct), PARAMS(print), filter, NULL) 196 + PARAMS(tstruct), PARAMS(print), NULL) 
169 197 170 198 bool ftrace_event_is_function(struct trace_event_call *call) 171 199 {
+14 -2
kernel/trace/trace_kprobe.c
··· 1534 1534 .trace = print_kprobe_event 1535 1535 }; 1536 1536 1537 + static struct trace_event_fields kretprobe_fields_array[] = { 1538 + { .type = TRACE_FUNCTION_TYPE, 1539 + .define_fields = kretprobe_event_define_fields }, 1540 + {} 1541 + }; 1542 + 1543 + static struct trace_event_fields kprobe_fields_array[] = { 1544 + { .type = TRACE_FUNCTION_TYPE, 1545 + .define_fields = kprobe_event_define_fields }, 1546 + {} 1547 + }; 1548 + 1537 1549 static inline void init_trace_event_call(struct trace_kprobe *tk) 1538 1550 { 1539 1551 struct trace_event_call *call = trace_probe_event_call(&tk->tp); 1540 1552 1541 1553 if (trace_kprobe_is_return(tk)) { 1542 1554 call->event.funcs = &kretprobe_funcs; 1543 - call->class->define_fields = kretprobe_event_define_fields; 1555 + call->class->fields_array = kretprobe_fields_array; 1544 1556 } else { 1545 1557 call->event.funcs = &kprobe_funcs; 1546 - call->class->define_fields = kprobe_event_define_fields; 1558 + call->class->fields_array = kprobe_fields_array; 1547 1559 } 1548 1560 1549 1561 call->flags = TRACE_EVENT_FL_KPROBE;
+20 -30
kernel/trace/trace_syscalls.c
··· 198 198 199 199 extern char *__bad_type_size(void); 200 200 201 - #define SYSCALL_FIELD(type, field, name) \ 202 - sizeof(type) != sizeof(trace.field) ? \ 203 - __bad_type_size() : \ 204 - #type, #name, offsetof(typeof(trace), field), \ 205 - sizeof(trace.field), is_signed_type(type) 201 + #define SYSCALL_FIELD(_type, _name) { \ 202 + .type = #_type, .name = #_name, \ 203 + .size = sizeof(_type), .align = __alignof__(_type), \ 204 + .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER } 206 205 207 206 static int __init 208 207 __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len) ··· 268 269 { 269 270 struct syscall_trace_enter trace; 270 271 struct syscall_metadata *meta = call->data; 271 - int ret; 272 - int i; 273 272 int offset = offsetof(typeof(trace), args); 274 - 275 - ret = trace_define_field(call, SYSCALL_FIELD(int, nr, __syscall_nr), 276 - FILTER_OTHER); 277 - if (ret) 278 - return ret; 273 + int ret, i; 279 274 280 275 for (i = 0; i < meta->nb_args; i++) { 281 276 ret = trace_define_field(call, meta->types[i], 282 277 meta->args[i], offset, 283 278 sizeof(unsigned long), 0, 284 279 FILTER_OTHER); 280 + if (ret) 281 + break; 285 282 offset += sizeof(unsigned long); 286 283 } 287 - 288 - return ret; 289 - } 290 - 291 - static int __init syscall_exit_define_fields(struct trace_event_call *call) 292 - { 293 - struct syscall_trace_exit trace; 294 - int ret; 295 - 296 - ret = trace_define_field(call, SYSCALL_FIELD(int, nr, __syscall_nr), 297 - FILTER_OTHER); 298 - if (ret) 299 - return ret; 300 - 301 - ret = trace_define_field(call, SYSCALL_FIELD(long, ret, ret), 302 - FILTER_OTHER); 303 284 304 285 return ret; 305 286 } ··· 481 502 return id; 482 503 } 483 504 505 + static struct trace_event_fields __refdata syscall_enter_fields_array[] = { 506 + SYSCALL_FIELD(int, __syscall_nr), 507 + { .type = TRACE_FUNCTION_TYPE, 508 + .define_fields = syscall_enter_define_fields }, 509 + {} 510 + }; 511 + 484 512 struct 
trace_event_functions enter_syscall_print_funcs = { 485 513 .trace = print_syscall_enter, 486 514 }; ··· 499 513 struct trace_event_class __refdata event_class_syscall_enter = { 500 514 .system = "syscalls", 501 515 .reg = syscall_enter_register, 502 - .define_fields = syscall_enter_define_fields, 516 + .fields_array = syscall_enter_fields_array, 503 517 .get_fields = syscall_get_enter_fields, 504 518 .raw_init = init_syscall_trace, 505 519 }; ··· 507 521 struct trace_event_class __refdata event_class_syscall_exit = { 508 522 .system = "syscalls", 509 523 .reg = syscall_exit_register, 510 - .define_fields = syscall_exit_define_fields, 524 + .fields_array = (struct trace_event_fields[]){ 525 + SYSCALL_FIELD(int, __syscall_nr), 526 + SYSCALL_FIELD(long, ret), 527 + {} 528 + }, 511 529 .fields = LIST_HEAD_INIT(event_class_syscall_exit.fields), 512 530 .raw_init = init_syscall_trace, 513 531 };
+7 -2
kernel/trace/trace_uprobe.c
··· 1507 1507 .trace = print_uprobe_event 1508 1508 }; 1509 1509 1510 + static struct trace_event_fields uprobe_fields_array[] = { 1511 + { .type = TRACE_FUNCTION_TYPE, 1512 + .define_fields = uprobe_event_define_fields }, 1513 + {} 1514 + }; 1515 + 1510 1516 static inline void init_trace_event_call(struct trace_uprobe *tu) 1511 1517 { 1512 1518 struct trace_event_call *call = trace_probe_event_call(&tu->tp); 1513 - 1514 1519 call->event.funcs = &uprobe_funcs; 1515 - call->class->define_fields = uprobe_event_define_fields; 1520 + call->class->fields_array = uprobe_fields_array; 1516 1521 1517 1522 call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY; 1518 1523 call->class->reg = trace_uprobe_register;
+14 -14
net/mac80211/trace.h
··· 408 408 __field(u32, basic_rates) 409 409 __array(int, mcast_rate, NUM_NL80211_BANDS) 410 410 __field(u16, ht_operation_mode) 411 - __field(s32, cqm_rssi_thold); 412 - __field(s32, cqm_rssi_hyst); 413 - __field(u32, channel_width); 414 - __field(u32, channel_cfreq1); 411 + __field(s32, cqm_rssi_thold) 412 + __field(s32, cqm_rssi_hyst) 413 + __field(u32, channel_width) 414 + __field(u32, channel_cfreq1) 415 415 __dynamic_array(u32, arp_addr_list, 416 416 info->arp_addr_cnt > IEEE80211_BSS_ARP_ADDR_LIST_LEN ? 417 417 IEEE80211_BSS_ARP_ADDR_LIST_LEN : 418 - info->arp_addr_cnt); 419 - __field(int, arp_addr_cnt); 420 - __field(bool, qos); 421 - __field(bool, idle); 422 - __field(bool, ps); 423 - __dynamic_array(u8, ssid, info->ssid_len); 424 - __field(bool, hidden_ssid); 418 + info->arp_addr_cnt) 419 + __field(int, arp_addr_cnt) 420 + __field(bool, qos) 421 + __field(bool, idle) 422 + __field(bool, ps) 423 + __dynamic_array(u8, ssid, info->ssid_len) 424 + __field(bool, hidden_ssid) 425 425 __field(int, txpower) 426 426 __field(u8, p2p_oppps_ctwindow) 427 427 ), ··· 1672 1672 VIF_ENTRY 1673 1673 __field(u8, dtimper) 1674 1674 __field(u16, bcnint) 1675 - __dynamic_array(u8, ssid, info->ssid_len); 1676 - __field(bool, hidden_ssid); 1675 + __dynamic_array(u8, ssid, info->ssid_len) 1676 + __field(bool, hidden_ssid) 1677 1677 ), 1678 1678 1679 1679 TP_fast_assign( ··· 1739 1739 VIF_ENTRY 1740 1740 __field(u8, dtimper) 1741 1741 __field(u16, bcnint) 1742 - __dynamic_array(u8, ssid, info->ssid_len); 1742 + __dynamic_array(u8, ssid, info->ssid_len) 1743 1743 ), 1744 1744 1745 1745 TP_fast_assign(
+3 -3
net/wireless/trace.h
··· 2009 2009 WIPHY_ENTRY 2010 2010 WDEV_ENTRY 2011 2011 __field(u8, master_pref) 2012 - __field(u8, bands); 2012 + __field(u8, bands) 2013 2013 ), 2014 2014 TP_fast_assign( 2015 2015 WIPHY_ASSIGN; ··· 2031 2031 WIPHY_ENTRY 2032 2032 WDEV_ENTRY 2033 2033 __field(u8, master_pref) 2034 - __field(u8, bands); 2035 - __field(u32, changes); 2034 + __field(u8, bands) 2035 + __field(u32, changes) 2036 2036 ), 2037 2037 TP_fast_assign( 2038 2038 WIPHY_ASSIGN;