Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'trace-v6.5-rc1-3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace

Pull tracing fixes from Steven Rostedt:

- Fix some missing-prototype warnings

- Fix user events struct args (did not include size of struct)

When creating a user event, the "struct" keyword is to denote that
the size of the field will be passed in. But the parsing failed to
handle this case.

- Add selftest to struct sizes for user events

- Fix sample code for direct trampolines.

The sample code for direct trampolines attaches to handle_mm_fault().
But the prototype changed and the direct trampoline sample code was
not updated. Direct trampolines need to have the arguments correct
otherwise it can fail or crash the system.

- Remove unused ftrace_regs_caller_ret() prototype.

- Quiet false positive of FORTIFY_SOURCE

Due to backward compatibility, the structure used to save stack
traces in the kernel had a fixed size of 8. This structure is
exported to user space via the tracing format file. A change was made
to allow more than 8 functions to be recorded, and user space now
uses the size field to know how many functions are actually in the
stack.

But the structure still has size of 8 (even though it points into the
ring buffer that has the required amount allocated to hold a full
stack).

This was fine until the fortifier noticed that the
memcpy(&entry->caller, stack, size) was greater than the 8 functions
and would complain at runtime about it.

Hide this by using a pointer to the stack location on the ring buffer
instead of using the address of the entry structure caller field.

- Fix a deadloop in reading trace_pipe that was caused by a mismatch
between ring_buffer_empty() returning false which then asked to read
the data, but the read code uses rb_num_of_entries() that returned
zero, causing an infinite "retry".

- Fix a warning caused by not using all pages allocated to store ftrace
functions, where this can happen if the linker inserts a bunch of
"NULL" entries, causing the accounting of how many pages needed to be
off.

- Fix histogram synthetic event crashing when the start event is
removed and the end event is still using a variable from it

- Fix memory leak in freeing iter->temp in tracing_release_pipe()

* tag 'trace-v6.5-rc1-3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
tracing: Fix memory leak of iter->temp when reading trace_pipe
tracing/histograms: Add histograms to hist_vars if they have referenced variables
tracing: Stop FORTIFY_SOURCE complaining about stack trace caller
ftrace: Fix possible warning on checking all pages used in ftrace_process_locs()
ring-buffer: Fix deadloop issue on reading trace_pipe
tracing: arm64: Avoid missing-prototype warnings
selftests/user_events: Test struct size match cases
tracing/user_events: Fix struct arg size match check
x86/ftrace: Remove unsued extern declaration ftrace_regs_caller_ret()
arm64: ftrace: Add direct call trampoline samples support
samples: ftrace: Save required argument registers in sample trampolines

+268 -40
+2
arch/arm64/Kconfig
··· 197 197 !CC_OPTIMIZE_FOR_SIZE) 198 198 select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \ 199 199 if DYNAMIC_FTRACE_WITH_ARGS 200 + select HAVE_SAMPLE_FTRACE_DIRECT 201 + select HAVE_SAMPLE_FTRACE_DIRECT_MULTI 200 202 select HAVE_EFFICIENT_UNALIGNED_ACCESS 201 203 select HAVE_FAST_GUP 202 204 select HAVE_FTRACE_MCOUNT_RECORD
+4
arch/arm64/include/asm/ftrace.h
··· 211 211 { 212 212 return ret_regs->fp; 213 213 } 214 + 215 + void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, 216 + unsigned long frame_pointer); 217 + 214 218 #endif /* ifdef CONFIG_FUNCTION_GRAPH_TRACER */ 215 219 #endif 216 220
+3
arch/arm64/include/asm/syscall.h
··· 85 85 return AUDIT_ARCH_AARCH64; 86 86 } 87 87 88 + int syscall_trace_enter(struct pt_regs *regs); 89 + void syscall_trace_exit(struct pt_regs *regs); 90 + 88 91 #endif /* __ASM_SYSCALL_H */
-3
arch/arm64/kernel/syscall.c
··· 75 75 return unlikely(flags & _TIF_SYSCALL_WORK); 76 76 } 77 77 78 - int syscall_trace_enter(struct pt_regs *regs); 79 - void syscall_trace_exit(struct pt_regs *regs); 80 - 81 78 static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr, 82 79 const syscall_fn_t syscall_table[]) 83 80 {
-1
arch/x86/kernel/ftrace.c
··· 282 282 283 283 /* Defined as markers to the end of the ftrace default trampolines */ 284 284 extern void ftrace_regs_caller_end(void); 285 - extern void ftrace_regs_caller_ret(void); 286 285 extern void ftrace_caller_end(void); 287 286 extern void ftrace_caller_op_ptr(void); 288 287 extern void ftrace_regs_caller_op_ptr(void);
+9
include/linux/ftrace.h
··· 41 41 struct ftrace_regs; 42 42 struct dyn_ftrace; 43 43 44 + char *arch_ftrace_match_adjust(char *str, const char *search); 45 + 46 + #ifdef CONFIG_HAVE_FUNCTION_GRAPH_RETVAL 47 + struct fgraph_ret_regs; 48 + unsigned long ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs); 49 + #else 50 + unsigned long ftrace_return_to_handler(unsigned long frame_pointer); 51 + #endif 52 + 44 53 #ifdef CONFIG_FUNCTION_TRACER 45 54 /* 46 55 * If the arch's mcount caller does not support all of ftrace's
+1
kernel/trace/fgraph.c
··· 15 15 #include <trace/events/sched.h> 16 16 17 17 #include "ftrace_internal.h" 18 + #include "trace.h" 18 19 19 20 #ifdef CONFIG_DYNAMIC_FTRACE 20 21 #define ASSIGN_OPS_HASH(opsname, val) \
+31 -14
kernel/trace/ftrace.c
··· 3305 3305 return cnt; 3306 3306 } 3307 3307 3308 + static void ftrace_free_pages(struct ftrace_page *pages) 3309 + { 3310 + struct ftrace_page *pg = pages; 3311 + 3312 + while (pg) { 3313 + if (pg->records) { 3314 + free_pages((unsigned long)pg->records, pg->order); 3315 + ftrace_number_of_pages -= 1 << pg->order; 3316 + } 3317 + pages = pg->next; 3318 + kfree(pg); 3319 + pg = pages; 3320 + ftrace_number_of_groups--; 3321 + } 3322 + } 3323 + 3308 3324 static struct ftrace_page * 3309 3325 ftrace_allocate_pages(unsigned long num_to_init) 3310 3326 { ··· 3359 3343 return start_pg; 3360 3344 3361 3345 free_pages: 3362 - pg = start_pg; 3363 - while (pg) { 3364 - if (pg->records) { 3365 - free_pages((unsigned long)pg->records, pg->order); 3366 - ftrace_number_of_pages -= 1 << pg->order; 3367 - } 3368 - start_pg = pg->next; 3369 - kfree(pg); 3370 - pg = start_pg; 3371 - ftrace_number_of_groups--; 3372 - } 3346 + ftrace_free_pages(start_pg); 3373 3347 pr_info("ftrace: FAILED to allocate memory for functions\n"); 3374 3348 return NULL; 3375 3349 } ··· 6477 6471 unsigned long *start, 6478 6472 unsigned long *end) 6479 6473 { 6474 + struct ftrace_page *pg_unuse = NULL; 6480 6475 struct ftrace_page *start_pg; 6481 6476 struct ftrace_page *pg; 6482 6477 struct dyn_ftrace *rec; 6478 + unsigned long skipped = 0; 6483 6479 unsigned long count; 6484 6480 unsigned long *p; 6485 6481 unsigned long addr; ··· 6544 6536 * object files to satisfy alignments. 6545 6537 * Skip any NULL pointers. 
6546 6538 */ 6547 - if (!addr) 6539 + if (!addr) { 6540 + skipped++; 6548 6541 continue; 6542 + } 6549 6543 6550 6544 end_offset = (pg->index+1) * sizeof(pg->records[0]); 6551 6545 if (end_offset > PAGE_SIZE << pg->order) { ··· 6561 6551 rec->ip = addr; 6562 6552 } 6563 6553 6564 - /* We should have used all pages */ 6565 - WARN_ON(pg->next); 6554 + if (pg->next) { 6555 + pg_unuse = pg->next; 6556 + pg->next = NULL; 6557 + } 6566 6558 6567 6559 /* Assign the last page to ftrace_pages */ 6568 6560 ftrace_pages = pg; ··· 6586 6574 out: 6587 6575 mutex_unlock(&ftrace_lock); 6588 6576 6577 + /* We should have used all pages unless we skipped some */ 6578 + if (pg_unuse) { 6579 + WARN_ON(!skipped); 6580 + ftrace_free_pages(pg_unuse); 6581 + } 6589 6582 return ret; 6590 6583 } 6591 6584
+3 -2
kernel/trace/ftrace_internal.h
··· 2 2 #ifndef _LINUX_KERNEL_FTRACE_INTERNAL_H 3 3 #define _LINUX_KERNEL_FTRACE_INTERNAL_H 4 4 5 + int __register_ftrace_function(struct ftrace_ops *ops); 6 + int __unregister_ftrace_function(struct ftrace_ops *ops); 7 + 5 8 #ifdef CONFIG_FUNCTION_TRACER 6 9 7 10 extern struct mutex ftrace_lock; ··· 18 15 19 16 #else /* !CONFIG_DYNAMIC_FTRACE */ 20 17 21 - int __register_ftrace_function(struct ftrace_ops *ops); 22 - int __unregister_ftrace_function(struct ftrace_ops *ops); 23 18 /* Keep as macros so we do not need to define the commands */ 24 19 # define ftrace_startup(ops, command) \ 25 20 ({ \
+15 -9
kernel/trace/ring_buffer.c
··· 5242 5242 } 5243 5243 EXPORT_SYMBOL_GPL(ring_buffer_size); 5244 5244 5245 + static void rb_clear_buffer_page(struct buffer_page *page) 5246 + { 5247 + local_set(&page->write, 0); 5248 + local_set(&page->entries, 0); 5249 + rb_init_page(page->page); 5250 + page->read = 0; 5251 + } 5252 + 5245 5253 static void 5246 5254 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) 5247 5255 { 5256 + struct buffer_page *page; 5257 + 5248 5258 rb_head_page_deactivate(cpu_buffer); 5249 5259 5250 5260 cpu_buffer->head_page 5251 5261 = list_entry(cpu_buffer->pages, struct buffer_page, list); 5252 - local_set(&cpu_buffer->head_page->write, 0); 5253 - local_set(&cpu_buffer->head_page->entries, 0); 5254 - local_set(&cpu_buffer->head_page->page->commit, 0); 5255 - 5256 - cpu_buffer->head_page->read = 0; 5262 + rb_clear_buffer_page(cpu_buffer->head_page); 5263 + list_for_each_entry(page, cpu_buffer->pages, list) { 5264 + rb_clear_buffer_page(page); 5265 + } 5257 5266 5258 5267 cpu_buffer->tail_page = cpu_buffer->head_page; 5259 5268 cpu_buffer->commit_page = cpu_buffer->head_page; 5260 5269 5261 5270 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); 5262 5271 INIT_LIST_HEAD(&cpu_buffer->new_pages); 5263 - local_set(&cpu_buffer->reader_page->write, 0); 5264 - local_set(&cpu_buffer->reader_page->entries, 0); 5265 - local_set(&cpu_buffer->reader_page->page->commit, 0); 5266 - cpu_buffer->reader_page->read = 0; 5272 + rb_clear_buffer_page(cpu_buffer->reader_page); 5267 5273 5268 5274 local_set(&cpu_buffer->entries_bytes, 0); 5269 5275 local_set(&cpu_buffer->overrun, 0);
+20 -2
kernel/trace/trace.c
··· 3118 3118 struct ftrace_stack *fstack; 3119 3119 struct stack_entry *entry; 3120 3120 int stackidx; 3121 + void *ptr; 3121 3122 3122 3123 /* 3123 3124 * Add one, for this function and the call to save_stack_trace() ··· 3162 3161 trace_ctx); 3163 3162 if (!event) 3164 3163 goto out; 3165 - entry = ring_buffer_event_data(event); 3164 + ptr = ring_buffer_event_data(event); 3165 + entry = ptr; 3166 3166 3167 - memcpy(&entry->caller, fstack->calls, size); 3167 + /* 3168 + * For backward compatibility reasons, the entry->caller is an 3169 + * array of 8 slots to store the stack. This is also exported 3170 + * to user space. The amount allocated on the ring buffer actually 3171 + * holds enough for the stack specified by nr_entries. This will 3172 + * go into the location of entry->caller. Due to string fortifiers 3173 + * checking the size of the destination of memcpy() it triggers 3174 + * when it detects that size is greater than 8. To hide this from 3175 + * the fortifiers, we use "ptr" and pointer arithmetic to assign caller. 3176 + * 3177 + * The below is really just: 3178 + * memcpy(&entry->caller, fstack->calls, size); 3179 + */ 3180 + ptr += offsetof(typeof(*entry), caller); 3181 + memcpy(ptr, fstack->calls, size); 3182 + 3168 3183 entry->size = nr_entries; 3169 3184 3170 3185 if (!call_filter_check_discard(call, entry, buffer, event)) ··· 6781 6764 6782 6765 free_cpumask_var(iter->started); 6783 6766 kfree(iter->fmt); 6767 + kfree(iter->temp); 6784 6768 mutex_destroy(&iter->mutex); 6785 6769 kfree(iter); 6786 6770
+5 -3
kernel/trace/trace_events_hist.c
··· 6663 6663 if (get_named_trigger_data(trigger_data)) 6664 6664 goto enable; 6665 6665 6666 - if (has_hist_vars(hist_data)) 6667 - save_hist_vars(hist_data); 6668 - 6669 6666 ret = create_actions(hist_data); 6670 6667 if (ret) 6671 6668 goto out_unreg; 6669 + 6670 + if (has_hist_vars(hist_data) || hist_data->n_var_refs) { 6671 + if (save_hist_vars(hist_data)) 6672 + goto out_unreg; 6673 + } 6672 6674 6673 6675 ret = tracing_map_init(hist_data->map); 6674 6676 if (ret)
+3
kernel/trace/trace_events_user.c
··· 1317 1317 pos += snprintf(buf + pos, LEN_OR_ZERO, " "); 1318 1318 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->name); 1319 1319 1320 + if (str_has_prefix(field->type, "struct ")) 1321 + pos += snprintf(buf + pos, LEN_OR_ZERO, " %d", field->size); 1322 + 1320 1323 if (colon) 1321 1324 pos += snprintf(buf + pos, LEN_OR_ZERO, ";"); 1322 1325
+3
kernel/trace/trace_kprobe_selftest.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include "trace_kprobe_selftest.h" 4 + 2 5 /* 3 6 * Function used during the kprobe self test. This function is in a separate 4 7 * compile unit so it can be compile with CC_FLAGS_FTRACE to ensure that it
+34
samples/ftrace/ftrace-direct-modify.c
··· 2 2 #include <linux/module.h> 3 3 #include <linux/kthread.h> 4 4 #include <linux/ftrace.h> 5 + #ifndef CONFIG_ARM64 5 6 #include <asm/asm-offsets.h> 7 + #endif 6 8 7 9 extern void my_direct_func1(void); 8 10 extern void my_direct_func2(void); ··· 97 95 ); 98 96 99 97 #endif /* CONFIG_S390 */ 98 + 99 + #ifdef CONFIG_ARM64 100 + 101 + asm ( 102 + " .pushsection .text, \"ax\", @progbits\n" 103 + " .type my_tramp1, @function\n" 104 + " .globl my_tramp1\n" 105 + " my_tramp1:" 106 + " bti c\n" 107 + " sub sp, sp, #16\n" 108 + " stp x9, x30, [sp]\n" 109 + " bl my_direct_func1\n" 110 + " ldp x30, x9, [sp]\n" 111 + " add sp, sp, #16\n" 112 + " ret x9\n" 113 + " .size my_tramp1, .-my_tramp1\n" 114 + 115 + " .type my_tramp2, @function\n" 116 + " .globl my_tramp2\n" 117 + " my_tramp2:" 118 + " bti c\n" 119 + " sub sp, sp, #16\n" 120 + " stp x9, x30, [sp]\n" 121 + " bl my_direct_func2\n" 122 + " ldp x30, x9, [sp]\n" 123 + " add sp, sp, #16\n" 124 + " ret x9\n" 125 + " .size my_tramp2, .-my_tramp2\n" 126 + " .popsection\n" 127 + ); 128 + 129 + #endif /* CONFIG_ARM64 */ 100 130 101 131 #ifdef CONFIG_LOONGARCH 102 132
+40
samples/ftrace/ftrace-direct-multi-modify.c
··· 2 2 #include <linux/module.h> 3 3 #include <linux/kthread.h> 4 4 #include <linux/ftrace.h> 5 + #ifndef CONFIG_ARM64 5 6 #include <asm/asm-offsets.h> 7 + #endif 6 8 7 9 extern void my_direct_func1(unsigned long ip); 8 10 extern void my_direct_func2(unsigned long ip); ··· 104 102 ); 105 103 106 104 #endif /* CONFIG_S390 */ 105 + 106 + #ifdef CONFIG_ARM64 107 + 108 + asm ( 109 + " .pushsection .text, \"ax\", @progbits\n" 110 + " .type my_tramp1, @function\n" 111 + " .globl my_tramp1\n" 112 + " my_tramp1:" 113 + " bti c\n" 114 + " sub sp, sp, #32\n" 115 + " stp x9, x30, [sp]\n" 116 + " str x0, [sp, #16]\n" 117 + " mov x0, x30\n" 118 + " bl my_direct_func1\n" 119 + " ldp x30, x9, [sp]\n" 120 + " ldr x0, [sp, #16]\n" 121 + " add sp, sp, #32\n" 122 + " ret x9\n" 123 + " .size my_tramp1, .-my_tramp1\n" 124 + 125 + " .type my_tramp2, @function\n" 126 + " .globl my_tramp2\n" 127 + " my_tramp2:" 128 + " bti c\n" 129 + " sub sp, sp, #32\n" 130 + " stp x9, x30, [sp]\n" 131 + " str x0, [sp, #16]\n" 132 + " mov x0, x30\n" 133 + " bl my_direct_func2\n" 134 + " ldp x30, x9, [sp]\n" 135 + " ldr x0, [sp, #16]\n" 136 + " add sp, sp, #32\n" 137 + " ret x9\n" 138 + " .size my_tramp2, .-my_tramp2\n" 139 + " .popsection\n" 140 + ); 141 + 142 + #endif /* CONFIG_ARM64 */ 107 143 108 144 #ifdef CONFIG_LOONGARCH 109 145 #include <asm/asm.h>
+25
samples/ftrace/ftrace-direct-multi.c
··· 4 4 #include <linux/mm.h> /* for handle_mm_fault() */ 5 5 #include <linux/ftrace.h> 6 6 #include <linux/sched/stat.h> 7 + #ifndef CONFIG_ARM64 7 8 #include <asm/asm-offsets.h> 9 + #endif 8 10 9 11 extern void my_direct_func(unsigned long ip); 10 12 ··· 67 65 ); 68 66 69 67 #endif /* CONFIG_S390 */ 68 + 69 + #ifdef CONFIG_ARM64 70 + 71 + asm ( 72 + " .pushsection .text, \"ax\", @progbits\n" 73 + " .type my_tramp, @function\n" 74 + " .globl my_tramp\n" 75 + " my_tramp:" 76 + " bti c\n" 77 + " sub sp, sp, #32\n" 78 + " stp x9, x30, [sp]\n" 79 + " str x0, [sp, #16]\n" 80 + " mov x0, x30\n" 81 + " bl my_direct_func\n" 82 + " ldp x30, x9, [sp]\n" 83 + " ldr x0, [sp, #16]\n" 84 + " add sp, sp, #32\n" 85 + " ret x9\n" 86 + " .size my_tramp, .-my_tramp\n" 87 + " .popsection\n" 88 + ); 89 + 90 + #endif /* CONFIG_ARM64 */ 70 91 71 92 #ifdef CONFIG_LOONGARCH 72 93
+34 -6
samples/ftrace/ftrace-direct-too.c
··· 3 3 4 4 #include <linux/mm.h> /* for handle_mm_fault() */ 5 5 #include <linux/ftrace.h> 6 + #ifndef CONFIG_ARM64 6 7 #include <asm/asm-offsets.h> 8 + #endif 7 9 8 - extern void my_direct_func(struct vm_area_struct *vma, 9 - unsigned long address, unsigned int flags); 10 + extern void my_direct_func(struct vm_area_struct *vma, unsigned long address, 11 + unsigned int flags, struct pt_regs *regs); 10 12 11 - void my_direct_func(struct vm_area_struct *vma, 12 - unsigned long address, unsigned int flags) 13 + void my_direct_func(struct vm_area_struct *vma, unsigned long address, 14 + unsigned int flags, struct pt_regs *regs) 13 15 { 14 - trace_printk("handle mm fault vma=%p address=%lx flags=%x\n", 15 - vma, address, flags); 16 + trace_printk("handle mm fault vma=%p address=%lx flags=%x regs=%p\n", 17 + vma, address, flags, regs); 16 18 } 17 19 18 20 extern void my_tramp(void *); ··· 36 34 " pushq %rdi\n" 37 35 " pushq %rsi\n" 38 36 " pushq %rdx\n" 37 + " pushq %rcx\n" 39 38 " call my_direct_func\n" 39 + " popq %rcx\n" 40 40 " popq %rdx\n" 41 41 " popq %rsi\n" 42 42 " popq %rdi\n" ··· 73 69 ); 74 70 75 71 #endif /* CONFIG_S390 */ 72 + 73 + #ifdef CONFIG_ARM64 74 + 75 + asm ( 76 + " .pushsection .text, \"ax\", @progbits\n" 77 + " .type my_tramp, @function\n" 78 + " .globl my_tramp\n" 79 + " my_tramp:" 80 + " bti c\n" 81 + " sub sp, sp, #48\n" 82 + " stp x9, x30, [sp]\n" 83 + " stp x0, x1, [sp, #16]\n" 84 + " stp x2, x3, [sp, #32]\n" 85 + " bl my_direct_func\n" 86 + " ldp x30, x9, [sp]\n" 87 + " ldp x0, x1, [sp, #16]\n" 88 + " ldp x2, x3, [sp, #32]\n" 89 + " add sp, sp, #48\n" 90 + " ret x9\n" 91 + " .size my_tramp, .-my_tramp\n" 92 + " .popsection\n" 93 + ); 94 + 95 + #endif /* CONFIG_ARM64 */ 76 96 77 97 #ifdef CONFIG_LOONGARCH 78 98
+24
samples/ftrace/ftrace-direct.c
··· 3 3 4 4 #include <linux/sched.h> /* for wake_up_process() */ 5 5 #include <linux/ftrace.h> 6 + #ifndef CONFIG_ARM64 6 7 #include <asm/asm-offsets.h> 8 + #endif 7 9 8 10 extern void my_direct_func(struct task_struct *p); 9 11 ··· 64 62 ); 65 63 66 64 #endif /* CONFIG_S390 */ 65 + 66 + #ifdef CONFIG_ARM64 67 + 68 + asm ( 69 + " .pushsection .text, \"ax\", @progbits\n" 70 + " .type my_tramp, @function\n" 71 + " .globl my_tramp\n" 72 + " my_tramp:" 73 + " bti c\n" 74 + " sub sp, sp, #32\n" 75 + " stp x9, x30, [sp]\n" 76 + " str x0, [sp, #16]\n" 77 + " bl my_direct_func\n" 78 + " ldp x30, x9, [sp]\n" 79 + " ldr x0, [sp, #16]\n" 80 + " add sp, sp, #32\n" 81 + " ret x9\n" 82 + " .size my_tramp, .-my_tramp\n" 83 + " .popsection\n" 84 + ); 85 + 86 + #endif /* CONFIG_ARM64 */ 67 87 68 88 #ifdef CONFIG_LOONGARCH 69 89
+12
tools/testing/selftests/user_events/dyn_test.c
··· 217 217 /* Types don't match */ 218 218 TEST_NMATCH("__test_event u64 a; u64 b", 219 219 "__test_event u32 a; u32 b"); 220 + 221 + /* Struct name and size matches */ 222 + TEST_MATCH("__test_event struct my_struct a 20", 223 + "__test_event struct my_struct a 20"); 224 + 225 + /* Struct name don't match */ 226 + TEST_NMATCH("__test_event struct my_struct a 20", 227 + "__test_event struct my_struct b 20"); 228 + 229 + /* Struct size don't match */ 230 + TEST_NMATCH("__test_event struct my_struct a 20", 231 + "__test_event struct my_struct a 21"); 220 232 } 221 233 222 234 int main(int argc, char **argv)