Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

stacktrace: Remove reliable argument from arch_stack_walk() callback

Currently the callback passed to arch_stack_walk() has an argument called
reliable passed to it to indicate if the stack entry is reliable, a comment
says that this is used by some printk() consumers. However in the current
kernel none of the arch_stack_walk() implementations ever set this flag to
true and the only callback implementation we have is in the generic
stacktrace code which ignores the flag. It therefore appears that this
flag is redundant so we can simplify and clarify things by removing it.

Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Miroslav Benes <mbenes@suse.cz>
Link: https://lore.kernel.org/r/20200914153409.25097-2-broonie@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>

Authored by Mark Brown, committed by Will Deacon.
Commit identifiers: 264c03a2 f75aef39 (presumably the commit and its parent — verify against the repository)

+11 -16
+2 -2
arch/s390/kernel/stacktrace.c
@@ -19,7 +19,7 @@
 
 	unwind_for_each_frame(&state, task, regs, 0) {
 		addr = unwind_get_return_address(&state);
-		if (!addr || !consume_entry(cookie, addr, false))
+		if (!addr || !consume_entry(cookie, addr))
 			break;
 	}
 }
@@ -56,7 +56,7 @@
 		return -EINVAL;
 #endif
 
-		if (!consume_entry(cookie, addr, false))
+		if (!consume_entry(cookie, addr))
 			return -EINVAL;
 	}
 
+5 -5
arch/x86/kernel/stacktrace.c
@@ -18,13 +18,13 @@
 	struct unwind_state state;
 	unsigned long addr;
 
-	if (regs && !consume_entry(cookie, regs->ip, false))
+	if (regs && !consume_entry(cookie, regs->ip))
 		return;
 
 	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
 	     unwind_next_frame(&state)) {
 		addr = unwind_get_return_address(&state);
-		if (!addr || !consume_entry(cookie, addr, false))
+		if (!addr || !consume_entry(cookie, addr))
 			break;
 	}
 }
@@ -72,7 +72,7 @@
 		if (!addr)
 			return -EINVAL;
 
-		if (!consume_entry(cookie, addr, false))
+		if (!consume_entry(cookie, addr))
 			return -EINVAL;
 	}
 
@@ -114,7 +114,7 @@
 {
 	const void __user *fp = (const void __user *)regs->bp;
 
-	if (!consume_entry(cookie, regs->ip, false))
+	if (!consume_entry(cookie, regs->ip))
 		return;
 
 	while (1) {
@@ -128,7 +128,7 @@
 			break;
 		if (!frame.ret_addr)
 			break;
-		if (!consume_entry(cookie, frame.ret_addr, false))
+		if (!consume_entry(cookie, frame.ret_addr))
 			break;
 		fp = frame.next_fp;
 	}
+1 -4
include/linux/stacktrace.h
@@ -29,14 +29,11 @@
  * stack_trace_consume_fn - Callback for arch_stack_walk()
  * @cookie: Caller supplied pointer handed back by arch_stack_walk()
  * @addr: The stack entry address to consume
- * @reliable: True when the stack entry is reliable. Required by
- *	      some printk based consumers.
  *
  * Return: True, if the entry was consumed or skipped
  *         False, if there is no space left to store
  */
-typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr,
-				       bool reliable);
+typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr);
 /**
  * arch_stack_walk - Architecture specific function to walk the stack
  * @consume_entry: Callback which is invoked by the architecture code for
+3 -5
kernel/stacktrace.c
@@ -78,8 +78,7 @@
 	unsigned int len;
 };
 
-static bool stack_trace_consume_entry(void *cookie, unsigned long addr,
-				      bool reliable)
+static bool stack_trace_consume_entry(void *cookie, unsigned long addr)
 {
 	struct stacktrace_cookie *c = cookie;
 
@@ -93,12 +94,11 @@
 	return c->len < c->size;
 }
 
-static bool stack_trace_consume_entry_nosched(void *cookie, unsigned long addr,
-					      bool reliable)
+static bool stack_trace_consume_entry_nosched(void *cookie, unsigned long addr)
 {
 	if (in_sched_functions(addr))
 		return true;
-	return stack_trace_consume_entry(cookie, addr, reliable);
+	return stack_trace_consume_entry(cookie, addr);
 }
 
 /**