Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: stacktrace: rework stack boundary discovery

In subsequent patches we'll want to acquire the stack boundaries
ahead-of-time, and we'll need to be able to acquire the relevant
stack_info regardless of whether we have an object that happens to be on
the stack.

This patch replaces the on_XXX_stack() helpers with stackinfo_get_XXX()
helpers, with the caller being responsible for checking whether an
object is on a relevant stack. For the moment this is moved into the
on_accessible_stack() functions, making these slightly larger;
subsequent patches will remove the on_accessible_stack() functions and
simplify the logic.

The on_irq_stack() and on_task_stack() helpers are kept as these are
used by IRQ entry sequences and stackleak respectively. As they're only
used as predicates, the stack_info pointer parameter is removed in both
cases.

As the on_accessible_stack() functions are always passed a non-NULL info
pointer, these now update info unconditionally. When updating the type
to STACK_TYPE_UNKNOWN, the low/high bounds are also modified, but as
these will not be consumed this should have no adverse effect.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Kalesh Singh <kaleshsingh@google.com>
Reviewed-by: Madhavan T. Venkataraman <madvenka@linux.microsoft.com>
Reviewed-by: Mark Brown <broonie@kernel.org>
Cc: Fuad Tabba <tabba@google.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20220901130646.1316937-7-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>

authored by

Mark Rutland and committed by
Catalin Marinas
d1f684e4 36f9a879

+151 -94
+1 -1
arch/arm64/include/asm/processor.h
··· 410 410 * The top of the current task's task stack 411 411 */ 412 412 #define current_top_of_stack() ((unsigned long)current->stack + THREAD_SIZE) 413 - #define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1, NULL)) 413 + #define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1)) 414 414 415 415 #endif /* __ASSEMBLY__ */ 416 416 #endif /* __ASM_PROCESSOR_H */
+46 -32
arch/arm64/include/asm/stacktrace.h
··· 22 22 23 23 DECLARE_PER_CPU(unsigned long *, irq_stack_ptr); 24 24 25 - static inline bool on_irq_stack(unsigned long sp, unsigned long size, 26 - struct stack_info *info) 25 + static inline struct stack_info stackinfo_get_irq(void) 27 26 { 28 27 unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr); 29 28 unsigned long high = low + IRQ_STACK_SIZE; 30 29 31 - return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info); 30 + return (struct stack_info) { 31 + .low = low, 32 + .high = high, 33 + .type = STACK_TYPE_IRQ, 34 + }; 32 35 } 33 36 34 - static inline bool on_task_stack(const struct task_struct *tsk, 35 - unsigned long sp, unsigned long size, 36 - struct stack_info *info) 37 + static inline bool on_irq_stack(unsigned long sp, unsigned long size) 38 + { 39 + struct stack_info info = stackinfo_get_irq(); 40 + return stackinfo_on_stack(&info, sp, size); 41 + } 42 + 43 + static inline struct stack_info stackinfo_get_task(const struct task_struct *tsk) 37 44 { 38 45 unsigned long low = (unsigned long)task_stack_page(tsk); 39 46 unsigned long high = low + THREAD_SIZE; 40 47 41 - return on_stack(sp, size, low, high, STACK_TYPE_TASK, info); 48 + return (struct stack_info) { 49 + .low = low, 50 + .high = high, 51 + .type = STACK_TYPE_TASK, 52 + }; 53 + } 54 + 55 + static inline bool on_task_stack(const struct task_struct *tsk, 56 + unsigned long sp, unsigned long size) 57 + { 58 + struct stack_info info = stackinfo_get_task(tsk); 59 + return stackinfo_on_stack(&info, sp, size); 42 60 } 43 61 44 62 #ifdef CONFIG_VMAP_STACK 45 63 DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack); 46 64 47 - static inline bool on_overflow_stack(unsigned long sp, unsigned long size, 48 - struct stack_info *info) 65 + static inline struct stack_info stackinfo_get_overflow(void) 49 66 { 50 67 unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack); 51 68 unsigned long high = low + OVERFLOW_STACK_SIZE; 52 69 53 - return on_stack(sp, size, 
low, high, STACK_TYPE_OVERFLOW, info); 70 + return (struct stack_info) { 71 + .low = low, 72 + .high = high, 73 + .type = STACK_TYPE_OVERFLOW, 74 + }; 54 75 } 55 76 #else 56 - static inline bool on_overflow_stack(unsigned long sp, unsigned long size, 57 - struct stack_info *info) 58 - { 59 - return false; 60 - } 77 + #define stackinfo_get_overflow() stackinfo_get_unknown() 61 78 #endif 62 79 63 80 #if defined(CONFIG_ARM_SDE_INTERFACE) && defined(CONFIG_VMAP_STACK) 64 81 DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr); 65 82 DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr); 66 83 67 - static inline bool on_sdei_normal_stack(unsigned long sp, unsigned long size, 68 - struct stack_info *info) 84 + static inline struct stack_info stackinfo_get_sdei_normal(void) 69 85 { 70 86 unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr); 71 87 unsigned long high = low + SDEI_STACK_SIZE; 72 88 73 - return on_stack(sp, size, low, high, STACK_TYPE_SDEI_NORMAL, info); 89 + return (struct stack_info) { 90 + .low = low, 91 + .high = high, 92 + .type = STACK_TYPE_SDEI_NORMAL, 93 + }; 74 94 } 75 95 76 - static inline bool on_sdei_critical_stack(unsigned long sp, unsigned long size, 77 - struct stack_info *info) 96 + static inline struct stack_info stackinfo_get_sdei_critical(void) 78 97 { 79 98 unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr); 80 99 unsigned long high = low + SDEI_STACK_SIZE; 81 100 82 - return on_stack(sp, size, low, high, STACK_TYPE_SDEI_CRITICAL, info); 101 + return (struct stack_info) { 102 + .low = low, 103 + .high = high, 104 + .type = STACK_TYPE_SDEI_CRITICAL, 105 + }; 83 106 } 84 107 #else 85 - static inline bool on_sdei_normal_stack(unsigned long sp, unsigned long size, 86 - struct stack_info *info) 87 - { 88 - return false; 89 - } 90 - 91 - static inline bool on_sdei_critical_stack(unsigned long sp, unsigned long size, 92 - struct stack_info *info) 93 - { 94 - return false; 95 - } 108 + #define 
stackinfo_get_sdei_normal() stackinfo_get_unknown() 109 + #define stackinfo_get_sdei_critical() stackinfo_get_unknown() 96 110 #endif 97 111 98 112 #endif /* __ASM_STACKTRACE_H */
+9 -19
arch/arm64/include/asm/stacktrace/common.h
··· 65 65 struct task_struct *task; 66 66 }; 67 67 68 + static inline struct stack_info stackinfo_get_unknown(void) 69 + { 70 + return (struct stack_info) { 71 + .low = 0, 72 + .high = 0, 73 + .type = STACK_TYPE_UNKNOWN, 74 + }; 75 + } 76 + 68 77 static inline bool stackinfo_on_stack(const struct stack_info *info, 69 78 unsigned long sp, unsigned long size) 70 79 { ··· 82 73 83 74 if (sp < info->low || sp + size < sp || sp + size > info->high) 84 75 return false; 85 - 86 - return true; 87 - } 88 - 89 - static inline bool on_stack(unsigned long sp, unsigned long size, 90 - unsigned long low, unsigned long high, 91 - enum stack_type type, struct stack_info *info) 92 - { 93 - struct stack_info tmp = { 94 - .low = low, 95 - .high = high, 96 - .type = type, 97 - }; 98 - 99 - if (!stackinfo_on_stack(&tmp, sp, size)) 100 - return false; 101 - 102 - if (info) 103 - *info = tmp; 104 76 105 77 return true; 106 78 }
+1 -1
arch/arm64/kernel/ptrace.c
··· 121 121 { 122 122 return ((addr & ~(THREAD_SIZE - 1)) == 123 123 (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) || 124 - on_irq_stack(addr, sizeof(unsigned long), NULL); 124 + on_irq_stack(addr, sizeof(unsigned long)); 125 125 } 126 126 127 127 /**
+40 -21
arch/arm64/kernel/stacktrace.c
··· 67 67 state->pc = thread_saved_pc(task); 68 68 } 69 69 70 - /* 71 - * We can only safely access per-cpu stacks from current in a non-preemptible 72 - * context. 73 - */ 74 70 static bool on_accessible_stack(const struct task_struct *tsk, 75 71 unsigned long sp, unsigned long size, 76 72 struct stack_info *info) 77 73 { 78 - if (info) 79 - info->type = STACK_TYPE_UNKNOWN; 74 + struct stack_info tmp; 80 75 81 - if (on_task_stack(tsk, sp, size, info)) 82 - return true; 76 + tmp = stackinfo_get_task(tsk); 77 + if (stackinfo_on_stack(&tmp, sp, size)) 78 + goto found; 79 + 80 + /* 81 + * We can only safely access per-cpu stacks when unwinding the current 82 + * task in a non-preemptible context. 83 + */ 83 84 if (tsk != current || preemptible()) 84 - return false; 85 - if (on_irq_stack(sp, size, info)) 86 - return true; 87 - if (on_overflow_stack(sp, size, info)) 88 - return true; 85 + goto not_found; 89 86 90 - if (IS_ENABLED(CONFIG_VMAP_STACK) && 91 - IS_ENABLED(CONFIG_ARM_SDE_INTERFACE) && 92 - in_nmi()) { 93 - if (on_sdei_critical_stack(sp, size, info)) 94 - return true; 95 - if (on_sdei_normal_stack(sp, size, info)) 96 - return true; 97 - } 87 + tmp = stackinfo_get_irq(); 88 + if (stackinfo_on_stack(&tmp, sp, size)) 89 + goto found; 98 90 91 + tmp = stackinfo_get_overflow(); 92 + if (stackinfo_on_stack(&tmp, sp, size)) 93 + goto found; 94 + 95 + /* 96 + * We can only safely access SDEI stacks which unwinding the current 97 + * task in an NMI context. 
98 + */ 99 + if (!IS_ENABLED(CONFIG_VMAP_STACK) || 100 + !IS_ENABLED(CONFIG_ARM_SDE_INTERFACE) || 101 + !in_nmi()) 102 + goto not_found; 103 + 104 + tmp = stackinfo_get_sdei_normal(); 105 + if (stackinfo_on_stack(&tmp, sp, size)) 106 + goto found; 107 + 108 + tmp = stackinfo_get_sdei_critical(); 109 + if (stackinfo_on_stack(&tmp, sp, size)) 110 + goto found; 111 + 112 + not_found: 113 + *info = stackinfo_get_unknown(); 99 114 return false; 115 + 116 + found: 117 + *info = tmp; 118 + return true; 100 119 } 101 120 102 121 /*
+27 -10
arch/arm64/kvm/hyp/nvhe/stacktrace.c
··· 39 39 40 40 DEFINE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace); 41 41 42 - static bool on_overflow_stack(unsigned long sp, unsigned long size, 43 - struct stack_info *info) 42 + static struct stack_info stackinfo_get_overflow(void) 44 43 { 45 44 unsigned long low = (unsigned long)this_cpu_ptr(overflow_stack); 46 45 unsigned long high = low + OVERFLOW_STACK_SIZE; 47 46 48 - return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info); 47 + return (struct stack_info) { 48 + .low = low, 49 + .high = high, 50 + .type = STACK_TYPE_OVERFLOW, 51 + }; 49 52 } 50 53 51 - static bool on_hyp_stack(unsigned long sp, unsigned long size, 52 - struct stack_info *info) 54 + static struct stack_info stackinfo_get_hyp(void) 53 55 { 54 56 struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params); 55 57 unsigned long high = params->stack_hyp_va; 56 58 unsigned long low = high - PAGE_SIZE; 57 59 58 - return on_stack(sp, size, low, high, STACK_TYPE_HYP, info); 60 + return (struct stack_info) { 61 + .low = low, 62 + .high = high, 63 + .type = STACK_TYPE_HYP, 64 + }; 59 65 } 60 66 61 67 static bool on_accessible_stack(const struct task_struct *tsk, 62 68 unsigned long sp, unsigned long size, 63 69 struct stack_info *info) 64 70 { 65 - if (info) 66 - info->type = STACK_TYPE_UNKNOWN; 71 + struct stack_info tmp; 67 72 68 - return (on_overflow_stack(sp, size, info) || 69 - on_hyp_stack(sp, size, info)); 73 + tmp = stackinfo_get_overflow(); 74 + if (stackinfo_on_stack(&tmp, sp, size)) 75 + goto found; 76 + 77 + tmp = stackinfo_get_hyp(); 78 + if (stackinfo_on_stack(&tmp, sp, size)) 79 + goto found; 80 + 81 + *info = stackinfo_get_unknown(); 82 + return false; 83 + 84 + found: 85 + *info = tmp; 86 + return true; 70 87 } 71 88 72 89 static int unwind_next(struct unwind_state *state)
+27 -10
arch/arm64/kvm/stacktrace.c
··· 62 62 return true; 63 63 } 64 64 65 - static bool on_overflow_stack(unsigned long sp, unsigned long size, 66 - struct stack_info *info) 65 + static struct stack_info stackinfo_get_overflow(void) 67 66 { 68 67 struct kvm_nvhe_stacktrace_info *stacktrace_info 69 68 = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); 70 69 unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base; 71 70 unsigned long high = low + OVERFLOW_STACK_SIZE; 72 71 73 - return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info); 72 + return (struct stack_info) { 73 + .low = low, 74 + .high = high, 75 + .type = STACK_TYPE_OVERFLOW, 76 + }; 74 77 } 75 78 76 - static bool on_hyp_stack(unsigned long sp, unsigned long size, 77 - struct stack_info *info) 79 + static struct stack_info stackinfo_get_hyp(void) 78 80 { 79 81 struct kvm_nvhe_stacktrace_info *stacktrace_info 80 82 = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info); 81 83 unsigned long low = (unsigned long)stacktrace_info->stack_base; 82 84 unsigned long high = low + PAGE_SIZE; 83 85 84 - return on_stack(sp, size, low, high, STACK_TYPE_HYP, info); 86 + return (struct stack_info) { 87 + .low = low, 88 + .high = high, 89 + .type = STACK_TYPE_HYP, 90 + }; 85 91 } 86 92 87 93 static bool on_accessible_stack(const struct task_struct *tsk, 88 94 unsigned long sp, unsigned long size, 89 95 struct stack_info *info) 90 96 { 91 - if (info) 92 - info->type = STACK_TYPE_UNKNOWN; 97 + struct stack_info tmp; 93 98 94 - return (on_overflow_stack(sp, size, info) || 95 - on_hyp_stack(sp, size, info)); 99 + tmp = stackinfo_get_overflow(); 100 + if (stackinfo_on_stack(&tmp, sp, size)) 101 + goto found; 102 + 103 + tmp = stackinfo_get_hyp(); 104 + if (stackinfo_on_stack(&tmp, sp, size)) 105 + goto found; 106 + 107 + *info = stackinfo_get_unknown(); 108 + return false; 109 + 110 + found: 111 + *info = tmp; 112 + return true; 96 113 } 97 114 98 115 static int unwind_next(struct unwind_state *state)