Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:

- add support for ftrace-with-registers, which is needed for kgraft and
other ftrace tools

- support for mremap() for the sigpage/vDSO so that checkpoint/restore
can work

- add timestamps to each line of the register dump output

- remove the unused KTHREAD_SIZE from nommu

- align the ARM bitops APIs with the generic API (using unsigned long
pointers rather than void pointers)

- make the configuration of userspace Thumb support an expert option so
that we can default it on, and avoid some hard to debug userspace
crashes

* 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
ARM: 8684/1: NOMMU: Remove unused KTHREAD_SIZE definition
ARM: 8683/1: ARM32: Support mremap() for sigpage/vDSO
ARM: 8679/1: bitops: Align prototypes to generic API
ARM: 8678/1: ftrace: Adds support for CONFIG_DYNAMIC_FTRACE_WITH_REGS
ARM: make configuration of userspace Thumb support an expert option
ARM: 8673/1: Fix __show_regs output timestamps

11 files changed, 185 insertions(+), 18 deletions(-)
arch/arm/Kconfig | +1

···
 	select HAVE_DMA_API_DEBUG
 	select HAVE_DMA_CONTIGUOUS if MMU
 	select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32 && MMU
+	select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
 	select HAVE_EXIT_THREAD
 	select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)

arch/arm/include/asm/bitops.h | +4 -4

···
 /*
  * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.
  */
-extern int _find_first_zero_bit_le(const void * p, unsigned size);
-extern int _find_next_zero_bit_le(const void * p, int size, int offset);
+extern int _find_first_zero_bit_le(const unsigned long *p, unsigned size);
+extern int _find_next_zero_bit_le(const unsigned long *p, int size, int offset);
 extern int _find_first_bit_le(const unsigned long *p, unsigned size);
 extern int _find_next_bit_le(const unsigned long *p, int size, int offset);

 /*
  * Big endian assembly bitops.  nr = 0 -> byte 3 bit 0.
  */
-extern int _find_first_zero_bit_be(const void * p, unsigned size);
-extern int _find_next_zero_bit_be(const void * p, int size, int offset);
+extern int _find_first_zero_bit_be(const unsigned long *p, unsigned size);
+extern int _find_next_zero_bit_be(const unsigned long *p, int size, int offset);
 extern int _find_first_bit_be(const unsigned long *p, unsigned size);
 extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
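
The bitops change is purely a prototype alignment: the generic bitmap helpers operate on arrays of unsigned long, so ARM's assembly-backed declarations now use the same pointer type instead of void *. A minimal sketch of a caller written against the generic-style API, assuming kernel context; the bitmap, its size and alloc_slot() are illustrative and not part of the patch:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>

/* illustrative only: a 128-slot allocation map searched with the
 * generic-style helpers, which take const unsigned long * */
static DECLARE_BITMAP(slot_map, 128);

static int alloc_slot(void)
{
	unsigned long bit;

	/* on ARM this now resolves to the same pointer type as the
	 * generic find_first_zero_bit() declaration */
	bit = find_first_zero_bit(slot_map, 128);
	if (bit >= 128)
		return -ENOSPC;

	set_bit(bit, slot_map);
	return bit;
}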

arch/arm/include/asm/ftrace.h | +4

···
 #ifndef _ASM_ARM_FTRACE
 #define _ASM_ARM_FTRACE

+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#endif
+
 #ifdef CONFIG_FUNCTION_TRACER
 #define MCOUNT_ADDR		((unsigned long)(__gnu_mcount_nc))
 #define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */

arch/arm/include/asm/page-nommu.h | -6

···
 #ifndef _ASMARM_PAGE_NOMMU_H
 #define _ASMARM_PAGE_NOMMU_H

-#if !defined(CONFIG_SMALL_TASKS) && PAGE_SHIFT < 13
-#define KTHREAD_SIZE (8192)
-#else
-#define KTHREAD_SIZE PAGE_SIZE
-#endif
-
 #define clear_page(page)	memset((page), 0, PAGE_SIZE)
 #define copy_page(to,from)	memcpy((to), (from), PAGE_SIZE)

arch/arm/kernel/entry-ftrace.S | +100

···
 2:	mcount_exit
 .endm

+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+
+.macro __ftrace_regs_caller
+
+	sub	sp, sp, #8	@ space for PC and CPSR OLD_R0,
+				@ OLD_R0 will overwrite previous LR
+
+	add	ip, sp, #12	@ move in IP the value of SP as it was
+				@ before the push {lr} of the mcount mechanism
+
+	str	lr, [sp, #0]	@ store LR instead of PC
+
+	ldr	lr, [sp, #8]	@ get previous LR
+
+	str	r0, [sp, #8]	@ write r0 as OLD_R0 over previous LR
+
+	stmdb	sp!, {ip, lr}
+	stmdb	sp!, {r0-r11, lr}
+
+	@ stack content at this point:
+	@ 0  4          48   52       56            60  64    68       72
+	@ R0 | R1 | ... | LR | SP + 4 | previous LR | LR | PSR | OLD_R0 |
+
+	mov	r3, sp				@ struct pt_regs*
+
+	ldr	r2, =function_trace_op
+	ldr	r2, [r2]			@ pointer to the current
+						@ function tracing op
+
+	ldr	r1, [sp, #S_LR]			@ lr of instrumented func
+
+	ldr	lr, [sp, #S_PC]			@ get LR
+
+	mcount_adjust_addr	r0, lr		@ instrumented function
+
+	.globl ftrace_regs_call
+ftrace_regs_call:
+	bl	ftrace_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.globl ftrace_graph_regs_call
+ftrace_graph_regs_call:
+	mov	r0, r0
+#endif
+
+	@ pop saved regs
+	ldmia	sp!, {r0-r12}		@ restore r0 through r12
+	ldr	ip, [sp, #8]		@ restore PC
+	ldr	lr, [sp, #4]		@ restore LR
+	ldr	sp, [sp, #0]		@ restore SP
+	mov	pc, ip			@ return
+.endm
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.macro __ftrace_graph_regs_caller
+
+	sub	r0, fp, #4		@ lr of instrumented routine (parent)
+
+	@ called from __ftrace_regs_caller
+	ldr	r1, [sp, #S_PC]		@ instrumented routine (func)
+	mcount_adjust_addr	r1, r1
+
+	mov	r2, fp			@ frame pointer
+	bl	prepare_ftrace_return
+
+	@ pop registers saved in ftrace_regs_caller
+	ldmia	sp!, {r0-r12}		@ restore r0 through r12
+	ldr	ip, [sp, #8]		@ restore PC
+	ldr	lr, [sp, #4]		@ restore LR
+	ldr	sp, [sp, #0]		@ restore SP
+	mov	pc, ip			@ return
+
+.endm
+#endif
+#endif
+
 .macro __ftrace_caller suffix
 	mcount_enter

 	mcount_get_lr	r1			@ lr of instrumented func
 	mcount_adjust_addr	r0, lr		@ instrumented function
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+	ldr	r2, =function_trace_op
+	ldr	r2, [r2]			@ pointer to the current
+						@ function tracing op
+	mov	r3, #0				@ regs is NULL
+#endif

 	.globl ftrace_call\suffix
 ftrace_call\suffix:
···
 	__ftrace_caller
 UNWIND(.fnend)
 ENDPROC(ftrace_caller)
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ENTRY(ftrace_regs_caller)
+UNWIND(.fnstart)
+	__ftrace_regs_caller
+UNWIND(.fnend)
+ENDPROC(ftrace_regs_caller)
+#endif
+
 #endif

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
···
 	__ftrace_graph_caller
 UNWIND(.fnend)
 ENDPROC(ftrace_graph_caller)
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ENTRY(ftrace_graph_regs_caller)
+UNWIND(.fnstart)
+	__ftrace_graph_regs_caller
+UNWIND(.fnend)
+ENDPROC(ftrace_graph_regs_caller)
+#endif
 #endif

 .purgem mcount_enter

arch/arm/kernel/ftrace.c | +37

···
 	ret = ftrace_modify_code(pc, 0, new, false);

+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+	if (!ret) {
+		pc = (unsigned long)&ftrace_regs_call;
+		new = ftrace_call_replace(pc, (unsigned long)func);
+
+		ret = ftrace_modify_code(pc, 0, new, false);
+	}
+#endif
+
 #ifdef CONFIG_OLD_MCOUNT
 	if (!ret) {
 		pc = (unsigned long)&ftrace_call_old;
···
 	unsigned long ip = rec->ip;

 	old = ftrace_nop_replace(rec);
+
 	new = ftrace_call_replace(ip, adjust_address(rec, addr));

 	return ftrace_modify_code(rec->ip, old, new, true);
 }
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+		       unsigned long addr)
+{
+	unsigned long new, old;
+	unsigned long ip = rec->ip;
+
+	old = ftrace_call_replace(ip, adjust_address(rec, old_addr));
+
+	new = ftrace_call_replace(ip, adjust_address(rec, addr));
+
+	return ftrace_modify_code(rec->ip, old, new, true);
+}
+
+#endif

 int ftrace_make_nop(struct module *mod,
 		    struct dyn_ftrace *rec, unsigned long addr)
···
 extern unsigned long ftrace_graph_call;
 extern unsigned long ftrace_graph_call_old;
 extern void ftrace_graph_caller_old(void);
+extern unsigned long ftrace_graph_regs_call;
+extern void ftrace_graph_regs_caller(void);

 static int __ftrace_modify_caller(unsigned long *callsite,
 				  void (*func) (void), bool enable)
···
 	ret = __ftrace_modify_caller(&ftrace_graph_call,
 				     ftrace_graph_caller,
 				     enable);
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+	if (!ret)
+		ret = __ftrace_modify_caller(&ftrace_graph_regs_call,
+					     ftrace_graph_regs_caller,
+					     enable);
+#endif
+
 #ifdef CONFIG_OLD_MCOUNT
 	if (!ret)
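
Together with the entry-ftrace.S trampoline above, this is what lets an ftrace_ops ask for a full register snapshot, the building block for kgraft/livepatch-style redirection. A hedged sketch of such a consumer, assuming an out-of-tree module built for an ARM kernel with CONFIG_DYNAMIC_FTRACE_WITH_REGS enabled; the filtered function and the printout are illustrative, not part of the patch:

#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>

/* with CONFIG_DYNAMIC_FTRACE_WITH_REGS the ftrace_regs_caller
 * trampoline hands the callback a populated struct pt_regs */
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op, struct pt_regs *regs)
{
	if (regs)
		pr_info("traced %pS, lr=%08lx\n", (void *)ip, regs->ARM_lr);
}

static struct ftrace_ops my_ops = {
	.func  = my_trace_func,
	.flags = FTRACE_OPS_FL_SAVE_REGS,	/* request the regs path */
};

static int __init regs_tracer_init(void)
{
	static unsigned char target[] = "vfs_read";	/* illustrative target */
	int ret;

	ret = ftrace_set_filter(&my_ops, target, sizeof(target) - 1, 0);
	if (ret)
		return ret;

	return register_ftrace_function(&my_ops);
}

static void __exit regs_tracer_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(regs_tracer_init);
module_exit(regs_tracer_exit);
MODULE_LICENSE("GPL");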

arch/arm/kernel/process.c | +12 -4

···
 	print_symbol("PC is at %s\n", instruction_pointer(regs));
 	print_symbol("LR is at %s\n", regs->ARM_lr);
-	printk("pc : [<%08lx>]    lr : [<%08lx>]    psr: %08lx\n"
-	       "sp : %08lx  ip : %08lx  fp : %08lx\n",
-		regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
-		regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
+	printk("pc : [<%08lx>]    lr : [<%08lx>]    psr: %08lx\n",
+	       regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr);
+	printk("sp : %08lx  ip : %08lx  fp : %08lx\n",
+	       regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
 	printk("r10: %08lx  r9 : %08lx  r8 : %08lx\n",
 		regs->ARM_r10, regs->ARM_r9,
 		regs->ARM_r8);
···
 static struct page *signal_page;
 extern struct page *get_signal_page(void);

+static int sigpage_mremap(const struct vm_special_mapping *sm,
+		struct vm_area_struct *new_vma)
+{
+	current->mm->context.sigpage = new_vma->vm_start;
+	return 0;
+}
+
 static const struct vm_special_mapping sigpage_mapping = {
 	.name = "[sigpage]",
 	.pages = &signal_page,
+	.mremap = sigpage_mremap,
 };

 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)

arch/arm/kernel/vdso.c | +18

···
 	.pages = &vdso_data_page,
 };

+static int vdso_mremap(const struct vm_special_mapping *sm,
+		struct vm_area_struct *new_vma)
+{
+	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
+	unsigned long vdso_size;
+
+	/* without VVAR page */
+	vdso_size = (vdso_total_pages - 1) << PAGE_SHIFT;
+
+	if (vdso_size != new_size)
+		return -EINVAL;
+
+	current->mm->context.vdso = new_vma->vm_start;
+
+	return 0;
+}
+
 static struct vm_special_mapping vdso_text_mapping __ro_after_init = {
 	.name = "[vdso]",
+	.mremap = vdso_mremap,
 };

 struct elfinfo {
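
With the two .mremap hooks above in place, a checkpoint/restore tool can move the [vdso] (and likewise the [sigpage]) with a plain mremap(2) and the kernel keeps mm->context pointing at the new location, so signal return and vDSO calls keep working. A hedged userspace sketch of the restore-side move; the helper and its error handling are illustrative, and old_addr/len would normally be read from /proc/<pid>/maps:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

/* illustrative: relocate the vDSO text mapping to the address the
 * checkpointed image expects it at */
static int move_vdso(void *old_addr, size_t len, void *new_addr)
{
	void *p;

	p = mremap(old_addr, len, len,
		   MREMAP_MAYMOVE | MREMAP_FIXED, new_addr);
	if (p == MAP_FAILED) {
		perror("mremap [vdso]");
		return -1;
	}

	/* the kernel's vdso_mremap() hook has already updated
	 * context.vdso to new_addr */
	return 0;
}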

arch/arm/mm/Kconfig | +5 -1

···
 	bool

 config ARM_THUMB
-	bool "Support Thumb user binaries" if !CPU_THUMBONLY
+	bool "Support Thumb user binaries" if !CPU_THUMBONLY && EXPERT
 	depends on CPU_THUMB_CAPABLE
 	default y
 	help
···
 	  The Thumb instruction set is a compressed form of the standard ARM
 	  instruction set resulting in smaller binaries at the expense of
 	  slightly less efficient code.
+
+	  If this option is disabled, and you run userspace that switches to
+	  Thumb mode, signal handling will not work correctly, resulting in
+	  segmentation faults or illegal instruction aborts.

 	  If you don't know what this all is, saying Y is a safe choice.

arch/x86/entry/vdso/vma.c | -3

···
 	if (image->size != new_size)
 		return -EINVAL;

-	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
-		return -EFAULT;
-
 	vdso_fix_landing(image, new_vma);
 	current->mm->context.vdso = (void __user *)new_vma->vm_start;

mm/mmap.c | +4

···
 {
 	struct vm_special_mapping *sm = new_vma->vm_private_data;

+	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
+		return -EFAULT;
+
 	if (sm->mremap)
 		return sm->mremap(sm, new_vma);
+
 	return 0;
 }