Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'parisc-4.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull more parisc updates from Helge Deller:

- fix boot failure of the 64-bit kernel. It was broken by the unwind
  optimization commit in the merge window.

- fix 64-bit userspace support (static 64-bit applications only, as
  we don't yet have 64-bit userspace support in glibc).

- consolidate unwind initialization code.

- add machine model description to stack trace.

* 'parisc-4.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
parisc: Add hardware description to stack traces
parisc: Fix boot failure of 64-bit kernel
parisc: Consolidate unwind initialization calls
parisc: Update comments in syscall.S regarding wide userland
parisc: Fix ptraced 64-bit applications to call 64-bit syscalls
parisc: Restore possibility to execute 64-bit applications

+113 -137
+6 -3
arch/parisc/include/asm/elf.h
··· 235 235 #define SET_PERSONALITY(ex) \ 236 236 ({ \ 237 237 set_personality((current->personality & ~PER_MASK) | PER_LINUX); \ 238 + clear_thread_flag(TIF_32BIT); \ 238 239 current->thread.map_base = DEFAULT_MAP_BASE; \ 239 240 current->thread.task_size = DEFAULT_TASK_SIZE; \ 240 241 }) ··· 244 243 245 244 #define COMPAT_SET_PERSONALITY(ex) \ 246 245 ({ \ 247 - set_thread_flag(TIF_32BIT); \ 248 - current->thread.map_base = DEFAULT_MAP_BASE32; \ 249 - current->thread.task_size = DEFAULT_TASK_SIZE32; \ 246 + if ((ex).e_ident[EI_CLASS] == ELFCLASS32) { \ 247 + set_thread_flag(TIF_32BIT); \ 248 + current->thread.map_base = DEFAULT_MAP_BASE32; \ 249 + current->thread.task_size = DEFAULT_TASK_SIZE32; \ 250 + } else clear_thread_flag(TIF_32BIT); \ 250 251 }) 251 252 252 253 /*
-9
arch/parisc/include/asm/linkage.h
··· 22 22 name: ASM_NL\ 23 23 .export name 24 24 25 - #ifdef CONFIG_64BIT 26 - #define ENDPROC(name) \ 27 - END(name) 28 - #else 29 - #define ENDPROC(name) \ 30 - .type name, @function !\ 31 - END(name) 32 - #endif 33 - 34 25 #define ENTRY_CFI(name, ...) \ 35 26 ENTRY(name) ASM_NL\ 36 27 .proc ASM_NL\
+1 -5
arch/parisc/include/asm/processor.h
··· 256 256 * it in here from the current->personality 257 257 */ 258 258 259 - #ifdef CONFIG_64BIT 260 - #define USER_WIDE_MODE (!test_thread_flag(TIF_32BIT)) 261 - #else 262 - #define USER_WIDE_MODE 0 263 - #endif 259 + #define USER_WIDE_MODE (!is_32bit_task()) 264 260 265 261 #define start_thread(regs, new_pc, new_sp) do { \ 266 262 elf_addr_t *sp = (elf_addr_t *)new_sp; \
+3 -1
arch/parisc/include/asm/traps.h
··· 2 2 #ifndef __ASM_TRAPS_H 3 3 #define __ASM_TRAPS_H 4 4 5 - #ifdef __KERNEL__ 5 + #define PARISC_ITLB_TRAP 6 /* defined by architecture. Do not change. */ 6 + 7 + #if !defined(__ASSEMBLY__) 6 8 struct pt_regs; 7 9 8 10 /* traps.c */
+4 -2
arch/parisc/include/asm/unwind.h
··· 73 73 74 74 void unwind_frame_init(struct unwind_frame_info *info, struct task_struct *t, 75 75 struct pt_regs *regs); 76 - void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, struct task_struct *t); 77 - void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs); 76 + void unwind_frame_init_from_blocked_task(struct unwind_frame_info *info, 77 + struct task_struct *t); 78 + void unwind_frame_init_task(struct unwind_frame_info *info, 79 + struct task_struct *task, struct pt_regs *regs); 78 80 int unwind_once(struct unwind_frame_info *info); 79 81 int unwind_to_user(struct unwind_frame_info *info); 80 82
+39 -40
arch/parisc/kernel/entry.S
··· 36 36 #include <asm/signal.h> 37 37 #include <asm/unistd.h> 38 38 #include <asm/ldcw.h> 39 + #include <asm/traps.h> 39 40 #include <asm/thread_info.h> 40 41 41 42 #include <linux/linkage.h> ··· 693 692 def 3 694 693 extint 4 695 694 def 5 696 - itlb_20 6 695 + itlb_20 PARISC_ITLB_TRAP 697 696 def 7 698 697 def 8 699 698 def 9 ··· 736 735 def 3 737 736 extint 4 738 737 def 5 739 - itlb_11 6 738 + itlb_11 PARISC_ITLB_TRAP 740 739 def 7 741 740 def 8 742 741 def 9 ··· 777 776 * copy_thread moved args into task save area. 778 777 */ 779 778 780 - ENTRY_CFI(ret_from_kernel_thread) 779 + ENTRY(ret_from_kernel_thread) 781 780 /* Call schedule_tail first though */ 782 781 BL schedule_tail, %r2 783 782 nop ··· 792 791 copy %r31, %r2 793 792 b finish_child_return 794 793 nop 795 - ENDPROC_CFI(ret_from_kernel_thread) 794 + END(ret_from_kernel_thread) 796 795 797 796 798 797 /* ··· 816 815 LDREG TASK_THREAD_INFO(%r25), %r25 817 816 bv %r0(%r2) 818 817 mtctl %r25,%cr30 819 - ENDPROC_CFI(_switch_to) 820 818 821 - ENTRY_CFI(_switch_to_ret) 819 + ENTRY(_switch_to_ret) 822 820 mtctl %r0, %cr0 /* Needed for single stepping */ 823 821 callee_rest 824 822 callee_rest_float ··· 825 825 LDREG -RP_OFFSET(%r30), %r2 826 826 bv %r0(%r2) 827 827 copy %r26, %r28 828 - ENDPROC_CFI(_switch_to_ret) 828 + ENDPROC_CFI(_switch_to) 829 829 830 830 /* 831 831 * Common rfi return path for interruptions, kernel execve, and ··· 886 886 STREG %r19,PT_SR5(%r16) 887 887 STREG %r19,PT_SR6(%r16) 888 888 STREG %r19,PT_SR7(%r16) 889 - ENDPROC_CFI(syscall_exit_rfi) 890 889 891 - ENTRY_CFI(intr_return) 890 + ENTRY(intr_return) 892 891 /* check for reschedule */ 893 892 mfctl %cr30,%r1 894 893 LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */ 895 894 bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */ 896 - ENDPROC_CFI(intr_return) 897 895 898 896 .import do_notify_resume,code 899 897 intr_check_sig: ··· 1047 1049 1048 1050 b do_cpu_irq_mask 1049 1051 ldo R%intr_return(%r2), %r2 /* return to 
intr_return, not here */ 1052 + ENDPROC_CFI(syscall_exit_rfi) 1050 1053 1051 1054 1052 1055 /* Generic interruptions (illegal insn, unaligned, page fault, etc) */ ··· 1067 1068 save_specials %r29 1068 1069 1069 1070 /* If this trap is a itlb miss, skip saving/adjusting isr/ior */ 1070 - 1071 - /* 1072 - * FIXME: 1) Use a #define for the hardwired "6" below (and in 1073 - * traps.c. 1074 - * 2) Once we start executing code above 4 Gb, we need 1075 - * to adjust iasq/iaoq here in the same way we 1076 - * adjust isr/ior below. 1077 - */ 1078 - 1079 - cmpib,COND(=),n 6,%r26,skip_save_ior 1071 + cmpib,COND(=),n PARISC_ITLB_TRAP,%r26,skip_save_ior 1080 1072 1081 1073 1082 - mfctl %cr20, %r16 /* isr */ 1074 + mfctl %isr, %r16 1083 1075 nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */ 1084 - mfctl %cr21, %r17 /* ior */ 1076 + mfctl %ior, %r17 1085 1077 1086 1078 1087 1079 #ifdef CONFIG_64BIT ··· 1084 1094 extrd,u,*<> %r8,PSW_W_BIT,1,%r0 1085 1095 depdi 0,1,2,%r17 1086 1096 1087 - /* 1088 - * FIXME: This code has hardwired assumptions about the split 1089 - * between space bits and offset bits. This will change 1090 - * when we allow alternate page sizes. 1091 - */ 1092 - 1093 - /* adjust isr/ior. */ 1094 - extrd,u %r16,63,SPACEID_SHIFT,%r1 /* get high bits from isr for ior */ 1095 - depd %r1,31,SPACEID_SHIFT,%r17 /* deposit them into ior */ 1096 - depdi 0,63,SPACEID_SHIFT,%r16 /* clear them from isr */ 1097 + /* adjust isr/ior: get high bits from isr and deposit in ior */ 1098 + space_adjust %r16,%r17,%r1 1097 1099 #endif 1098 1100 STREG %r16, PT_ISR(%r29) 1099 1101 STREG %r17, PT_IOR(%r29) 1100 1102 1103 + #if 0 && defined(CONFIG_64BIT) 1104 + /* Revisit when we have 64-bit code above 4Gb */ 1105 + b,n intr_save2 1101 1106 1102 1107 skip_save_ior: 1108 + /* We have a itlb miss, and when executing code above 4 Gb on ILP64, we 1109 + * need to adjust iasq/iaoq here in the same way we adjusted isr/ior 1110 + * above. 
1111 + */ 1112 + extrd,u,* %r8,PSW_W_BIT,1,%r1 1113 + cmpib,COND(=),n 1,%r1,intr_save2 1114 + LDREG PT_IASQ0(%r29), %r16 1115 + LDREG PT_IAOQ0(%r29), %r17 1116 + /* adjust iasq/iaoq */ 1117 + space_adjust %r16,%r17,%r1 1118 + STREG %r16, PT_IASQ0(%r29) 1119 + STREG %r17, PT_IAOQ0(%r29) 1120 + #else 1121 + skip_save_ior: 1122 + #endif 1123 + 1124 + intr_save2: 1103 1125 virt_map 1104 1126 save_general %r29 1105 1127 ··· 1749 1747 fork_like vfork 1750 1748 1751 1749 /* Set the return value for the child */ 1752 - ENTRY_CFI(child_return) 1750 + ENTRY(child_return) 1753 1751 BL schedule_tail, %r2 1754 1752 nop 1755 1753 finish_child_return: ··· 1761 1759 reg_restore %r1 1762 1760 b syscall_exit 1763 1761 copy %r0,%r28 1764 - ENDPROC_CFI(child_return) 1762 + END(child_return) 1765 1763 1766 1764 ENTRY_CFI(sys_rt_sigreturn_wrapper) 1767 1765 LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 ··· 1793 1791 LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */ 1794 1792 ENDPROC_CFI(sys_rt_sigreturn_wrapper) 1795 1793 1796 - ENTRY_CFI(syscall_exit) 1794 + ENTRY(syscall_exit) 1797 1795 /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit 1798 1796 * via syscall_exit_rfi if the signal was received while the process 1799 1797 * was running. 
··· 1992 1990 #else 1993 1991 nop 1994 1992 #endif 1995 - ENDPROC_CFI(syscall_exit) 1993 + END(syscall_exit) 1996 1994 1997 1995 1998 1996 #ifdef CONFIG_FUNCTION_TRACER 1999 1997 2000 1998 .import ftrace_function_trampoline,code 2001 1999 .align L1_CACHE_BYTES 2002 - .globl mcount 2003 - .type mcount, @function 2004 2000 ENTRY_CFI(mcount, caller) 2005 2001 _mcount: 2006 2002 .export _mcount,data ··· 2027 2027 2028 2028 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 2029 2029 .align 8 2030 - .globl return_to_handler 2031 - .type return_to_handler, @function 2032 2030 ENTRY_CFI(return_to_handler, caller,frame=FRAME_SIZE) 2033 2031 .export parisc_return_to_handler,data 2034 2032 parisc_return_to_handler: ··· 2076 2078 /* void call_on_stack(unsigned long param1, void *func, 2077 2079 unsigned long new_stack) */ 2078 2080 ENTRY_CFI(call_on_stack, FRAME=2*FRAME_SIZE,CALLS,SAVE_RP,SAVE_SP) 2081 + ENTRY(_call_on_stack) 2079 2082 copy %sp, %r1 2080 2083 2081 2084 /* Regarding the HPPA calling conventions for function pointers,
+2
arch/parisc/kernel/processor.c
··· 288 288 printk(KERN_INFO "model %s\n", 289 289 boot_cpu_data.pdc.sys_model_name); 290 290 291 + dump_stack_set_arch_desc("%s", boot_cpu_data.pdc.sys_model_name); 292 + 291 293 boot_cpu_data.hversion = boot_cpu_data.pdc.model.hversion; 292 294 boot_cpu_data.sversion = boot_cpu_data.pdc.model.sversion; 293 295
+1 -14
arch/parisc/kernel/stacktrace.c
··· 16 16 { 17 17 struct unwind_frame_info info; 18 18 19 - /* initialize unwind info */ 20 - if (task == current) { 21 - unsigned long sp; 22 - struct pt_regs r; 23 - HERE: 24 - asm volatile ("copy %%r30, %0" : "=r"(sp)); 25 - memset(&r, 0, sizeof(struct pt_regs)); 26 - r.iaoq[0] = (unsigned long)&&HERE; 27 - r.gr[2] = (unsigned long)__builtin_return_address(0); 28 - r.gr[30] = sp; 29 - unwind_frame_init(&info, task, &r); 30 - } else { 31 - unwind_frame_init_from_blocked_task(&info, task); 32 - } 19 + unwind_frame_init_task(&info, task, NULL); 33 20 34 21 /* unwind stack and save entries in stack_trace struct */ 35 22 trace->nr_entries = 0;
-5
arch/parisc/kernel/sys_parisc.c
··· 156 156 int do_color_align, last_mmap; 157 157 struct vm_unmapped_area_info info; 158 158 159 - #ifdef CONFIG_64BIT 160 - /* This should only ever run for 32-bit processes. */ 161 - BUG_ON(!test_thread_flag(TIF_32BIT)); 162 - #endif 163 - 164 159 /* requested length too big for entire address space */ 165 160 if (len > TASK_SIZE) 166 161 return -ENOMEM;
+21 -14
arch/parisc/kernel/syscall.S
··· 108 108 mtsp %r0,%sr6 /* get kernel space into sr6 */ 109 109 110 110 #ifdef CONFIG_64BIT 111 - /* for now we can *always* set the W bit on entry to the syscall 112 - * since we don't support wide userland processes. We could 113 - * also save the current SM other than in r0 and restore it on 114 - * exit from the syscall, and also use that value to know 115 - * whether to do narrow or wide syscalls. -PB 116 - */ 111 + /* Store W bit on entry to the syscall in case it's a wide userland 112 + * process. */ 117 113 ssm PSW_SM_W, %r1 118 114 extrd,u %r1,PSW_W_BIT,1,%r1 119 115 /* sp must be aligned on 4, so deposit the W bit setting into ··· 223 227 or,= %r2,%r2,%r2 224 228 ldo R%sys_call_table64(%r1), %r19 225 229 #else 226 - ldil L%sys_call_table, %r1 227 - ldo R%sys_call_table(%r1), %r19 230 + load32 sys_call_table, %r19 228 231 #endif 229 232 comiclr,>> __NR_Linux_syscalls, %r20, %r0 230 233 b,n .Lsyscall_nosys ··· 326 331 * task->thread.regs.gr[20] above. 327 332 */ 328 333 copy %ret0,%r20 329 - ldil L%sys_call_table,%r1 330 - ldo R%sys_call_table(%r1), %r19 331 334 332 335 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ 333 336 LDREG TI_TASK(%r1), %r1 ··· 346 353 cmpib,COND(=),n -1,%r20,tracesys_exit /* seccomp may have returned -1 */ 347 354 comiclr,>> __NR_Linux_syscalls, %r20, %r0 348 355 b,n .Ltracesys_nosys 356 + 357 + /* Note! We cannot use the syscall table that is mapped 358 + nearby since the gateway page is mapped execute-only. 
*/ 359 + 360 + #ifdef CONFIG_64BIT 361 + LDREG TASK_PT_GR30(%r1), %r19 /* get users sp back */ 362 + extrd,u %r19,63,1,%r2 /* W hidden in bottom bit */ 363 + 364 + ldil L%sys_call_table, %r1 365 + or,= %r2,%r2,%r2 366 + addil L%(sys_call_table64-sys_call_table), %r1 367 + ldo R%sys_call_table(%r1), %r19 368 + or,= %r2,%r2,%r2 369 + ldo R%sys_call_table64(%r1), %r19 370 + #else 371 + load32 sys_call_table, %r19 372 + #endif 349 373 350 374 LDREGX %r20(%r19), %r19 351 375 ··· 474 464 lws_start: 475 465 476 466 #ifdef CONFIG_64BIT 477 - /* FIXME: If we are a 64-bit kernel just 478 - * turn this on unconditionally. 479 - */ 480 467 ssm PSW_SM_W, %r1 481 468 extrd,u %r1,PSW_W_BIT,1,%r1 482 469 /* sp must be aligned on 4, so deposit the W bit setting into 483 470 * the bottom of sp temporarily */ 484 471 or,ev %r1,%r30,%r30 485 472 486 - /* Clip LWS number to a 32-bit value always */ 473 + /* Clip LWS number to a 32-bit value for 32-bit processes */ 487 474 depdi 0, 31, 32, %r20 488 475 #endif 489 476
+6 -31
arch/parisc/kernel/traps.c
··· 45 45 46 46 #include "../math-emu/math-emu.h" /* for handle_fpe() */ 47 47 48 - static void parisc_show_stack(struct task_struct *task, unsigned long *sp, 48 + static void parisc_show_stack(struct task_struct *task, 49 49 struct pt_regs *regs); 50 50 51 51 static int printbinary(char *buf, unsigned long x, int nbits) ··· 152 152 printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]); 153 153 printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]); 154 154 155 - parisc_show_stack(current, NULL, regs); 155 + parisc_show_stack(current, regs); 156 156 } 157 157 } 158 158 ··· 185 185 printk(KERN_CRIT "\n"); 186 186 } 187 187 188 - static void parisc_show_stack(struct task_struct *task, unsigned long *sp, 188 + static void parisc_show_stack(struct task_struct *task, 189 189 struct pt_regs *regs) 190 190 { 191 191 struct unwind_frame_info info; 192 - struct task_struct *t; 193 192 194 - t = task ? task : current; 195 - if (regs) { 196 - unwind_frame_init(&info, t, regs); 197 - goto show_stack; 198 - } 193 + unwind_frame_init_task(&info, task, regs); 199 194 200 - if (t == current) { 201 - unsigned long sp; 202 - 203 - HERE: 204 - asm volatile ("copy %%r30, %0" : "=r"(sp)); 205 - { 206 - struct pt_regs r; 207 - 208 - memset(&r, 0, sizeof(struct pt_regs)); 209 - r.iaoq[0] = (unsigned long)&&HERE; 210 - r.gr[2] = (unsigned long)__builtin_return_address(0); 211 - r.gr[30] = sp; 212 - 213 - unwind_frame_init(&info, current, &r); 214 - } 215 - } else { 216 - unwind_frame_init_from_blocked_task(&info, t); 217 - } 218 - 219 - show_stack: 220 195 do_show_stack(&info); 221 196 } 222 197 223 198 void show_stack(struct task_struct *t, unsigned long *sp) 224 199 { 225 - return parisc_show_stack(t, sp, NULL); 200 + parisc_show_stack(t, NULL); 226 201 } 227 202 228 203 int is_valid_bugaddr(unsigned long iaoq) ··· 532 557 cpu_lpmc(5, regs); 533 558 return; 534 559 535 - case 6: 560 + case PARISC_ITLB_TRAP: 536 561 /* Instruction TLB miss fault/Instruction page fault */ 537 562 
fault_address = regs->iaoq[0]; 538 563 fault_space = regs->iasq[0];
+30 -13
arch/parisc/kernel/unwind.c
··· 209 209 * We have to use void * instead of a function pointer, because 210 210 * function pointers aren't a pointer to the function on 64-bit. 211 211 * Make them const so the compiler knows they live in .text 212 + * Note: We could use dereference_kernel_function_descriptor() 213 + * instead but we want to keep it simple here. 212 214 */ 213 215 extern void * const handle_interruption; 214 216 extern void * const ret_from_kernel_thread; ··· 218 216 extern void * const intr_return; 219 217 extern void * const _switch_to_ret; 220 218 #ifdef CONFIG_IRQSTACKS 221 - extern void * const call_on_stack; 219 + extern void * const _call_on_stack; 222 220 #endif /* CONFIG_IRQSTACKS */ 223 221 224 222 if (pc == (unsigned long) &handle_interruption) { ··· 253 251 } 254 252 255 253 #ifdef CONFIG_IRQSTACKS 256 - if (pc == (unsigned long) &call_on_stack) { 254 + if (pc == (unsigned long) &_call_on_stack) { 257 255 info->prev_sp = *(unsigned long *)(info->sp - FRAME_SIZE - REG_SZ); 258 256 info->prev_ip = *(unsigned long *)(info->sp - FRAME_SIZE - RP_OFFSET); 259 257 return 1; ··· 405 403 kfree(r2); 406 404 } 407 405 408 - void unwind_frame_init_running(struct unwind_frame_info *info, struct pt_regs *regs) 406 + #define get_parisc_stackpointer() ({ \ 407 + unsigned long sp; \ 408 + __asm__("copy %%r30, %0" : "=r"(sp)); \ 409 + (sp); \ 410 + }) 411 + 412 + void unwind_frame_init_task(struct unwind_frame_info *info, 413 + struct task_struct *task, struct pt_regs *regs) 409 414 { 410 - unwind_frame_init(info, current, regs); 415 + task = task ? 
task : current; 416 + 417 + if (task == current) { 418 + struct pt_regs r; 419 + 420 + if (!regs) { 421 + memset(&r, 0, sizeof(r)); 422 + r.iaoq[0] = _THIS_IP_; 423 + r.gr[2] = _RET_IP_; 424 + r.gr[30] = get_parisc_stackpointer(); 425 + regs = &r; 426 + } 427 + unwind_frame_init(info, task, &r); 428 + } else { 429 + unwind_frame_init_from_blocked_task(info, task); 430 + } 411 431 } 412 432 413 433 int unwind_once(struct unwind_frame_info *next_frame) ··· 466 442 unsigned long return_address(unsigned int level) 467 443 { 468 444 struct unwind_frame_info info; 469 - struct pt_regs r; 470 - unsigned long sp; 471 445 472 446 /* initialize unwind info */ 473 - asm volatile ("copy %%r30, %0" : "=r"(sp)); 474 - memset(&r, 0, sizeof(struct pt_regs)); 475 - r.iaoq[0] = _THIS_IP_; 476 - r.gr[2] = _RET_IP_; 477 - r.gr[30] = sp; 478 - unwind_frame_init(&info, current, &r); 447 + unwind_frame_init_task(&info, current, NULL); 479 448 480 449 /* unwind stack */ 481 - ++level; 450 + level += 2; 482 451 do { 483 452 if (unwind_once(&info) < 0 || info.ip == 0) 484 453 return 0;