Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 's390-5.5-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull more s390 updates from Vasily Gorbik:

- Make stack unwinder reliable and suitable for livepatching. Add
unwinder testing module.

- Fixes for CALL_ON_STACK helper used for stack switching.

- Fix unwinding from bpf code.

- Fix getcpu and remove compat support in vdso code.

- Fix address space control registers initialization.

- Save KASLR offset for early dumps.

- Handle new FILTERED_BY_HYPERVISOR reply code in crypto code.

- Minor perf code cleanup and potential memory leak fix.

- Add couple of error messages for corner cases during PCI device
creation.

* tag 's390-5.5-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (33 commits)
s390: remove compat vdso code
s390/livepatch: Implement reliable stack tracing for the consistency model
s390/unwind: add stack pointer alignment sanity checks
s390/unwind: filter out unreliable bogus %r14
s390/unwind: start unwinding from reliable state
s390/test_unwind: add program check context tests
s390/test_unwind: add irq context tests
s390/test_unwind: print verbose unwinding results
s390/test_unwind: add CALL_ON_STACK tests
s390: fix register clobbering in CALL_ON_STACK
s390/test_unwind: require that unwinding ended successfully
s390/unwind: add a test for the internal API
s390/unwind: always inline get_stack_pointer
s390/pci: add error message on device number limit
s390/pci: add error message for UID collision
s390/cpum_sf: Check for SDBT and SDB consistency
s390/cpum_sf: Use TEAR_REG macro consistantly
s390/cpum_sf: Remove unnecessary check for pending SDBs
s390/cpum_sf: Replace function name in debug statements
s390/kaslr: store KASLR offset for early dumps
...

+627 -768
+15 -3
arch/s390/Kconfig
··· 170 170 select HAVE_PERF_EVENTS 171 171 select HAVE_RCU_TABLE_FREE 172 172 select HAVE_REGS_AND_STACK_ACCESS_API 173 + select HAVE_RELIABLE_STACKTRACE 173 174 select HAVE_RSEQ 174 175 select HAVE_SYSCALL_TRACEPOINTS 175 176 select HAVE_VIRT_CPU_ACCOUNTING ··· 426 425 handle system-calls from ELF binaries for 31 bit ESA. This option 427 426 (and some other stuff like libraries and such) is needed for 428 427 executing 31 bit applications. It is safe to say "Y". 429 - 430 - config COMPAT_VDSO 431 - def_bool COMPAT && !CC_IS_CLANG 432 428 433 429 config SYSVIPC_COMPAT 434 430 def_bool y if COMPAT && SYSVIPC ··· 1014 1016 1015 1017 Select this option if you want to run the kernel as a guest under 1016 1018 the KVM hypervisor. 1019 + 1020 + endmenu 1021 + 1022 + menu "Selftests" 1023 + 1024 + config S390_UNWIND_SELFTEST 1025 + def_tristate n 1026 + prompt "Test unwind functions" 1027 + help 1028 + This option enables s390 specific stack unwinder testing kernel 1029 + module. This option is not useful for distributions or general 1030 + kernels, but only for kernel developers working on architecture code. 1031 + 1032 + Say N if you are unsure. 1017 1033 1018 1034 endmenu
-1
arch/s390/Makefile
··· 157 157 158 158 vdso_install: 159 159 $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@ 160 - $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso32 $@ 161 160 162 161 archclean: 163 162 $(Q)$(MAKE) $(clean)=$(boot)
+5
arch/s390/boot/startup.c
··· 170 170 handle_relocs(__kaslr_offset); 171 171 172 172 if (__kaslr_offset) { 173 + /* 174 + * Save KASLR offset for early dumps, before vmcore_info is set. 175 + * Mark as uneven to distinguish from real vmcore_info pointer. 176 + */ 177 + S390_lowcore.vmcore_info = __kaslr_offset | 0x1UL; 173 178 /* Clear non-relocated kernel */ 174 179 if (IS_ENABLED(CONFIG_KERNEL_UNCOMPRESSED)) 175 180 memset(img, 0, vmlinux.image_size);
+1 -1
arch/s390/include/asm/cpu_mf.h
··· 313 313 return (unsigned long *) ret; 314 314 } 315 315 316 - /* Return if the entry in the sample data block table (sdbt) 316 + /* Return true if the entry in the sample data block table (sdbt) 317 317 * is a link to the next sdbt */ 318 318 static inline int is_link_entry(unsigned long *s) 319 319 {
+7
arch/s390/include/asm/perf_event.h
··· 12 12 13 13 #include <linux/perf_event.h> 14 14 #include <linux/device.h> 15 + #include <asm/stacktrace.h> 15 16 16 17 /* Per-CPU flags for PMU states */ 17 18 #define PMU_F_RESERVED 0x1000 ··· 73 72 #define SAMPL_DIAG_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_DIAG_MODE) 74 73 #define SDB_FULL_BLOCKS(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FULL_BLOCKS) 75 74 #define SAMPLE_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE) 75 + 76 + #define perf_arch_fetch_caller_regs(regs, __ip) do { \ 77 + (regs)->psw.addr = (__ip); \ 78 + (regs)->gprs[15] = (unsigned long)__builtin_frame_address(0) - \ 79 + offsetof(struct stack_frame, back_chain); \ 80 + } while (0) 76 81 77 82 #endif /* _ASM_S390_PERF_EVENT_H */
+1 -1
arch/s390/include/asm/processor.h
··· 310 310 /* 311 311 * Function to drop a processor into disabled wait state 312 312 */ 313 - static inline void __noreturn disabled_wait(void) 313 + static __always_inline void __noreturn disabled_wait(void) 314 314 { 315 315 psw_t psw; 316 316
+30 -6
arch/s390/include/asm/stacktrace.h
··· 33 33 return addr >= info->begin && addr + len <= info->end; 34 34 } 35 35 36 - static inline unsigned long get_stack_pointer(struct task_struct *task, 37 - struct pt_regs *regs) 36 + static __always_inline unsigned long get_stack_pointer(struct task_struct *task, 37 + struct pt_regs *regs) 38 38 { 39 39 if (regs) 40 40 return (unsigned long) kernel_stack_pointer(regs); 41 - if (!task || task == current) 41 + if (task == current) 42 42 return current_stack_pointer(); 43 43 return (unsigned long) task->thread.ksp; 44 44 } ··· 61 61 unsigned long back_chain; 62 62 }; 63 63 #endif 64 + 65 + /* 66 + * Unlike current_stack_pointer() which simply returns current value of %r15 67 + * current_frame_address() returns function stack frame address, which matches 68 + * %r15 upon function invocation. It may differ from %r15 later if function 69 + * allocates stack for local variables or new stack frame to call other 70 + * functions. 71 + */ 72 + #define current_frame_address() \ 73 + ((unsigned long)__builtin_frame_address(0) - \ 74 + offsetof(struct stack_frame, back_chain)) 64 75 65 76 #define CALL_ARGS_0() \ 66 77 register unsigned long r2 asm("2") ··· 106 95 107 96 #define CALL_ON_STACK(fn, stack, nr, args...) 
\ 108 97 ({ \ 98 + unsigned long frame = current_frame_address(); \ 109 99 CALL_ARGS_##nr(args); \ 110 100 unsigned long prev; \ 111 101 \ 112 102 asm volatile( \ 113 103 " la %[_prev],0(15)\n" \ 114 - " la 15,0(%[_stack])\n" \ 115 - " stg %[_prev],%[_bc](15)\n" \ 104 + " lg 15,%[_stack]\n" \ 105 + " stg %[_frame],%[_bc](15)\n" \ 116 106 " brasl 14,%[_fn]\n" \ 117 107 " la 15,0(%[_prev])\n" \ 118 108 : [_prev] "=&a" (prev), CALL_FMT_##nr \ 119 - [_stack] "a" (stack), \ 109 + [_stack] "R" (stack), \ 120 110 [_bc] "i" (offsetof(struct stack_frame, back_chain)), \ 111 + [_frame] "d" (frame), \ 121 112 [_fn] "X" (fn) : CALL_CLOBBER_##nr); \ 122 113 r2; \ 114 + }) 115 + 116 + #define CALL_ON_STACK_NORETURN(fn, stack) \ 117 + ({ \ 118 + asm volatile( \ 119 + " la 15,0(%[_stack])\n" \ 120 + " xc %[_bc](8,15),%[_bc](15)\n" \ 121 + " brasl 14,%[_fn]\n" \ 122 + ::[_bc] "i" (offsetof(struct stack_frame, back_chain)), \ 123 + [_stack] "a" (stack), [_fn] "X" (fn)); \ 124 + BUG(); \ 123 125 }) 124 126 125 127 #endif /* _ASM_S390_STACKTRACE_H */
+4 -4
arch/s390/include/asm/unwind.h
··· 35 35 struct task_struct *task; 36 36 struct pt_regs *regs; 37 37 unsigned long sp, ip; 38 - bool reuse_sp; 39 38 int graph_idx; 40 39 bool reliable; 41 40 bool error; ··· 58 59 static inline void unwind_start(struct unwind_state *state, 59 60 struct task_struct *task, 60 61 struct pt_regs *regs, 61 - unsigned long sp) 62 + unsigned long first_frame) 62 63 { 63 - sp = sp ? : get_stack_pointer(task, regs); 64 - __unwind_start(state, task, regs, sp); 64 + task = task ?: current; 65 + first_frame = first_frame ?: get_stack_pointer(task, regs); 66 + __unwind_start(state, task, regs, first_frame); 65 67 } 66 68 67 69 static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
+11 -2
arch/s390/include/asm/vdso.h
··· 41 41 struct vdso_per_cpu_data { 42 42 __u64 ectg_timer_base; 43 43 __u64 ectg_user_time; 44 - __u32 cpu_nr; 45 - __u32 node_id; 44 + /* 45 + * Note: node_id and cpu_nr must be at adjacent memory locations. 46 + * VDSO userspace must read both values with a single instruction. 47 + */ 48 + union { 49 + __u64 getcpu_val; 50 + struct { 51 + __u32 node_id; 52 + __u32 cpu_nr; 53 + }; 54 + }; 46 55 }; 47 56 48 57 extern struct vdso_data *vdso_data;
-1
arch/s390/kernel/Makefile
··· 81 81 82 82 # vdso 83 83 obj-y += vdso64/ 84 - obj-$(CONFIG_COMPAT_VDSO) += vdso32/
+1 -2
arch/s390/kernel/asm-offsets.c
··· 78 78 OFFSET(__VDSO_TS_END, vdso_data, ts_end); 79 79 OFFSET(__VDSO_ECTG_BASE, vdso_per_cpu_data, ectg_timer_base); 80 80 OFFSET(__VDSO_ECTG_USER, vdso_per_cpu_data, ectg_user_time); 81 - OFFSET(__VDSO_CPU_NR, vdso_per_cpu_data, cpu_nr); 82 - OFFSET(__VDSO_NODE_ID, vdso_per_cpu_data, node_id); 81 + OFFSET(__VDSO_GETCPU_VAL, vdso_per_cpu_data, getcpu_val); 83 82 BLANK(); 84 83 /* constants used by the vdso */ 85 84 DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME);
+4 -3
arch/s390/kernel/dumpstack.c
··· 38 38 return "unknown"; 39 39 } 40 40 } 41 + EXPORT_SYMBOL_GPL(stack_type_name); 41 42 42 43 static inline bool in_stack(unsigned long sp, struct stack_info *info, 43 44 enum stack_type type, unsigned long low, ··· 94 93 if (!sp) 95 94 goto unknown; 96 95 97 - task = task ? : current; 96 + /* Sanity check: ABI requires SP to be aligned 8 bytes. */ 97 + if (sp & 0x7) 98 + goto unknown; 98 99 99 100 /* Check per-task stack */ 100 101 if (in_task_stack(sp, task, info)) ··· 131 128 struct unwind_state state; 132 129 133 130 printk("Call Trace:\n"); 134 - if (!task) 135 - task = current; 136 131 unwind_for_each_frame(&state, task, NULL, (unsigned long) stack) 137 132 printk(state.reliable ? " [<%016lx>] %pSR \n" : 138 133 "([<%016lx>] %pSR)\n",
+1 -1
arch/s390/kernel/head64.S
··· 31 31 # 32 32 larl %r14,init_task 33 33 stg %r14,__LC_CURRENT 34 - larl %r15,init_thread_union+THREAD_SIZE-STACK_FRAME_OVERHEAD 34 + larl %r15,init_thread_union+THREAD_SIZE-STACK_FRAME_OVERHEAD-__PT_SIZE 35 35 #ifdef CONFIG_KASAN 36 36 brasl %r14,kasan_early_init 37 37 #endif
+3 -1
arch/s390/kernel/machine_kexec.c
··· 164 164 #ifdef CONFIG_CRASH_DUMP 165 165 int rc; 166 166 167 + preempt_disable(); 167 168 rc = CALL_ON_STACK(do_start_kdump, S390_lowcore.nodat_stack, 1, image); 169 + preempt_enable(); 168 170 return rc == 0; 169 171 #else 170 172 return false; ··· 256 254 VMCOREINFO_SYMBOL(lowcore_ptr); 257 255 VMCOREINFO_SYMBOL(high_memory); 258 256 VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS); 259 - mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note()); 260 257 vmcoreinfo_append_str("SDMA=%lx\n", __sdma); 261 258 vmcoreinfo_append_str("EDMA=%lx\n", __edma); 262 259 vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset()); 260 + mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note()); 263 261 } 264 262 265 263 void machine_shutdown(void)
+73 -63
arch/s390/kernel/perf_cpum_sf.c
··· 156 156 } 157 157 } 158 158 159 - debug_sprintf_event(sfdbg, 5, "%s freed sdbt %p\n", __func__, 160 - sfb->sdbt); 159 + debug_sprintf_event(sfdbg, 5, "%s: freed sdbt %#lx\n", __func__, 160 + (unsigned long)sfb->sdbt); 161 161 memset(sfb, 0, sizeof(*sfb)); 162 162 } 163 163 ··· 193 193 unsigned long num_sdb, gfp_t gfp_flags) 194 194 { 195 195 int i, rc; 196 - unsigned long *new, *tail; 196 + unsigned long *new, *tail, *tail_prev = NULL; 197 197 198 198 if (!sfb->sdbt || !sfb->tail) 199 199 return -EINVAL; ··· 213 213 */ 214 214 if (sfb->sdbt != get_next_sdbt(tail)) { 215 215 debug_sprintf_event(sfdbg, 3, "%s: " 216 - "sampling buffer is not linked: origin %p" 217 - " tail %p\n", __func__, 218 - (void *)sfb->sdbt, (void *)tail); 216 + "sampling buffer is not linked: origin %#lx" 217 + " tail %#lx\n", __func__, 218 + (unsigned long)sfb->sdbt, 219 + (unsigned long)tail); 219 220 return -EINVAL; 220 221 } 221 222 ··· 233 232 sfb->num_sdbt++; 234 233 /* Link current page to tail of chain */ 235 234 *tail = (unsigned long)(void *) new + 1; 235 + tail_prev = tail; 236 236 tail = new; 237 237 } 238 238 ··· 243 241 * issue, a new realloc call (if required) might succeed. 244 242 */ 245 243 rc = alloc_sample_data_block(tail, gfp_flags); 246 - if (rc) 244 + if (rc) { 245 + /* Undo last SDBT. An SDBT with no SDB at its first 246 + * entry but with an SDBT entry instead can not be 247 + * handled by the interrupt handler code. 248 + * Avoid this situation. 
249 + */ 250 + if (tail_prev) { 251 + sfb->num_sdbt--; 252 + free_page((unsigned long) new); 253 + tail = tail_prev; 254 + } 247 255 break; 256 + } 248 257 sfb->num_sdb++; 249 258 tail++; 259 + tail_prev = new = NULL; /* Allocated at least one SBD */ 250 260 } 251 261 252 262 /* Link sampling buffer to its origin */ 253 263 *tail = (unsigned long) sfb->sdbt + 1; 254 264 sfb->tail = tail; 255 265 256 - debug_sprintf_event(sfdbg, 4, "realloc_sampling_buffer: new buffer" 257 - " settings: sdbt %lu sdb %lu\n", 266 + debug_sprintf_event(sfdbg, 4, "%s: new buffer" 267 + " settings: sdbt %lu sdb %lu\n", __func__, 258 268 sfb->num_sdbt, sfb->num_sdb); 259 269 return rc; 260 270 } ··· 306 292 rc = realloc_sampling_buffer(sfb, num_sdb, GFP_KERNEL); 307 293 if (rc) { 308 294 free_sampling_buffer(sfb); 309 - debug_sprintf_event(sfdbg, 4, "alloc_sampling_buffer: " 310 - "realloc_sampling_buffer failed with rc %i\n", rc); 295 + debug_sprintf_event(sfdbg, 4, "%s: " 296 + "realloc_sampling_buffer failed with rc %i\n", 297 + __func__, rc); 311 298 } else 312 299 debug_sprintf_event(sfdbg, 4, 313 - "alloc_sampling_buffer: tear %p dear %p\n", 314 - sfb->sdbt, (void *)*sfb->sdbt); 300 + "%s: tear %#lx dear %#lx\n", __func__, 301 + (unsigned long)sfb->sdbt, (unsigned long)*sfb->sdbt); 315 302 return rc; 316 303 } 317 304 ··· 480 465 if (num) 481 466 sfb_account_allocs(num, hwc); 482 467 483 - debug_sprintf_event(sfdbg, 5, "sfb: overflow: overflow %llu ratio %lu" 484 - " num %lu\n", OVERFLOW_REG(hwc), ratio, num); 468 + debug_sprintf_event(sfdbg, 5, "%s: overflow %llu ratio %lu num %lu\n", 469 + __func__, OVERFLOW_REG(hwc), ratio, num); 485 470 OVERFLOW_REG(hwc) = 0; 486 471 } 487 472 ··· 519 504 */ 520 505 rc = realloc_sampling_buffer(sfb, num, GFP_ATOMIC); 521 506 if (rc) 522 - debug_sprintf_event(sfdbg, 5, "sfb: extend: realloc " 523 - "failed with rc %i\n", rc); 507 + debug_sprintf_event(sfdbg, 5, "%s: realloc failed with rc %i\n", 508 + __func__, rc); 524 509 525 510 if 
(sfb_has_pending_allocs(sfb, hwc)) 526 - debug_sprintf_event(sfdbg, 5, "sfb: extend: " 511 + debug_sprintf_event(sfdbg, 5, "%s: " 527 512 "req %lu alloc %lu remaining %lu\n", 528 - num, sfb->num_sdb - num_old, 513 + __func__, num, sfb->num_sdb - num_old, 529 514 sfb_pending_allocs(sfb, hwc)); 530 515 } 531 516 ··· 613 598 hwc->sample_period = period; 614 599 hwc->last_period = hwc->sample_period; 615 600 local64_set(&hwc->period_left, hwc->sample_period); 616 - } 617 - 618 - static void hw_reset_registers(struct hw_perf_event *hwc, 619 - unsigned long *sdbt_origin) 620 - { 621 - /* (Re)set to first sample-data-block-table */ 622 - TEAR_REG(hwc) = (unsigned long) sdbt_origin; 623 601 } 624 602 625 603 static unsigned long hw_limit_rate(const struct hws_qsi_info_block *si, ··· 706 698 */ 707 699 if (sample_rate_to_freq(si, rate) > 708 700 sysctl_perf_event_sample_rate) { 709 - debug_sprintf_event(sfdbg, 1, 701 + debug_sprintf_event(sfdbg, 1, "%s: " 710 702 "Sampling rate exceeds maximum " 711 - "perf sample rate\n"); 703 + "perf sample rate\n", __func__); 712 704 rate = 0; 713 705 } 714 706 } ··· 753 745 attr->sample_period = rate; 754 746 SAMPL_RATE(hwc) = rate; 755 747 hw_init_period(hwc, SAMPL_RATE(hwc)); 756 - debug_sprintf_event(sfdbg, 4, "__hw_perf_event_init_rate:" 757 - "cpu:%d period:%#llx freq:%d,%#lx\n", event->cpu, 758 - event->attr.sample_period, event->attr.freq, 759 - SAMPLE_FREQ_MODE(hwc)); 748 + debug_sprintf_event(sfdbg, 4, "%s: cpu %d period %#llx freq %d,%#lx\n", 749 + __func__, event->cpu, event->attr.sample_period, 750 + event->attr.freq, SAMPLE_FREQ_MODE(hwc)); 760 751 return 0; 761 752 } 762 753 ··· 958 951 * buffer extents 959 952 */ 960 953 sfb_account_overflows(cpuhw, hwc); 961 - if (sfb_has_pending_allocs(&cpuhw->sfb, hwc)) 962 - extend_sampling_buffer(&cpuhw->sfb, hwc); 954 + extend_sampling_buffer(&cpuhw->sfb, hwc); 963 955 } 964 956 /* Rate may be adjusted with ioctl() */ 965 957 cpuhw->lsctl.interval = SAMPL_RATE(&cpuhw->event->hw); 
··· 979 973 /* Load current program parameter */ 980 974 lpp(&S390_lowcore.lpp); 981 975 982 - debug_sprintf_event(sfdbg, 6, "pmu_enable: es %i cs %i ed %i cd %i " 983 - "interval %#lx tear %p dear %p\n", 976 + debug_sprintf_event(sfdbg, 6, "%s: es %i cs %i ed %i cd %i " 977 + "interval %#lx tear %#lx dear %#lx\n", __func__, 984 978 cpuhw->lsctl.es, cpuhw->lsctl.cs, cpuhw->lsctl.ed, 985 979 cpuhw->lsctl.cd, cpuhw->lsctl.interval, 986 - (void *) cpuhw->lsctl.tear, 987 - (void *) cpuhw->lsctl.dear); 980 + cpuhw->lsctl.tear, cpuhw->lsctl.dear); 988 981 } 989 982 990 983 static void cpumsf_pmu_disable(struct pmu *pmu) ··· 1024 1019 cpuhw->lsctl.dear = si.dear; 1025 1020 } 1026 1021 } else 1027 - debug_sprintf_event(sfdbg, 3, "cpumsf_pmu_disable: " 1028 - "qsi() failed with err %i\n", err); 1022 + debug_sprintf_event(sfdbg, 3, "%s: qsi() failed with err %i\n", 1023 + __func__, err); 1029 1024 1030 1025 cpuhw->flags &= ~PMU_F_ENABLED; 1031 1026 } ··· 1270 1265 sampl_overflow += te->overflow; 1271 1266 1272 1267 /* Timestamps are valid for full sample-data-blocks only */ 1273 - debug_sprintf_event(sfdbg, 6, "%s: sdbt %p " 1268 + debug_sprintf_event(sfdbg, 6, "%s: sdbt %#lx " 1274 1269 "overflow %llu timestamp %#llx\n", 1275 - __func__, sdbt, te->overflow, 1270 + __func__, (unsigned long)sdbt, te->overflow, 1276 1271 (te->f) ? 
trailer_timestamp(te) : 0ULL); 1277 1272 1278 1273 /* Collect all samples from a single sample-data-block and ··· 1317 1312 sampl_overflow, 1 + num_sdb); 1318 1313 if (sampl_overflow || event_overflow) 1319 1314 debug_sprintf_event(sfdbg, 4, "%s: " 1320 - "overflow stats: sample %llu event %llu\n", 1321 - __func__, sampl_overflow, event_overflow); 1315 + "overflows: sample %llu event %llu" 1316 + " total %llu num_sdb %llu\n", 1317 + __func__, sampl_overflow, event_overflow, 1318 + OVERFLOW_REG(hwc), num_sdb); 1322 1319 } 1323 1320 1324 1321 #define AUX_SDB_INDEX(aux, i) ((i) % aux->sfb.num_sdb) ··· 1431 1424 cpuhw->lsctl.tear = base + offset * sizeof(unsigned long); 1432 1425 cpuhw->lsctl.dear = aux->sdb_index[head]; 1433 1426 1434 - debug_sprintf_event(sfdbg, 6, "aux_output_begin: " 1427 + debug_sprintf_event(sfdbg, 6, "%s: " 1435 1428 "head->alert_mark->empty_mark (num_alert, range)" 1436 1429 "[%#lx -> %#lx -> %#lx] (%#lx, %#lx) " 1437 - "tear index %#lx, tear %#lx dear %#lx\n", 1430 + "tear index %#lx, tear %#lx dear %#lx\n", __func__, 1438 1431 aux->head, aux->alert_mark, aux->empty_mark, 1439 1432 AUX_SDB_NUM_ALERT(aux), range, 1440 1433 head / CPUM_SF_SDB_PER_TABLE, ··· 1578 1571 pr_err("The AUX buffer with %lu pages for the " 1579 1572 "diagnostic-sampling mode is full\n", 1580 1573 num_sdb); 1581 - debug_sprintf_event(sfdbg, 1, "AUX buffer used up\n"); 1574 + debug_sprintf_event(sfdbg, 1, 1575 + "%s: AUX buffer used up\n", 1576 + __func__); 1582 1577 break; 1583 1578 } 1584 1579 if (WARN_ON_ONCE(!aux)) ··· 1603 1594 perf_aux_output_end(&cpuhw->handle, size); 1604 1595 pr_err("Sample data caused the AUX buffer with %lu " 1605 1596 "pages to overflow\n", num_sdb); 1606 - debug_sprintf_event(sfdbg, 1, "head %#lx range %#lx " 1607 - "overflow %#llx\n", 1597 + debug_sprintf_event(sfdbg, 1, "%s: head %#lx range %#lx " 1598 + "overflow %#llx\n", __func__, 1608 1599 aux->head, range, overflow); 1609 1600 } else { 1610 1601 size = AUX_SDB_NUM_ALERT(aux) << 
PAGE_SHIFT; 1611 1602 perf_aux_output_end(&cpuhw->handle, size); 1612 - debug_sprintf_event(sfdbg, 6, "head %#lx alert %#lx " 1603 + debug_sprintf_event(sfdbg, 6, "%s: head %#lx alert %#lx " 1613 1604 "already full, try another\n", 1605 + __func__, 1614 1606 aux->head, aux->alert_mark); 1615 1607 } 1616 1608 } 1617 1609 1618 1610 if (done) 1619 - debug_sprintf_event(sfdbg, 6, "aux_reset_buffer: " 1611 + debug_sprintf_event(sfdbg, 6, "%s: aux_reset_buffer " 1620 1612 "[%#lx -> %#lx -> %#lx] (%#lx, %#lx)\n", 1621 - aux->head, aux->alert_mark, aux->empty_mark, 1622 - AUX_SDB_NUM_ALERT(aux), range); 1613 + __func__, aux->head, aux->alert_mark, 1614 + aux->empty_mark, AUX_SDB_NUM_ALERT(aux), 1615 + range); 1623 1616 } 1624 1617 1625 1618 /* ··· 1644 1633 kfree(aux->sdb_index); 1645 1634 kfree(aux); 1646 1635 1647 - debug_sprintf_event(sfdbg, 4, "aux_buffer_free: free " 1648 - "%lu SDBTs\n", num_sdbt); 1636 + debug_sprintf_event(sfdbg, 4, "%s: free " 1637 + "%lu SDBTs\n", __func__, num_sdbt); 1649 1638 } 1650 1639 1651 1640 static void aux_sdb_init(unsigned long sdb) ··· 1753 1742 */ 1754 1743 aux->empty_mark = sfb->num_sdb - 1; 1755 1744 1756 - debug_sprintf_event(sfdbg, 4, "aux_buffer_setup: setup %lu SDBTs" 1757 - " and %lu SDBs\n", 1758 - sfb->num_sdbt, sfb->num_sdb); 1745 + debug_sprintf_event(sfdbg, 4, "%s: setup %lu SDBTs and %lu SDBs\n", 1746 + __func__, sfb->num_sdbt, sfb->num_sdb); 1759 1747 1760 1748 return aux; 1761 1749 ··· 1807 1797 event->attr.sample_period = rate; 1808 1798 SAMPL_RATE(&event->hw) = rate; 1809 1799 hw_init_period(&event->hw, SAMPL_RATE(&event->hw)); 1810 - debug_sprintf_event(sfdbg, 4, "cpumsf_pmu_check_period:" 1811 - "cpu:%d value:%#llx period:%#llx freq:%d\n", 1812 - event->cpu, value, 1800 + debug_sprintf_event(sfdbg, 4, "%s:" 1801 + " cpu %d value %#llx period %#llx freq %d\n", 1802 + __func__, event->cpu, value, 1813 1803 event->attr.sample_period, do_freq); 1814 1804 return 0; 1815 1805 } ··· 1885 1875 if 
(!SAMPL_DIAG_MODE(&event->hw)) { 1886 1876 cpuhw->lsctl.tear = (unsigned long) cpuhw->sfb.sdbt; 1887 1877 cpuhw->lsctl.dear = *(unsigned long *) cpuhw->sfb.sdbt; 1888 - hw_reset_registers(&event->hw, cpuhw->sfb.sdbt); 1878 + TEAR_REG(&event->hw) = (unsigned long) cpuhw->sfb.sdbt; 1889 1879 } 1890 1880 1891 1881 /* Ensure sampling functions are in the disabled state. If disabled, ··· 2040 2030 2041 2031 /* Report measurement alerts only for non-PRA codes */ 2042 2032 if (alert != CPU_MF_INT_SF_PRA) 2043 - debug_sprintf_event(sfdbg, 6, "measurement alert: %#x\n", 2033 + debug_sprintf_event(sfdbg, 6, "%s: alert %#x\n", __func__, 2044 2034 alert); 2045 2035 2046 2036 /* Sampling authorization change request */
+1 -8
arch/s390/kernel/setup.c
··· 355 355 356 356 void __init arch_call_rest_init(void) 357 357 { 358 - struct stack_frame *frame; 359 358 unsigned long stack; 360 359 361 360 stack = stack_alloc(); ··· 367 368 set_task_stack_end_magic(current); 368 369 stack += STACK_INIT_OFFSET; 369 370 S390_lowcore.kernel_stack = stack; 370 - frame = (struct stack_frame *) stack; 371 - memset(frame, 0, sizeof(*frame)); 372 - /* Branch to rest_init on the new stack, never returns */ 373 - asm volatile( 374 - " la 15,0(%[_frame])\n" 375 - " jg rest_init\n" 376 - : : [_frame] "a" (frame)); 371 + CALL_ON_STACK_NORETURN(rest_init, stack); 377 372 } 378 373 379 374 static void __init setup_lowcore_dat_off(void)
+6 -1
arch/s390/kernel/smp.c
··· 262 262 lc->spinlock_index = 0; 263 263 lc->percpu_offset = __per_cpu_offset[cpu]; 264 264 lc->kernel_asce = S390_lowcore.kernel_asce; 265 + lc->user_asce = S390_lowcore.kernel_asce; 265 266 lc->machine_flags = S390_lowcore.machine_flags; 266 267 lc->user_timer = lc->system_timer = 267 268 lc->steal_timer = lc->avg_steal_timer = 0; 268 269 __ctl_store(lc->cregs_save_area, 0, 15); 270 + lc->cregs_save_area[1] = lc->kernel_asce; 271 + lc->cregs_save_area[7] = lc->vdso_asce; 269 272 save_access_regs((unsigned int *) lc->access_regs_save_area); 270 273 memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list, 271 274 sizeof(lc->stfle_fac_list)); ··· 847 844 848 845 S390_lowcore.last_update_clock = get_tod_clock(); 849 846 restore_access_regs(S390_lowcore.access_regs_save_area); 847 + set_cpu_flag(CIF_ASCE_PRIMARY); 848 + set_cpu_flag(CIF_ASCE_SECONDARY); 850 849 cpu_init(); 851 850 preempt_disable(); 852 851 init_cpu_timer(); ··· 876 871 S390_lowcore.restart_source = -1UL; 877 872 __ctl_load(S390_lowcore.cregs_save_area, 0, 15); 878 873 __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT); 879 - CALL_ON_STACK(smp_init_secondary, S390_lowcore.kernel_stack, 0); 874 + CALL_ON_STACK_NORETURN(smp_init_secondary, S390_lowcore.kernel_stack); 880 875 } 881 876 882 877 /* Upping and downing of CPUs */
+43
arch/s390/kernel/stacktrace.c
··· 9 9 #include <linux/stacktrace.h> 10 10 #include <asm/stacktrace.h> 11 11 #include <asm/unwind.h> 12 + #include <asm/kprobes.h> 12 13 13 14 void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, 14 15 struct task_struct *task, struct pt_regs *regs) ··· 22 21 if (!addr || !consume_entry(cookie, addr, false)) 23 22 break; 24 23 } 24 + } 25 + 26 + /* 27 + * This function returns an error if it detects any unreliable features of the 28 + * stack. Otherwise it guarantees that the stack trace is reliable. 29 + * 30 + * If the task is not 'current', the caller *must* ensure the task is inactive. 31 + */ 32 + int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, 33 + void *cookie, struct task_struct *task) 34 + { 35 + struct unwind_state state; 36 + unsigned long addr; 37 + 38 + unwind_for_each_frame(&state, task, NULL, 0) { 39 + if (state.stack_info.type != STACK_TYPE_TASK) 40 + return -EINVAL; 41 + 42 + if (state.regs) 43 + return -EINVAL; 44 + 45 + addr = unwind_get_return_address(&state); 46 + if (!addr) 47 + return -EINVAL; 48 + 49 + #ifdef CONFIG_KPROBES 50 + /* 51 + * Mark stacktraces with kretprobed functions on them 52 + * as unreliable. 53 + */ 54 + if (state.ip == (unsigned long)kretprobe_trampoline) 55 + return -EINVAL; 56 + #endif 57 + 58 + if (!consume_entry(cookie, addr, false)) 59 + return -EINVAL; 60 + } 61 + 62 + /* Check for stack corruption */ 63 + if (unwind_error(&state)) 64 + return -EINVAL; 65 + return 0; 25 66 }
+51 -29
arch/s390/kernel/unwind_bc.c
··· 36 36 return true; 37 37 } 38 38 39 + static inline bool is_task_pt_regs(struct unwind_state *state, 40 + struct pt_regs *regs) 41 + { 42 + return task_pt_regs(state->task) == regs; 43 + } 44 + 39 45 bool unwind_next_frame(struct unwind_state *state) 40 46 { 41 47 struct stack_info *info = &state->stack_info; ··· 52 46 53 47 regs = state->regs; 54 48 if (unlikely(regs)) { 55 - if (state->reuse_sp) { 56 - sp = state->sp; 57 - state->reuse_sp = false; 58 - } else { 59 - sp = READ_ONCE_NOCHECK(regs->gprs[15]); 60 - if (unlikely(outside_of_stack(state, sp))) { 61 - if (!update_stack_info(state, sp)) 62 - goto out_err; 63 - } 64 - } 49 + sp = state->sp; 65 50 sf = (struct stack_frame *) sp; 66 51 ip = READ_ONCE_NOCHECK(sf->gprs[8]); 67 52 reliable = false; 68 53 regs = NULL; 54 + if (!__kernel_text_address(ip)) { 55 + /* skip bogus %r14 */ 56 + state->regs = NULL; 57 + return unwind_next_frame(state); 58 + } 69 59 } else { 70 60 sf = (struct stack_frame *) state->sp; 71 61 sp = READ_ONCE_NOCHECK(sf->back_chain); ··· 78 76 /* No back-chain, look for a pt_regs structure */ 79 77 sp = state->sp + STACK_FRAME_OVERHEAD; 80 78 if (!on_stack(info, sp, sizeof(struct pt_regs))) 81 - goto out_stop; 79 + goto out_err; 82 80 regs = (struct pt_regs *) sp; 83 - if (READ_ONCE_NOCHECK(regs->psw.mask) & PSW_MASK_PSTATE) 81 + if (is_task_pt_regs(state, regs)) 84 82 goto out_stop; 85 83 ip = READ_ONCE_NOCHECK(regs->psw.addr); 84 + sp = READ_ONCE_NOCHECK(regs->gprs[15]); 85 + if (unlikely(outside_of_stack(state, sp))) { 86 + if (!update_stack_info(state, sp)) 87 + goto out_err; 88 + } 86 89 reliable = true; 87 90 } 88 91 } 92 + 93 + /* Sanity check: ABI requires SP to be aligned 8 bytes. 
*/ 94 + if (sp & 0x7) 95 + goto out_err; 89 96 90 97 ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, (void *) sp); 91 98 ··· 114 103 EXPORT_SYMBOL_GPL(unwind_next_frame); 115 104 116 105 void __unwind_start(struct unwind_state *state, struct task_struct *task, 117 - struct pt_regs *regs, unsigned long sp) 106 + struct pt_regs *regs, unsigned long first_frame) 118 107 { 119 108 struct stack_info *info = &state->stack_info; 120 - unsigned long *mask = &state->stack_mask; 121 - bool reliable, reuse_sp; 122 109 struct stack_frame *sf; 123 - unsigned long ip; 110 + unsigned long ip, sp; 124 111 125 112 memset(state, 0, sizeof(*state)); 126 113 state->task = task; ··· 130 121 return; 131 122 } 132 123 124 + /* Get the instruction pointer from pt_regs or the stack frame */ 125 + if (regs) { 126 + ip = regs->psw.addr; 127 + sp = regs->gprs[15]; 128 + } else if (task == current) { 129 + sp = current_frame_address(); 130 + } else { 131 + sp = task->thread.ksp; 132 + } 133 + 133 134 /* Get current stack pointer and initialize stack info */ 134 - if (get_stack_info(sp, task, info, mask) != 0 || 135 - !on_stack(info, sp, sizeof(struct stack_frame))) { 135 + if (!update_stack_info(state, sp)) { 136 136 /* Something is wrong with the stack pointer */ 137 137 info->type = STACK_TYPE_UNKNOWN; 138 138 state->error = true; 139 139 return; 140 140 } 141 141 142 - /* Get the instruction pointer from pt_regs or the stack frame */ 143 - if (regs) { 144 - ip = READ_ONCE_NOCHECK(regs->psw.addr); 145 - reliable = true; 146 - reuse_sp = true; 147 - } else { 148 - sf = (struct stack_frame *) sp; 142 + if (!regs) { 143 + /* Stack frame is within valid stack */ 144 + sf = (struct stack_frame *)sp; 149 145 ip = READ_ONCE_NOCHECK(sf->gprs[8]); 150 - reliable = false; 151 - reuse_sp = false; 152 146 } 153 147 154 148 ip = ftrace_graph_ret_addr(state->task, &state->graph_idx, ip, NULL); ··· 159 147 /* Update unwind state */ 160 148 state->sp = sp; 161 149 state->ip = ip; 162 - 
state->reliable = reliable; 163 - state->reuse_sp = reuse_sp; 150 + state->reliable = true; 151 + 152 + if (!first_frame) 153 + return; 154 + /* Skip through the call chain to the specified starting frame */ 155 + while (!unwind_done(state)) { 156 + if (on_stack(&state->stack_info, first_frame, sizeof(struct stack_frame))) { 157 + if (state->sp >= first_frame) 158 + break; 159 + } 160 + unwind_next_frame(state); 161 + } 164 162 } 165 163 EXPORT_SYMBOL_GPL(__unwind_start);
+3 -39
arch/s390/kernel/vdso.c
··· 29 29 #include <asm/vdso.h> 30 30 #include <asm/facility.h> 31 31 32 - #ifdef CONFIG_COMPAT_VDSO 33 - extern char vdso32_start, vdso32_end; 34 - static void *vdso32_kbase = &vdso32_start; 35 - static unsigned int vdso32_pages; 36 - static struct page **vdso32_pagelist; 37 - #endif 38 - 39 32 extern char vdso64_start, vdso64_end; 40 33 static void *vdso64_kbase = &vdso64_start; 41 34 static unsigned int vdso64_pages; ··· 48 55 49 56 vdso_pagelist = vdso64_pagelist; 50 57 vdso_pages = vdso64_pages; 51 - #ifdef CONFIG_COMPAT_VDSO 52 - if (vma->vm_mm->context.compat_mm) { 53 - vdso_pagelist = vdso32_pagelist; 54 - vdso_pages = vdso32_pages; 55 - } 56 - #endif 57 58 58 59 if (vmf->pgoff >= vdso_pages) 59 60 return VM_FAULT_SIGBUS; ··· 63 76 unsigned long vdso_pages; 64 77 65 78 vdso_pages = vdso64_pages; 66 - #ifdef CONFIG_COMPAT_VDSO 67 - if (vma->vm_mm->context.compat_mm) 68 - vdso_pages = vdso32_pages; 69 - #endif 70 79 71 80 if ((vdso_pages << PAGE_SHIFT) != vma->vm_end - vma->vm_start) 72 81 return -EINVAL; ··· 192 209 if (!vdso_enabled) 193 210 return 0; 194 211 212 + if (is_compat_task()) 213 + return 0; 214 + 195 215 vdso_pages = vdso64_pages; 196 - #ifdef CONFIG_COMPAT_VDSO 197 - mm->context.compat_mm = is_compat_task(); 198 - if (mm->context.compat_mm) 199 - vdso_pages = vdso32_pages; 200 - #endif 201 216 /* 202 217 * vDSO has a problem and was disabled, just don't "enable" it for 203 218 * the process ··· 248 267 int i; 249 268 250 269 vdso_init_data(vdso_data); 251 - #ifdef CONFIG_COMPAT_VDSO 252 - /* Calculate the size of the 32 bit vDSO */ 253 - vdso32_pages = ((&vdso32_end - &vdso32_start 254 - + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1; 255 - 256 - /* Make sure pages are in the correct state */ 257 - vdso32_pagelist = kcalloc(vdso32_pages + 1, sizeof(struct page *), 258 - GFP_KERNEL); 259 - BUG_ON(vdso32_pagelist == NULL); 260 - for (i = 0; i < vdso32_pages - 1; i++) { 261 - struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE); 262 - get_page(pg); 263 
- vdso32_pagelist[i] = pg; 264 - } 265 - vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data); 266 - vdso32_pagelist[vdso32_pages] = NULL; 267 - #endif 268 270 269 271 /* Calculate the size of the 64 bit vDSO */ 270 272 vdso64_pages = ((&vdso64_end - &vdso64_start
-1
arch/s390/kernel/vdso32/.gitignore
··· 1 - vdso32.lds
-66
arch/s390/kernel/vdso32/Makefile
··· 1 - # SPDX-License-Identifier: GPL-2.0 2 - # List of files in the vdso, has to be asm only for now 3 - 4 - KCOV_INSTRUMENT := n 5 - 6 - obj-vdso32 = gettimeofday.o clock_getres.o clock_gettime.o note.o getcpu.o 7 - 8 - # Build rules 9 - 10 - targets := $(obj-vdso32) vdso32.so vdso32.so.dbg 11 - obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32)) 12 - 13 - KBUILD_AFLAGS += -DBUILD_VDSO 14 - KBUILD_CFLAGS += -DBUILD_VDSO 15 - 16 - KBUILD_AFLAGS_31 := $(filter-out -m64,$(KBUILD_AFLAGS)) 17 - KBUILD_AFLAGS_31 += -m31 -s 18 - 19 - KBUILD_CFLAGS_31 := $(filter-out -m64,$(KBUILD_CFLAGS)) 20 - KBUILD_CFLAGS_31 += -m31 -fPIC -shared -fno-common -fno-builtin 21 - KBUILD_CFLAGS_31 += -nostdlib -Wl,-soname=linux-vdso32.so.1 \ 22 - -Wl,--hash-style=both 23 - 24 - $(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_31) 25 - $(targets:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_31) 26 - 27 - obj-y += vdso32_wrapper.o 28 - extra-y += vdso32.lds 29 - CPPFLAGS_vdso32.lds += -P -C -U$(ARCH) 30 - 31 - # Disable gcov profiling, ubsan and kasan for VDSO code 32 - GCOV_PROFILE := n 33 - UBSAN_SANITIZE := n 34 - KASAN_SANITIZE := n 35 - 36 - # Force dependency (incbin is bad) 37 - $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so 38 - 39 - # link rule for the .so file, .lds has to be first 40 - $(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE 41 - $(call if_changed,vdso32ld) 42 - 43 - # strip rule for the .so file 44 - $(obj)/%.so: OBJCOPYFLAGS := -S 45 - $(obj)/%.so: $(obj)/%.so.dbg FORCE 46 - $(call if_changed,objcopy) 47 - 48 - # assembly rules for the .S files 49 - $(obj-vdso32): %.o: %.S FORCE 50 - $(call if_changed_dep,vdso32as) 51 - 52 - # actual build commands 53 - quiet_cmd_vdso32ld = VDSO32L $@ 54 - cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@ 55 - quiet_cmd_vdso32as = VDSO32A $@ 56 - cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $< 57 - 58 - # install commands for the unstripped file 59 - quiet_cmd_vdso_install = INSTALL $@ 60 - 
cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ 61 - 62 - vdso32.so: $(obj)/vdso32.so.dbg 63 - @mkdir -p $(MODLIB)/vdso 64 - $(call cmd,vdso_install) 65 - 66 - vdso_install: vdso32.so
-44
arch/s390/kernel/vdso32/clock_getres.S
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * Userland implementation of clock_getres() for 32 bits processes in a 4 - * s390 kernel for use in the vDSO 5 - * 6 - * Copyright IBM Corp. 2008 7 - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) 8 - */ 9 - #include <asm/vdso.h> 10 - #include <asm/asm-offsets.h> 11 - #include <asm/unistd.h> 12 - #include <asm/dwarf.h> 13 - 14 - .text 15 - .align 4 16 - .globl __kernel_clock_getres 17 - .type __kernel_clock_getres,@function 18 - __kernel_clock_getres: 19 - CFI_STARTPROC 20 - basr %r1,0 21 - la %r1,4f-.(%r1) 22 - chi %r2,__CLOCK_REALTIME 23 - je 0f 24 - chi %r2,__CLOCK_MONOTONIC 25 - je 0f 26 - la %r1,5f-4f(%r1) 27 - chi %r2,__CLOCK_REALTIME_COARSE 28 - je 0f 29 - chi %r2,__CLOCK_MONOTONIC_COARSE 30 - jne 3f 31 - 0: ltr %r3,%r3 32 - jz 2f /* res == NULL */ 33 - 1: l %r0,0(%r1) 34 - xc 0(4,%r3),0(%r3) /* set tp->tv_sec to zero */ 35 - st %r0,4(%r3) /* store tp->tv_usec */ 36 - 2: lhi %r2,0 37 - br %r14 38 - 3: lhi %r1,__NR_clock_getres /* fallback to svc */ 39 - svc 0 40 - br %r14 41 - CFI_ENDPROC 42 - 4: .long __CLOCK_REALTIME_RES 43 - 5: .long __CLOCK_COARSE_RES 44 - .size __kernel_clock_getres,.-__kernel_clock_getres
-179
arch/s390/kernel/vdso32/clock_gettime.S
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * Userland implementation of clock_gettime() for 32 bits processes in a 4 - * s390 kernel for use in the vDSO 5 - * 6 - * Copyright IBM Corp. 2008 7 - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) 8 - */ 9 - #include <asm/vdso.h> 10 - #include <asm/asm-offsets.h> 11 - #include <asm/unistd.h> 12 - #include <asm/dwarf.h> 13 - #include <asm/ptrace.h> 14 - 15 - .text 16 - .align 4 17 - .globl __kernel_clock_gettime 18 - .type __kernel_clock_gettime,@function 19 - __kernel_clock_gettime: 20 - CFI_STARTPROC 21 - ahi %r15,-16 22 - CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 23 - CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD 24 - basr %r5,0 25 - 0: al %r5,21f-0b(%r5) /* get &_vdso_data */ 26 - chi %r2,__CLOCK_REALTIME_COARSE 27 - je 10f 28 - chi %r2,__CLOCK_REALTIME 29 - je 11f 30 - chi %r2,__CLOCK_MONOTONIC_COARSE 31 - je 9f 32 - chi %r2,__CLOCK_MONOTONIC 33 - jne 19f 34 - 35 - /* CLOCK_MONOTONIC */ 36 - 1: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ 37 - tml %r4,0x0001 /* pending update ? 
loop */ 38 - jnz 1b 39 - stcke 0(%r15) /* Store TOD clock */ 40 - lm %r0,%r1,1(%r15) 41 - s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 42 - sl %r1,__VDSO_XTIME_STAMP+4(%r5) 43 - brc 3,2f 44 - ahi %r0,-1 45 - 2: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */ 46 - lr %r2,%r0 47 - l %r0,__VDSO_TK_MULT(%r5) 48 - ltr %r1,%r1 49 - mr %r0,%r0 50 - jnm 3f 51 - a %r0,__VDSO_TK_MULT(%r5) 52 - 3: alr %r0,%r2 53 - al %r0,__VDSO_WTOM_NSEC(%r5) 54 - al %r1,__VDSO_WTOM_NSEC+4(%r5) 55 - brc 12,5f 56 - ahi %r0,1 57 - 5: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ 58 - srdl %r0,0(%r2) /* >> tk->shift */ 59 - l %r2,__VDSO_WTOM_SEC+4(%r5) 60 - cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ 61 - jne 1b 62 - basr %r5,0 63 - 6: ltr %r0,%r0 64 - jnz 7f 65 - cl %r1,20f-6b(%r5) 66 - jl 8f 67 - 7: ahi %r2,1 68 - sl %r1,20f-6b(%r5) 69 - brc 3,6b 70 - ahi %r0,-1 71 - j 6b 72 - 8: st %r2,0(%r3) /* store tp->tv_sec */ 73 - st %r1,4(%r3) /* store tp->tv_nsec */ 74 - lhi %r2,0 75 - ahi %r15,16 76 - CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD 77 - CFI_RESTORE 15 78 - br %r14 79 - 80 - /* CLOCK_MONOTONIC_COARSE */ 81 - CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 82 - CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD 83 - 9: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ 84 - tml %r4,0x0001 /* pending update ? loop */ 85 - jnz 9b 86 - l %r2,__VDSO_WTOM_CRS_SEC+4(%r5) 87 - l %r1,__VDSO_WTOM_CRS_NSEC+4(%r5) 88 - cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ 89 - jne 9b 90 - j 8b 91 - 92 - /* CLOCK_REALTIME_COARSE */ 93 - 10: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ 94 - tml %r4,0x0001 /* pending update ? loop */ 95 - jnz 10b 96 - l %r2,__VDSO_XTIME_CRS_SEC+4(%r5) 97 - l %r1,__VDSO_XTIME_CRS_NSEC+4(%r5) 98 - cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ 99 - jne 10b 100 - j 17f 101 - 102 - /* CLOCK_REALTIME */ 103 - 11: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ 104 - tml %r4,0x0001 /* pending update ? 
loop */ 105 - jnz 11b 106 - stcke 0(%r15) /* Store TOD clock */ 107 - lm %r0,%r1,__VDSO_TS_END(%r5) /* TOD steering end time */ 108 - s %r0,1(%r15) /* no - ts_steering_end */ 109 - sl %r1,5(%r15) 110 - brc 3,22f 111 - ahi %r0,-1 112 - 22: ltr %r0,%r0 /* past end of steering? */ 113 - jm 24f 114 - srdl %r0,15 /* 1 per 2^16 */ 115 - tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? */ 116 - jz 23f 117 - lcr %r0,%r0 /* negative TOD offset */ 118 - lcr %r1,%r1 119 - je 23f 120 - ahi %r0,-1 121 - 23: a %r0,1(%r15) /* add TOD timestamp */ 122 - al %r1,5(%r15) 123 - brc 12,25f 124 - ahi %r0,1 125 - j 25f 126 - 24: lm %r0,%r1,1(%r15) /* load TOD timestamp */ 127 - 25: s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 128 - sl %r1,__VDSO_XTIME_STAMP+4(%r5) 129 - brc 3,12f 130 - ahi %r0,-1 131 - 12: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */ 132 - lr %r2,%r0 133 - l %r0,__VDSO_TK_MULT(%r5) 134 - ltr %r1,%r1 135 - mr %r0,%r0 136 - jnm 13f 137 - a %r0,__VDSO_TK_MULT(%r5) 138 - 13: alr %r0,%r2 139 - al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ 140 - al %r1,__VDSO_XTIME_NSEC+4(%r5) 141 - brc 12,14f 142 - ahi %r0,1 143 - 14: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ 144 - srdl %r0,0(%r2) /* >> tk->shift */ 145 - l %r2,__VDSO_XTIME_SEC+4(%r5) 146 - cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ 147 - jne 11b 148 - basr %r5,0 149 - 15: ltr %r0,%r0 150 - jnz 16f 151 - cl %r1,20f-15b(%r5) 152 - jl 17f 153 - 16: ahi %r2,1 154 - sl %r1,20f-15b(%r5) 155 - brc 3,15b 156 - ahi %r0,-1 157 - j 15b 158 - 17: st %r2,0(%r3) /* store tp->tv_sec */ 159 - st %r1,4(%r3) /* store tp->tv_nsec */ 160 - lhi %r2,0 161 - ahi %r15,16 162 - CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD 163 - CFI_RESTORE 15 164 - br %r14 165 - 166 - /* Fallback to system call */ 167 - CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD+16 168 - CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD 169 - 19: lhi %r1,__NR_clock_gettime 170 - svc 0 171 - ahi %r15,16 172 - CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD 173 - 
CFI_RESTORE 15 174 - br %r14 175 - CFI_ENDPROC 176 - 177 - 20: .long 1000000000 178 - 21: .long _vdso_data - 0b 179 - .size __kernel_clock_gettime,.-__kernel_clock_gettime
-33
arch/s390/kernel/vdso32/getcpu.S
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * Userland implementation of getcpu() for 32 bits processes in a 4 - * s390 kernel for use in the vDSO 5 - * 6 - * Copyright IBM Corp. 2016 7 - * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> 8 - */ 9 - #include <asm/vdso.h> 10 - #include <asm/asm-offsets.h> 11 - #include <asm/dwarf.h> 12 - 13 - .text 14 - .align 4 15 - .globl __kernel_getcpu 16 - .type __kernel_getcpu,@function 17 - __kernel_getcpu: 18 - CFI_STARTPROC 19 - la %r4,0 20 - sacf 256 21 - l %r5,__VDSO_CPU_NR(%r4) 22 - l %r4,__VDSO_NODE_ID(%r4) 23 - sacf 0 24 - ltr %r2,%r2 25 - jz 2f 26 - st %r5,0(%r2) 27 - 2: ltr %r3,%r3 28 - jz 3f 29 - st %r4,0(%r3) 30 - 3: lhi %r2,0 31 - br %r14 32 - CFI_ENDPROC 33 - .size __kernel_getcpu,.-__kernel_getcpu
-103
arch/s390/kernel/vdso32/gettimeofday.S
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * Userland implementation of gettimeofday() for 32 bits processes in a 4 - * s390 kernel for use in the vDSO 5 - * 6 - * Copyright IBM Corp. 2008 7 - * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) 8 - */ 9 - #include <asm/vdso.h> 10 - #include <asm/asm-offsets.h> 11 - #include <asm/unistd.h> 12 - #include <asm/dwarf.h> 13 - #include <asm/ptrace.h> 14 - 15 - .text 16 - .align 4 17 - .globl __kernel_gettimeofday 18 - .type __kernel_gettimeofday,@function 19 - __kernel_gettimeofday: 20 - CFI_STARTPROC 21 - ahi %r15,-16 22 - CFI_ADJUST_CFA_OFFSET 16 23 - CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD 24 - basr %r5,0 25 - 0: al %r5,13f-0b(%r5) /* get &_vdso_data */ 26 - 1: ltr %r3,%r3 /* check if tz is NULL */ 27 - je 2f 28 - mvc 0(8,%r3),__VDSO_TIMEZONE(%r5) 29 - 2: ltr %r2,%r2 /* check if tv is NULL */ 30 - je 10f 31 - l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ 32 - tml %r4,0x0001 /* pending update ? loop */ 33 - jnz 1b 34 - stcke 0(%r15) /* Store TOD clock */ 35 - lm %r0,%r1,__VDSO_TS_END(%r5) /* TOD steering end time */ 36 - s %r0,1(%r15) 37 - sl %r1,5(%r15) 38 - brc 3,14f 39 - ahi %r0,-1 40 - 14: ltr %r0,%r0 /* past end of steering? */ 41 - jm 16f 42 - srdl %r0,15 /* 1 per 2^16 */ 43 - tm __VDSO_TS_DIR+3(%r5),0x01 /* steering direction? 
*/ 44 - jz 15f 45 - lcr %r0,%r0 /* negative TOD offset */ 46 - lcr %r1,%r1 47 - je 15f 48 - ahi %r0,-1 49 - 15: a %r0,1(%r15) /* add TOD timestamp */ 50 - al %r1,5(%r15) 51 - brc 12,17f 52 - ahi %r0,1 53 - j 17f 54 - 16: lm %r0,%r1,1(%r15) /* load TOD timestamp */ 55 - 17: s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 56 - sl %r1,__VDSO_XTIME_STAMP+4(%r5) 57 - brc 3,3f 58 - ahi %r0,-1 59 - 3: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */ 60 - st %r0,0(%r15) 61 - l %r0,__VDSO_TK_MULT(%r5) 62 - ltr %r1,%r1 63 - mr %r0,%r0 64 - jnm 4f 65 - a %r0,__VDSO_TK_MULT(%r5) 66 - 4: al %r0,0(%r15) 67 - al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 68 - al %r1,__VDSO_XTIME_NSEC+4(%r5) 69 - brc 12,5f 70 - ahi %r0,1 71 - 5: mvc 0(4,%r15),__VDSO_XTIME_SEC+4(%r5) 72 - cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ 73 - jne 1b 74 - l %r4,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ 75 - srdl %r0,0(%r4) /* >> tk->shift */ 76 - l %r4,0(%r15) /* get tv_sec from stack */ 77 - basr %r5,0 78 - 6: ltr %r0,%r0 79 - jnz 7f 80 - cl %r1,11f-6b(%r5) 81 - jl 8f 82 - 7: ahi %r4,1 83 - sl %r1,11f-6b(%r5) 84 - brc 3,6b 85 - ahi %r0,-1 86 - j 6b 87 - 8: st %r4,0(%r2) /* store tv->tv_sec */ 88 - ltr %r1,%r1 89 - m %r0,12f-6b(%r5) 90 - jnm 9f 91 - al %r0,12f-6b(%r5) 92 - 9: srl %r0,6 93 - st %r0,4(%r2) /* store tv->tv_usec */ 94 - 10: slr %r2,%r2 95 - ahi %r15,16 96 - CFI_ADJUST_CFA_OFFSET -16 97 - CFI_RESTORE 15 98 - br %r14 99 - CFI_ENDPROC 100 - 11: .long 1000000000 101 - 12: .long 274877907 102 - 13: .long _vdso_data - 0b 103 - .size __kernel_gettimeofday,.-__kernel_gettimeofday
-13
arch/s390/kernel/vdso32/note.S
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text. 4 - * Here we can supply some information useful to userland. 5 - */ 6 - 7 - #include <linux/uts.h> 8 - #include <linux/version.h> 9 - #include <linux/elfnote.h> 10 - 11 - ELFNOTE_START(Linux, 0, "a") 12 - .long LINUX_VERSION_CODE 13 - ELFNOTE_END
-142
arch/s390/kernel/vdso32/vdso32.lds.S
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * This is the infamous ld script for the 32 bits vdso 4 - * library 5 - */ 6 - 7 - #include <asm/page.h> 8 - #include <asm/vdso.h> 9 - 10 - OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390") 11 - OUTPUT_ARCH(s390:31-bit) 12 - ENTRY(_start) 13 - 14 - SECTIONS 15 - { 16 - . = VDSO32_LBASE + SIZEOF_HEADERS; 17 - 18 - .hash : { *(.hash) } :text 19 - .gnu.hash : { *(.gnu.hash) } 20 - .dynsym : { *(.dynsym) } 21 - .dynstr : { *(.dynstr) } 22 - .gnu.version : { *(.gnu.version) } 23 - .gnu.version_d : { *(.gnu.version_d) } 24 - .gnu.version_r : { *(.gnu.version_r) } 25 - 26 - .note : { *(.note.*) } :text :note 27 - 28 - . = ALIGN(16); 29 - .text : { 30 - *(.text .stub .text.* .gnu.linkonce.t.*) 31 - } :text 32 - PROVIDE(__etext = .); 33 - PROVIDE(_etext = .); 34 - PROVIDE(etext = .); 35 - 36 - /* 37 - * Other stuff is appended to the text segment: 38 - */ 39 - .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } 40 - .rodata1 : { *(.rodata1) } 41 - 42 - .dynamic : { *(.dynamic) } :text :dynamic 43 - 44 - .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr 45 - .eh_frame : { KEEP (*(.eh_frame)) } :text 46 - .gcc_except_table : { *(.gcc_except_table .gcc_except_table.*) } 47 - 48 - .rela.dyn ALIGN(8) : { *(.rela.dyn) } 49 - .got ALIGN(8) : { *(.got .toc) } 50 - 51 - _end = .; 52 - PROVIDE(end = .); 53 - 54 - /* 55 - * Stabs debugging sections are here too. 56 - */ 57 - .stab 0 : { *(.stab) } 58 - .stabstr 0 : { *(.stabstr) } 59 - .stab.excl 0 : { *(.stab.excl) } 60 - .stab.exclstr 0 : { *(.stab.exclstr) } 61 - .stab.index 0 : { *(.stab.index) } 62 - .stab.indexstr 0 : { *(.stab.indexstr) } 63 - .comment 0 : { *(.comment) } 64 - 65 - /* 66 - * DWARF debug sections. 67 - * Symbols in the DWARF debugging sections are relative to the 68 - * beginning of the section so we begin them at 0. 
69 - */ 70 - /* DWARF 1 */ 71 - .debug 0 : { *(.debug) } 72 - .line 0 : { *(.line) } 73 - /* GNU DWARF 1 extensions */ 74 - .debug_srcinfo 0 : { *(.debug_srcinfo) } 75 - .debug_sfnames 0 : { *(.debug_sfnames) } 76 - /* DWARF 1.1 and DWARF 2 */ 77 - .debug_aranges 0 : { *(.debug_aranges) } 78 - .debug_pubnames 0 : { *(.debug_pubnames) } 79 - /* DWARF 2 */ 80 - .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } 81 - .debug_abbrev 0 : { *(.debug_abbrev) } 82 - .debug_line 0 : { *(.debug_line) } 83 - .debug_frame 0 : { *(.debug_frame) } 84 - .debug_str 0 : { *(.debug_str) } 85 - .debug_loc 0 : { *(.debug_loc) } 86 - .debug_macinfo 0 : { *(.debug_macinfo) } 87 - /* SGI/MIPS DWARF 2 extensions */ 88 - .debug_weaknames 0 : { *(.debug_weaknames) } 89 - .debug_funcnames 0 : { *(.debug_funcnames) } 90 - .debug_typenames 0 : { *(.debug_typenames) } 91 - .debug_varnames 0 : { *(.debug_varnames) } 92 - /* DWARF 3 */ 93 - .debug_pubtypes 0 : { *(.debug_pubtypes) } 94 - .debug_ranges 0 : { *(.debug_ranges) } 95 - .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } 96 - 97 - . = ALIGN(PAGE_SIZE); 98 - PROVIDE(_vdso_data = .); 99 - 100 - /DISCARD/ : { 101 - *(.note.GNU-stack) 102 - *(.branch_lt) 103 - *(.data .data.* .gnu.linkonce.d.* .sdata*) 104 - *(.bss .sbss .dynbss .dynsbss) 105 - } 106 - } 107 - 108 - /* 109 - * Very old versions of ld do not recognize this name token; use the constant. 110 - */ 111 - #define PT_GNU_EH_FRAME 0x6474e550 112 - 113 - /* 114 - * We must supply the ELF program headers explicitly to get just one 115 - * PT_LOAD segment, and set the flags explicitly to make segments read-only. 116 - */ 117 - PHDRS 118 - { 119 - text PT_LOAD FILEHDR PHDRS FLAGS(5); /* PF_R|PF_X */ 120 - dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ 121 - note PT_NOTE FLAGS(4); /* PF_R */ 122 - eh_frame_hdr PT_GNU_EH_FRAME; 123 - } 124 - 125 - /* 126 - * This controls what symbols we export from the DSO. 
127 - */ 128 - VERSION 129 - { 130 - VDSO_VERSION_STRING { 131 - global: 132 - /* 133 - * Has to be there for the kernel to find 134 - */ 135 - __kernel_gettimeofday; 136 - __kernel_clock_gettime; 137 - __kernel_clock_getres; 138 - __kernel_getcpu; 139 - 140 - local: *; 141 - }; 142 - }
-15
arch/s390/kernel/vdso32/vdso32_wrapper.S
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - #include <linux/init.h> 3 - #include <linux/linkage.h> 4 - #include <asm/page.h> 5 - 6 - __PAGE_ALIGNED_DATA 7 - 8 - .globl vdso32_start, vdso32_end 9 - .balign PAGE_SIZE 10 - vdso32_start: 11 - .incbin "arch/s390/kernel/vdso32/vdso32.so" 12 - .balign PAGE_SIZE 13 - vdso32_end: 14 - 15 - .previous
+1 -3
arch/s390/kernel/vdso64/getcpu.S
··· 16 16 .type __kernel_getcpu,@function 17 17 __kernel_getcpu: 18 18 CFI_STARTPROC 19 - la %r4,0 20 19 sacf 256 21 - l %r5,__VDSO_CPU_NR(%r4) 22 - l %r4,__VDSO_NODE_ID(%r4) 20 + lm %r4,%r5,__VDSO_GETCPU_VAL(%r0) 23 21 sacf 0 24 22 ltgr %r2,%r2 25 23 jz 2f
+3
arch/s390/lib/Makefile
··· 11 11 # Instrumenting memory accesses to __user data (in different address space) 12 12 # produce false positives 13 13 KASAN_SANITIZE_uaccess.o := n 14 + 15 + obj-$(CONFIG_S390_UNWIND_SELFTEST) += test_unwind.o 16 + CFLAGS_test_unwind.o += -fno-optimize-sibling-calls
+347
arch/s390/lib/test_unwind.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Test module for unwind_for_each_frame 4 + */ 5 + 6 + #define pr_fmt(fmt) "test_unwind: " fmt 7 + #include <asm/unwind.h> 8 + #include <linux/completion.h> 9 + #include <linux/kallsyms.h> 10 + #include <linux/kthread.h> 11 + #include <linux/module.h> 12 + #include <linux/string.h> 13 + #include <linux/kprobes.h> 14 + #include <linux/wait.h> 15 + #include <asm/irq.h> 16 + #include <asm/delay.h> 17 + 18 + #define BT_BUF_SIZE (PAGE_SIZE * 4) 19 + 20 + /* 21 + * To avoid printk line limit split backtrace by lines 22 + */ 23 + static void print_backtrace(char *bt) 24 + { 25 + char *p; 26 + 27 + while (true) { 28 + p = strsep(&bt, "\n"); 29 + if (!p) 30 + break; 31 + pr_err("%s\n", p); 32 + } 33 + } 34 + 35 + /* 36 + * Calls unwind_for_each_frame(task, regs, sp) and verifies that the result 37 + * contains unwindme_func2 followed by unwindme_func1. 38 + */ 39 + static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs, 40 + unsigned long sp) 41 + { 42 + int frame_count, prev_is_func2, seen_func2_func1; 43 + const int max_frames = 128; 44 + struct unwind_state state; 45 + size_t bt_pos = 0; 46 + int ret = 0; 47 + char *bt; 48 + 49 + bt = kmalloc(BT_BUF_SIZE, GFP_ATOMIC); 50 + if (!bt) { 51 + pr_err("failed to allocate backtrace buffer\n"); 52 + return -ENOMEM; 53 + } 54 + /* Unwind. */ 55 + frame_count = 0; 56 + prev_is_func2 = 0; 57 + seen_func2_func1 = 0; 58 + unwind_for_each_frame(&state, task, regs, sp) { 59 + unsigned long addr = unwind_get_return_address(&state); 60 + char sym[KSYM_SYMBOL_LEN]; 61 + 62 + if (frame_count++ == max_frames) 63 + break; 64 + if (state.reliable && !addr) { 65 + pr_err("unwind state reliable but addr is 0\n"); 66 + return -EINVAL; 67 + } 68 + sprint_symbol(sym, addr); 69 + if (bt_pos < BT_BUF_SIZE) { 70 + bt_pos += snprintf(bt + bt_pos, BT_BUF_SIZE - bt_pos, 71 + state.reliable ? 
" [%-7s%px] %pSR\n" : 72 + "([%-7s%px] %pSR)\n", 73 + stack_type_name(state.stack_info.type), 74 + (void *)state.sp, (void *)state.ip); 75 + if (bt_pos >= BT_BUF_SIZE) 76 + pr_err("backtrace buffer is too small\n"); 77 + } 78 + frame_count += 1; 79 + if (prev_is_func2 && str_has_prefix(sym, "unwindme_func1")) 80 + seen_func2_func1 = 1; 81 + prev_is_func2 = str_has_prefix(sym, "unwindme_func2"); 82 + } 83 + 84 + /* Check the results. */ 85 + if (unwind_error(&state)) { 86 + pr_err("unwind error\n"); 87 + ret = -EINVAL; 88 + } 89 + if (!seen_func2_func1) { 90 + pr_err("unwindme_func2 and unwindme_func1 not found\n"); 91 + ret = -EINVAL; 92 + } 93 + if (frame_count == max_frames) { 94 + pr_err("Maximum number of frames exceeded\n"); 95 + ret = -EINVAL; 96 + } 97 + if (ret) 98 + print_backtrace(bt); 99 + kfree(bt); 100 + return ret; 101 + } 102 + 103 + /* State of the task being unwound. */ 104 + struct unwindme { 105 + int flags; 106 + int ret; 107 + struct task_struct *task; 108 + struct completion task_ready; 109 + wait_queue_head_t task_wq; 110 + unsigned long sp; 111 + }; 112 + 113 + static struct unwindme *unwindme; 114 + 115 + /* Values of unwindme.flags. */ 116 + #define UWM_DEFAULT 0x0 117 + #define UWM_THREAD 0x1 /* Unwind a separate task. */ 118 + #define UWM_REGS 0x2 /* Pass regs to test_unwind(). */ 119 + #define UWM_SP 0x4 /* Pass sp to test_unwind(). */ 120 + #define UWM_CALLER 0x8 /* Unwind starting from caller. */ 121 + #define UWM_SWITCH_STACK 0x10 /* Use CALL_ON_STACK. */ 122 + #define UWM_IRQ 0x20 /* Unwind from irq context. */ 123 + #define UWM_PGM 0x40 /* Unwind from program check handler. 
*/ 124 + 125 + static __always_inline unsigned long get_psw_addr(void) 126 + { 127 + unsigned long psw_addr; 128 + 129 + asm volatile( 130 + "basr %[psw_addr],0\n" 131 + : [psw_addr] "=d" (psw_addr)); 132 + return psw_addr; 133 + } 134 + 135 + #ifdef CONFIG_KPROBES 136 + static int pgm_pre_handler(struct kprobe *p, struct pt_regs *regs) 137 + { 138 + struct unwindme *u = unwindme; 139 + 140 + u->ret = test_unwind(NULL, (u->flags & UWM_REGS) ? regs : NULL, 141 + (u->flags & UWM_SP) ? u->sp : 0); 142 + return 0; 143 + } 144 + #endif 145 + 146 + /* This function may or may not appear in the backtrace. */ 147 + static noinline int unwindme_func4(struct unwindme *u) 148 + { 149 + if (!(u->flags & UWM_CALLER)) 150 + u->sp = current_frame_address(); 151 + if (u->flags & UWM_THREAD) { 152 + complete(&u->task_ready); 153 + wait_event(u->task_wq, kthread_should_park()); 154 + kthread_parkme(); 155 + return 0; 156 + #ifdef CONFIG_KPROBES 157 + } else if (u->flags & UWM_PGM) { 158 + struct kprobe kp; 159 + int ret; 160 + 161 + unwindme = u; 162 + memset(&kp, 0, sizeof(kp)); 163 + kp.symbol_name = "do_report_trap"; 164 + kp.pre_handler = pgm_pre_handler; 165 + ret = register_kprobe(&kp); 166 + if (ret < 0) { 167 + pr_err("register_kprobe failed %d\n", ret); 168 + return -EINVAL; 169 + } 170 + 171 + /* 172 + * trigger specification exception 173 + */ 174 + asm volatile( 175 + " mvcl %%r1,%%r1\n" 176 + "0: nopr %%r7\n" 177 + EX_TABLE(0b, 0b) 178 + :); 179 + 180 + unregister_kprobe(&kp); 181 + unwindme = NULL; 182 + return u->ret; 183 + #endif 184 + } else { 185 + struct pt_regs regs; 186 + 187 + memset(&regs, 0, sizeof(regs)); 188 + regs.psw.addr = get_psw_addr(); 189 + regs.gprs[15] = current_stack_pointer(); 190 + return test_unwind(NULL, 191 + (u->flags & UWM_REGS) ? &regs : NULL, 192 + (u->flags & UWM_SP) ? u->sp : 0); 193 + } 194 + } 195 + 196 + /* This function may or may not appear in the backtrace. 
*/ 197 + static noinline int unwindme_func3(struct unwindme *u) 198 + { 199 + u->sp = current_frame_address(); 200 + return unwindme_func4(u); 201 + } 202 + 203 + /* This function must appear in the backtrace. */ 204 + static noinline int unwindme_func2(struct unwindme *u) 205 + { 206 + int rc; 207 + 208 + if (u->flags & UWM_SWITCH_STACK) { 209 + preempt_disable(); 210 + rc = CALL_ON_STACK(unwindme_func3, S390_lowcore.nodat_stack, 1, u); 211 + preempt_enable(); 212 + return rc; 213 + } else { 214 + return unwindme_func3(u); 215 + } 216 + } 217 + 218 + /* This function must follow unwindme_func2 in the backtrace. */ 219 + static noinline int unwindme_func1(void *u) 220 + { 221 + return unwindme_func2((struct unwindme *)u); 222 + } 223 + 224 + static void unwindme_irq_handler(struct ext_code ext_code, 225 + unsigned int param32, 226 + unsigned long param64) 227 + { 228 + struct unwindme *u = READ_ONCE(unwindme); 229 + 230 + if (u && u->task == current) { 231 + unwindme = NULL; 232 + u->task = NULL; 233 + u->ret = unwindme_func1(u); 234 + } 235 + } 236 + 237 + static int test_unwind_irq(struct unwindme *u) 238 + { 239 + preempt_disable(); 240 + if (register_external_irq(EXT_IRQ_CLK_COMP, unwindme_irq_handler)) { 241 + pr_info("Couldn't register external interrupt handler"); 242 + return -1; 243 + } 244 + u->task = current; 245 + unwindme = u; 246 + udelay(1); 247 + unregister_external_irq(EXT_IRQ_CLK_COMP, unwindme_irq_handler); 248 + preempt_enable(); 249 + return u->ret; 250 + } 251 + 252 + /* Spawns a task and passes it to test_unwind(). */ 253 + static int test_unwind_task(struct unwindme *u) 254 + { 255 + struct task_struct *task; 256 + int ret; 257 + 258 + /* Initialize thread-related fields. */ 259 + init_completion(&u->task_ready); 260 + init_waitqueue_head(&u->task_wq); 261 + 262 + /* 263 + * Start the task and wait until it reaches unwindme_func4() and sleeps 264 + * in (task_ready, unwind_done] range. 
265 + */ 266 + task = kthread_run(unwindme_func1, u, "%s", __func__); 267 + if (IS_ERR(task)) { 268 + pr_err("kthread_run() failed\n"); 269 + return PTR_ERR(task); 270 + } 271 + /* 272 + * Make sure task reaches unwindme_func4 before parking it, 273 + * we might park it before kthread function has been executed otherwise 274 + */ 275 + wait_for_completion(&u->task_ready); 276 + kthread_park(task); 277 + /* Unwind. */ 278 + ret = test_unwind(task, NULL, (u->flags & UWM_SP) ? u->sp : 0); 279 + kthread_stop(task); 280 + return ret; 281 + } 282 + 283 + static int test_unwind_flags(int flags) 284 + { 285 + struct unwindme u; 286 + 287 + u.flags = flags; 288 + if (u.flags & UWM_THREAD) 289 + return test_unwind_task(&u); 290 + else if (u.flags & UWM_IRQ) 291 + return test_unwind_irq(&u); 292 + else 293 + return unwindme_func1(&u); 294 + } 295 + 296 + static int test_unwind_init(void) 297 + { 298 + int ret = 0; 299 + 300 + #define TEST(flags) \ 301 + do { \ 302 + pr_info("[ RUN ] " #flags "\n"); \ 303 + if (!test_unwind_flags((flags))) { \ 304 + pr_info("[ OK ] " #flags "\n"); \ 305 + } else { \ 306 + pr_err("[ FAILED ] " #flags "\n"); \ 307 + ret = -EINVAL; \ 308 + } \ 309 + } while (0) 310 + 311 + TEST(UWM_DEFAULT); 312 + TEST(UWM_SP); 313 + TEST(UWM_REGS); 314 + TEST(UWM_SWITCH_STACK); 315 + TEST(UWM_SP | UWM_REGS); 316 + TEST(UWM_CALLER | UWM_SP); 317 + TEST(UWM_CALLER | UWM_SP | UWM_REGS); 318 + TEST(UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK); 319 + TEST(UWM_THREAD); 320 + TEST(UWM_THREAD | UWM_SP); 321 + TEST(UWM_THREAD | UWM_CALLER | UWM_SP); 322 + TEST(UWM_IRQ); 323 + TEST(UWM_IRQ | UWM_SWITCH_STACK); 324 + TEST(UWM_IRQ | UWM_SP); 325 + TEST(UWM_IRQ | UWM_REGS); 326 + TEST(UWM_IRQ | UWM_SP | UWM_REGS); 327 + TEST(UWM_IRQ | UWM_CALLER | UWM_SP); 328 + TEST(UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS); 329 + TEST(UWM_IRQ | UWM_CALLER | UWM_SP | UWM_REGS | UWM_SWITCH_STACK); 330 + #ifdef CONFIG_KPROBES 331 + TEST(UWM_PGM); 332 + TEST(UWM_PGM | UWM_SP); 333 + 
TEST(UWM_PGM | UWM_REGS); 334 + TEST(UWM_PGM | UWM_SP | UWM_REGS); 335 + #endif 336 + #undef TEST 337 + 338 + return ret; 339 + } 340 + 341 + static void test_unwind_exit(void) 342 + { 343 + } 344 + 345 + module_init(test_unwind_init); 346 + module_exit(test_unwind_exit); 347 + MODULE_LICENSE("GPL");
+9 -3
arch/s390/mm/maccess.c
··· 119 119 */ 120 120 int memcpy_real(void *dest, void *src, size_t count) 121 121 { 122 - if (S390_lowcore.nodat_stack != 0) 123 - return CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack, 124 - 3, dest, src, count); 122 + int rc; 123 + 124 + if (S390_lowcore.nodat_stack != 0) { 125 + preempt_disable(); 126 + rc = CALL_ON_STACK(_memcpy_real, S390_lowcore.nodat_stack, 3, 127 + dest, src, count); 128 + preempt_enable(); 129 + return rc; 130 + } 125 131 /* 126 132 * This is a really early memcpy_real call, the stacks are 127 133 * not set up yet. Just call _memcpy_real on the early boot
+5
arch/s390/pci/pci.c
··· 27 27 #include <linux/seq_file.h> 28 28 #include <linux/jump_label.h> 29 29 #include <linux/pci.h> 30 + #include <linux/printk.h> 30 31 31 32 #include <asm/isc.h> 32 33 #include <asm/airq.h> ··· 660 659 spin_lock(&zpci_domain_lock); 661 660 if (test_bit(zdev->domain, zpci_domain)) { 662 661 spin_unlock(&zpci_domain_lock); 662 + pr_err("Adding PCI function %08x failed because domain %04x is already assigned\n", 663 + zdev->fid, zdev->domain); 663 664 return -EEXIST; 664 665 } 665 666 set_bit(zdev->domain, zpci_domain); ··· 673 670 zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES); 674 671 if (zdev->domain == ZPCI_NR_DEVICES) { 675 672 spin_unlock(&zpci_domain_lock); 673 + pr_err("Adding PCI function %08x failed because the configured limit of %d is reached\n", 674 + zdev->fid, ZPCI_NR_DEVICES); 676 675 return -ENOSPC; 677 676 } 678 677 set_bit(zdev->domain, zpci_domain);
+2
drivers/s390/crypto/zcrypt_error.h
··· 61 61 #define REP82_ERROR_EVEN_MOD_IN_OPND 0x85 62 62 #define REP82_ERROR_RESERVED_FIELD 0x88 63 63 #define REP82_ERROR_INVALID_DOMAIN_PENDING 0x8A 64 + #define REP82_ERROR_FILTERED_BY_HYPERVISOR 0x8B 64 65 #define REP82_ERROR_TRANSPORT_FAIL 0x90 65 66 #define REP82_ERROR_PACKET_TRUNCATED 0xA0 66 67 #define REP82_ERROR_ZERO_BUFFER_LEN 0xB0 ··· 92 91 case REP82_ERROR_INVALID_DOMAIN_PRECHECK: 93 92 case REP82_ERROR_INVALID_DOMAIN_PENDING: 94 93 case REP82_ERROR_INVALID_SPECIAL_CMD: 94 + case REP82_ERROR_FILTERED_BY_HYPERVISOR: 95 95 // REP88_ERROR_INVALID_KEY // '82' CEX2A 96 96 // REP88_ERROR_OPERAND // '84' CEX2A 97 97 // REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A