Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[S390] ptrace cleanup

Overhaul program event recording and the code dealing with the ptrace
user space interface.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>

authored by

Martin Schwidefsky and committed by
Martin Schwidefsky
5e9a2692 da7f51c1

+346 -262
+2 -1
arch/s390/include/asm/processor.h
··· 81 81 mm_segment_t mm_segment; 82 82 unsigned long prot_addr; /* address of protection-excep. */ 83 83 unsigned int trap_no; 84 - per_struct per_info; 84 + struct per_regs per_user; /* User specified PER registers */ 85 + struct per_event per_event; /* Cause of the last PER trap */ 85 86 /* pfault_wait is used to block the process on a pfault event */ 86 87 unsigned long pfault_wait; 87 88 };
+51 -1
arch/s390/include/asm/ptrace.h
··· 331 331 unsigned short ilc; 332 332 unsigned short svcnr; 333 333 }; 334 + 335 + /* 336 + * Program event recording (PER) register set. 337 + */ 338 + struct per_regs { 339 + unsigned long control; /* PER control bits */ 340 + unsigned long start; /* PER starting address */ 341 + unsigned long end; /* PER ending address */ 342 + }; 343 + 344 + /* 345 + * PER event contains information about the cause of the last PER exception. 346 + */ 347 + struct per_event { 348 + unsigned short cause; /* PER code, ATMID and AI */ 349 + unsigned long address; /* PER address */ 350 + unsigned char paid; /* PER access identification */ 351 + }; 352 + 353 + /* 354 + * Simplified per_info structure used to decode the ptrace user space ABI. 355 + */ 356 + struct per_struct_kernel { 357 + unsigned long cr9; /* PER control bits */ 358 + unsigned long cr10; /* PER starting address */ 359 + unsigned long cr11; /* PER ending address */ 360 + unsigned long bits; /* Obsolete software bits */ 361 + unsigned long starting_addr; /* User specified start address */ 362 + unsigned long ending_addr; /* User specified end address */ 363 + unsigned short perc_atmid; /* PER trap ATMID */ 364 + unsigned long address; /* PER trap instruction address */ 365 + unsigned char access_id; /* PER trap access identification */ 366 + }; 367 + 368 + #define PER_EVENT_MASK 0xE9000000UL 369 + 370 + #define PER_EVENT_BRANCH 0x80000000UL 371 + #define PER_EVENT_IFETCH 0x40000000UL 372 + #define PER_EVENT_STORE 0x20000000UL 373 + #define PER_EVENT_STORE_REAL 0x08000000UL 374 + #define PER_EVENT_NULLIFICATION 0x01000000UL 375 + 376 + #define PER_CONTROL_MASK 0x00a00000UL 377 + 378 + #define PER_CONTROL_BRANCH_ADDRESS 0x00800000UL 379 + #define PER_CONTROL_ALTERATION 0x00200000UL 380 + 334 381 #endif 335 382 336 383 /* 337 - * Now for the program event recording (trace) definitions. 384 + * Now for the user space program event recording (trace) definitions. 
385 + * The following structures are used only for the ptrace interface, don't 386 + * touch or even look at it if you don't want to modify the user-space 387 + * ptrace interface. In particular stay away from it for in-kernel PER. 338 388 */ 339 389 typedef struct 340 390 {
+2
arch/s390/include/asm/system.h
··· 20 20 struct task_struct; 21 21 22 22 extern struct task_struct *__switch_to(void *, void *); 23 + extern void update_per_regs(struct task_struct *task); 23 24 24 25 static inline void save_fp_regs(s390_fp_regs *fpregs) 25 26 { ··· 94 93 if (next->mm) { \ 95 94 restore_fp_regs(&next->thread.fp_regs); \ 96 95 restore_access_regs(&next->thread.acrs[0]); \ 96 + update_per_regs(next); \ 97 97 } \ 98 98 prev = __switch_to(prev,next); \ 99 99 } while (0)
+5 -3
arch/s390/include/asm/thread_info.h
··· 88 88 #define TIF_SIGPENDING 2 /* signal pending */ 89 89 #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 90 90 #define TIF_RESTART_SVC 4 /* restart svc with new svc number */ 91 - #define TIF_SINGLE_STEP 6 /* deliver sigtrap on return to user */ 91 + #define TIF_PER_TRAP 6 /* deliver sigtrap on return to user */ 92 92 #define TIF_MCCK_PENDING 7 /* machine check handling is pending */ 93 93 #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ 94 94 #define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */ ··· 99 99 #define TIF_31BIT 17 /* 32bit process */ 100 100 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ 101 101 #define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */ 102 - #define TIF_FREEZE 20 /* thread is freezing for suspend */ 102 + #define TIF_SINGLE_STEP 20 /* This task is single stepped */ 103 + #define TIF_FREEZE 21 /* thread is freezing for suspend */ 103 104 104 105 #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 105 106 #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 106 107 #define _TIF_SIGPENDING (1<<TIF_SIGPENDING) 107 108 #define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 108 109 #define _TIF_RESTART_SVC (1<<TIF_RESTART_SVC) 109 - #define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP) 110 + #define _TIF_PER_TRAP (1<<TIF_PER_TRAP) 110 111 #define _TIF_MCCK_PENDING (1<<TIF_MCCK_PENDING) 111 112 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 112 113 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) ··· 115 114 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) 116 115 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 117 116 #define _TIF_31BIT (1<<TIF_31BIT) 117 + #define _TIF_SINGLE_STEP (1<<TIF_SINGLE_STEP) 118 118 #define _TIF_FREEZE (1<<TIF_FREEZE) 119 119 120 120 #endif /* __KERNEL__ */
+8 -6
arch/s390/kernel/asm-offsets.c
··· 23 23 { 24 24 DEFINE(__THREAD_info, offsetof(struct task_struct, stack)); 25 25 DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp)); 26 - DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info)); 27 26 DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment)); 28 27 BLANK(); 29 28 DEFINE(__TASK_pid, offsetof(struct task_struct, pid)); 30 29 BLANK(); 31 - DEFINE(__PER_atmid, offsetof(per_struct, lowcore.words.perc_atmid)); 32 - DEFINE(__PER_address, offsetof(per_struct, lowcore.words.address)); 33 - DEFINE(__PER_access_id, offsetof(per_struct, lowcore.words.access_id)); 30 + DEFINE(__THREAD_per_cause, 31 + offsetof(struct task_struct, thread.per_event.cause)); 32 + DEFINE(__THREAD_per_address, 33 + offsetof(struct task_struct, thread.per_event.address)); 34 + DEFINE(__THREAD_per_paid, 35 + offsetof(struct task_struct, thread.per_event.paid)); 34 36 BLANK(); 35 37 DEFINE(__TI_task, offsetof(struct thread_info, task)); 36 38 DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain)); ··· 87 85 DEFINE(__LC_PGM_ILC, offsetof(struct _lowcore, pgm_ilc)); 88 86 DEFINE(__LC_PGM_INT_CODE, offsetof(struct _lowcore, pgm_code)); 89 87 DEFINE(__LC_TRANS_EXC_CODE, offsetof(struct _lowcore, trans_exc_code)); 90 - DEFINE(__LC_PER_ATMID, offsetof(struct _lowcore, per_perc_atmid)); 88 + DEFINE(__LC_PER_CAUSE, offsetof(struct _lowcore, per_perc_atmid)); 91 89 DEFINE(__LC_PER_ADDRESS, offsetof(struct _lowcore, per_address)); 92 - DEFINE(__LC_PER_ACCESS_ID, offsetof(struct _lowcore, per_access_id)); 90 + DEFINE(__LC_PER_PAID, offsetof(struct _lowcore, per_access_id)); 93 91 DEFINE(__LC_AR_MODE_ID, offsetof(struct _lowcore, ar_access_id)); 94 92 DEFINE(__LC_SUBCHANNEL_ID, offsetof(struct _lowcore, subchannel_id)); 95 93 DEFINE(__LC_SUBCHANNEL_NR, offsetof(struct _lowcore, subchannel_nr));
+16 -37
arch/s390/kernel/compat_ptrace.h
··· 4 4 #include <asm/ptrace.h> /* needed for NUM_CR_WORDS */ 5 5 #include "compat_linux.h" /* needed for psw_compat_t */ 6 6 7 - typedef struct { 8 - __u32 cr[NUM_CR_WORDS]; 9 - } per_cr_words32; 7 + struct compat_per_struct_kernel { 8 + __u32 cr9; /* PER control bits */ 9 + __u32 cr10; /* PER starting address */ 10 + __u32 cr11; /* PER ending address */ 11 + __u32 bits; /* Obsolete software bits */ 12 + __u32 starting_addr; /* User specified start address */ 13 + __u32 ending_addr; /* User specified end address */ 14 + __u16 perc_atmid; /* PER trap ATMID */ 15 + __u32 address; /* PER trap instruction address */ 16 + __u8 access_id; /* PER trap access identification */ 17 + }; 10 18 11 - typedef struct { 12 - __u16 perc_atmid; /* 0x096 */ 13 - __u32 address; /* 0x098 */ 14 - __u8 access_id; /* 0x0a1 */ 15 - } per_lowcore_words32; 16 - 17 - typedef struct { 18 - union { 19 - per_cr_words32 words; 20 - } control_regs; 21 - /* 22 - * Use these flags instead of setting em_instruction_fetch 23 - * directly they are used so that single stepping can be 24 - * switched on & off while not affecting other tracing 25 - */ 26 - unsigned single_step : 1; 27 - unsigned instruction_fetch : 1; 28 - unsigned : 30; 29 - /* 30 - * These addresses are copied into cr10 & cr11 if single 31 - * stepping is switched off 32 - */ 33 - __u32 starting_addr; 34 - __u32 ending_addr; 35 - union { 36 - per_lowcore_words32 words; 37 - } lowcore; 38 - } per_struct32; 39 - 40 - struct user_regs_struct32 19 + struct compat_user_regs_struct 41 20 { 42 21 psw_compat_t psw; 43 22 u32 gprs[NUM_GPRS]; ··· 29 50 * itself as there is no "official" ptrace interface for hardware 30 51 * watchpoints. This is the way intel does it. 
31 52 */ 32 - per_struct32 per_info; 53 + struct compat_per_struct_kernel per_info; 33 54 u32 ieee_instruction_pointer; /* obsolete, always 0 */ 34 55 }; 35 56 36 - struct user32 { 57 + struct compat_user { 37 58 /* We start with the registers, to mimic the way that "memory" 38 59 is returned from the ptrace(3,...) function. */ 39 - struct user_regs_struct32 regs; /* Where the registers are actually stored */ 60 + struct compat_user_regs_struct regs; 40 61 /* The rest of this junk is to help gdb figure out what goes where */ 41 62 u32 u_tsize; /* Text segment size (pages). */ 42 63 u32 u_dsize; /* Data segment size (pages). */ ··· 58 79 __u32 len; 59 80 __u32 kernel_addr; 60 81 __u32 process_addr; 61 - } ptrace_area_emu31; 82 + } compat_ptrace_area; 62 83 63 84 #endif /* _PTRACE32_H */
+30 -40
arch/s390/kernel/entry.S
··· 48 48 SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE 49 49 50 50 _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 51 - _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP ) 51 + _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP ) 52 52 _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 53 53 _TIF_MCCK_PENDING) 54 54 _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ ··· 200 200 .globl __switch_to 201 201 __switch_to: 202 202 basr %r1,0 203 - __switch_to_base: 204 - tm __THREAD_per(%r3),0xe8 # new process is using per ? 205 - bz __switch_to_noper-__switch_to_base(%r1) # if not we're fine 206 - stctl %c9,%c11,__SF_EMPTY(%r15) # We are using per stuff 207 - clc __THREAD_per(12,%r3),__SF_EMPTY(%r15) 208 - be __switch_to_noper-__switch_to_base(%r1) # we got away w/o bashing TLB's 209 - lctl %c9,%c11,__THREAD_per(%r3) # Nope we didn't 210 - __switch_to_noper: 211 - l %r4,__THREAD_info(%r2) # get thread_info of prev 203 + 0: l %r4,__THREAD_info(%r2) # get thread_info of prev 204 + l %r5,__THREAD_info(%r3) # get thread_info of next 212 205 tm __TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending? 213 - bz __switch_to_no_mcck-__switch_to_base(%r1) 214 - ni __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev 215 - l %r4,__THREAD_info(%r3) # get thread_info of next 216 - oi __TI_flags+3(%r4),_TIF_MCCK_PENDING # set it in next 217 - __switch_to_no_mcck: 218 - stm %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task 219 - st %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp 220 - l %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp 221 - lm %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task 222 - st %r3,__LC_CURRENT # __LC_CURRENT = current task struct 223 - lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 
4 224 - l %r3,__THREAD_info(%r3) # load thread_info from task struct 225 - st %r3,__LC_THREAD_INFO 226 - ahi %r3,STACK_SIZE 227 - st %r3,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack 206 + bz 1f-0b(%r1) 207 + ni __TI_flags+3(%r4),255-_TIF_MCCK_PENDING # clear flag in prev 208 + oi __TI_flags+3(%r5),_TIF_MCCK_PENDING # set it in next 209 + 1: stm %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task 210 + st %r15,__THREAD_ksp(%r2) # store kernel stack of prev 211 + l %r15,__THREAD_ksp(%r3) # load kernel stack of next 212 + lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 213 + lm %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 214 + st %r3,__LC_CURRENT # store task struct of next 215 + st %r5,__LC_THREAD_INFO # store thread info of next 216 + ahi %r5,STACK_SIZE # end of kernel stack of next 217 + st %r5,__LC_KERNEL_STACK # store end of kernel stack 228 218 br %r14 229 219 230 220 __critical_start: ··· 287 297 bo BASED(sysc_notify_resume) 288 298 tm __TI_flags+3(%r12),_TIF_RESTART_SVC 289 299 bo BASED(sysc_restart) 290 - tm __TI_flags+3(%r12),_TIF_SINGLE_STEP 300 + tm __TI_flags+3(%r12),_TIF_PER_TRAP 291 301 bo BASED(sysc_singlestep) 292 302 b BASED(sysc_return) # beware of critical section cleanup 293 303 ··· 311 321 # _TIF_SIGPENDING is set, call do_signal 312 322 # 313 323 sysc_sigpending: 314 - ni __TI_flags+3(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP 324 + ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP 315 325 la %r2,SP_PTREGS(%r15) # load pt_regs 316 326 l %r1,BASED(.Ldo_signal) 317 327 basr %r14,%r1 # call do_signal 318 328 tm __TI_flags+3(%r12),_TIF_RESTART_SVC 319 329 bo BASED(sysc_restart) 320 - tm __TI_flags+3(%r12),_TIF_SINGLE_STEP 330 + tm __TI_flags+3(%r12),_TIF_PER_TRAP 321 331 bo BASED(sysc_singlestep) 322 332 b BASED(sysc_return) 323 333 ··· 343 353 b BASED(sysc_nr_ok) # restart svc 344 354 345 355 # 346 - # _TIF_SINGLE_STEP is set, call do_single_step 356 + # _TIF_PER_TRAP is set, call do_per_trap 347 357 # 
348 358 sysc_singlestep: 349 - ni __TI_flags+3(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP 359 + ni __TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP 350 360 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number 351 361 la %r2,SP_PTREGS(%r15) # address of register-save area 352 362 l %r1,BASED(.Lhandle_per) # load adr. of per handler 353 363 la %r14,BASED(sysc_return) # load adr. of system return 354 - br %r1 # branch to do_single_step 364 + br %r1 # branch to do_per_trap 355 365 356 366 # 357 367 # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before ··· 510 520 l %r1,__TI_task(%r12) 511 521 tm SP_PSW+1(%r15),0x01 # kernel per event ? 512 522 bz BASED(kernel_per) 513 - mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID 514 - mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS 515 - mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID 516 - oi __TI_flags+3(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP 523 + mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE 524 + mvc __THREAD_per_address(4,%r1),__LC_PER_ADDRESS 525 + mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID 526 + oi __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP 517 527 l %r3,__LC_PGM_ILC # load program interruption code 518 528 l %r4,__LC_TRANS_EXC_CODE 519 529 REENABLE_IRQS ··· 541 551 UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER 542 552 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 543 553 l %r8,__TI_task(%r12) 544 - mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID 545 - mvc __THREAD_per+__PER_address(4,%r8),__LC_PER_ADDRESS 546 - mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID 547 - oi __TI_flags+3(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP 554 + mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE 555 + mvc __THREAD_per_address(4,%r8),__LC_PER_ADDRESS 556 + mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID 557 + oi __TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP 548 558 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 549 559 lm 
%r2,%r6,SP_R2(%r15) # load svc arguments 550 560 b BASED(sysc_do_svc) ··· 1046 1056 .Ldo_signal: .long do_signal 1047 1057 .Ldo_notify_resume: 1048 1058 .long do_notify_resume 1049 - .Lhandle_per: .long do_single_step 1059 + .Lhandle_per: .long do_per_trap 1050 1060 .Ldo_execve: .long do_execve 1051 1061 .Lexecve_tail: .long execve_tail 1052 1062 .Ljump_table: .long pgm_check_table
+1 -1
arch/s390/kernel/entry.h
··· 12 12 13 13 extern int sysctl_userprocess_debug; 14 14 15 - void do_single_step(struct pt_regs *regs); 15 + void do_per_trap(struct pt_regs *regs); 16 16 void syscall_trace(struct pt_regs *regs, int entryexit); 17 17 void kernel_stack_overflow(struct pt_regs * regs); 18 18 void do_signal(struct pt_regs *regs);
+30 -39
arch/s390/kernel/entry64.S
··· 51 51 STACK_SIZE = 1 << STACK_SHIFT 52 52 53 53 _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 54 - _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP ) 54 + _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_PER_TRAP ) 55 55 _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \ 56 56 _TIF_MCCK_PENDING) 57 57 _TIF_SYSCALL = (_TIF_SYSCALL_TRACE>>8 | _TIF_SYSCALL_AUDIT>>8 | \ ··· 208 208 */ 209 209 .globl __switch_to 210 210 __switch_to: 211 - tm __THREAD_per+4(%r3),0xe8 # is the new process using per ? 212 - jz __switch_to_noper # if not we're fine 213 - stctg %c9,%c11,__SF_EMPTY(%r15)# We are using per stuff 214 - clc __THREAD_per(24,%r3),__SF_EMPTY(%r15) 215 - je __switch_to_noper # we got away without bashing TLB's 216 - lctlg %c9,%c11,__THREAD_per(%r3) # Nope we didn't 217 - __switch_to_noper: 218 - lg %r4,__THREAD_info(%r2) # get thread_info of prev 211 + lg %r4,__THREAD_info(%r2) # get thread_info of prev 212 + lg %r5,__THREAD_info(%r3) # get thread_info of next 219 213 tm __TI_flags+7(%r4),_TIF_MCCK_PENDING # machine check pending? 220 - jz __switch_to_no_mcck 221 - ni __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev 222 - lg %r4,__THREAD_info(%r3) # get thread_info of next 223 - oi __TI_flags+7(%r4),_TIF_MCCK_PENDING # set it in next 224 - __switch_to_no_mcck: 225 - stmg %r6,%r15,__SF_GPRS(%r15)# store __switch_to registers of prev task 226 - stg %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp 227 - lg %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp 228 - lmg %r6,%r15,__SF_GPRS(%r15)# load __switch_to registers of next task 229 - stg %r3,__LC_CURRENT # __LC_CURRENT = current task struct 230 - lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 
4 231 - lg %r3,__THREAD_info(%r3) # load thread_info from task struct 232 - stg %r3,__LC_THREAD_INFO 233 - aghi %r3,STACK_SIZE 234 - stg %r3,__LC_KERNEL_STACK # __LC_KERNEL_STACK = new kernel stack 214 + jz 0f 215 + ni __TI_flags+7(%r4),255-_TIF_MCCK_PENDING # clear flag in prev 216 + oi __TI_flags+7(%r5),_TIF_MCCK_PENDING # set it in next 217 + 0: stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task 218 + stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev 219 + lg %r15,__THREAD_ksp(%r3) # load kernel stack of next 220 + lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 221 + lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 222 + stg %r3,__LC_CURRENT # store task struct of next 223 + stg %r5,__LC_THREAD_INFO # store thread info of next 224 + aghi %r5,STACK_SIZE # end of kernel stack of next 225 + stg %r5,__LC_KERNEL_STACK # store end of kernel stack 235 226 br %r14 236 227 237 228 __critical_start: ··· 302 311 jo sysc_notify_resume 303 312 tm __TI_flags+7(%r12),_TIF_RESTART_SVC 304 313 jo sysc_restart 305 - tm __TI_flags+7(%r12),_TIF_SINGLE_STEP 314 + tm __TI_flags+7(%r12),_TIF_PER_TRAP 306 315 jo sysc_singlestep 307 316 j sysc_return # beware of critical section cleanup 308 317 ··· 324 333 # _TIF_SIGPENDING is set, call do_signal 325 334 # 326 335 sysc_sigpending: 327 - ni __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP 336 + ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP 328 337 la %r2,SP_PTREGS(%r15) # load pt_regs 329 338 brasl %r14,do_signal # call do_signal 330 339 tm __TI_flags+7(%r12),_TIF_RESTART_SVC 331 340 jo sysc_restart 332 - tm __TI_flags+7(%r12),_TIF_SINGLE_STEP 341 + tm __TI_flags+7(%r12),_TIF_PER_TRAP 333 342 jo sysc_singlestep 334 343 j sysc_return 335 344 ··· 354 363 j sysc_nr_ok # restart svc 355 364 356 365 # 357 - # _TIF_SINGLE_STEP is set, call do_single_step 366 + # _TIF_PER_TRAP is set, call do_per_trap 358 367 # 359 368 sysc_singlestep: 360 - ni __TI_flags+7(%r12),255-_TIF_SINGLE_STEP # 
clear TIF_SINGLE_STEP 369 + ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP 361 370 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number 362 371 la %r2,SP_PTREGS(%r15) # address of register-save area 363 372 larl %r14,sysc_return # load adr. of system return 364 - jg do_single_step # branch to do_sigtrap 373 + jg do_per_trap 365 374 366 375 # 367 376 # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before ··· 517 526 lg %r1,__TI_task(%r12) 518 527 tm SP_PSW+1(%r15),0x01 # kernel per event ? 519 528 jz kernel_per 520 - mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID 521 - mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS 522 - mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID 523 - oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP 529 + mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE 530 + mvc __THREAD_per_address(8,%r1),__LC_PER_ADDRESS 531 + mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID 532 + oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP 524 533 lgf %r3,__LC_PGM_ILC # load program interruption code 525 534 lg %r4,__LC_TRANS_EXC_CODE 526 535 REENABLE_IRQS ··· 549 558 mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER 550 559 LAST_BREAK 551 560 lg %r8,__TI_task(%r12) 552 - mvc __THREAD_per+__PER_atmid(2,%r8),__LC_PER_ATMID 553 - mvc __THREAD_per+__PER_address(8,%r8),__LC_PER_ADDRESS 554 - mvc __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID 555 - oi __TI_flags+7(%r12),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP 561 + mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE 562 + mvc __THREAD_per_address(8,%r8),__LC_PER_ADDRESS 563 + mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID 564 + oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP 556 565 stosm __SF_EMPTY(%r15),0x03 # reenable interrupts 557 566 lmg %r2,%r6,SP_R2(%r15) # load svc arguments 558 567 j sysc_do_svc ··· 564 573 REENABLE_IRQS 565 574 xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number 566 575 la %r2,SP_PTREGS(%r15) # address of register-save area 
567 - brasl %r14,do_single_step 576 + brasl %r14,do_per_trap 568 577 j pgm_exit 569 578 570 579 /*
+6 -7
arch/s390/kernel/kprobes.c
··· 175 175 struct pt_regs *regs, 176 176 unsigned long ip) 177 177 { 178 - per_cr_bits kprobe_per_regs[1]; 178 + struct per_regs per_kprobe; 179 179 180 - /* Set up the per control reg info, will pass to lctl */ 181 - memset(kprobe_per_regs, 0, sizeof(per_cr_bits)); 182 - kprobe_per_regs[0].em_instruction_fetch = 1; 183 - kprobe_per_regs[0].starting_addr = ip; 184 - kprobe_per_regs[0].ending_addr = ip; 180 + /* Set up the PER control registers %cr9-%cr11 */ 181 + per_kprobe.control = PER_EVENT_IFETCH; 182 + per_kprobe.start = ip; 183 + per_kprobe.end = ip; 185 184 186 185 /* Save control regs and psw mask */ 187 186 __ctl_store(kcb->kprobe_saved_ctl, 9, 11); ··· 188 189 (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT); 189 190 190 191 /* Set PER control regs, turns on single step for the given address */ 191 - __ctl_load(kprobe_per_regs, 9, 11); 192 + __ctl_load(per_kprobe, 9, 11); 192 193 regs->psw.mask |= PSW_MASK_PER; 193 194 regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); 194 195 regs->psw.addr = ip | PSW_ADDR_AMODE;
+3 -1
arch/s390/kernel/process.c
··· 213 213 /* start new process with ar4 pointing to the correct address space */ 214 214 p->thread.mm_segment = get_fs(); 215 215 /* Don't copy debug registers */ 216 - memset(&p->thread.per_info, 0, sizeof(p->thread.per_info)); 216 + memset(&p->thread.per_user, 0, sizeof(p->thread.per_user)); 217 + memset(&p->thread.per_event, 0, sizeof(p->thread.per_event)); 217 218 clear_tsk_thread_flag(p, TIF_SINGLE_STEP); 219 + clear_tsk_thread_flag(p, TIF_PER_TRAP); 218 220 /* Initialize per thread user and system timer values */ 219 221 ti = task_thread_info(p); 220 222 ti->user_timer = 0;
+186 -118
arch/s390/kernel/ptrace.c
··· 1 1 /* 2 - * arch/s390/kernel/ptrace.c 2 + * Ptrace user space interface. 3 3 * 4 - * S390 version 5 - * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation 6 - * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), 4 + * Copyright IBM Corp. 1999,2010 5 + * Author(s): Denis Joseph Barrow 7 6 * Martin Schwidefsky (schwidefsky@de.ibm.com) 8 - * 9 - * Based on PowerPC version 10 - * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) 11 - * 12 - * Derived from "arch/m68k/kernel/ptrace.c" 13 - * Copyright (C) 1994 by Hamish Macdonald 14 - * Taken from linux/kernel/ptrace.c and modified for M680x0. 15 - * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds 16 - * 17 - * Modified by Cort Dougan (cort@cs.nmt.edu) 18 - * 19 - * 20 - * This file is subject to the terms and conditions of the GNU General 21 - * Public License. See the file README.legal in the main directory of 22 - * this archive for more details. 23 7 */ 24 8 25 9 #include <linux/kernel.h> ··· 45 61 REGSET_GENERAL_EXTENDED, 46 62 }; 47 63 48 - static void 49 - FixPerRegisters(struct task_struct *task) 64 + void update_per_regs(struct task_struct *task) 50 65 { 51 - struct pt_regs *regs; 52 - per_struct *per_info; 53 - per_cr_words cr_words; 66 + static const struct per_regs per_single_step = { 67 + .control = PER_EVENT_IFETCH, 68 + .start = 0, 69 + .end = PSW_ADDR_INSN, 70 + }; 71 + struct pt_regs *regs = task_pt_regs(task); 72 + struct thread_struct *thread = &task->thread; 73 + const struct per_regs *new; 74 + struct per_regs old; 54 75 55 - regs = task_pt_regs(task); 56 - per_info = (per_struct *) &task->thread.per_info; 57 - per_info->control_regs.bits.em_instruction_fetch = 58 - per_info->single_step | per_info->instruction_fetch; 59 - 60 - if (per_info->single_step) { 61 - per_info->control_regs.bits.starting_addr = 0; 62 - #ifdef CONFIG_COMPAT 63 - if (is_compat_task()) 64 - per_info->control_regs.bits.ending_addr = 0x7fffffffUL; 65 
- else 66 - #endif 67 - per_info->control_regs.bits.ending_addr = PSW_ADDR_INSN; 68 - } else { 69 - per_info->control_regs.bits.starting_addr = 70 - per_info->starting_addr; 71 - per_info->control_regs.bits.ending_addr = 72 - per_info->ending_addr; 73 - } 74 - /* 75 - * if any of the control reg tracing bits are on 76 - * we switch on per in the psw 77 - */ 78 - if (per_info->control_regs.words.cr[0] & PER_EM_MASK) 79 - regs->psw.mask |= PSW_MASK_PER; 80 - else 76 + /* TIF_SINGLE_STEP overrides the user specified PER registers. */ 77 + new = test_tsk_thread_flag(task, TIF_SINGLE_STEP) ? 78 + &per_single_step : &thread->per_user; 79 + 80 + /* Take care of the PER enablement bit in the PSW. */ 81 + if (!(new->control & PER_EVENT_MASK)) { 81 82 regs->psw.mask &= ~PSW_MASK_PER; 82 - 83 - if (per_info->control_regs.bits.em_storage_alteration) 84 - per_info->control_regs.bits.storage_alt_space_ctl = 1; 85 - else 86 - per_info->control_regs.bits.storage_alt_space_ctl = 0; 87 - 88 - if (task == current) { 89 - __ctl_store(cr_words, 9, 11); 90 - if (memcmp(&cr_words, &per_info->control_regs.words, 91 - sizeof(cr_words)) != 0) 92 - __ctl_load(per_info->control_regs.words, 9, 11); 83 + return; 93 84 } 85 + regs->psw.mask |= PSW_MASK_PER; 86 + __ctl_store(old, 9, 11); 87 + if (memcmp(new, &old, sizeof(struct per_regs)) != 0) 88 + __ctl_load(*new, 9, 11); 94 89 } 95 90 96 91 void user_enable_single_step(struct task_struct *task) 97 92 { 98 - task->thread.per_info.single_step = 1; 99 - FixPerRegisters(task); 93 + set_tsk_thread_flag(task, TIF_SINGLE_STEP); 94 + if (task == current) 95 + update_per_regs(task); 100 96 } 101 97 102 98 void user_disable_single_step(struct task_struct *task) 103 99 { 104 - task->thread.per_info.single_step = 0; 105 - FixPerRegisters(task); 100 + clear_tsk_thread_flag(task, TIF_SINGLE_STEP); 101 + if (task == current) 102 + update_per_regs(task); 106 103 } 107 104 108 105 /* 109 106 * Called by kernel/ptrace.c when detaching.. 
110 107 * 111 - * Make sure single step bits etc are not set. 108 + * Clear all debugging related fields. 112 109 */ 113 - void 114 - ptrace_disable(struct task_struct *child) 110 + void ptrace_disable(struct task_struct *task) 115 111 { 116 - /* make sure the single step bit is not set. */ 117 - user_disable_single_step(child); 112 + memset(&task->thread.per_user, 0, sizeof(task->thread.per_user)); 113 + memset(&task->thread.per_event, 0, sizeof(task->thread.per_event)); 114 + clear_tsk_thread_flag(task, TIF_SINGLE_STEP); 115 + clear_tsk_thread_flag(task, TIF_PER_TRAP); 118 116 } 119 117 120 118 #ifndef CONFIG_64BIT ··· 104 138 #else 105 139 # define __ADDR_MASK 7 106 140 #endif 141 + 142 + static inline unsigned long __peek_user_per(struct task_struct *child, 143 + addr_t addr) 144 + { 145 + struct per_struct_kernel *dummy = NULL; 146 + 147 + if (addr == (addr_t) &dummy->cr9) 148 + /* Control bits of the active per set. */ 149 + return test_thread_flag(TIF_SINGLE_STEP) ? 150 + PER_EVENT_IFETCH : child->thread.per_user.control; 151 + else if (addr == (addr_t) &dummy->cr10) 152 + /* Start address of the active per set. */ 153 + return test_thread_flag(TIF_SINGLE_STEP) ? 154 + 0 : child->thread.per_user.start; 155 + else if (addr == (addr_t) &dummy->cr11) 156 + /* End address of the active per set. */ 157 + return test_thread_flag(TIF_SINGLE_STEP) ? 158 + PSW_ADDR_INSN : child->thread.per_user.end; 159 + else if (addr == (addr_t) &dummy->bits) 160 + /* Single-step bit. */ 161 + return test_thread_flag(TIF_SINGLE_STEP) ? 162 + (1UL << (BITS_PER_LONG - 1)) : 0; 163 + else if (addr == (addr_t) &dummy->starting_addr) 164 + /* Start address of the user specified per set. */ 165 + return child->thread.per_user.start; 166 + else if (addr == (addr_t) &dummy->ending_addr) 167 + /* End address of the user specified per set. 
*/ 168 + return child->thread.per_user.end; 169 + else if (addr == (addr_t) &dummy->perc_atmid) 170 + /* PER code, ATMID and AI of the last PER trap */ 171 + return (unsigned long) 172 + child->thread.per_event.cause << (BITS_PER_LONG - 16); 173 + else if (addr == (addr_t) &dummy->address) 174 + /* Address of the last PER trap */ 175 + return child->thread.per_event.address; 176 + else if (addr == (addr_t) &dummy->access_id) 177 + /* Access id of the last PER trap */ 178 + return (unsigned long) 179 + child->thread.per_event.paid << (BITS_PER_LONG - 8); 180 + return 0; 181 + } 107 182 108 183 /* 109 184 * Read the word at offset addr from the user area of a process. The ··· 211 204 212 205 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { 213 206 /* 214 - * per_info is found in the thread structure 207 + * Handle access to the per_info structure. 215 208 */ 216 - offset = addr - (addr_t) &dummy->regs.per_info; 217 - tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset); 209 + addr -= (addr_t) &dummy->regs.per_info; 210 + tmp = __peek_user_per(child, addr); 218 211 219 212 } else 220 213 tmp = 0; ··· 242 235 243 236 tmp = __peek_user(child, addr); 244 237 return put_user(tmp, (addr_t __user *) data); 238 + } 239 + 240 + static inline void __poke_user_per(struct task_struct *child, 241 + addr_t addr, addr_t data) 242 + { 243 + struct per_struct_kernel *dummy = NULL; 244 + 245 + /* 246 + * There are only three fields in the per_info struct that the 247 + * debugger user can write to. 248 + * 1) cr9: the debugger wants to set a new PER event mask 249 + * 2) starting_addr: the debugger wants to set a new starting 250 + * address to use with the PER event mask. 251 + * 3) ending_addr: the debugger wants to set a new ending 252 + * address to use with the PER event mask. 253 + * The user specified PER event mask and the start and end 254 + * addresses are used only if single stepping is not in effect. 
255 + * Writes to any other field in per_info are ignored. 256 + */ 257 + if (addr == (addr_t) &dummy->cr9) 258 + /* PER event mask of the user specified per set. */ 259 + child->thread.per_user.control = 260 + data & (PER_EVENT_MASK | PER_CONTROL_MASK); 261 + else if (addr == (addr_t) &dummy->starting_addr) 262 + /* Starting address of the user specified per set. */ 263 + child->thread.per_user.start = data; 264 + else if (addr == (addr_t) &dummy->ending_addr) 265 + /* Ending address of the user specified per set. */ 266 + child->thread.per_user.end = data; 245 267 } 246 268 247 269 /* ··· 347 311 348 312 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { 349 313 /* 350 - * per_info is found in the thread structure 314 + * Handle access to the per_info structure. 351 315 */ 352 - offset = addr - (addr_t) &dummy->regs.per_info; 353 - *(addr_t *)((addr_t) &child->thread.per_info + offset) = data; 316 + addr -= (addr_t) &dummy->regs.per_info; 317 + __poke_user_per(child, addr, data); 354 318 355 319 } 356 320 357 - FixPerRegisters(child); 358 321 return 0; 359 322 } 360 323 361 - static int 362 - poke_user(struct task_struct *child, addr_t addr, addr_t data) 324 + static int poke_user(struct task_struct *child, addr_t addr, addr_t data) 363 325 { 364 326 addr_t mask; 365 327 ··· 444 410 */ 445 411 446 412 /* 413 + * Same as peek_user_per but for a 31 bit program. 414 + */ 415 + static inline __u32 __peek_user_per_compat(struct task_struct *child, 416 + addr_t addr) 417 + { 418 + struct compat_per_struct_kernel *dummy32 = NULL; 419 + 420 + if (addr == (addr_t) &dummy32->cr9) 421 + /* Control bits of the active per set. */ 422 + return (__u32) test_thread_flag(TIF_SINGLE_STEP) ? 423 + PER_EVENT_IFETCH : child->thread.per_user.control; 424 + else if (addr == (addr_t) &dummy32->cr10) 425 + /* Start address of the active per set. */ 426 + return (__u32) test_thread_flag(TIF_SINGLE_STEP) ? 
427 + 0 : child->thread.per_user.start; 428 + else if (addr == (addr_t) &dummy32->cr11) 429 + /* End address of the active per set. */ 430 + return test_thread_flag(TIF_SINGLE_STEP) ? 431 + PSW32_ADDR_INSN : child->thread.per_user.end; 432 + else if (addr == (addr_t) &dummy32->bits) 433 + /* Single-step bit. */ 434 + return (__u32) test_thread_flag(TIF_SINGLE_STEP) ? 435 + 0x80000000 : 0; 436 + else if (addr == (addr_t) &dummy32->starting_addr) 437 + /* Start address of the user specified per set. */ 438 + return (__u32) child->thread.per_user.start; 439 + else if (addr == (addr_t) &dummy32->ending_addr) 440 + /* End address of the user specified per set. */ 441 + return (__u32) child->thread.per_user.end; 442 + else if (addr == (addr_t) &dummy32->perc_atmid) 443 + /* PER code, ATMID and AI of the last PER trap */ 444 + return (__u32) child->thread.per_event.cause << 16; 445 + else if (addr == (addr_t) &dummy32->address) 446 + /* Address of the last PER trap */ 447 + return (__u32) child->thread.per_event.address; 448 + else if (addr == (addr_t) &dummy32->access_id) 449 + /* Access id of the last PER trap */ 450 + return (__u32) child->thread.per_event.paid << 24; 451 + return 0; 452 + } 453 + 454 + /* 447 455 * Same as peek_user but for a 31 bit program. 448 456 */ 449 457 static u32 __peek_user_compat(struct task_struct *child, addr_t addr) 450 458 { 451 - struct user32 *dummy32 = NULL; 452 - per_struct32 *dummy_per32 = NULL; 459 + struct compat_user *dummy32 = NULL; 453 460 addr_t offset; 454 461 __u32 tmp; 455 462 ··· 540 465 541 466 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { 542 467 /* 543 - * per_info is found in the thread structure 468 + * Handle access to the per_info structure. 544 469 */ 545 - offset = addr - (addr_t) &dummy32->regs.per_info; 546 - /* This is magic. See per_struct and per_struct32. 
*/ 547 - if ((offset >= (addr_t) &dummy_per32->control_regs && 548 - offset < (addr_t) (&dummy_per32->control_regs + 1)) || 549 - (offset >= (addr_t) &dummy_per32->starting_addr && 550 - offset <= (addr_t) &dummy_per32->ending_addr) || 551 - offset == (addr_t) &dummy_per32->lowcore.words.address) 552 - offset = offset*2 + 4; 553 - else 554 - offset = offset*2; 555 - tmp = *(__u32 *)((addr_t) &child->thread.per_info + offset); 470 + addr -= (addr_t) &dummy32->regs.per_info; 471 + tmp = __peek_user_per_compat(child, addr); 556 472 557 473 } else 558 474 tmp = 0; ··· 564 498 } 565 499 566 500 /* 501 + * Same as poke_user_per but for a 31 bit program. 502 + */ 503 + static inline void __poke_user_per_compat(struct task_struct *child, 504 + addr_t addr, __u32 data) 505 + { 506 + struct compat_per_struct_kernel *dummy32 = NULL; 507 + 508 + if (addr == (addr_t) &dummy32->cr9) 509 + /* PER event mask of the user specified per set. */ 510 + child->thread.per_user.control = 511 + data & (PER_EVENT_MASK | PER_CONTROL_MASK); 512 + else if (addr == (addr_t) &dummy32->starting_addr) 513 + /* Starting address of the user specified per set. */ 514 + child->thread.per_user.start = data; 515 + else if (addr == (addr_t) &dummy32->ending_addr) 516 + /* Ending address of the user specified per set. */ 517 + child->thread.per_user.end = data; 518 + } 519 + 520 + /* 567 521 * Same as poke_user but for a 31 bit program. 568 522 */ 569 523 static int __poke_user_compat(struct task_struct *child, 570 524 addr_t addr, addr_t data) 571 525 { 572 - struct user32 *dummy32 = NULL; 573 - per_struct32 *dummy_per32 = NULL; 526 + struct compat_user *dummy32 = NULL; 574 527 __u32 tmp = (__u32) data; 575 528 addr_t offset; 576 529 ··· 646 561 647 562 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { 648 563 /* 649 - * per_info is found in the thread structure. 564 + * Handle access to the per_info structure. 
650 565 */ 651 - offset = addr - (addr_t) &dummy32->regs.per_info; 652 - /* 653 - * This is magic. See per_struct and per_struct32. 654 - * By incident the offsets in per_struct are exactly 655 - * twice the offsets in per_struct32 for all fields. 656 - * The 8 byte fields need special handling though, 657 - * because the second half (bytes 4-7) is needed and 658 - * not the first half. 659 - */ 660 - if ((offset >= (addr_t) &dummy_per32->control_regs && 661 - offset < (addr_t) (&dummy_per32->control_regs + 1)) || 662 - (offset >= (addr_t) &dummy_per32->starting_addr && 663 - offset <= (addr_t) &dummy_per32->ending_addr) || 664 - offset == (addr_t) &dummy_per32->lowcore.words.address) 665 - offset = offset*2 + 4; 666 - else 667 - offset = offset*2; 668 - *(__u32 *)((addr_t) &child->thread.per_info + offset) = tmp; 669 - 566 + addr -= (addr_t) &dummy32->regs.per_info; 567 + __poke_user_per_compat(child, addr, data); 670 568 } 671 569 672 - FixPerRegisters(child); 673 570 return 0; 674 571 } 675 572 676 573 static int poke_user_compat(struct task_struct *child, 677 574 addr_t addr, addr_t data) 678 575 { 679 - if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user32) - 3) 576 + if (!is_compat_task() || (addr & 3) || 577 + addr > sizeof(struct compat_user) - 3) 680 578 return -EIO; 681 579 682 580 return __poke_user_compat(child, addr, data); ··· 670 602 { 671 603 unsigned long addr = caddr; 672 604 unsigned long data = cdata; 673 - ptrace_area_emu31 parea; 605 + compat_ptrace_area parea; 674 606 int copied, ret; 675 607 676 608 switch (request) {
+1 -1
arch/s390/kernel/signal.c
··· 505 505 * Let tracing know that we've done the handler setup. 506 506 */ 507 507 tracehook_signal_handler(signr, &info, &ka, regs, 508 - current->thread.per_info.single_step); 508 + test_thread_flag(TIF_SINGLE_STEP)); 509 509 } 510 510 return; 511 511 }
+2 -4
arch/s390/kernel/traps.c
··· 365 365 ((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN); 366 366 } 367 367 368 - void __kprobes do_single_step(struct pt_regs *regs) 368 + void __kprobes do_per_trap(struct pt_regs *regs) 369 369 { 370 - if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, 371 - SIGTRAP) == NOTIFY_STOP){ 370 + if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) 372 371 return; 373 - } 374 372 if (tracehook_consider_fatal_signal(current, SIGTRAP)) 375 373 force_sig(SIGTRAP, current); 376 374 }
+3 -3
arch/s390/mm/fault.c
··· 235 235 rc = __get_user(instruction, (u16 __user *) regs->psw.addr); 236 236 237 237 if (!rc && instruction == 0x0a77) { 238 - clear_tsk_thread_flag(current, TIF_SINGLE_STEP); 238 + clear_tsk_thread_flag(current, TIF_PER_TRAP); 239 239 if (is_compat_task()) 240 240 sys32_sigreturn(); 241 241 else 242 242 sys_sigreturn(); 243 243 } else if (!rc && instruction == 0x0aad) { 244 - clear_tsk_thread_flag(current, TIF_SINGLE_STEP); 244 + clear_tsk_thread_flag(current, TIF_PER_TRAP); 245 245 if (is_compat_task()) 246 246 sys32_rt_sigreturn(); 247 247 else ··· 379 379 * The instruction that caused the program check will 380 380 * be repeated. Don't signal single step via SIGTRAP. 381 381 */ 382 - clear_tsk_thread_flag(tsk, TIF_SINGLE_STEP); 382 + clear_tsk_thread_flag(tsk, TIF_PER_TRAP); 383 383 fault = 0; 384 384 out_up: 385 385 up_read(&mm->mmap_sem);