[IA64] Synchronize kernel RSE to user-space and back

This is the base kernel patch for the ptrace RSE bug. It is essentially a backport
of the utrace RSE patch I sent out several weeks ago. Please review.

When a thread is stopped (ptraced), the debugger might change the thread's user
stack (by writing memory directly), and we must avoid having the RSE state stored
in the kernel overwrite the user stack (user space's RSE is newer than the
kernel's in that case). To work around the issue, we copy the kernel RSE to the
user RSE before the task is stopped, so the user RSE has up-to-date data. We then
copy the user RSE back to the kernel after the task is resumed from the traced
stop, and the kernel will use the newer RSE state to return to user space.

Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Petr Tesarik <ptesarik@suse.cz>
CC: Roland McGrath <roland@redhat.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>

authored by Petr Tesarik and committed by Tony Luck 3b2ce0b1 5aa92ffd

+97
+6
arch/ia64/kernel/process.c
··· 163 163 if (tsk->thread.pfm_needs_checking) 164 164 return; 165 165 #endif 166 + if (test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_RSE)) 167 + return; 166 168 clear_ti_thread_flag(task_thread_info(tsk), TIF_NOTIFY_RESUME); 167 169 } 168 170 ··· 186 184 /* deal with pending signal delivery */ 187 185 if (test_thread_flag(TIF_SIGPENDING)||test_thread_flag(TIF_RESTORE_SIGMASK)) 188 186 ia64_do_signal(scr, in_syscall); 187 + 188 + /* copy user rbs to kernel rbs */ 189 + if (unlikely(test_thread_flag(TIF_RESTORE_RSE))) 190 + ia64_sync_krbs(); 189 191 } 190 192 191 193 static int pal_halt = 1;
+82
arch/ia64/kernel/ptrace.c
··· 547 547 return 0; 548 548 } 549 549 550 + static long 551 + ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw, 552 + unsigned long user_rbs_start, unsigned long user_rbs_end) 553 + { 554 + unsigned long addr, val; 555 + long ret; 556 + 557 + /* now copy word for word from user rbs to kernel rbs: */ 558 + for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) { 559 + if (access_process_vm(child, addr, &val, sizeof(val), 0) 560 + != sizeof(val)) 561 + return -EIO; 562 + 563 + ret = ia64_poke(child, sw, user_rbs_end, addr, val); 564 + if (ret < 0) 565 + return ret; 566 + } 567 + return 0; 568 + } 569 + 570 + typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *, 571 + unsigned long, unsigned long); 572 + 573 + static void do_sync_rbs(struct unw_frame_info *info, void *arg) 574 + { 575 + struct pt_regs *pt; 576 + unsigned long urbs_end; 577 + syncfunc_t fn = arg; 578 + 579 + if (unw_unwind_to_user(info) < 0) 580 + return; 581 + pt = task_pt_regs(info->task); 582 + urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL); 583 + 584 + fn(info->task, info->sw, pt->ar_bspstore, urbs_end); 585 + } 586 + 587 + /* 588 + * when a thread is stopped (ptraced), debugger might change thread's user 589 + * stack (change memory directly), and we must avoid the RSE stored in kernel 590 + * to override user stack (user space's RSE is newer than kernel's in the 591 + * case). To workaround the issue, we copy kernel RSE to user RSE before the 592 + * task is stopped, so user RSE has updated data. we then copy user RSE to 593 + * kernel after the task is resummed from traced stop and kernel will use the 594 + * newer RSE to return to user. TIF_RESTORE_RSE is the flag to indicate we need 595 + * synchronize user RSE to kernel. 
596 + */ 597 + void ia64_ptrace_stop(void) 598 + { 599 + if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE)) 600 + return; 601 + tsk_set_notify_resume(current); 602 + unw_init_running(do_sync_rbs, ia64_sync_user_rbs); 603 + } 604 + 605 + /* 606 + * This is called to read back the register backing store. 607 + */ 608 + void ia64_sync_krbs(void) 609 + { 610 + clear_tsk_thread_flag(current, TIF_RESTORE_RSE); 611 + tsk_clear_notify_resume(current); 612 + 613 + unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs); 614 + } 615 + 550 616 static inline int 551 617 thread_matches (struct task_struct *thread, unsigned long addr) 552 618 { ··· 1488 1422 struct task_struct *child; 1489 1423 struct switch_stack *sw; 1490 1424 long ret; 1425 + struct unw_frame_info info; 1491 1426 1492 1427 lock_kernel(); 1493 1428 ret = -EPERM; ··· 1520 1453 1521 1454 if (request == PTRACE_ATTACH) { 1522 1455 ret = ptrace_attach(child); 1456 + if (!ret) 1457 + arch_ptrace_attach(child); 1523 1458 goto out_tsk; 1524 1459 } 1525 1460 ··· 1550 1481 /* write the word at location addr */ 1551 1482 urbs_end = ia64_get_user_rbs_end(child, pt, NULL); 1552 1483 ret = ia64_poke(child, sw, urbs_end, addr, data); 1484 + 1485 + /* Make sure user RBS has the latest data */ 1486 + unw_init_from_blocked_task(&info, child); 1487 + do_sync_rbs(&info, ia64_sync_user_rbs); 1488 + 1553 1489 goto out_tsk; 1554 1490 1555 1491 case PTRACE_PEEKUSR: ··· 1708 1634 && (current->ptrace & PT_PTRACED)) 1709 1635 syscall_trace(); 1710 1636 1637 + /* copy user rbs to kernel rbs */ 1638 + if (test_thread_flag(TIF_RESTORE_RSE)) 1639 + ia64_sync_krbs(); 1640 + 1711 1641 if (unlikely(current->audit_context)) { 1712 1642 long syscall; 1713 1643 int arch; ··· 1749 1671 || test_thread_flag(TIF_SINGLESTEP)) 1750 1672 && (current->ptrace & PT_PTRACED)) 1751 1673 syscall_trace(); 1674 + 1675 + /* copy user rbs to kernel rbs */ 1676 + if (test_thread_flag(TIF_RESTORE_RSE)) 1677 + ia64_sync_krbs(); 1752 1678 }
+7
include/asm-ia64/ptrace.h
··· 292 292 unsigned long, long); 293 293 extern void ia64_flush_fph (struct task_struct *); 294 294 extern void ia64_sync_fph (struct task_struct *); 295 + extern void ia64_sync_krbs(void); 295 296 extern long ia64_sync_user_rbs (struct task_struct *, struct switch_stack *, 296 297 unsigned long, unsigned long); 297 298 ··· 303 302 304 303 extern void ia64_increment_ip (struct pt_regs *pt); 305 304 extern void ia64_decrement_ip (struct pt_regs *pt); 305 + 306 + extern void ia64_ptrace_stop(void); 307 + #define arch_ptrace_stop(code, info) \ 308 + ia64_ptrace_stop() 309 + #define arch_ptrace_stop_needed(code, info) \ 310 + (!test_thread_flag(TIF_RESTORE_RSE)) 306 311 307 312 #endif /* !__KERNEL__ */ 308 313
+2
include/asm-ia64/thread_info.h
··· 94 94 #define TIF_MCA_INIT 18 /* this task is processing MCA or INIT */ 95 95 #define TIF_DB_DISABLED 19 /* debug trap disabled for fsyscall */ 96 96 #define TIF_FREEZE 20 /* is freezing for suspend */ 97 + #define TIF_RESTORE_RSE 21 /* user RBS is newer than kernel RBS */ 97 98 98 99 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 99 100 #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) ··· 108 107 #define _TIF_MCA_INIT (1 << TIF_MCA_INIT) 109 108 #define _TIF_DB_DISABLED (1 << TIF_DB_DISABLED) 110 109 #define _TIF_FREEZE (1 << TIF_FREEZE) 110 + #define _TIF_RESTORE_RSE (1 << TIF_RESTORE_RSE) 111 111 112 112 /* "work to do on user-return" bits */ 113 113 #define TIF_ALLWORK_MASK (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SYSCALL_AUDIT|\