[PATCH] ia64: task_pt_regs()

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Al Viro, committed by Linus Torvalds (commit 6450578f, parent ab03591d)

14 files changed, 56 insertions(+), 57 deletions(-)
+1 -2
arch/ia64/ia32/elfcore32.h
@@ -95,8 +95,7 @@
 static inline int elf_core_copy_task_regs(struct task_struct *t,
                                           elf_gregset_t* elfregs)
 {
-        struct pt_regs *pp = ia64_task_regs(t);
-        ELF_CORE_COPY_REGS((*elfregs), pp);
+        ELF_CORE_COPY_REGS((*elfregs), task_pt_regs(t));
         return 1;
 }
 
+2 -2
arch/ia64/ia32/ia32_signal.c
@@ -255,7 +255,7 @@
          */
         fp_tos = (fsr>>11)&0x7;
         fr8_st_map = (8-fp_tos)&0x7;
-        ptp = ia64_task_regs(tsk);
+        ptp = task_pt_regs(tsk);
         fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
         ia64f2ia32f(fpregp, &ptp->f8);
         copy_to_user(&save->_st[(0+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
@@ -389,7 +389,7 @@
         fr8_st_map = (8-fp_tos)&0x7;
         fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
 
-        ptp = ia64_task_regs(tsk);
+        ptp = task_pt_regs(tsk);
         copy_from_user(fpregp, &save->_st[(0+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
         ia32f2ia64f(&ptp->f8, fpregp);
         copy_from_user(fpregp, &save->_st[(1+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
+2 -2
arch/ia64/ia32/ia32_support.c
@@ -58,7 +58,7 @@
 void
 ia32_load_segment_descriptors (struct task_struct *task)
 {
-        struct pt_regs *regs = ia64_task_regs(task);
+        struct pt_regs *regs = task_pt_regs(task);
 
         /* Setup the segment descriptors */
         regs->r24 = load_desc(regs->r16 >> 16);        /* ESD */
@@ -113,7 +113,7 @@
 ia32_load_state (struct task_struct *t)
 {
         unsigned long eflag, fsr, fcr, fir, fdr, tssd;
-        struct pt_regs *regs = ia64_task_regs(t);
+        struct pt_regs *regs = task_pt_regs(t);
 
         eflag = t->thread.eflag;
         fsr = t->thread.fsr;
+6 -6
arch/ia64/ia32/sys_ia32.c
@@ -1482,7 +1482,7 @@
 {
         struct pt_regs *child_regs;
 
-        child_regs = ia64_task_regs(child);
+        child_regs = task_pt_regs(child);
         switch (regno / sizeof(int)) {
               case PT_EBX: return child_regs->r11;
               case PT_ECX: return child_regs->r9;
@@ -1510,7 +1510,7 @@
 {
         struct pt_regs *child_regs;
 
-        child_regs = ia64_task_regs(child);
+        child_regs = task_pt_regs(child);
         switch (regno / sizeof(int)) {
               case PT_EBX: child_regs->r11 = value; break;
               case PT_ECX: child_regs->r9 = value; break;
@@ -1626,7 +1626,7 @@
          * Stack frames start with 16-bytes of temp space
          */
         swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-        ptp = ia64_task_regs(tsk);
+        ptp = task_pt_regs(tsk);
         tos = (tsk->thread.fsr >> 11) & 7;
         for (i = 0; i < 8; i++)
                 put_fpreg(i, &save->st_space[i], ptp, swp, tos);
@@ -1659,7 +1659,7 @@
          * Stack frames start with 16-bytes of temp space
          */
         swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-        ptp = ia64_task_regs(tsk);
+        ptp = task_pt_regs(tsk);
         tos = (tsk->thread.fsr >> 11) & 7;
         for (i = 0; i < 8; i++)
                 get_fpreg(i, &save->st_space[i], ptp, swp, tos);
@@ -1690,7 +1690,7 @@
          * Stack frames start with 16-bytes of temp space
          */
         swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-        ptp = ia64_task_regs(tsk);
+        ptp = task_pt_regs(tsk);
         tos = (tsk->thread.fsr >> 11) & 7;
         for (i = 0; i < 8; i++)
                 put_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);
@@ -1734,7 +1734,7 @@
          * Stack frames start with 16-bytes of temp space
          */
         swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-        ptp = ia64_task_regs(tsk);
+        ptp = task_pt_regs(tsk);
         tos = (tsk->thread.fsr >> 11) & 7;
         for (i = 0; i < 8; i++)
                 get_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);
+16 -16
arch/ia64/kernel/perfmon.c
@@ -1710,7 +1710,7 @@
 pfm_syswide_force_stop(void *info)
 {
         pfm_context_t *ctx = (pfm_context_t *)info;
-        struct pt_regs *regs = ia64_task_regs(current);
+        struct pt_regs *regs = task_pt_regs(current);
         struct task_struct *owner;
         unsigned long flags;
         int ret;
@@ -1815,7 +1815,7 @@
         is_system = ctx->ctx_fl_system;
 
         task = PFM_CTX_TASK(ctx);
-        regs = ia64_task_regs(task);
+        regs = task_pt_regs(task);
 
         DPRINT(("ctx_state=%d is_current=%d\n",
                 state,
@@ -1945,7 +1945,7 @@
         is_system = ctx->ctx_fl_system;
 
         task = PFM_CTX_TASK(ctx);
-        regs = ia64_task_regs(task);
+        regs = task_pt_regs(task);
 
         DPRINT(("ctx_state=%d is_current=%d\n",
                 state,
@@ -4052,7 +4052,7 @@
                  */
                 ia64_psr(regs)->up = 0;
         } else {
-                tregs = ia64_task_regs(task);
+                tregs = task_pt_regs(task);
 
                 /*
                  * stop monitoring at the user level
@@ -4134,7 +4134,7 @@
                 ia64_psr(regs)->up = 1;
 
         } else {
-                tregs = ia64_task_regs(ctx->ctx_task);
+                tregs = task_pt_regs(ctx->ctx_task);
 
                 /*
                  * start monitoring at the kernel level the next
@@ -4404,7 +4404,7 @@
         /*
          * when not current, task MUST be stopped, so this is safe
          */
-        regs = ia64_task_regs(task);
+        regs = task_pt_regs(task);
 
         /* force a full reload */
         ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
@@ -4530,7 +4530,7 @@
                 /*
                  * per-task mode
                  */
-                tregs = task == current ? regs : ia64_task_regs(task);
+                tregs = task == current ? regs : task_pt_regs(task);
 
                 if (task == current) {
                         /*
@@ -4593,7 +4593,7 @@
 {
         pfm_context_t *ctx;
         unsigned long flags;
-        struct pt_regs *regs = ia64_task_regs(task);
+        struct pt_regs *regs = task_pt_regs(task);
         int ret, state;
         int free_ok = 0;
 
@@ -4926,7 +4926,7 @@
         if (unlikely(ret)) goto abort_locked;
 
 skip_fd:
-        ret = (*func)(ctx, args_k, count, ia64_task_regs(current));
+        ret = (*func)(ctx, args_k, count, task_pt_regs(current));
 
         call_made = 1;
 
@@ -5050,7 +5050,7 @@
 
         pfm_clear_task_notify();
 
-        regs = ia64_task_regs(current);
+        regs = task_pt_regs(current);
 
         /*
          * extract reason for being here and clear
@@ -5794,7 +5794,7 @@
          * on every CPU, so we can rely on the pid to identify the idle task.
          */
         if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
-                regs = ia64_task_regs(task);
+                regs = task_pt_regs(task);
                 ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
                 return;
         }
@@ -5877,7 +5877,7 @@
         flags = pfm_protect_ctx_ctxsw(ctx);
 
         if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
-                struct pt_regs *regs = ia64_task_regs(task);
+                struct pt_regs *regs = task_pt_regs(task);
 
                 pfm_clear_psr_up();
 
@@ -6077,7 +6077,7 @@
         BUG_ON(psr & IA64_PSR_I);
 
         if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
-                struct pt_regs *regs = ia64_task_regs(task);
+                struct pt_regs *regs = task_pt_regs(task);
 
                 BUG_ON(ctx->ctx_smpl_hdr);
 
@@ -6446,7 +6446,7 @@
 {
         struct pt_regs *regs;
 
-        regs = ia64_task_regs(current);
+        regs = task_pt_regs(current);
 
         DPRINT(("called\n"));
 
@@ -6472,7 +6472,7 @@
 {
         struct pt_regs *regs;
 
-        regs = ia64_task_regs(current);
+        regs = task_pt_regs(current);
 
         DPRINT(("called\n"));
 
@@ -6754,7 +6754,7 @@
         local_irq_save(flags);
 
         this_cpu = smp_processor_id();
-        regs = ia64_task_regs(current);
+        regs = task_pt_regs(current);
         info = PFM_CPUINFO_GET();
         dcr = ia64_getreg(_IA64_REG_CR_DCR);
 
+6 -6
arch/ia64/kernel/process.c
@@ -328,7 +328,7 @@
 #endif
 
 #ifdef CONFIG_IA32_SUPPORT
-        if (IS_IA32_PROCESS(ia64_task_regs(task)))
+        if (IS_IA32_PROCESS(task_pt_regs(task)))
                 ia32_save_state(task);
 #endif
 }
@@ -353,7 +353,7 @@
 #endif
 
 #ifdef CONFIG_IA32_SUPPORT
-        if (IS_IA32_PROCESS(ia64_task_regs(task)))
+        if (IS_IA32_PROCESS(task_pt_regs(task)))
                 ia32_load_state(task);
 #endif
 }
@@ -488,7 +488,7 @@
          * If we're cloning an IA32 task then save the IA32 extra
          * state from the current task to the new task
          */
-        if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+        if (IS_IA32_PROCESS(task_pt_regs(current))) {
                 ia32_save_state(p);
                 if (clone_flags & CLONE_SETTLS)
                         retval = ia32_clone_tls(p, child_ptregs);
@@ -701,7 +701,7 @@
 kernel_thread_helper (int (*fn)(void *), void *arg)
 {
 #ifdef CONFIG_IA32_SUPPORT
-        if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+        if (IS_IA32_PROCESS(task_pt_regs(current))) {
                 /* A kernel thread is always a 64-bit process. */
                 current->thread.map_base = DEFAULT_MAP_BASE;
                 current->thread.task_size = DEFAULT_TASK_SIZE;
@@ -722,7 +722,7 @@
         current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
         ia64_drop_fpu(current);
 #ifdef CONFIG_IA32_SUPPORT
-        if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+        if (IS_IA32_PROCESS(task_pt_regs(current))) {
                 ia32_drop_partial_page_list(current);
                 current->thread.task_size = IA32_PAGE_OFFSET;
                 set_fs(USER_DS);
@@ -755,7 +755,7 @@
         if (current->thread.flags & IA64_THREAD_DBG_VALID)
                 pfm_release_debug_registers(current);
 #endif
-        if (IS_IA32_PROCESS(ia64_task_regs(current)))
+        if (IS_IA32_PROCESS(task_pt_regs(current)))
                 ia32_drop_partial_page_list(current);
 }
 
+12 -12
arch/ia64/kernel/ptrace.c
@@ -254,7 +254,7 @@
         long num_regs, nbits;
         struct pt_regs *pt;
 
-        pt = ia64_task_regs(task);
+        pt = task_pt_regs(task);
         kbsp = (unsigned long *) sw->ar_bspstore;
         ubspstore = (unsigned long *) pt->ar_bspstore;
 
@@ -314,7 +314,7 @@
         struct pt_regs *pt;
         unsigned long cfm, *urbs_kargs;
 
-        pt = ia64_task_regs(task);
+        pt = task_pt_regs(task);
         kbsp = (unsigned long *) sw->ar_bspstore;
         ubspstore = (unsigned long *) pt->ar_bspstore;
 
@@ -407,7 +407,7 @@
 
         urbs_end = (long *) user_rbs_end;
         laddr = (unsigned long *) addr;
-        child_regs = ia64_task_regs(child);
+        child_regs = task_pt_regs(child);
         bspstore = (unsigned long *) child_regs->ar_bspstore;
         krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
         if (on_kernel_rbs(addr, (unsigned long) bspstore,
@@ -467,7 +467,7 @@
         struct pt_regs *child_regs;
 
         laddr = (unsigned long *) addr;
-        child_regs = ia64_task_regs(child);
+        child_regs = task_pt_regs(child);
         bspstore = (unsigned long *) child_regs->ar_bspstore;
         krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
         if (on_kernel_rbs(addr, (unsigned long) bspstore,
@@ -567,7 +567,7 @@
                  */
                 return 0;
 
-        thread_regs = ia64_task_regs(thread);
+        thread_regs = task_pt_regs(thread);
         thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
         if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
                 return 0;
@@ -627,7 +627,7 @@
 inline void
 ia64_flush_fph (struct task_struct *task)
 {
-        struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
+        struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
 
         /*
          * Prevent migrating this task while
@@ -653,7 +653,7 @@
 void
 ia64_sync_fph (struct task_struct *task)
 {
-        struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
+        struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
 
         ia64_flush_fph(task);
         if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
@@ -794,7 +794,7 @@
                            + offsetof(struct pt_regs, reg)))
 
 
-        pt = ia64_task_regs(child);
+        pt = task_pt_regs(child);
         sw = (struct switch_stack *) (child->thread.ksp + 16);
 
         if ((addr & 0x7) != 0) {
@@ -1120,7 +1120,7 @@
         if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
                 return -EIO;
 
-        pt = ia64_task_regs(child);
+        pt = task_pt_regs(child);
         sw = (struct switch_stack *) (child->thread.ksp + 16);
         unw_init_from_blocked_task(&info, child);
         if (unw_unwind_to_user(&info) < 0) {
@@ -1265,7 +1265,7 @@
         if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
                 return -EIO;
 
-        pt = ia64_task_regs(child);
+        pt = task_pt_regs(child);
         sw = (struct switch_stack *) (child->thread.ksp + 16);
         unw_init_from_blocked_task(&info, child);
         if (unw_unwind_to_user(&info) < 0) {
@@ -1403,7 +1403,7 @@
 void
 ptrace_disable (struct task_struct *child)
 {
-        struct ia64_psr *child_psr = ia64_psr(ia64_task_regs(child));
+        struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
 
         /* make sure the single step/taken-branch trap bits are not set: */
         child_psr->ss = 0;
@@ -1456,7 +1456,7 @@
         if (ret < 0)
                 goto out_tsk;
 
-        pt = ia64_task_regs(child);
+        pt = task_pt_regs(child);
         sw = (struct switch_stack *) (child->thread.ksp + 16);
 
         switch (request) {
+1 -1
arch/ia64/kernel/setup.c
@@ -801,7 +801,7 @@
 #endif
 
         /* Clear the stack memory reserved for pt_regs: */
-        memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));
+        memset(task_pt_regs(current), 0, sizeof(struct pt_regs));
 
         ia64_set_kr(IA64_KR_FPU_OWNER, 0);
 
+1 -1
arch/ia64/kernel/sys_ia64.c
@@ -151,7 +151,7 @@
 asmlinkage long
 sys_pipe (void)
 {
-        struct pt_regs *regs = ia64_task_regs(current);
+        struct pt_regs *regs = task_pt_regs(current);
         int fd[2];
         int retval;
 
+1 -1
drivers/input/evdev.c
@@ -159,7 +159,7 @@
 #ifdef CONFIG_X86_64
 # define COMPAT_TEST is_compat_task()
 #elif defined(CONFIG_IA64)
-# define COMPAT_TEST IS_IA32_PROCESS(ia64_task_regs(current))
+# define COMPAT_TEST IS_IA32_PROCESS(task_pt_regs(current))
 #elif defined(CONFIG_S390)
 # define COMPAT_TEST test_thread_flag(TIF_31BIT)
 #elif defined(CONFIG_MIPS)
+1 -1
include/asm-ia64/compat.h
@@ -192,7 +192,7 @@
 static __inline__ void __user *
 compat_alloc_user_space (long len)
 {
-        struct pt_regs *regs = ia64_task_regs(current);
+        struct pt_regs *regs = task_pt_regs(current);
         return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
 }
 
+1 -1
include/asm-ia64/processor.h
@@ -352,7 +352,7 @@
 /* Return instruction pointer of blocked task TSK. */
 #define KSTK_EIP(tsk)                                        \
   ({                                                        \
-        struct pt_regs *_regs = ia64_task_regs(tsk);        \
+        struct pt_regs *_regs = task_pt_regs(tsk);          \
         _regs->cr_iip + ia64_psr(_regs)->ri;                \
   })
 
+2 -2
include/asm-ia64/ptrace.h
@@ -248,7 +248,7 @@
 })
 
 /* given a pointer to a task_struct, return the user's pt_regs */
-# define ia64_task_regs(t)              (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
+# define task_pt_regs(t)                (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
 # define ia64_psr(regs)                 ((struct ia64_psr *) &(regs)->cr_ipsr)
 # define user_mode(regs)                (((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
 # define user_stack(task,regs)          ((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
@@ -271,7 +271,7 @@
  *
  * On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
  */
-# define force_successful_syscall_return()      (ia64_task_regs(current)->r8 = 0)
+# define force_successful_syscall_return()      (task_pt_regs(current)->r8 = 0)
 
 struct task_struct;                     /* forward decl */
 struct unw_frame_info;                  /* forward decl */
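The hunk above holds the actual rename; everything else in the patch is mechanical. What the macro computes is plain pointer arithmetic: on ia64 the task_struct and its kernel stack share one IA64_STK_OFFSET-sized block, and the user-level pt_regs is saved at the very top of that block, which is the same invariant the user_stack() definition in this hunk checks. A minimal sketch of the computation in function form; sketch_task_pt_regs is a hypothetical name for illustration, not part of the patch:

        /* Sketch only: function-form equivalent of task_pt_regs(t).
         * Stepping one whole struct pt_regs back from the end of the
         * IA64_STK_OFFSET-sized task/stack block yields the address of
         * the saved user-level register frame. */
        static inline struct pt_regs *sketch_task_pt_regs(struct task_struct *t)
        {
                char *stack_top = (char *) t + IA64_STK_OFFSET;
                return ((struct pt_regs *) stack_top) - 1;
        }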
+4 -4
include/asm-ia64/system.h
@@ -219,14 +219,14 @@
 
 #define IA64_HAS_EXTRA_STATE(t)                                                 \
         ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)       \
-         || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
+         || IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE())
 
 #define __switch_to(prev,next,last) do {                                        \
         if (IA64_HAS_EXTRA_STATE(prev))                                         \
                 ia64_save_extra(prev);                                          \
         if (IA64_HAS_EXTRA_STATE(next))                                         \
                 ia64_load_extra(next);                                          \
-        ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);   \
+        ia64_psr(task_pt_regs(next))->dfh = !ia64_is_local_fpu_owner(next);     \
         (last) = ia64_switch_to((next));                                        \
 } while (0)
 
@@ -238,8 +238,8 @@
  * the latest fph state from another CPU.  In other words: eager save, lazy restore.
  */
 # define switch_to(prev,next,last) do {                                         \
-        if (ia64_psr(ia64_task_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) { \
-                ia64_psr(ia64_task_regs(prev))->mfh = 0;                        \
+        if (ia64_psr(task_pt_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) { \
+                ia64_psr(task_pt_regs(prev))->mfh = 0;                          \
                 (prev)->thread.flags |= IA64_THREAD_FPH_VALID;                  \
                 __ia64_save_fpu((prev)->thread.fph);                            \
         }                                                                       \