@@ -1482,7 +1482,7 @@
 {
         struct pt_regs *child_regs;
 
-        child_regs = ia64_task_regs(child);
+        child_regs = task_pt_regs(child);
         switch (regno / sizeof(int)) {
         case PT_EBX: return child_regs->r11;
         case PT_ECX: return child_regs->r9;
@@ -1510,7 +1510,7 @@
 {
         struct pt_regs *child_regs;
 
-        child_regs = ia64_task_regs(child);
+        child_regs = task_pt_regs(child);
         switch (regno / sizeof(int)) {
         case PT_EBX: child_regs->r11 = value; break;
         case PT_ECX: child_regs->r9 = value; break;
@@ -1626,7 +1626,7 @@
          * Stack frames start with 16-bytes of temp space
          */
         swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-        ptp = ia64_task_regs(tsk);
+        ptp = task_pt_regs(tsk);
         tos = (tsk->thread.fsr >> 11) & 7;
         for (i = 0; i < 8; i++)
                 put_fpreg(i, &save->st_space[i], ptp, swp, tos);
@@ -1659,7 +1659,7 @@
          * Stack frames start with 16-bytes of temp space
          */
         swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-        ptp = ia64_task_regs(tsk);
+        ptp = task_pt_regs(tsk);
         tos = (tsk->thread.fsr >> 11) & 7;
         for (i = 0; i < 8; i++)
                 get_fpreg(i, &save->st_space[i], ptp, swp, tos);
@@ -1690,7 +1690,7 @@
          * Stack frames start with 16-bytes of temp space
          */
         swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-        ptp = ia64_task_regs(tsk);
+        ptp = task_pt_regs(tsk);
         tos = (tsk->thread.fsr >> 11) & 7;
         for (i = 0; i < 8; i++)
                 put_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);
@@ -1734,7 +1734,7 @@
          * Stack frames start with 16-bytes of temp space
          */
         swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-        ptp = ia64_task_regs(tsk);
+        ptp = task_pt_regs(tsk);
         tos = (tsk->thread.fsr >> 11) & 7;
         for (i = 0; i < 8; i++)
                 get_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);
arch/ia64/kernel/perfmon.c  (+16 -16)
@@ -1710,7 +1710,7 @@
 pfm_syswide_force_stop(void *info)
 {
         pfm_context_t *ctx = (pfm_context_t *)info;
-        struct pt_regs *regs = ia64_task_regs(current);
+        struct pt_regs *regs = task_pt_regs(current);
         struct task_struct *owner;
         unsigned long flags;
         int ret;
@@ -1815,7 +1815,7 @@
         is_system = ctx->ctx_fl_system;
 
         task = PFM_CTX_TASK(ctx);
-        regs = ia64_task_regs(task);
+        regs = task_pt_regs(task);
 
         DPRINT(("ctx_state=%d is_current=%d\n",
                 state,
@@ -1945,7 +1945,7 @@
         is_system = ctx->ctx_fl_system;
 
         task = PFM_CTX_TASK(ctx);
-        regs = ia64_task_regs(task);
+        regs = task_pt_regs(task);
 
         DPRINT(("ctx_state=%d is_current=%d\n",
                 state,
@@ -4052,7 +4052,7 @@
                  */
                 ia64_psr(regs)->up = 0;
         } else {
-                tregs = ia64_task_regs(task);
+                tregs = task_pt_regs(task);
 
                 /*
                  * stop monitoring at the user level
@@ -4134,7 +4134,7 @@
                 ia64_psr(regs)->up = 1;
 
         } else {
-                tregs = ia64_task_regs(ctx->ctx_task);
+                tregs = task_pt_regs(ctx->ctx_task);
 
                 /*
                  * start monitoring at the kernel level the next
@@ -4404,7 +4404,7 @@
         /*
          * when not current, task MUST be stopped, so this is safe
          */
-        regs = ia64_task_regs(task);
+        regs = task_pt_regs(task);
 
         /* force a full reload */
         ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
@@ -4530,7 +4530,7 @@
         /*
          * per-task mode
          */
-        tregs = task == current ? regs : ia64_task_regs(task);
+        tregs = task == current ? regs : task_pt_regs(task);
 
         if (task == current) {
                 /*
@@ -4593,7 +4593,7 @@
 {
         pfm_context_t *ctx;
         unsigned long flags;
-        struct pt_regs *regs = ia64_task_regs(task);
+        struct pt_regs *regs = task_pt_regs(task);
         int ret, state;
         int free_ok = 0;
 
@@ -4926,7 +4926,7 @@
         if (unlikely(ret)) goto abort_locked;
 
 skip_fd:
-        ret = (*func)(ctx, args_k, count, ia64_task_regs(current));
+        ret = (*func)(ctx, args_k, count, task_pt_regs(current));
 
         call_made = 1;
 
@@ -5050,7 +5050,7 @@
 
         pfm_clear_task_notify();
 
-        regs = ia64_task_regs(current);
+        regs = task_pt_regs(current);
 
         /*
          * extract reason for being here and clear
@@ -5794,7 +5794,7 @@
          * on every CPU, so we can rely on the pid to identify the idle task.
          */
         if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
-                regs = ia64_task_regs(task);
+                regs = task_pt_regs(task);
                 ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
                 return;
         }
@@ -5877,7 +5877,7 @@
         flags = pfm_protect_ctx_ctxsw(ctx);
 
         if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
-                struct pt_regs *regs = ia64_task_regs(task);
+                struct pt_regs *regs = task_pt_regs(task);
 
                 pfm_clear_psr_up();
 
@@ -6077,7 +6077,7 @@
         BUG_ON(psr & IA64_PSR_I);
 
         if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
-                struct pt_regs *regs = ia64_task_regs(task);
+                struct pt_regs *regs = task_pt_regs(task);
 
                 BUG_ON(ctx->ctx_smpl_hdr);
 
@@ -6446,7 +6446,7 @@
 {
         struct pt_regs *regs;
 
-        regs = ia64_task_regs(current);
+        regs = task_pt_regs(current);
 
         DPRINT(("called\n"));
 
@@ -6472,7 +6472,7 @@
 {
         struct pt_regs *regs;
 
-        regs = ia64_task_regs(current);
+        regs = task_pt_regs(current);
 
         DPRINT(("called\n"));
 
@@ -6754,7 +6754,7 @@
         local_irq_save(flags);
 
         this_cpu = smp_processor_id();
-        regs = ia64_task_regs(current);
+        regs = task_pt_regs(current);
         info = PFM_CPUINFO_GET();
         dcr = ia64_getreg(_IA64_REG_CR_DCR);
 
arch/ia64/kernel/process.c  (+6 -6)
@@ -328,7 +328,7 @@
 #endif
 
 #ifdef CONFIG_IA32_SUPPORT
-        if (IS_IA32_PROCESS(ia64_task_regs(task)))
+        if (IS_IA32_PROCESS(task_pt_regs(task)))
                 ia32_save_state(task);
 #endif
 }
@@ -353,7 +353,7 @@
 #endif
 
 #ifdef CONFIG_IA32_SUPPORT
-        if (IS_IA32_PROCESS(ia64_task_regs(task)))
+        if (IS_IA32_PROCESS(task_pt_regs(task)))
                 ia32_load_state(task);
 #endif
 }
@@ -488,7 +488,7 @@
          * If we're cloning an IA32 task then save the IA32 extra
          * state from the current task to the new task
          */
-        if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+        if (IS_IA32_PROCESS(task_pt_regs(current))) {
                 ia32_save_state(p);
                 if (clone_flags & CLONE_SETTLS)
                         retval = ia32_clone_tls(p, child_ptregs);
@@ -701,7 +701,7 @@
 kernel_thread_helper (int (*fn)(void *), void *arg)
 {
 #ifdef CONFIG_IA32_SUPPORT
-        if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+        if (IS_IA32_PROCESS(task_pt_regs(current))) {
                 /* A kernel thread is always a 64-bit process. */
                 current->thread.map_base = DEFAULT_MAP_BASE;
                 current->thread.task_size = DEFAULT_TASK_SIZE;
@@ -722,7 +722,7 @@
         current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
         ia64_drop_fpu(current);
 #ifdef CONFIG_IA32_SUPPORT
-        if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+        if (IS_IA32_PROCESS(task_pt_regs(current))) {
                 ia32_drop_partial_page_list(current);
                 current->thread.task_size = IA32_PAGE_OFFSET;
                 set_fs(USER_DS);
@@ -755,7 +755,7 @@
         if (current->thread.flags & IA64_THREAD_DBG_VALID)
                 pfm_release_debug_registers(current);
 #endif
-        if (IS_IA32_PROCESS(ia64_task_regs(current)))
+        if (IS_IA32_PROCESS(task_pt_regs(current)))
                 ia32_drop_partial_page_list(current);
 }
 
arch/ia64/kernel/ptrace.c  (+12 -12)
@@ -254,7 +254,7 @@
         long num_regs, nbits;
         struct pt_regs *pt;
 
-        pt = ia64_task_regs(task);
+        pt = task_pt_regs(task);
         kbsp = (unsigned long *) sw->ar_bspstore;
         ubspstore = (unsigned long *) pt->ar_bspstore;
 
@@ -314,7 +314,7 @@
         struct pt_regs *pt;
         unsigned long cfm, *urbs_kargs;
 
-        pt = ia64_task_regs(task);
+        pt = task_pt_regs(task);
         kbsp = (unsigned long *) sw->ar_bspstore;
         ubspstore = (unsigned long *) pt->ar_bspstore;
 
@@ -407,7 +407,7 @@
 
         urbs_end = (long *) user_rbs_end;
         laddr = (unsigned long *) addr;
-        child_regs = ia64_task_regs(child);
+        child_regs = task_pt_regs(child);
         bspstore = (unsigned long *) child_regs->ar_bspstore;
         krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
         if (on_kernel_rbs(addr, (unsigned long) bspstore,
@@ -467,7 +467,7 @@
         struct pt_regs *child_regs;
 
         laddr = (unsigned long *) addr;
-        child_regs = ia64_task_regs(child);
+        child_regs = task_pt_regs(child);
         bspstore = (unsigned long *) child_regs->ar_bspstore;
         krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
         if (on_kernel_rbs(addr, (unsigned long) bspstore,
@@ -567,7 +567,7 @@
          */
                 return 0;
 
-        thread_regs = ia64_task_regs(thread);
+        thread_regs = task_pt_regs(thread);
         thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
         if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
                 return 0;
@@ -627,7 +627,7 @@
 inline void
 ia64_flush_fph (struct task_struct *task)
 {
-        struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
+        struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
 
         /*
          * Prevent migrating this task while
@@ -653,7 +653,7 @@
 void
 ia64_sync_fph (struct task_struct *task)
 {
-        struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
+        struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
 
         ia64_flush_fph(task);
         if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
@@ -794,7 +794,7 @@
                                  + offsetof(struct pt_regs, reg)))
 
 
-        pt = ia64_task_regs(child);
+        pt = task_pt_regs(child);
         sw = (struct switch_stack *) (child->thread.ksp + 16);
 
         if ((addr & 0x7) != 0) {
@@ -1120,7 +1120,7 @@
         if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
                 return -EIO;
 
-        pt = ia64_task_regs(child);
+        pt = task_pt_regs(child);
         sw = (struct switch_stack *) (child->thread.ksp + 16);
         unw_init_from_blocked_task(&info, child);
         if (unw_unwind_to_user(&info) < 0) {
@@ -1265,7 +1265,7 @@
         if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
                 return -EIO;
 
-        pt = ia64_task_regs(child);
+        pt = task_pt_regs(child);
         sw = (struct switch_stack *) (child->thread.ksp + 16);
         unw_init_from_blocked_task(&info, child);
         if (unw_unwind_to_user(&info) < 0) {
@@ -1403,7 +1403,7 @@
 void
 ptrace_disable (struct task_struct *child)
 {
-        struct ia64_psr *child_psr = ia64_psr(ia64_task_regs(child));
+        struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
 
         /* make sure the single step/taken-branch trap bits are not set: */
         child_psr->ss = 0;
@@ -1456,7 +1456,7 @@
         if (ret < 0)
                 goto out_tsk;
 
-        pt = ia64_task_regs(child);
+        pt = task_pt_regs(child);
         sw = (struct switch_stack *) (child->thread.ksp + 16);
 
         switch (request) {
arch/ia64/kernel/setup.c  (+1 -1)
@@ -801,7 +801,7 @@
 #endif
 
         /* Clear the stack memory reserved for pt_regs: */
-        memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));
+        memset(task_pt_regs(current), 0, sizeof(struct pt_regs));
 
         ia64_set_kr(IA64_KR_FPU_OWNER, 0);
 
arch/ia64/kernel/sys_ia64.c  (+1 -1)
@@ -151,7 +151,7 @@
 asmlinkage long
 sys_pipe (void)
 {
-        struct pt_regs *regs = ia64_task_regs(current);
+        struct pt_regs *regs = task_pt_regs(current);
         int fd[2];
         int retval;
 
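
Every hunk above is the same mechanical substitution: the ia64-specific ia64_task_regs() accessor is replaced by the generic task_pt_regs() spelling, with unchanged semantics, so each caller still gets the struct pt_regs * saved at the top of the task's kernel stack. As a hedged sketch of what the renamed accessor looks like on ia64 (an assumption, since the header side of the rename is not part of the hunks shown here; IA64_STK_OFFSET is the usual ia64 kernel-stack-size constant):

/*
 * Sketch only, not taken from these hunks: on ia64 the register frame sits
 * just below the end of the task's stack allocation, IA64_STK_OFFSET bytes
 * above the start of the task_struct, so the accessor is a pure pointer
 * computation with no memory access of its own.
 */
#define task_pt_regs(t) \
        (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)

The rename carries no behavioural change; it simply lets common code refer to one task_pt_regs() name instead of a per-architecture spelling.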