Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mattst88/alpha

Pull alpha updates from Matt Turner:
"Mostly small janitorial fixes but there's also more important ones: a
patch to fix loading large modules from Edward Humes, and some fixes
from Al Viro"

[ The fixes from Al mostly came in separately through Al's trees too and
are now duplicated.. - Linus ]

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mattst88/alpha:
alpha: in_irq() cleanup
alpha: lazy FPU switching
alpha/boot/misc: trim unused declarations
alpha/boot/tools/objstrip: fix the check for ELF header
alpha/boot: fix the breakage from -isystem series...
alpha: fix FEN fault handling
alpha: Avoid comma separated statements
alpha: fixed a typo in core_cia.c
alpha: remove unused __SLOW_DOWN_IO and SLOW_DOWN_IO definitions
alpha: update config files
alpha: fix R_ALPHA_LITERAL reloc for large modules
alpha: Add some spaces to ensure format specification
alpha: replace NR_SYSCALLS by NR_syscalls
alpha: Remove redundant local asm header redirections
alpha: Implement "current_stack_pointer"
alpha: remove redundant err variable
alpha: osf_sys: reduce kernel log spamming on invalid osf_mount call typenr

+217 -154
+1
arch/alpha/Kconfig
··· 3 3 bool 4 4 default y 5 5 select ARCH_32BIT_USTAT_F_TINODE 6 + select ARCH_HAS_CURRENT_STACK_POINTER 6 7 select ARCH_MIGHT_HAVE_PC_PARPORT 7 8 select ARCH_MIGHT_HAVE_PC_SERIO 8 9 select ARCH_NO_PREEMPT
+6 -6
arch/alpha/boot/stdio.c
··· 42 42 43 43 static char * number(char * str, unsigned long long num, int base, int size, int precision, int type) 44 44 { 45 - char c,sign,tmp[66]; 46 - const char *digits="0123456789abcdefghijklmnopqrstuvwxyz"; 45 + char c, sign, tmp[66]; 46 + const char *digits = "0123456789abcdefghijklmnopqrstuvwxyz"; 47 47 int i; 48 48 49 49 if (type & LARGE) ··· 83 83 precision = i; 84 84 size -= precision; 85 85 if (!(type&(ZEROPAD+LEFT))) 86 - while(size-->0) 86 + while (size-- > 0) 87 87 *str++ = ' '; 88 88 if (sign) 89 89 *str++ = sign; 90 90 if (type & SPECIAL) { 91 91 if (base==8) 92 92 *str++ = '0'; 93 - else if (base==16) { 93 + else if (base == 16) { 94 94 *str++ = '0'; 95 95 *str++ = digits[33]; 96 96 } ··· 125 125 /* 'z' changed to 'Z' --davidm 1/25/99 */ 126 126 127 127 128 - for (str=buf ; *fmt ; ++fmt) { 128 + for (str = buf ; *fmt ; ++fmt) { 129 129 if (*fmt != '%') { 130 130 *str++ = *fmt; 131 131 continue; ··· 296 296 int i; 297 297 298 298 va_start(args, fmt); 299 - i=vsprintf(buf,fmt,args); 299 + i = vsprintf(buf, fmt, args); 300 300 va_end(args); 301 301 return i; 302 302 }
-2
arch/alpha/configs/defconfig
··· 39 39 CONFIG_ATA_GENERIC=y 40 40 CONFIG_NETDEVICES=y 41 41 CONFIG_DUMMY=m 42 - CONFIG_NET_ETHERNET=y 43 42 CONFIG_NET_VENDOR_3COM=y 44 43 CONFIG_VORTEX=y 45 44 CONFIG_NET_TULIP=y 46 45 CONFIG_DE2104X=m 47 46 CONFIG_TULIP=y 48 47 CONFIG_TULIP_MMIO=y 49 - CONFIG_NET_PCI=y 50 48 CONFIG_YELLOWFIN=y 51 49 CONFIG_SERIAL_8250=y 52 50 CONFIG_SERIAL_8250_CONSOLE=y
+1
arch/alpha/include/asm/Kbuild
··· 2 2 3 3 generated-y += syscall_table.h 4 4 generic-y += agp.h 5 + generic-y += asm-offsets.h 5 6 generic-y += export.h 6 7 generic-y += kvm_para.h 7 8 generic-y += mcs_spinlock.h
-1
arch/alpha/include/asm/asm-offsets.h
··· 1 - #include <generated/asm-offsets.h>
-1
arch/alpha/include/asm/div64.h
··· 1 - #include <asm-generic/div64.h>
+37 -24
arch/alpha/include/asm/fpu.h
··· 15 15 { 16 16 unsigned long tmp, ret; 17 17 18 + preempt_disable(); 19 + if (current_thread_info()->status & TS_SAVED_FP) { 20 + ret = current_thread_info()->fp[31]; 21 + } else { 18 22 #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) 19 - __asm__ __volatile__ ( 20 - "ftoit $f0,%0\n\t" 21 - "mf_fpcr $f0\n\t" 22 - "ftoit $f0,%1\n\t" 23 - "itoft %0,$f0" 24 - : "=r"(tmp), "=r"(ret)); 23 + __asm__ __volatile__ ( 24 + "ftoit $f0,%0\n\t" 25 + "mf_fpcr $f0\n\t" 26 + "ftoit $f0,%1\n\t" 27 + "itoft %0,$f0" 28 + : "=r"(tmp), "=r"(ret)); 25 29 #else 26 - __asm__ __volatile__ ( 27 - "stt $f0,%0\n\t" 28 - "mf_fpcr $f0\n\t" 29 - "stt $f0,%1\n\t" 30 - "ldt $f0,%0" 31 - : "=m"(tmp), "=m"(ret)); 30 + __asm__ __volatile__ ( 31 + "stt $f0,%0\n\t" 32 + "mf_fpcr $f0\n\t" 33 + "stt $f0,%1\n\t" 34 + "ldt $f0,%0" 35 + : "=m"(tmp), "=m"(ret)); 32 36 #endif 37 + } 38 + preempt_enable(); 33 39 34 40 return ret; 35 41 } ··· 45 39 { 46 40 unsigned long tmp; 47 41 42 + preempt_disable(); 43 + if (current_thread_info()->status & TS_SAVED_FP) { 44 + current_thread_info()->status |= TS_RESTORE_FP; 45 + current_thread_info()->fp[31] = val; 46 + } else { 48 47 #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) 49 - __asm__ __volatile__ ( 50 - "ftoit $f0,%0\n\t" 51 - "itoft %1,$f0\n\t" 52 - "mt_fpcr $f0\n\t" 53 - "itoft %0,$f0" 54 - : "=&r"(tmp) : "r"(val)); 48 + __asm__ __volatile__ ( 49 + "ftoit $f0,%0\n\t" 50 + "itoft %1,$f0\n\t" 51 + "mt_fpcr $f0\n\t" 52 + "itoft %0,$f0" 53 + : "=&r"(tmp) : "r"(val)); 55 54 #else 56 - __asm__ __volatile__ ( 57 - "stt $f0,%0\n\t" 58 - "ldt $f0,%1\n\t" 59 - "mt_fpcr $f0\n\t" 60 - "ldt $f0,%0" 61 - : "=m"(tmp) : "m"(val)); 55 + __asm__ __volatile__ ( 56 + "stt $f0,%0\n\t" 57 + "ldt $f0,%1\n\t" 58 + "mt_fpcr $f0\n\t" 59 + "ldt $f0,%0" 60 + : "=m"(tmp) : "m"(val)); 62 61 #endif 62 + } 63 + preempt_enable(); 63 64 } 64 65 65 66 static inline unsigned long
-4
arch/alpha/include/asm/io.h
··· 14 14 the implementation we have here matches that interface. */ 15 15 #include <asm-generic/iomap.h> 16 16 17 - /* We don't use IO slowdowns on the Alpha, but.. */ 18 - #define __SLOW_DOWN_IO do { } while (0) 19 - #define SLOW_DOWN_IO do { } while (0) 20 - 21 17 /* 22 18 * Virtual -> physical identity mapping starts at this offset 23 19 */
-1
arch/alpha/include/asm/irq_regs.h
··· 1 - #include <asm-generic/irq_regs.h>
-1
arch/alpha/include/asm/kdebug.h
··· 1 - #include <asm-generic/kdebug.h>
+18
arch/alpha/include/asm/thread_info.h
··· 26 26 int bpt_nsaved; 27 27 unsigned long bpt_addr[2]; /* breakpoint handling */ 28 28 unsigned int bpt_insn[2]; 29 + unsigned long fp[32]; 29 30 }; 30 31 31 32 /* ··· 41 40 /* How to get the thread information struct from C. */ 42 41 register struct thread_info *__current_thread_info __asm__("$8"); 43 42 #define current_thread_info() __current_thread_info 43 + 44 + register unsigned long *current_stack_pointer __asm__ ("$30"); 44 45 45 46 #endif /* __ASSEMBLY__ */ 46 47 ··· 84 81 #define TS_UAC_NOFIX 0x0002 /* ! flags as they match */ 85 82 #define TS_UAC_SIGBUS 0x0004 /* ! userspace part of 'osf_sysinfo' */ 86 83 84 + #define TS_SAVED_FP 0x0008 85 + #define TS_RESTORE_FP 0x0010 86 + 87 87 #define SET_UNALIGN_CTL(task,value) ({ \ 88 88 __u32 status = task_thread_info(task)->status & ~UAC_BITMASK; \ 89 89 if (value & PR_UNALIGN_NOPRINT) \ ··· 109 103 res |= 4; \ 110 104 put_user(res, (int __user *)(value)); \ 111 105 }) 106 + 107 + #ifndef __ASSEMBLY__ 108 + extern void __save_fpu(void); 109 + 110 + static inline void save_fpu(void) 111 + { 112 + if (!(current_thread_info()->status & TS_SAVED_FP)) { 113 + current_thread_info()->status |= TS_SAVED_FP; 114 + __save_fpu(); 115 + } 116 + } 117 + #endif 112 118 113 119 #endif /* __KERNEL__ */ 114 120 #endif /* _ALPHA_THREAD_INFO_H */
+1 -1
arch/alpha/include/asm/unistd.h
··· 4 4 5 5 #include <uapi/asm/unistd.h> 6 6 7 - #define NR_SYSCALLS __NR_syscalls 7 + #define NR_syscalls __NR_syscalls 8 8 9 9 #define __ARCH_WANT_NEW_STAT 10 10 #define __ARCH_WANT_OLD_READDIR
+2
arch/alpha/include/uapi/asm/ptrace.h
··· 64 64 unsigned long r14; 65 65 unsigned long r15; 66 66 unsigned long r26; 67 + #ifndef __KERNEL__ 67 68 unsigned long fp[32]; /* fp[31] is fpcr */ 69 + #endif 68 70 }; 69 71 70 72
+2
arch/alpha/kernel/asm-offsets.c
··· 17 17 DEFINE(TI_TASK, offsetof(struct thread_info, task)); 18 18 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 19 19 DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); 20 + DEFINE(TI_FP, offsetof(struct thread_info, fp)); 21 + DEFINE(TI_STATUS, offsetof(struct thread_info, status)); 20 22 BLANK(); 21 23 22 24 DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
+1 -1
arch/alpha/kernel/core_cia.c
··· 527 527 if (use_tbia_try2) { 528 528 alpha_mv.mv_pci_tbi = cia_pci_tbi_try2; 529 529 530 - /* Tags 0-3 must be disabled if we use this workaraund. */ 530 + /* Tags 0-3 must be disabled if we use this workaround. */ 531 531 wmb(); 532 532 *(vip)CIA_IOC_TB_TAGn(0) = 2; 533 533 *(vip)CIA_IOC_TB_TAGn(1) = 2;
+75 -73
arch/alpha/kernel/entry.S
··· 17 17 18 18 /* Stack offsets. */ 19 19 #define SP_OFF 184 20 - #define SWITCH_STACK_SIZE 320 20 + #define SWITCH_STACK_SIZE 64 21 21 22 22 .macro CFI_START_OSF_FRAME func 23 23 .align 4 ··· 159 159 .cfi_rel_offset $13, 32 160 160 .cfi_rel_offset $14, 40 161 161 .cfi_rel_offset $15, 48 162 - /* We don't really care about the FP registers for debugging. */ 163 162 .endm 164 163 165 164 .macro UNDO_SWITCH_STACK ··· 453 454 SAVE_ALL 454 455 lda $8, 0x3fff 455 456 bic $sp, $8, $8 456 - lda $4, NR_SYSCALLS($31) 457 + lda $4, NR_syscalls($31) 457 458 stq $16, SP_OFF+24($sp) 458 459 lda $5, sys_call_table 459 460 lda $27, sys_ni_syscall ··· 497 498 and $17, _TIF_WORK_MASK, $2 498 499 bne $2, work_pending 499 500 restore_all: 501 + ldl $2, TI_STATUS($8) 502 + and $2, TS_SAVED_FP | TS_RESTORE_FP, $3 503 + bne $3, restore_fpu 504 + restore_other: 500 505 .cfi_remember_state 501 506 RESTORE_ALL 502 507 call_pal PAL_rti ··· 509 506 .cfi_restore_state 510 507 lda $16, 7 511 508 call_pal PAL_swpipl 512 - br restore_all 509 + br restore_other 513 510 514 511 .align 3 515 512 $syscall_error: ··· 573 570 .type strace, @function 574 571 strace: 575 572 /* set up signal stack, call syscall_trace */ 573 + // NB: if anyone adds preemption, this block will need to be protected 574 + ldl $1, TI_STATUS($8) 575 + and $1, TS_SAVED_FP, $3 576 + or $1, TS_SAVED_FP, $2 577 + bne $3, 1f 578 + stl $2, TI_STATUS($8) 579 + bsr $26, __save_fpu 580 + 1: 576 581 DO_SWITCH_STACK 577 582 jsr $26, syscall_trace_enter /* returns the syscall number */ 578 583 UNDO_SWITCH_STACK ··· 594 583 ldq $21, 88($sp) 595 584 596 585 /* get the system call pointer.. 
*/ 597 - lda $1, NR_SYSCALLS($31) 586 + lda $1, NR_syscalls($31) 598 587 lda $2, sys_call_table 599 588 lda $27, sys_ni_syscall 600 589 cmpult $0, $1, $1 ··· 660 649 stq $14, 40($sp) 661 650 stq $15, 48($sp) 662 651 stq $26, 56($sp) 663 - stt $f0, 64($sp) 664 - stt $f1, 72($sp) 665 - stt $f2, 80($sp) 666 - stt $f3, 88($sp) 667 - stt $f4, 96($sp) 668 - stt $f5, 104($sp) 669 - stt $f6, 112($sp) 670 - stt $f7, 120($sp) 671 - stt $f8, 128($sp) 672 - stt $f9, 136($sp) 673 - stt $f10, 144($sp) 674 - stt $f11, 152($sp) 675 - stt $f12, 160($sp) 676 - stt $f13, 168($sp) 677 - stt $f14, 176($sp) 678 - stt $f15, 184($sp) 679 - stt $f16, 192($sp) 680 - stt $f17, 200($sp) 681 - stt $f18, 208($sp) 682 - stt $f19, 216($sp) 683 - stt $f20, 224($sp) 684 - stt $f21, 232($sp) 685 - stt $f22, 240($sp) 686 - stt $f23, 248($sp) 687 - stt $f24, 256($sp) 688 - stt $f25, 264($sp) 689 - stt $f26, 272($sp) 690 - stt $f27, 280($sp) 691 - mf_fpcr $f0 # get fpcr 692 - stt $f28, 288($sp) 693 - stt $f29, 296($sp) 694 - stt $f30, 304($sp) 695 - stt $f0, 312($sp) # save fpcr in slot of $f31 696 - ldt $f0, 64($sp) # dont let "do_switch_stack" change fp state. 
697 652 ret $31, ($1), 1 698 653 .cfi_endproc 699 654 .size do_switch_stack, .-do_switch_stack ··· 678 701 ldq $14, 40($sp) 679 702 ldq $15, 48($sp) 680 703 ldq $26, 56($sp) 681 - ldt $f30, 312($sp) # get saved fpcr 682 - ldt $f0, 64($sp) 683 - ldt $f1, 72($sp) 684 - ldt $f2, 80($sp) 685 - ldt $f3, 88($sp) 686 - mt_fpcr $f30 # install saved fpcr 687 - ldt $f4, 96($sp) 688 - ldt $f5, 104($sp) 689 - ldt $f6, 112($sp) 690 - ldt $f7, 120($sp) 691 - ldt $f8, 128($sp) 692 - ldt $f9, 136($sp) 693 - ldt $f10, 144($sp) 694 - ldt $f11, 152($sp) 695 - ldt $f12, 160($sp) 696 - ldt $f13, 168($sp) 697 - ldt $f14, 176($sp) 698 - ldt $f15, 184($sp) 699 - ldt $f16, 192($sp) 700 - ldt $f17, 200($sp) 701 - ldt $f18, 208($sp) 702 - ldt $f19, 216($sp) 703 - ldt $f20, 224($sp) 704 - ldt $f21, 232($sp) 705 - ldt $f22, 240($sp) 706 - ldt $f23, 248($sp) 707 - ldt $f24, 256($sp) 708 - ldt $f25, 264($sp) 709 - ldt $f26, 272($sp) 710 - ldt $f27, 280($sp) 711 - ldt $f28, 288($sp) 712 - ldt $f29, 296($sp) 713 - ldt $f30, 304($sp) 714 704 lda $sp, SWITCH_STACK_SIZE($sp) 715 705 ret $31, ($1), 1 716 706 .cfi_endproc 717 707 .size undo_switch_stack, .-undo_switch_stack 708 + 709 + #define FR(n) n * 8 + TI_FP($8) 710 + .align 4 711 + .globl __save_fpu 712 + .type __save_fpu, @function 713 + __save_fpu: 714 + #define V(n) stt $f##n, FR(n) 715 + V( 0); V( 1); V( 2); V( 3) 716 + V( 4); V( 5); V( 6); V( 7) 717 + V( 8); V( 9); V(10); V(11) 718 + V(12); V(13); V(14); V(15) 719 + V(16); V(17); V(18); V(19) 720 + V(20); V(21); V(22); V(23) 721 + V(24); V(25); V(26); V(27) 722 + mf_fpcr $f0 # get fpcr 723 + V(28); V(29); V(30) 724 + stt $f0, FR(31) # save fpcr in slot of $f31 725 + ldt $f0, FR(0) # don't let "__save_fpu" change fp state. 
726 + ret 727 + #undef V 728 + .size __save_fpu, .-__save_fpu 729 + 730 + .align 4 731 + restore_fpu: 732 + and $3, TS_RESTORE_FP, $3 733 + bic $2, TS_SAVED_FP | TS_RESTORE_FP, $2 734 + beq $3, 1f 735 + #define V(n) ldt $f##n, FR(n) 736 + ldt $f30, FR(31) # get saved fpcr 737 + V( 0); V( 1); V( 2); V( 3) 738 + mt_fpcr $f30 # install saved fpcr 739 + V( 4); V( 5); V( 6); V( 7) 740 + V( 8); V( 9); V(10); V(11) 741 + V(12); V(13); V(14); V(15) 742 + V(16); V(17); V(18); V(19) 743 + V(20); V(21); V(22); V(23) 744 + V(24); V(25); V(26); V(27) 745 + V(28); V(29); V(30) 746 + 1: stl $2, TI_STATUS($8) 747 + br restore_other 748 + #undef V 749 + 718 750 719 751 /* 720 752 * The meat of the context switch code. 721 753 */ 722 - 723 754 .align 4 724 755 .globl alpha_switch_to 725 756 .type alpha_switch_to, @function 726 757 .cfi_startproc 727 758 alpha_switch_to: 728 759 DO_SWITCH_STACK 760 + ldl $1, TI_STATUS($8) 761 + and $1, TS_RESTORE_FP, $3 762 + bne $3, 1f 763 + or $1, TS_RESTORE_FP | TS_SAVED_FP, $2 764 + and $1, TS_SAVED_FP, $3 765 + stl $2, TI_STATUS($8) 766 + bne $3, 1f 767 + bsr $26, __save_fpu 768 + 1: 729 769 call_pal PAL_swpctx 730 770 lda $8, 0x3fff 731 771 UNDO_SWITCH_STACK ··· 793 799 alpha_\name: 794 800 .prologue 0 795 801 bsr $1, do_switch_stack 802 + // NB: if anyone adds preemption, this block will need to be protected 803 + ldl $1, TI_STATUS($8) 804 + and $1, TS_SAVED_FP, $3 805 + or $1, TS_SAVED_FP, $2 806 + bne $3, 1f 807 + stl $2, TI_STATUS($8) 808 + bsr $26, __save_fpu 809 + 1: 796 810 jsr $26, sys_\name 797 811 ldq $26, 56($sp) 798 812 lda $sp, SWITCH_STACK_SIZE($sp)
+1 -3
arch/alpha/kernel/module.c
··· 146 146 base = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr; 147 147 symtab = (Elf64_Sym *)sechdrs[symindex].sh_addr; 148 148 149 - /* The small sections were sorted to the end of the segment. 150 - The following should definitely cover them. */ 151 - gp = (u64)me->core_layout.base + me->core_layout.size - 0x8000; 152 149 got = sechdrs[me->arch.gotsecindex].sh_addr; 150 + gp = got + 0x8000; 153 151 154 152 for (i = 0; i < n; i++) { 155 153 unsigned long r_sym = ELF64_R_SYM (rela[i].r_info);
+1 -1
arch/alpha/kernel/osf_sys.c
··· 522 522 break; 523 523 default: 524 524 retval = -EINVAL; 525 - printk("osf_mount(%ld, %x)\n", typenr, flag); 525 + printk_ratelimited("osf_mount(%ld, %x)\n", typenr, flag); 526 526 } 527 527 528 528 return retval;
+5 -3
arch/alpha/kernel/pci_iommu.c
··· 127 127 goto again; 128 128 } 129 129 130 - if (ptes[p+i]) 131 - p = ALIGN(p + i + 1, mask + 1), i = 0; 132 - else 130 + if (ptes[p+i]) { 131 + p = ALIGN(p + i + 1, mask + 1); 132 + i = 0; 133 + } else { 133 134 i = i + 1; 135 + } 134 136 } 135 137 136 138 if (i < n) {
+1 -5
arch/alpha/kernel/perf_event.c
··· 689 689 */ 690 690 static int alpha_pmu_event_init(struct perf_event *event) 691 691 { 692 - int err; 693 - 694 692 /* does not support taken branch sampling */ 695 693 if (has_branch_stack(event)) 696 694 return -EOPNOTSUPP; ··· 707 709 return -ENODEV; 708 710 709 711 /* Do the real initialisation work. */ 710 - err = __hw_perf_event_init(event); 711 - 712 - return err; 712 + return __hw_perf_event_init(event); 713 713 } 714 714 715 715 /*
+4 -3
arch/alpha/kernel/process.c
··· 133 133 #ifdef CONFIG_DUMMY_CONSOLE 134 134 /* If we've gotten here after SysRq-b, leave interrupt 135 135 context before taking over the console. */ 136 - if (in_irq()) 136 + if (in_hardirq()) 137 137 irq_exit(); 138 138 /* This has the effect of resetting the VGA video origin. */ 139 139 console_lock(); ··· 243 243 childstack = ((struct switch_stack *) childregs) - 1; 244 244 childti->pcb.ksp = (unsigned long) childstack; 245 245 childti->pcb.flags = 1; /* set FEN, clear everything else */ 246 + childti->status |= TS_SAVED_FP | TS_RESTORE_FP; 246 247 247 248 if (unlikely(args->fn)) { 248 249 /* kernel thread */ ··· 253 252 childstack->r9 = (unsigned long) args->fn; 254 253 childstack->r10 = (unsigned long) args->fn_arg; 255 254 childregs->hae = alpha_mv.hae_cache; 255 + memset(childti->fp, '\0', sizeof(childti->fp)); 256 256 childti->pcb.usp = 0; 257 257 return 0; 258 258 } ··· 336 334 337 335 int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu) 338 336 { 339 - struct switch_stack *sw = (struct switch_stack *)task_pt_regs(t) - 1; 340 - memcpy(fpu, sw->fp, 32 * 8); 337 + memcpy(fpu, task_thread_info(t)->fp, 32 * 8); 341 338 return 1; 342 339 } 343 340
+10 -8
arch/alpha/kernel/ptrace.c
··· 78 78 (PAGE_SIZE*2 - sizeof(struct pt_regs) - sizeof(struct switch_stack) \ 79 79 + offsetof(struct switch_stack, reg)) 80 80 81 + #define FP_REG(reg) (offsetof(struct thread_info, reg)) 82 + 81 83 static int regoff[] = { 82 84 PT_REG( r0), PT_REG( r1), PT_REG( r2), PT_REG( r3), 83 85 PT_REG( r4), PT_REG( r5), PT_REG( r6), PT_REG( r7), ··· 89 87 PT_REG( r20), PT_REG( r21), PT_REG( r22), PT_REG( r23), 90 88 PT_REG( r24), PT_REG( r25), PT_REG( r26), PT_REG( r27), 91 89 PT_REG( r28), PT_REG( gp), -1, -1, 92 - SW_REG(fp[ 0]), SW_REG(fp[ 1]), SW_REG(fp[ 2]), SW_REG(fp[ 3]), 93 - SW_REG(fp[ 4]), SW_REG(fp[ 5]), SW_REG(fp[ 6]), SW_REG(fp[ 7]), 94 - SW_REG(fp[ 8]), SW_REG(fp[ 9]), SW_REG(fp[10]), SW_REG(fp[11]), 95 - SW_REG(fp[12]), SW_REG(fp[13]), SW_REG(fp[14]), SW_REG(fp[15]), 96 - SW_REG(fp[16]), SW_REG(fp[17]), SW_REG(fp[18]), SW_REG(fp[19]), 97 - SW_REG(fp[20]), SW_REG(fp[21]), SW_REG(fp[22]), SW_REG(fp[23]), 98 - SW_REG(fp[24]), SW_REG(fp[25]), SW_REG(fp[26]), SW_REG(fp[27]), 99 - SW_REG(fp[28]), SW_REG(fp[29]), SW_REG(fp[30]), SW_REG(fp[31]), 90 + FP_REG(fp[ 0]), FP_REG(fp[ 1]), FP_REG(fp[ 2]), FP_REG(fp[ 3]), 91 + FP_REG(fp[ 4]), FP_REG(fp[ 5]), FP_REG(fp[ 6]), FP_REG(fp[ 7]), 92 + FP_REG(fp[ 8]), FP_REG(fp[ 9]), FP_REG(fp[10]), FP_REG(fp[11]), 93 + FP_REG(fp[12]), FP_REG(fp[13]), FP_REG(fp[14]), FP_REG(fp[15]), 94 + FP_REG(fp[16]), FP_REG(fp[17]), FP_REG(fp[18]), FP_REG(fp[19]), 95 + FP_REG(fp[20]), FP_REG(fp[21]), FP_REG(fp[22]), FP_REG(fp[23]), 96 + FP_REG(fp[24]), FP_REG(fp[25]), FP_REG(fp[26]), FP_REG(fp[27]), 97 + FP_REG(fp[28]), FP_REG(fp[29]), FP_REG(fp[30]), FP_REG(fp[31]), 100 98 PT_REG( pc) 101 99 }; 102 100
+12 -8
arch/alpha/kernel/signal.c
··· 150 150 { 151 151 unsigned long usp; 152 152 struct switch_stack *sw = (struct switch_stack *)regs - 1; 153 - long i, err = __get_user(regs->pc, &sc->sc_pc); 153 + long err = __get_user(regs->pc, &sc->sc_pc); 154 154 155 155 current->restart_block.fn = do_no_restart_syscall; 156 + current_thread_info()->status |= TS_SAVED_FP | TS_RESTORE_FP; 156 157 157 158 sw->r26 = (unsigned long) ret_from_sys_call; 158 159 ··· 190 189 err |= __get_user(usp, sc->sc_regs+30); 191 190 wrusp(usp); 192 191 193 - for (i = 0; i < 31; i++) 194 - err |= __get_user(sw->fp[i], sc->sc_fpregs+i); 195 - err |= __get_user(sw->fp[31], &sc->sc_fpcr); 192 + err |= __copy_from_user(current_thread_info()->fp, 193 + sc->sc_fpregs, 31 * 8); 194 + err |= __get_user(current_thread_info()->fp[31], &sc->sc_fpcr); 196 195 197 196 return err; 198 197 } ··· 273 272 unsigned long mask, unsigned long sp) 274 273 { 275 274 struct switch_stack *sw = (struct switch_stack *)regs - 1; 276 - long i, err = 0; 275 + long err = 0; 277 276 278 277 err |= __put_user(on_sig_stack((unsigned long)sc), &sc->sc_onstack); 279 278 err |= __put_user(mask, &sc->sc_mask); ··· 313 312 err |= __put_user(sp, sc->sc_regs+30); 314 313 err |= __put_user(0, sc->sc_regs+31); 315 314 316 - for (i = 0; i < 31; i++) 317 - err |= __put_user(sw->fp[i], sc->sc_fpregs+i); 315 + err |= __copy_to_user(sc->sc_fpregs, 316 + current_thread_info()->fp, 31 * 8); 318 317 err |= __put_user(0, sc->sc_fpregs+31); 319 - err |= __put_user(sw->fp[31], &sc->sc_fpcr); 318 + err |= __put_user(current_thread_info()->fp[31], &sc->sc_fpcr); 320 319 321 320 err |= __put_user(regs->trap_a0, &sc->sc_traparg_a0); 322 321 err |= __put_user(regs->trap_a1, &sc->sc_traparg_a1); ··· 529 528 } else { 530 529 local_irq_enable(); 531 530 if (thread_flags & (_TIF_SIGPENDING|_TIF_NOTIFY_SIGNAL)) { 531 + preempt_disable(); 532 + save_fpu(); 533 + preempt_enable(); 532 534 do_signal(regs, r0, r19); 533 535 r0 = 0; 534 536 } else {
+37 -6
arch/alpha/lib/fpreg.c
··· 7 7 8 8 #include <linux/compiler.h> 9 9 #include <linux/export.h> 10 + #include <linux/preempt.h> 11 + #include <asm/thread_info.h> 10 12 11 13 #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) 12 14 #define STT(reg,val) asm volatile ("ftoit $f"#reg",%0" : "=r"(val)); ··· 21 19 { 22 20 unsigned long val; 23 21 24 - switch (reg) { 22 + if (unlikely(reg >= 32)) 23 + return 0; 24 + preempt_disable(); 25 + if (current_thread_info()->status & TS_SAVED_FP) 26 + val = current_thread_info()->fp[reg]; 27 + else switch (reg) { 25 28 case 0: STT( 0, val); break; 26 29 case 1: STT( 1, val); break; 27 30 case 2: STT( 2, val); break; ··· 59 52 case 29: STT(29, val); break; 60 53 case 30: STT(30, val); break; 61 54 case 31: STT(31, val); break; 62 - default: return 0; 63 55 } 56 + preempt_enable(); 64 57 return val; 65 58 } 66 59 EXPORT_SYMBOL(alpha_read_fp_reg); ··· 74 67 void 75 68 alpha_write_fp_reg (unsigned long reg, unsigned long val) 76 69 { 77 - switch (reg) { 70 + if (unlikely(reg >= 32)) 71 + return; 72 + 73 + preempt_disable(); 74 + if (current_thread_info()->status & TS_SAVED_FP) { 75 + current_thread_info()->status |= TS_RESTORE_FP; 76 + current_thread_info()->fp[reg] = val; 77 + } else switch (reg) { 78 78 case 0: LDT( 0, val); break; 79 79 case 1: LDT( 1, val); break; 80 80 case 2: LDT( 2, val); break; ··· 115 101 case 30: LDT(30, val); break; 116 102 case 31: LDT(31, val); break; 117 103 } 104 + preempt_enable(); 118 105 } 119 106 EXPORT_SYMBOL(alpha_write_fp_reg); 120 107 ··· 130 115 { 131 116 unsigned long val; 132 117 133 - switch (reg) { 118 + if (unlikely(reg >= 32)) 119 + return 0; 120 + 121 + preempt_disable(); 122 + if (current_thread_info()->status & TS_SAVED_FP) { 123 + LDT(0, current_thread_info()->fp[reg]); 124 + STS(0, val); 125 + } else switch (reg) { 134 126 case 0: STS( 0, val); break; 135 127 case 1: STS( 1, val); break; 136 128 case 2: STS( 2, val); break; ··· 170 148 case 29: STS(29, val); break; 171 149 case 30: STS(30, val); break; 
172 150 case 31: STS(31, val); break; 173 - default: return 0; 174 151 } 152 + preempt_enable(); 175 153 return val; 176 154 } 177 155 EXPORT_SYMBOL(alpha_read_fp_reg_s); ··· 185 163 void 186 164 alpha_write_fp_reg_s (unsigned long reg, unsigned long val) 187 165 { 188 - switch (reg) { 166 + if (unlikely(reg >= 32)) 167 + return; 168 + 169 + preempt_disable(); 170 + if (current_thread_info()->status & TS_SAVED_FP) { 171 + current_thread_info()->status |= TS_RESTORE_FP; 172 + LDS(0, val); 173 + STT(0, current_thread_info()->fp[reg]); 174 + } else switch (reg) { 189 175 case 0: LDS( 0, val); break; 190 176 case 1: LDS( 1, val); break; 191 177 case 2: LDS( 2, val); break; ··· 227 197 case 30: LDS(30, val); break; 228 198 case 31: LDS(31, val); break; 229 199 } 200 + preempt_enable(); 230 201 } 231 202 EXPORT_SYMBOL(alpha_write_fp_reg_s);
+1 -1
arch/alpha/lib/stacktrace.c
··· 92 92 { 93 93 instr * ret_pc; 94 94 instr * prologue = (instr *)stacktrace; 95 - register unsigned char * sp __asm__ ("$30"); 95 + unsigned char *sp = (unsigned char *)current_stack_pointer; 96 96 97 97 printk("\tstack trace:\n"); 98 98 do {
+1 -1
kernel/trace/trace.h
··· 25 25 #include "pid_list.h" 26 26 27 27 #ifdef CONFIG_FTRACE_SYSCALLS 28 - #include <asm/unistd.h> /* For NR_SYSCALLS */ 28 + #include <asm/unistd.h> /* For NR_syscalls */ 29 29 #include <asm/syscall.h> /* some archs define it here */ 30 30 #endif 31 31