
parisc: Implement __get/put_kernel_nofault()

Remove CONFIG_SET_FS from parisc. This requires adding
__get_kernel_nofault() and __put_kernel_nofault(), defining
HAVE_GET_KERNEL_NOFAULT, and removing set_fs(), get_fs(), load_sr2(),
thread_info->addr_limit, KERNEL_DS and USER_DS.

A nice side effect of this patch is that userspace can now be accessed
directly via %sr3, without the need for a temporary %sr2 which was
either copied from %sr3 (for user space) or set to zero (for kernel
space).

Signed-off-by: Helge Deller <deller@gmx.de>
Suggested-by: Arnd Bergmann <arnd@kernel.org>
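
For context, this is how the new helpers are consumed. The wrapper below
is a hypothetical example, but the calling pattern (err_label is a goto
target taken when the probed access faults) matches the generic
copy_from_kernel_nofault() code in mm/maccess.c:

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    /* Hypothetical helper: read a u32 from a kernel address that may be
     * unmapped. The access goes through %sr0 and branches to Efault
     * instead of oopsing if it raises a fault. */
    static long probe_kernel_u32(u32 *dst, const u32 *src)
    {
    	__get_kernel_nofault(dst, src, u32, Efault);
    	return 0;
    Efault:
    	return -EFAULT;
    }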

6 files changed, 62 insertions(+), 86 deletions(-)

arch/parisc/Kconfig (-1)
···
 	select HAVE_KPROBES_ON_FTRACE
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS
 	select HAVE_SOFTIRQ_ON_OWN_STACK if IRQSTACKS
-	select SET_FS
 	select TRACE_IRQFLAGS_SUPPORT
 
 	help
arch/parisc/include/asm/processor.h (-4)
···
 
 #define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF)
 
-typedef struct {
-	int seg;
-} mm_segment_t;
-
 #define ARCH_MIN_TASKALIGN	8
 
 struct thread_struct {
arch/parisc/include/asm/thread_info.h (-2)
···
 struct thread_info {
 	struct task_struct *task;	/* main task structure */
 	unsigned long flags;		/* thread_info flags (see TIF_*) */
-	mm_segment_t addr_limit;	/* user-level address space limit */
 	__u32 cpu;			/* current CPU */
 	int preempt_count;		/* 0=premptable, <0=BUG; will also serve as bh-counter */
 };
···
 	.task		= &tsk,			\
 	.flags		= 0,			\
 	.cpu		= 0,			\
-	.addr_limit	= KERNEL_DS,		\
 	.preempt_count	= INIT_PREEMPT_COUNT,	\
 }
 
arch/parisc/include/asm/uaccess.h (+61 -61)
···
 #include <linux/bug.h>
 #include <linux/string.h>
 
-#define KERNEL_DS	((mm_segment_t){0})
-#define USER_DS		((mm_segment_t){1})
-
-#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
-
-#define get_fs()	(current_thread_info()->addr_limit)
-#define set_fs(x)	(current_thread_info()->addr_limit = (x))
-
 /*
  * Note that since kernel addresses are in a separate address space on
  * parisc, we don't need to do anything for access_ok().
···
 #define get_user __get_user
 
 #if !defined(CONFIG_64BIT)
-#define LDD_USER(val, ptr)	__get_user_asm64(val, ptr)
-#define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
+#define LDD_USER(sr, val, ptr)	__get_user_asm64(sr, val, ptr)
+#define STD_USER(sr, x, ptr)	__put_user_asm64(sr, x, ptr)
 #else
-#define LDD_USER(val, ptr)	__get_user_asm(val, "ldd", ptr)
-#define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
+#define LDD_USER(sr, val, ptr)	__get_user_asm(sr, val, "ldd", ptr)
+#define STD_USER(sr, x, ptr)	__put_user_asm(sr, "std", x, ptr)
 #endif
 
 /*
···
 #define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
 	ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
 
-/*
- * load_sr2() preloads the space register %%sr2 - based on the value of
- * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which
- * is 0), or with the current value of %%sr3 to access user space (USER_DS)
- * memory. The following __get_user_asm() and __put_user_asm() functions have
- * %%sr2 hard-coded to access the requested memory.
- */
-#define load_sr2() \
-	__asm__(" or,=  %0,%%r0,%%r0\n\t"	\
-		" mfsp %%sr3,%0\n\t"		\
-		" mtsp %0,%%sr2\n\t"		\
-		: : "r"(get_fs()) : )
-
-#define __get_user_internal(val, ptr)			\
+#define __get_user_internal(sr, val, ptr)		\
 ({							\
 	register long __gu_err __asm__ ("r8") = 0;	\
 							\
 	switch (sizeof(*(ptr))) {			\
-	case 1: __get_user_asm(val, "ldb", ptr); break;	\
-	case 2: __get_user_asm(val, "ldh", ptr); break;	\
-	case 4: __get_user_asm(val, "ldw", ptr); break;	\
-	case 8: LDD_USER(val, ptr); break;		\
+	case 1: __get_user_asm(sr, val, "ldb", ptr); break; \
+	case 2: __get_user_asm(sr, val, "ldh", ptr); break; \
+	case 4: __get_user_asm(sr, val, "ldw", ptr); break; \
+	case 8: LDD_USER(sr, val, ptr); break;		\
 	default: BUILD_BUG();				\
 	}						\
 							\
···
 
 #define __get_user(val, ptr)				\
 ({							\
-	load_sr2();					\
-	__get_user_internal(val, ptr);			\
+	__get_user_internal("%%sr3,", val, ptr);	\
 })
 
-#define __get_user_asm(val, ldx, ptr)			\
+#define __get_user_asm(sr, val, ldx, ptr)		\
 {							\
 	register long __gu_val;				\
 							\
-	__asm__("1: " ldx " 0(%%sr2,%2),%0\n"		\
+	__asm__("1: " ldx " 0(" sr "%2),%0\n"		\
 		"9:\n"					\
 		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
 		: "=r"(__gu_val), "=r"(__gu_err)	\
···
 	(val) = (__force __typeof__(*(ptr))) __gu_val;	\
 }
 
+#define HAVE_GET_KERNEL_NOFAULT
+#define __get_kernel_nofault(dst, src, type, err_label)	\
+{								\
+	type __z;						\
+	long __err;						\
+	__err = __get_user_internal("%%sr0,", __z, (type *)(src)); \
+	if (unlikely(__err))					\
+		goto err_label;					\
+	else							\
+		*(type *)(dst) = __z;				\
+}
+
+
 #if !defined(CONFIG_64BIT)
 
-#define __get_user_asm64(val, ptr)			\
+#define __get_user_asm64(sr, val, ptr)			\
 {							\
 	union {						\
 		unsigned long long	l;		\
···
 	} __gu_tmp;					\
 							\
 	__asm__("   copy %%r0,%R0\n"			\
-		"1: ldw 0(%%sr2,%2),%0\n"		\
-		"2: ldw 4(%%sr2,%2),%R0\n"		\
+		"1: ldw 0(" sr "%2),%0\n"		\
+		"2: ldw 4(" sr "%2),%R0\n"		\
 		"9:\n"					\
 		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
 		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
···
 #endif /* !defined(CONFIG_64BIT) */
 
 
-#define __put_user_internal(x, ptr)				\
+#define __put_user_internal(sr, x, ptr)				\
 ({								\
 	register long __pu_err __asm__ ("r8") = 0;		\
 	__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);	\
 								\
 	switch (sizeof(*(ptr))) {				\
-	case 1: __put_user_asm("stb", __x, ptr); break;		\
-	case 2: __put_user_asm("sth", __x, ptr); break;		\
-	case 4: __put_user_asm("stw", __x, ptr); break;		\
-	case 8: STD_USER(__x, ptr); break;			\
+	case 1: __put_user_asm(sr, "stb", __x, ptr); break;	\
+	case 2: __put_user_asm(sr, "sth", __x, ptr); break;	\
+	case 4: __put_user_asm(sr, "stw", __x, ptr); break;	\
+	case 8: STD_USER(sr, __x, ptr); break;			\
 	default: BUILD_BUG();					\
 	}							\
 								\
···
 
 #define __put_user(x, ptr)					\
 ({								\
-	load_sr2();						\
-	__put_user_internal(x, ptr);				\
+	__put_user_internal("%%sr3,", x, ptr);			\
 })
 
+#define __put_kernel_nofault(dst, src, type, err_label)		\
+{								\
+	type __z = *(type *)(src);				\
+	long __err;						\
+	__err = __put_user_internal("%%sr0,", __z, (type *)(dst)); \
+	if (unlikely(__err))					\
+		goto err_label;					\
+}
+
+
 
 /*
···
  * r8 is already listed as err.
  */
 
-#define __put_user_asm(stx, x, ptr)				\
-	__asm__ __volatile__ (					\
-		"1: " stx " %2,0(%%sr2,%1)\n"			\
-		"9:\n"						\
-		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)		\
-		: "=r"(__pu_err)				\
+#define __put_user_asm(sr, stx, x, ptr)				\
+	__asm__ __volatile__ (					\
+		"1: " stx " %2,0(" sr "%1)\n"			\
+		"9:\n"						\
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)		\
+		: "=r"(__pu_err)				\
 		: "r"(ptr), "r"(x), "0"(__pu_err))
 
 
 #if !defined(CONFIG_64BIT)
 
-#define __put_user_asm64(__val, ptr) do {			\
-	__asm__ __volatile__ (					\
-		"1: stw %2,0(%%sr2,%1)\n"			\
-		"2: stw %R2,4(%%sr2,%1)\n"			\
-		"9:\n"						\
-		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)		\
-		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)		\
-		: "=r"(__pu_err)				\
-		: "r"(ptr), "r"(__val), "0"(__pu_err));		\
+#define __put_user_asm64(sr, __val, ptr) do {			\
+	__asm__ __volatile__ (					\
+		"1: stw %2,0(" sr "%1)\n"			\
+		"2: stw %R2,4(" sr "%1)\n"			\
+		"9:\n"						\
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)		\
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)		\
+		: "=r"(__pu_err)				\
+		: "r"(ptr), "r"(__val), "0"(__pu_err));		\
 } while (0)
 
 #endif /* !defined(CONFIG_64BIT) */
···
  */
 
 extern long strncpy_from_user(char *, const char __user *, long);
-extern unsigned lclear_user(void __user *, unsigned long);
+extern __must_check unsigned lclear_user(void __user *, unsigned long);
 extern __must_check long strnlen_user(const char __user *src, long n);
 /*
  * Complex access routines -- macros
  */
-#define user_addr_max() (~0UL)
 
 #define clear_user lclear_user
 #define __clear_user lclear_user
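The new sr parameter is a string literal ("%%sr3," for user space,
"%%sr0," for kernel space) that plain C string concatenation pastes into
the asm template, so the space register is selected at compile time
rather than through the old run-time load_sr2() dance. A small
stand-alone sketch of that mechanism (hypothetical user-space C; it
merely prints the resulting templates):

    #include <stdio.h>

    /* Same string-pasting trick as __get_user_asm(): sr and ldx are
     * string literals glued into the asm template by the compiler. */
    #define ACCESS_TEMPLATE(sr, ldx)	"1: " ldx " 0(" sr "%2),%0\n"

    int main(void)
    {
    	/* "%%" collapses to one '%' when GCC expands an asm template. */
    	fputs(ACCESS_TEMPLATE("%%sr3,", "ldw"), stdout); /* user load */
    	fputs(ACCESS_TEMPLATE("%%sr0,", "ldw"), stdout); /* kernel load */
    	return 0;
    }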
arch/parisc/kernel/asm-offsets.c (-1)
···
 	DEFINE(TI_TASK, offsetof(struct thread_info, task));
 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
 	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
-	DEFINE(TI_SEGMENT, offsetof(struct thread_info, addr_limit));
 	DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
 	DEFINE(THREAD_SZ, sizeof(struct thread_info));
 	/* THREAD_SZ_ALGN includes space for a stack frame. */
arch/parisc/lib/lusercopy.S (+1 -17)
···
 #include <linux/linkage.h>
 
 /*
- * get_sr gets the appropriate space value into
- * sr1 for kernel/user space access, depending
- * on the flag stored in the task structure.
- */
-
-	.macro	get_sr
-	mfctl	%cr30,%r1
-	ldw	TI_SEGMENT(%r1),%r22
-	mfsp	%sr3,%r1
-	or,<>	%r22,%r0,%r0
-	copy	%r0,%r1
-	mtsp	%r1,%sr1
-	.endm
-
-/*
  * unsigned long lclear_user(void *to, unsigned long n)
  *
  * Returns 0 for success.
···
 
 ENTRY_CFI(lclear_user)
 	comib,=,n   0,%r25,$lclu_done
-	get_sr
 $lclu_loop:
 	addib,<>    -1,%r25,$lclu_loop
-1:	stbs,ma	    %r0,1(%sr1,%r26)
+1:	stbs,ma	    %r0,1(%sr3,%r26)
 
 $lclu_done:
 	bv	    %r0(%r2)
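
The deleted get_sr macro was the assembly twin of load_sr2(): it picked
the space register at run time from thread_info->addr_limit
(TI_SEGMENT). Roughly, in C (illustrative only, not part of the patch):

    /* Illustrative C analogue of the removed get_sr logic: seg == 0
     * meant KERNEL_DS (use space 0), non-zero meant USER_DS (%sr3). */
    static unsigned long pick_space(int seg, unsigned long sr3)
    {
    	return seg ? sr3 : 0;
    }

With CONFIG_SET_FS gone there is no per-thread flag left to test, so
lclear_user can simply hard-code %sr3.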