Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

LoongArch: Adjust user accessors for 32BIT/64BIT

Adjust user accessors for both 32BIT and 64BIT, including: get_user(),
put_user(), copy_user(), clear_user(), etc.

Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>

+91 -22
+60 -3
arch/loongarch/include/asm/uaccess.h
··· 19 19 #include <asm/asm-extable.h> 20 20 #include <asm-generic/access_ok.h> 21 21 22 + #define __LSW 0 23 + #define __MSW 1 24 + 22 25 extern u64 __ua_limit; 23 26 24 - #define __UA_ADDR ".dword" 27 + #ifdef CONFIG_64BIT 25 28 #define __UA_LIMIT __ua_limit 29 + #else 30 + #define __UA_LIMIT 0x80000000UL 31 + #endif 26 32 27 33 /* 28 34 * get_user: - Get a simple variable from user space. ··· 132 126 * 133 127 * Returns zero on success, or -EFAULT on error. 134 128 */ 129 + 135 130 #define __put_user(x, ptr) \ 136 131 ({ \ 137 132 int __pu_err = 0; \ ··· 153 146 case 1: __get_data_asm(val, "ld.b", ptr); break; \ 154 147 case 2: __get_data_asm(val, "ld.h", ptr); break; \ 155 148 case 4: __get_data_asm(val, "ld.w", ptr); break; \ 156 - case 8: __get_data_asm(val, "ld.d", ptr); break; \ 149 + case 8: __get_data_asm_8(val, ptr); break; \ 157 150 default: BUILD_BUG(); break; \ 158 151 } \ 159 152 } while (0) ··· 174 167 (val) = (__typeof__(*(ptr))) __gu_tmp; \ 175 168 } 176 169 170 + #ifdef CONFIG_64BIT 171 + #define __get_data_asm_8(val, ptr) \ 172 + __get_data_asm(val, "ld.d", ptr) 173 + #else /* !CONFIG_64BIT */ 174 + #define __get_data_asm_8(val, ptr) \ 175 + { \ 176 + u32 __lo, __hi; \ 177 + u32 __user *__ptr = (u32 __user *)(ptr); \ 178 + \ 179 + __asm__ __volatile__ ( \ 180 + "1:\n" \ 181 + " ld.w %1, %3 \n" \ 182 + "2:\n" \ 183 + " ld.w %2, %4 \n" \ 184 + "3:\n" \ 185 + _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 3b, %0, %1) \ 186 + _ASM_EXTABLE_UACCESS_ERR_ZERO(2b, 3b, %0, %1) \ 187 + : "+r" (__gu_err), "=&r" (__lo), "=r" (__hi) \ 188 + : "m" (__ptr[__LSW]), "m" (__ptr[__MSW])); \ 189 + if (__gu_err) \ 190 + __hi = 0; \ 191 + (val) = (__typeof__(val))((__typeof__((val)-(val))) \ 192 + ((((u64)__hi << 32) | __lo))); \ 193 + } 194 + #endif /* CONFIG_64BIT */ 195 + 177 196 #define __put_user_common(ptr, size) \ 178 197 do { \ 179 198 switch (size) { \ 180 199 case 1: __put_data_asm("st.b", ptr); break; \ 181 200 case 2: __put_data_asm("st.h", ptr); break; \ 182 201 case 4: __put_data_asm("st.w", ptr); break; \ 183 - case 8: __put_data_asm("st.d", ptr); break; \ 202 + case 8: __put_data_asm_8(ptr); break; \ 184 203 default: BUILD_BUG(); break; \ 185 204 } \ 186 205 } while (0) ··· 222 189 : "+r" (__pu_err), "=m" (__m(ptr)) \ 223 190 : "Jr" (__pu_val)); \ 224 191 } 192 + 193 + #ifdef CONFIG_64BIT 194 + #define __put_data_asm_8(ptr) \ 195 + __put_data_asm("st.d", ptr) 196 + #else /* !CONFIG_64BIT */ 197 + #define __put_data_asm_8(ptr) \ 198 + { \ 199 + u32 __user *__ptr = (u32 __user *)(ptr); \ 200 + u64 __x = (__typeof__((__pu_val)-(__pu_val)))(__pu_val); \ 201 + \ 202 + __asm__ __volatile__ ( \ 203 + "1:\n" \ 204 + " st.w %z3, %1 \n" \ 205 + "2:\n" \ 206 + " st.w %z4, %2 \n" \ 207 + "3:\n" \ 208 + _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0) \ 209 + _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0) \ 210 + : "+r" (__pu_err), \ 211 + "=m" (__ptr[__LSW]), \ 212 + "=m" (__ptr[__MSW]) \ 213 + : "rJ" (__x), "rJ" (__x >> 32)); \ 214 + } 215 + #endif /* CONFIG_64BIT */ 225 216 226 217 #define __get_kernel_nofault(dst, src, type, err_label) \ 227 218 do { \
+14 -8
arch/loongarch/lib/clear_user.S
··· 13 13 #include <asm/unwind_hints.h> 14 14 15 15 SYM_FUNC_START(__clear_user) 16 + #ifdef CONFIG_32BIT 17 + b __clear_user_generic 18 + #else 16 19 /* 17 20 * Some CPUs support hardware unaligned access 18 21 */ 19 22 ALTERNATIVE "b __clear_user_generic", \ 20 23 "b __clear_user_fast", CPU_FEATURE_UAL 24 + #endif 21 25 SYM_FUNC_END(__clear_user) 22 26 23 27 EXPORT_SYMBOL(__clear_user) ··· 33 29 * a1: size 34 30 */ 35 31 SYM_FUNC_START(__clear_user_generic) 36 - beqz a1, 2f 32 + beqz a1, 2f 37 33 38 - 1: st.b zero, a0, 0 39 - addi.d a0, a0, 1 40 - addi.d a1, a1, -1 41 - bgtz a1, 1b 34 + 1: st.b zero, a0, 0 35 + PTR_ADDI a0, a0, 1 36 + PTR_ADDI a1, a1, -1 37 + bgtz a1, 1b 42 38 43 - 2: move a0, a1 44 - jr ra 39 + 2: move a0, a1 40 + jr ra 45 41 46 - _asm_extable 1b, 2b 42 + _asm_extable 1b, 2b 47 43 SYM_FUNC_END(__clear_user_generic) 48 44 45 + #ifdef CONFIG_64BIT 49 46 /* 50 47 * unsigned long __clear_user_fast(void *addr, unsigned long size) 51 48 * ··· 212 207 SYM_FUNC_END(__clear_user_fast) 213 208 214 209 STACK_FRAME_NON_STANDARD __clear_user_fast 210 + #endif
+17 -11
arch/loongarch/lib/copy_user.S
··· 13 13 #include <asm/unwind_hints.h> 14 14 15 15 SYM_FUNC_START(__copy_user) 16 + #ifdef CONFIG_32BIT 17 + b __copy_user_generic 18 + #else 16 19 /* 17 20 * Some CPUs support hardware unaligned access 18 21 */ 19 22 ALTERNATIVE "b __copy_user_generic", \ 20 23 "b __copy_user_fast", CPU_FEATURE_UAL 24 + #endif 21 25 SYM_FUNC_END(__copy_user) 22 26 23 27 EXPORT_SYMBOL(__copy_user) ··· 34 30 * a2: n 35 31 */ 36 32 SYM_FUNC_START(__copy_user_generic) 37 - beqz a2, 3f 33 + beqz a2, 3f 38 34 39 - 1: ld.b t0, a1, 0 40 - 2: st.b t0, a0, 0 41 - addi.d a0, a0, 1 42 - addi.d a1, a1, 1 43 - addi.d a2, a2, -1 44 - bgtz a2, 1b 35 + 1: ld.b t0, a1, 0 36 + 2: st.b t0, a0, 0 37 + PTR_ADDI a0, a0, 1 38 + PTR_ADDI a1, a1, 1 39 + PTR_ADDI a2, a2, -1 40 + bgtz a2, 1b 45 41 46 - 3: move a0, a2 47 - jr ra 42 + 3: move a0, a2 43 + jr ra 48 44 49 - _asm_extable 1b, 3b 50 - _asm_extable 2b, 3b 45 + _asm_extable 1b, 3b 46 + _asm_extable 2b, 3b 51 47 SYM_FUNC_END(__copy_user_generic) 52 48 49 + #ifdef CONFIG_64BIT 53 50 /* 54 51 * unsigned long __copy_user_fast(void *to, const void *from, unsigned long n) 55 52 * ··· 286 281 SYM_FUNC_END(__copy_user_fast) 287 282 288 283 STACK_FRAME_NON_STANDARD __copy_user_fast 284 + #endif