Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: uaccess: split user/kernel routines

This patch separates arm64's user and kernel memory access primitives
into distinct routines, adding new __{get,put}_kernel_nofault() helpers
to access kernel memory, upon which core code builds larger copy
routines.

The kernel access routines (using LDR/STR) are not affected by PAN (when
legitimately accessing kernel memory), nor are they affected by UAO.
Switching to KERNEL_DS may set UAO, but this does not adversely affect
the kernel access routines.

The user access routines (using LDTR/STTR) are not affected by PAN (when
legitimately accessing user memory), but are affected by UAO. As these
are only legitimate to use under USER_DS with UAO clear, this should not
be problematic.

Routines performing atomics to user memory (futex and deprecated
instruction emulation) still need to transiently clear PAN, and these
are left as-is. These are never used on kernel memory.

Subsequent patches will refactor the uaccess helpers to remove redundant
code, and will also remove the redundant PAN/UAO manipulation.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20201202131558.39270-8-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>

Authored by Mark Rutland and committed by Catalin Marinas.
fc703d80 f253d827

+47 -65
+9 -39
arch/arm64/include/asm/asm-uaccess.h
··· 59 59 #endif 60 60 61 61 /* 62 - * Generate the assembly for UAO alternatives with exception table entries. 62 + * Generate the assembly for LDTR/STTR with exception table entries. 63 63 * This is complicated as there is no post-increment or pair versions of the 64 64 * unprivileged instructions, and USER() only works for single instructions. 65 65 */ 66 - #ifdef CONFIG_ARM64_UAO 67 66 .macro uao_ldp l, reg1, reg2, addr, post_inc 68 - alternative_if_not ARM64_HAS_UAO 69 - 8888: ldp \reg1, \reg2, [\addr], \post_inc; 70 - 8889: nop; 71 - nop; 72 - alternative_else 73 - ldtr \reg1, [\addr]; 74 - ldtr \reg2, [\addr, #8]; 75 - add \addr, \addr, \post_inc; 76 - alternative_endif 67 + 8888: ldtr \reg1, [\addr]; 68 + 8889: ldtr \reg2, [\addr, #8]; 69 + add \addr, \addr, \post_inc; 77 70 78 71 _asm_extable 8888b,\l; 79 72 _asm_extable 8889b,\l; 80 73 .endm 81 74 82 75 .macro uao_stp l, reg1, reg2, addr, post_inc 83 - alternative_if_not ARM64_HAS_UAO 84 - 8888: stp \reg1, \reg2, [\addr], \post_inc; 85 - 8889: nop; 86 - nop; 87 - alternative_else 88 - sttr \reg1, [\addr]; 89 - sttr \reg2, [\addr, #8]; 90 - add \addr, \addr, \post_inc; 91 - alternative_endif 76 + 8888: sttr \reg1, [\addr]; 77 + 8889: sttr \reg2, [\addr, #8]; 78 + add \addr, \addr, \post_inc; 92 79 93 80 _asm_extable 8888b,\l; 94 81 _asm_extable 8889b,\l; 95 82 .endm 96 83 97 84 .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc 98 - alternative_if_not ARM64_HAS_UAO 99 - 8888: \inst \reg, [\addr], \post_inc; 100 - nop; 101 - alternative_else 102 - \alt_inst \reg, [\addr]; 103 - add \addr, \addr, \post_inc; 104 - alternative_endif 85 + 8888: \alt_inst \reg, [\addr]; 86 + add \addr, \addr, \post_inc; 105 87 106 88 _asm_extable 8888b,\l; 107 89 .endm 108 - #else 109 - .macro uao_ldp l, reg1, reg2, addr, post_inc 110 - USER(\l, ldp \reg1, \reg2, [\addr], \post_inc) 111 - .endm 112 - .macro uao_stp l, reg1, reg2, addr, post_inc 113 - USER(\l, stp \reg1, \reg2, [\addr], \post_inc) 114 - .endm 115 
- .macro uao_user_alternative l, inst, alt_inst, reg, addr, post_inc 116 - USER(\l, \inst \reg, [\addr], \post_inc) 117 - .endm 118 - #endif 119 - 120 90 #endif
+38 -26
arch/arm64/include/asm/uaccess.h
··· 24 24 #include <asm/memory.h> 25 25 #include <asm/extable.h> 26 26 27 + #define HAVE_GET_KERNEL_NOFAULT 28 + 27 29 #define get_fs() (current_thread_info()->addr_limit) 28 30 29 31 static inline void set_fs(mm_segment_t fs) ··· 255 253 * The "__xxx_error" versions set the third argument to -EFAULT if an error 256 254 * occurs, and leave it unchanged on success. 257 255 */ 258 - #define __get_mem_asm(instr, alt_instr, reg, x, addr, err, feature) \ 256 + #define __get_mem_asm(load, reg, x, addr, err) \ 259 257 asm volatile( \ 260 - "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \ 261 - alt_instr " " reg "1, [%2]\n", feature) \ 258 + "1: " load " " reg "1, [%2]\n" \ 262 259 "2:\n" \ 263 260 " .section .fixup, \"ax\"\n" \ 264 261 " .align 2\n" \ ··· 269 268 : "+r" (err), "=&r" (x) \ 270 269 : "r" (addr), "i" (-EFAULT)) 271 270 272 - #define __raw_get_mem(x, ptr, err) \ 271 + #define __raw_get_mem(ldr, x, ptr, err) \ 273 272 do { \ 274 273 unsigned long __gu_val; \ 275 274 switch (sizeof(*(ptr))) { \ 276 275 case 1: \ 277 - __get_mem_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr), \ 278 - (err), ARM64_HAS_UAO); \ 276 + __get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err)); \ 279 277 break; \ 280 278 case 2: \ 281 - __get_mem_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr), \ 282 - (err), ARM64_HAS_UAO); \ 279 + __get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err)); \ 283 280 break; \ 284 281 case 4: \ 285 - __get_mem_asm("ldr", "ldtr", "%w", __gu_val, (ptr), \ 286 - (err), ARM64_HAS_UAO); \ 282 + __get_mem_asm(ldr, "%w", __gu_val, (ptr), (err)); \ 287 283 break; \ 288 284 case 8: \ 289 - __get_mem_asm("ldr", "ldtr", "%x", __gu_val, (ptr), \ 290 - (err), ARM64_HAS_UAO); \ 285 + __get_mem_asm(ldr, "%x", __gu_val, (ptr), (err)); \ 291 286 break; \ 292 287 default: \ 293 288 BUILD_BUG(); \ ··· 295 298 do { \ 296 299 __chk_user_ptr(ptr); \ 297 300 uaccess_enable_not_uao(); \ 298 - __raw_get_mem(x, ptr, err); \ 301 + __raw_get_mem("ldtr", x, ptr, err); \ 299 302 
uaccess_disable_not_uao(); \ 300 303 } while (0) 301 304 ··· 320 323 321 324 #define get_user __get_user 322 325 323 - #define __put_mem_asm(instr, alt_instr, reg, x, addr, err, feature) \ 326 + #define __get_kernel_nofault(dst, src, type, err_label) \ 327 + do { \ 328 + int __gkn_err = 0; \ 329 + \ 330 + __raw_get_mem("ldr", *((type *)(dst)), \ 331 + (__force type *)(src), __gkn_err); \ 332 + if (unlikely(__gkn_err)) \ 333 + goto err_label; \ 334 + } while (0) 335 + 336 + #define __put_mem_asm(store, reg, x, addr, err) \ 324 337 asm volatile( \ 325 - "1:"ALTERNATIVE(instr " " reg "1, [%2]\n", \ 326 - alt_instr " " reg "1, [%2]\n", feature) \ 338 + "1: " store " " reg "1, [%2]\n" \ 327 339 "2:\n" \ 328 340 " .section .fixup,\"ax\"\n" \ 329 341 " .align 2\n" \ ··· 343 337 : "+r" (err) \ 344 338 : "r" (x), "r" (addr), "i" (-EFAULT)) 345 339 346 - #define __raw_put_mem(x, ptr, err) \ 340 + #define __raw_put_mem(str, x, ptr, err) \ 347 341 do { \ 348 342 __typeof__(*(ptr)) __pu_val = (x); \ 349 343 switch (sizeof(*(ptr))) { \ 350 344 case 1: \ 351 - __put_mem_asm("strb", "sttrb", "%w", __pu_val, (ptr), \ 352 - (err), ARM64_HAS_UAO); \ 345 + __put_mem_asm(str "b", "%w", __pu_val, (ptr), (err)); \ 353 346 break; \ 354 347 case 2: \ 355 - __put_mem_asm("strh", "sttrh", "%w", __pu_val, (ptr), \ 356 - (err), ARM64_HAS_UAO); \ 348 + __put_mem_asm(str "h", "%w", __pu_val, (ptr), (err)); \ 357 349 break; \ 358 350 case 4: \ 359 - __put_mem_asm("str", "sttr", "%w", __pu_val, (ptr), \ 360 - (err), ARM64_HAS_UAO); \ 351 + __put_mem_asm(str, "%w", __pu_val, (ptr), (err)); \ 361 352 break; \ 362 353 case 8: \ 363 - __put_mem_asm("str", "sttr", "%x", __pu_val, (ptr), \ 364 - (err), ARM64_HAS_UAO); \ 354 + __put_mem_asm(str, "%x", __pu_val, (ptr), (err)); \ 365 355 break; \ 366 356 default: \ 367 357 BUILD_BUG(); \ ··· 368 366 do { \ 369 367 __chk_user_ptr(ptr); \ 370 368 uaccess_enable_not_uao(); \ 371 - __raw_put_mem(x, ptr, err); \ 369 + __raw_put_mem("sttr", x, ptr, err); \ 372 
370 uaccess_disable_not_uao(); \ 373 371 } while (0) 374 372 ··· 392 390 }) 393 391 394 392 #define put_user __put_user 393 + 394 + #define __put_kernel_nofault(dst, src, type, err_label) \ 395 + do { \ 396 + int __pkn_err = 0; \ 397 + \ 398 + __raw_put_mem("str", *((type *)(src)), \ 399 + (__force type *)(dst), __pkn_err); \ 400 + if (unlikely(__pkn_err)) \ 401 + goto err_label; \ 402 + } while(0) 395 403 396 404 extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n); 397 405 #define raw_copy_from_user(to, from, n) \