Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86: rename and clean up __copy_from_user_inatomic_nocache()

Similarly to the previous commit, this renames the somewhat confusingly
named function. But in this case, it was at least less confusing: the
__copy_from_user_inatomic_nocache is indeed copying from user memory,
and it is indeed ok to be used in an atomic context, so it will not warn
about it.

But the previous commit also removed the NTB mis-use of the
__copy_from_user_inatomic_nocache() function, and as a result every
call-site is now _actually_ doing a real user copy. That means that we
can now do the proper user pointer verification too.

End result: add proper address checking, remove the double underscores,
and change the "nocache" to "nontemporal" to more accurately describe
what this x86-only function actually does. It might be worth noting
that only the target is non-temporal: the actual user accesses are
normal memory accesses.

Also worth noting is that on non-x86 targets (and on older 32-bit x86 CPUs
before XMM2 in the Pentium III) we end up just falling back on a regular
user copy, so nothing can actually depend on the non-temporal semantics,
but that has always been true.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

+20 -19
+1 -1
arch/x86/include/asm/uaccess.h
··· 507 507 } ____cacheline_aligned_in_smp movsl_mask; 508 508 #endif 509 509 510 - #define ARCH_HAS_NOCACHE_UACCESS 1 510 + #define ARCH_HAS_NONTEMPORAL_UACCESS 1 511 511 512 512 /* 513 513 * The "unsafe" user accesses aren't really "unsafe", but the naming
+1 -7
arch/x86/include/asm/uaccess_32.h
··· 26 26 return __copy_user_ll(to, (__force const void *)from, n); 27 27 } 28 28 29 - static __always_inline unsigned long 30 - __copy_from_user_inatomic_nocache(void *to, const void __user *from, 31 - unsigned long n) 32 - { 33 - return __copy_from_user_ll_nocache_nozero(to, from, n); 34 - } 35 - 29 + unsigned long __must_check copy_from_user_inatomic_nontemporal(void *, const void __user *, unsigned long n); 36 30 unsigned long __must_check clear_user(void __user *mem, unsigned long len); 37 31 unsigned long __must_check __clear_user(void __user *mem, unsigned long len); 38 32
+2 -1
arch/x86/include/asm/uaccess_64.h
··· 152 152 extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size); 153 153 154 154 static inline int 155 - __copy_from_user_inatomic_nocache(void *dst, const void __user *src, 155 + copy_from_user_inatomic_nontemporal(void *dst, const void __user *src, 156 156 unsigned size) 157 157 { 158 158 long ret; 159 159 kasan_check_write(dst, size); 160 + src = mask_user_address(src); 160 161 stac(); 161 162 ret = copy_to_nontemporal(dst, (__force const void *)src, size); 162 163 clac();
+5 -4
arch/x86/lib/usercopy_32.c
··· 322 322 } 323 323 EXPORT_SYMBOL(__copy_user_ll); 324 324 325 - unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from, 325 + unsigned long copy_from_user_inatomic_nontemporal(void *to, const void __user *from, 326 326 unsigned long n) 327 327 { 328 - __uaccess_begin_nospec(); 328 + if (!user_access_begin(from, n)) 329 + return n; 329 330 #ifdef CONFIG_X86_INTEL_USERCOPY 330 331 if (n > 64 && static_cpu_has(X86_FEATURE_XMM2)) 331 332 n = __copy_user_intel_nocache(to, from, n); ··· 335 334 #else 336 335 __copy_user(to, from, n); 337 336 #endif 338 - __uaccess_end(); 337 + user_access_end(); 339 338 return n; 340 339 } 341 - EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero); 340 + EXPORT_SYMBOL(copy_from_user_inatomic_nontemporal);
+1 -1
drivers/gpu/drm/i915/i915_gem.c
··· 520 520 521 521 /* We can use the cpu mem copy function because this is X86. */ 522 522 vaddr = io_mapping_map_atomic_wc(mapping, base); 523 - unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset, 523 + unwritten = copy_from_user_inatomic_nontemporal((void __force *)vaddr + offset, 524 524 user_data, length); 525 525 io_mapping_unmap_atomic(vaddr); 526 526 if (unwritten) {
+1 -1
drivers/gpu/drm/qxl/qxl_ioctl.c
··· 184 184 185 185 /* TODO copy slow path code from i915 */ 186 186 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK)); 187 - unwritten = __copy_from_user_inatomic_nocache 187 + unwritten = copy_from_user_inatomic_nontemporal 188 188 (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK), 189 189 u64_to_user_ptr(cmd->command), cmd->command_size); 190 190
+8 -3
include/linux/uaccess.h
··· 331 331 332 332 #endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */ 333 333 334 - #ifndef ARCH_HAS_NOCACHE_UACCESS 334 + #ifndef ARCH_HAS_NONTEMPORAL_UACCESS 335 335 336 336 static inline __must_check unsigned long 337 - __copy_from_user_inatomic_nocache(void *to, const void __user *from, 337 + copy_from_user_inatomic_nontemporal(void *to, const void __user *from, 338 338 unsigned long n) 339 339 { 340 + if (can_do_masked_user_access()) 341 + from = mask_user_address(from); 342 + else 343 + if (!access_ok(from, n)) 344 + return n; 340 345 return __copy_from_user_inatomic(to, from, n); 341 346 } 342 347 343 - #endif /* ARCH_HAS_NOCACHE_UACCESS */ 348 + #endif /* ARCH_HAS_NONTEMPORAL_UACCESS */ 344 349 345 350 extern __must_check int check_zeroed_user(const void __user *from, size_t size); 346 351
+1 -1
lib/iov_iter.c
··· 277 277 size_t copy_from_user_iter_nocache(void __user *iter_from, size_t progress, 278 278 size_t len, void *to, void *priv2) 279 279 { 280 - return __copy_from_user_inatomic_nocache(to + progress, iter_from, len); 280 + return copy_from_user_inatomic_nontemporal(to + progress, iter_from, len); 281 281 } 282 282 283 283 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)