
x86-64/arm64/powerpc: clean up and rename __copy_from_user_flushcache

This finishes the work on these odd functions that were only implemented
by a handful of architectures.

The 'flushcache' function is only used from the iterator code, so let's
make it do the same thing that the nontemporal version does: remove the
two underscores and add the user address checking.
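
As a rough sketch of the resulting calling convention (the caller below
is made up for illustration; only copy_from_user_flushcache() itself is
real):

	#include <linux/uaccess.h>

	/*
	 * Hypothetical caller: no access_ok() or pointer masking is
	 * needed here any more.  The helper does its own user address
	 * checking and returns the number of bytes it could NOT copy,
	 * just like plain copy_from_user().
	 */
	static size_t example_fill_from_user(void *dst,
					     const void __user *src,
					     size_t size)
	{
		return copy_from_user_flushcache(dst, src, size);
	}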

Yes, yes, the user address checking is also done at iovec import time,
but we have long since walked away from the old double-underscore thing
where we try to avoid address checking overhead at access time, and
these functions shouldn't be so special and old-fashioned.
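
For context, the convention being retired worked roughly like this
(simplified illustration, not the real definitions):

	#include <linux/uaccess.h>

	/* Old style: the caller did the address check up front... */
	static unsigned long old_style(void *dst, const void __user *src,
				       unsigned long n)
	{
		if (!access_ok(src, n))
			return n;
		/* ...so the double-underscore helper could skip it. */
		return __copy_from_user(dst, src, n);
	}

	/* New style: the plain helper checks/masks the address itself. */
	static unsigned long new_style(void *dst, const void __user *src,
				       unsigned long n)
	{
		return copy_from_user(dst, src, n);
	}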

The arm64 version already did the address check, in fact, so there it's
just a matter of renaming it. For powerpc and x86-64 we now do the
proper user access boilerplate.
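
That boilerplate is roughly the following pattern (a sketch only;
do_raw_copy() is a stand-in for the architecture's unchecked copy
routine, the real thing is in the x86-64 and powerpc hunks below):

	#include <linux/uaccess.h>

	size_t do_raw_copy(void *dst, const void *src, size_t size);

	static size_t checked_copy(void *dst, const void __user *src,
				   size_t size)
	{
		size_t left;

		/* Mask the pointer and open the user access window. */
		src = masked_user_access_begin(src);
		/* The unchecked arch copy runs inside the window. */
		left = do_raw_copy(dst, (__force const void *)src, size);
		/* Close the window again. */
		user_access_end();

		return left;	/* bytes that could not be copied */
	}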

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

+17 -17
+1 -1
arch/arm64/include/asm/uaccess.h
@@ -480,7 +480,7 @@
 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
 extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);
 
-static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
+static inline size_t copy_from_user_flushcache(void *dst, const void __user *src, size_t size)
 {
 	kasan_check_write(dst, size);
 	return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
+1 -2
arch/powerpc/include/asm/uaccess.h
@@ -405,8 +405,7 @@
 }
 #endif
 
-extern long __copy_from_user_flushcache(void *dst, const void __user *src,
-		unsigned size);
+extern size_t copy_from_user_flushcache(void *dst, const void __user *src, size_t size);
 
 static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
 {
+6 -5
arch/powerpc/lib/pmem.c
@@ -66,15 +66,16 @@
 /*
  * CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE symbols
  */
-long __copy_from_user_flushcache(void *dest, const void __user *src,
-		unsigned size)
+size_t copy_from_user_flushcache(void *dest, const void __user *src,
+		size_t size)
 {
-	unsigned long copied, start = (unsigned long) dest;
+	unsigned long not_copied, start = (unsigned long) dest;
 
-	copied = __copy_from_user(dest, src, size);
+	src = mask_user_address(src);
+	not_copied = __copy_from_user(dest, src, size);
 	clean_pmem_range(start, start + size);
 
-	return copied;
+	return not_copied;
 }
 
 void memcpy_flushcache(void *dest, const void *src, size_t size)
+4 -4
arch/x86/include/asm/uaccess_64.h
@@ -149,7 +149,7 @@
 
 #define copy_to_nontemporal copy_to_nontemporal
 extern size_t copy_to_nontemporal(void *dst, const void *src, size_t size);
-extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
+extern size_t copy_user_flushcache(void *dst, const void __user *src, size_t size);
 
 static inline int
 copy_from_user_inatomic_nontemporal(void *dst, const void __user *src,
@@ -164,11 +164,11 @@
 	return ret;
 }
 
-static inline int
-__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
+static inline size_t
+copy_from_user_flushcache(void *dst, const void __user *src, size_t size)
 {
 	kasan_check_write(dst, size);
-	return __copy_user_flushcache(dst, src, size);
+	return copy_user_flushcache(dst, src, size);
 }
 
 /*
+4 -4
arch/x86/lib/usercopy_64.c
@@ -43,14 +43,14 @@
 }
 EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
 
-long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
+size_t copy_user_flushcache(void *dst, const void __user *src, size_t size)
 {
 	unsigned long flushed, dest = (unsigned long) dst;
-	long rc;
+	unsigned long rc;
 
-	stac();
+	src = masked_user_access_begin(src);
 	rc = copy_to_nontemporal(dst, (__force const void *)src, size);
-	clac();
+	user_access_end();
 
 	/*
 	 * copy_to_nontemporal() uses non-temporal stores for the bulk
+1 -1
lib/iov_iter.c
@@ -296,7 +296,7 @@
 size_t copy_from_user_iter_flushcache(void __user *iter_from, size_t progress,
 			      size_t len, void *to, void *priv2)
 {
-	return __copy_from_user_flushcache(to + progress, iter_from, len);
+	return copy_from_user_flushcache(to + progress, iter_from, len);
 }
 
 static __always_inline