Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_X86_UACCESS_64_H
3#define _ASM_X86_UACCESS_64_H
4
5/*
6 * User space memory access functions
7 */
8#include <linux/compiler.h>
9#include <linux/lockdep.h>
10#include <linux/kasan-checks.h>
11#include <asm/alternative.h>
12#include <asm/cpufeatures.h>
13#include <asm/page.h>
14#include <asm/percpu.h>
15
#ifdef MODULE
 /* Modules cannot use the runtime-const patching machinery; plain load. */
 #define runtime_const_ptr(sym) (sym)
#else
 #include <asm/runtime-const.h>
#endif
/* Highest valid user-space address; read via runtime_const_ptr() below. */
extern unsigned long USER_PTR_MAX;
22
23#ifdef CONFIG_ADDRESS_MASKING
/*
 * Mask out tag bits from the address.
 */
static inline unsigned long __untagged_addr(unsigned long addr)
{
	/*
	 * The 'and' with the per-CPU untag mask is patched in via
	 * ALTERNATIVE only when the CPU has X86_FEATURE_LAM; on other
	 * CPUs the instruction is empty and 'addr' is returned unchanged.
	 */
	asm_inline (ALTERNATIVE("", "and " __percpu_arg([mask]) ", %[addr]",
				X86_FEATURE_LAM)
		    : [addr] "+r" (addr)
		    : [mask] "m" (__my_cpu_var(tlbstate_untag_mask)));

	return addr;
}
36
/*
 * Strip tag bits from a user pointer on the current CPU while
 * preserving the pointer's original (possibly __user) type.
 */
#define untagged_addr(addr)	({					\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr(__addr);		\
})
41
42static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
43 unsigned long addr)
44{
45 mmap_assert_locked(mm);
46 return addr & (mm)->context.untag_mask;
47}
48
/*
 * Strip tag bits using a remote mm's untag mask, preserving the
 * pointer's original type.  Caller must hold mm's mmap lock
 * (asserted in __untagged_addr_remote()).
 */
#define untagged_addr_remote(mm, addr)	({				\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr_remote(mm, __addr);	\
})
53
54#endif
55
/*
 * True if 'x' is at or below the highest valid user address.
 * Note that a zero/NULL pointer also passes this check.
 */
#define valid_user_address(x) \
	likely((__force unsigned long)(x) <= runtime_const_ptr(USER_PTR_MAX))
58
/*
 * Masking the user address is an alternative to a conditional
 * user_access_begin that can avoid the fencing. This only works
 * for dense accesses starting at the address.
 */
static inline void __user *mask_user_address(const void __user *ptr)
{
	void __user *ret;

	/*
	 * Branchless clamp: after the 'cmp', 'cmova' overwrites the
	 * result with USER_PTR_MAX whenever 'ptr' compares (unsigned)
	 * above it, so the returned pointer is always <= USER_PTR_MAX.
	 */
	asm("cmp %1,%0\n\t"
	    "cmova %1,%0"
	    :"=r" (ret)
	    :"r" (runtime_const_ptr(USER_PTR_MAX)),
	     "0" (ptr));
	return ret;
}
/*
 * Clamp 'x' into the user address range (see mask_user_address()) and
 * open a user-access section via __uaccess_begin(); evaluates to the
 * clamped pointer.  No conditional branch, hence no fence needed.
 */
#define masked_user_access_begin(x) ({				\
	auto __masked_ptr = (x);				\
	__masked_ptr = mask_user_address(__masked_ptr);		\
	__uaccess_begin(); __masked_ptr; })
78
79/*
80 * User pointers can have tag bits on x86-64. This scheme tolerates
 * arbitrary values in those bits rather than masking them off.
82 *
83 * Enforce two rules:
84 * 1. 'ptr' must be in the user part of the address space
85 * 2. 'ptr+size' must not overflow into kernel addresses
86 *
87 * Note that we always have at least one guard page between the
88 * max user address and the non-canonical gap, allowing us to
89 * ignore small sizes entirely.
90 *
91 * In fact, we could probably remove the size check entirely, since
92 * any kernel accesses will be in increasing address order starting
93 * at 'ptr'.
94 *
95 * That's a separate optimization, for now just handle the small
96 * constant case.
97 */
98static inline bool __access_ok(const void __user *ptr, unsigned long size)
99{
100 if (__builtin_constant_p(size <= PAGE_SIZE) && size <= PAGE_SIZE) {
101 return valid_user_address(ptr);
102 } else {
103 unsigned long sum = size + (__force unsigned long)ptr;
104
105 return valid_user_address(sum) && sum >= (__force unsigned long)ptr;
106 }
107}
108#define __access_ok __access_ok
109
110/*
111 * Copy To/From Userspace
112 */
113
/*
 * Out-of-line fallback used by copy_user_generic() when the CPU lacks
 * FSRM.  Handles exceptions in both to and from, but doesn't do
 * access_ok.  Called with a 'rep movs'-style register contract (count
 * in %rcx, dst in %rdi, src in %rsi) -- see the asm constraints in
 * copy_user_generic().
 */
__must_check unsigned long
rep_movs_alternative(void *to, const void *from, unsigned len);
117
/*
 * Copy 'len' bytes with SMAP opened (stac/clac around the access).
 * The remaining byte count lives in %rcx ("+c"), so on a fault the
 * exception table entry lands at 2: and the uncopied count is
 * returned; 0 means the whole copy succeeded.  %rax is clobbered by
 * the out-of-line call path.
 */
static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned long len)
{
	stac();
	/*
	 * If CPU has FSRM feature, use 'rep movs'.
	 * Otherwise, use rep_movs_alternative.
	 */
	asm volatile(
		"1:\n\t"
		ALTERNATIVE("rep movsb",
			    "call rep_movs_alternative", ALT_NOT(X86_FEATURE_FSRM))
		"2:\n"
		_ASM_EXTABLE_UA(1b, 2b)
		:"+c" (len), "+D" (to), "+S" (from), ASM_CALL_CONSTRAINT
		: : "memory", "rax");
	clac();
	return len;
}
137
138static __always_inline __must_check unsigned long
139raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
140{
141 return copy_user_generic(dst, (__force void *)src, size);
142}
143
144static __always_inline __must_check unsigned long
145raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
146{
147 return copy_user_generic((__force void *)dst, src, size);
148}
149
/* Advertise the arch-specific nontemporal copy to generic code. */
#define copy_to_nontemporal copy_to_nontemporal
/*
 * Out-of-line copies that bypass / flush the cache.  NOTE(review):
 * presumably both return the number of bytes NOT copied, matching the
 * other uaccess primitives in this file -- confirm against their
 * implementations.
 */
extern size_t copy_to_nontemporal(void *dst, const void *src, size_t size);
extern size_t copy_user_flushcache(void *dst, const void __user *src, size_t size);
153
154static inline int
155copy_from_user_inatomic_nontemporal(void *dst, const void __user *src,
156 unsigned size)
157{
158 long ret;
159 kasan_check_write(dst, size);
160 src = mask_user_address(src);
161 stac();
162 ret = copy_to_nontemporal(dst, (__force const void *)src, size);
163 clac();
164 return ret;
165}
166
167static inline size_t
168copy_from_user_flushcache(void *dst, const void __user *src, size_t size)
169{
170 kasan_check_write(dst, size);
171 return copy_user_flushcache(dst, src, size);
172}
173
174/*
175 * Zero Userspace.
176 */
177
/*
 * Out-of-line fallback used by __clear_user() when the CPU lacks FSRS.
 * Called with a 'rep stos'-style register contract (count in %rcx,
 * dst in %rdi, %rax == 0) -- see the asm constraints in __clear_user().
 */
__must_check unsigned long
rep_stos_alternative(void __user *addr, unsigned long len);
180
/*
 * Zero 'size' bytes of user memory at 'addr'.  Caller must have
 * validated the range (see clear_user() below).  The count lives in
 * %rcx ("+c"), so on a fault the exception entry at 2: leaves the
 * number of bytes NOT zeroed to be returned; 0 on full success.
 */
static __always_inline __must_check unsigned long __clear_user(void __user *addr, unsigned long size)
{
	might_fault();
	stac();

	/*
	 * No memory constraint because it doesn't change any memory gcc
	 * knows about.
	 */
	asm volatile(
		"1:\n\t"
		ALTERNATIVE("rep stosb",
			    "call rep_stos_alternative", ALT_NOT(X86_FEATURE_FSRS))
		"2:\n"
		_ASM_EXTABLE_UA(1b, 2b)
		: "+c" (size), "+D" (addr), ASM_CALL_CONSTRAINT
		: "a" (0));

	clac();

	return size;
}
203
204static __always_inline unsigned long clear_user(void __user *to, unsigned long n)
205{
206 if (__access_ok(to, n))
207 return __clear_user(to, n);
208 return n;
209}
210#endif /* _ASM_X86_UACCESS_64_H */