/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#include <asm/asm-compat.h>
#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On return to user mode, check that addr_limit (fs) is correct */
	set_thread_flag(TIF_FSCHECK);
}

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max()	(get_fs().seg)

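/*
 * Illustrative sketch (not part of this header): the classic pattern for
 * temporarily widening the limit so that kernel buffers pass access_ok().
 * The surrounding code is hypothetical; the old limit must always be
 * restored:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	... uaccess routines now accept kernel addresses ...
 *	set_fs(old_fs);
 */
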
#ifdef __powerpc64__
/*
 * This check is sufficient because there is a large enough
 * gap between user addresses and the kernel addresses.
 */
#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else

static inline int __access_ok(unsigned long addr, unsigned long size,
			mm_segment_t seg)
{
	if (addr > seg.seg)
		return 0;
	return (size == 0 || size - 1 <= seg.seg - addr);
}

#endif

#define access_ok(type, addr, size)		\
	(__chk_user_ptr(addr),			\
	 __access_ok((__force unsigned long)(addr), (size), get_fs()))

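/*
 * Illustrative sketch (not part of this header): a typical caller checks
 * the whole user range once before touching it.  The buffer and length
 * names are hypothetical:
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 */
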
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly.  We want to return _two_ values in "get_user()"
 * and yet we don't want to return them via pointers, because that costs
 * too much performance.  Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_inatomic(x, ptr) \
	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

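/*
 * Illustrative sketch (not part of this header): get_user()/put_user()
 * validate the address themselves and return 0 on success or -EFAULT,
 * while the __ variants rely on a prior access_ok().  The variables
 * below are hypothetical:
 *
 *	int __user *uptr;
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 *
 *	if (!access_ok(VERIFY_READ, uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	if (__get_user(val, uptr))
 *		return -EFAULT;
 */
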
extern long __put_user_bad(void);

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	" op " %1,0(%2)	# put_user\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err)				\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))

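/*
 * For orientation (an informal sketch, not generated code): the store at
 * "1:" above is the only instruction that can fault.  EX_TABLE(1b, 3b)
 * records a (faulting insn, fixup) pair; if the store takes an exception,
 * the handler branches to "3:", which loads -EFAULT into err and jumps
 * back to the normal continuation at "2:".  On success, err is untouched.
 */
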
#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval) \
	__put_user_asm(x, ptr, retval, "std")
#else /* __powerpc64__ */
#define __put_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	stw %1,0(%2)\n"			\
		"2:	stw %1+1,4(%2)\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 4b)			\
		EX_TABLE(2b, 4b)			\
		: "=r" (err)				\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __put_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
	case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
	case 4: __put_user_asm(x, ptr, retval, "stw"); break;	\
	case 8: __put_user_asm2(x, ptr, retval); break;		\
	default: __put_user_bad();				\
	}							\
} while (0)

153
154#define __put_user_nocheck(x, ptr, size) \
155({ \
156 long __pu_err; \
157 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
158 if (!is_kernel_addr((unsigned long)__pu_addr)) \
159 might_fault(); \
160 __chk_user_ptr(ptr); \
161 __put_user_size((x), __pu_addr, (size), __pu_err); \
162 __pu_err; \
163})
164
165#define __put_user_check(x, ptr, size) \
166({ \
167 long __pu_err = -EFAULT; \
168 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
169 might_fault(); \
170 if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
171 __put_user_size((x), __pu_addr, (size), __pu_err); \
172 __pu_err; \
173})
174
175#define __put_user_nosleep(x, ptr, size) \
176({ \
177 long __pu_err; \
178 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
179 __chk_user_ptr(ptr); \
180 __put_user_size((x), __pu_addr, (size), __pu_err); \
181 __pu_err; \
182})
183
184
extern long __get_user_bad(void);

/*
 * This does an atomic, 16-byte-aligned 128-bit load from userspace.
 * It is up to the caller to do enable_kernel_vmx() before calling!
 */
#define __get_user_atomic_128_aligned(kaddr, uaddr, err)	\
	__asm__ __volatile__(					\
		"1:	lvx  0,0,%1	# get user\n"		\
		"	stvx 0,0,%2	# put kernel\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 3b)				\
		: "=r" (err)					\
		: "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))

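/*
 * Illustrative sketch (not part of this header): per the comment above,
 * VMX must be enabled first, and the kernel destination must be 16-byte
 * aligned.  Names and context below are hypothetical:
 *
 *	u8 kbuf[16] __aligned(16);
 *	long err = 0;
 *
 *	enable_kernel_vmx();
 *	__get_user_atomic_128_aligned(kbuf, uaddr, err);
 *	if (err)
 *		return -EFAULT;
 */
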
#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0(%2)	# get_user\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz %1,0(%2)\n"			\
		"2:	lwz %1+1,4(%2)\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 4b)			\
		EX_TABLE(2b, 4b)			\
		: "=r" (err), "=&r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size(x, ptr, size, retval)			\
do {								\
	retval = 0;						\
	__chk_user_ptr(ptr);					\
	if (size > sizeof(x))					\
		(x) = __get_user_bad();				\
	switch (size) {						\
	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, ptr, retval); break;		\
	default: (x) = __get_user_bad();			\
	}							\
} while (0)

252
253#define __get_user_nocheck(x, ptr, size) \
254({ \
255 long __gu_err; \
256 unsigned long __gu_val; \
257 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
258 __chk_user_ptr(ptr); \
259 if (!is_kernel_addr((unsigned long)__gu_addr)) \
260 might_fault(); \
261 barrier_nospec(); \
262 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
263 (x) = (__typeof__(*(ptr)))__gu_val; \
264 __gu_err; \
265})
266
267#define __get_user_check(x, ptr, size) \
268({ \
269 long __gu_err = -EFAULT; \
270 unsigned long __gu_val = 0; \
271 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
272 might_fault(); \
273 if (access_ok(VERIFY_READ, __gu_addr, (size))) { \
274 barrier_nospec(); \
275 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
276 } \
277 (x) = (__force __typeof__(*(ptr)))__gu_val; \
278 __gu_err; \
279})
280
281#define __get_user_nosleep(x, ptr, size) \
282({ \
283 long __gu_err; \
284 unsigned long __gu_val; \
285 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
286 __chk_user_ptr(ptr); \
287 barrier_nospec(); \
288 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
289 (x) = (__force __typeof__(*(ptr)))__gu_val; \
290 __gu_err; \
291})
292
293
/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	return __copy_tofrom_user(to, from, n);
}
#endif /* __powerpc64__ */

static inline unsigned long raw_copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			barrier_nospec();
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			barrier_nospec();
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			barrier_nospec();
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			barrier_nospec();
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	barrier_nospec();
	return __copy_tofrom_user((__force void __user *)to, from, n);
}

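/*
 * Illustrative sketch (not part of this header): as with all raw_copy_*
 * routines, the return value is the number of bytes left uncopied, so
 * zero means complete success.  Buffer names are hypothetical:
 *
 *	if (raw_copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 *
 * Note the constant-size fast path above: for n <= 8 known at compile
 * time, the copy collapses to a single __get_user_size() access.
 */
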
static inline unsigned long raw_copy_to_user(void __user *to,
		const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
			__put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
			__put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
			__put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	return __copy_tofrom_user(to, (__force const void __user *)from, n);
}

extern unsigned long __clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	might_fault();
	if (likely(access_ok(VERIFY_WRITE, addr, size)))
		return __clear_user(addr, size);
	return size;
}

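/*
 * Illustrative sketch (not part of this header): clear_user() likewise
 * returns the number of bytes NOT zeroed, so zero means success:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */
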
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

extern long __copy_from_user_flushcache(void *dst, const void __user *src,
		unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
		size_t len);

#endif /* _ARCH_POWERPC_UACCESS_H */