/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>
#include <asm/kup.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed; if
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#define get_fs()	(current->thread.addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return check addr_limit (fs) is correct */
	set_thread_flag(TIF_FSCHECK);
}
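
/*
 * Illustrative sketch, not part of the original header: the classic
 * (and now discouraged) pattern for letting a kernel buffer pass the
 * access_ok() check by temporarily raising the address limit. The
 * function name is made up for illustration; the point is that the
 * old limit must be restored on every return path.
 */
static inline void uaccess_example_widen_limit(void)
{
	mm_segment_t old_fs = get_fs();	/* save the current limit */

	set_fs(KERNEL_DS);	/* kernel addresses now pass access_ok() */
	/* ... user-access routines may touch kernel buffers here ... */
	set_fs(old_fs);		/* always restore before returning */
}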

#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
#define user_addr_max()	(get_fs().seg)

#ifdef __powerpc64__
/*
 * This check is sufficient because there is a large enough
 * gap between user addresses and the kernel addresses.
 */
#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else

static inline int __access_ok(unsigned long addr, unsigned long size,
			mm_segment_t seg)
{
	if (addr > seg.seg)
		return 0;
	return (size == 0 || size - 1 <= seg.seg - addr);
}

#endif

#define access_ok(addr, size)		\
	(__chk_user_ptr(addr),		\
	 __access_ok((__force unsigned long)(addr), (size), get_fs()))
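
/*
 * Worked note (illustrative, not part of the original header): with the
 * 64-bit check above, addr and size are each bounded by the segment
 * limit, so addr + size is at most twice the limit. Because user
 * addresses sit far below the kernel region, that sum still cannot
 * reach a kernel address, which is why no addition (and no overflow
 * check) is needed on the fast path.
 */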

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments. (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __put_user_goto(x, ptr, label) \
	__put_user_nocheck_goto((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#define __get_user_allowed(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)

#define __get_user_inatomic(x, ptr) \
	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
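
/*
 * Illustrative call-site sketch, not part of the original header: the
 * checked forms verify the address themselves, while the "__" forms
 * rely on a prior access_ok() over the whole range, e.g.:
 *
 *	if (!access_ok(uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	if (__get_user(a, uptr) || __get_user(b, uptr + 1))
 *		return -EFAULT;
 */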

#ifdef CONFIG_PPC64

#define ___get_user_instr(gu_op, dest, ptr)			\
({								\
	long __gui_ret = 0;					\
	unsigned long __gui_ptr = (unsigned long)ptr;		\
	struct ppc_inst __gui_inst;				\
	unsigned int __prefix, __suffix;			\
	__gui_ret = gu_op(__prefix, (unsigned int __user *)__gui_ptr);	\
	if (__gui_ret == 0) {					\
		if ((__prefix >> 26) == OP_PREFIX) {		\
			__gui_ret = gu_op(__suffix,		\
				(unsigned int __user *)__gui_ptr + 1);	\
			__gui_inst = ppc_inst_prefix(__prefix,	\
						     __suffix);	\
		} else {					\
			__gui_inst = ppc_inst(__prefix);	\
		}						\
		if (__gui_ret == 0)				\
			(dest) = __gui_inst;			\
	}							\
	__gui_ret;						\
})

#define get_user_instr(x, ptr) \
	___get_user_instr(get_user, x, ptr)

#define __get_user_instr(x, ptr) \
	___get_user_instr(__get_user, x, ptr)

#define __get_user_instr_inatomic(x, ptr) \
	___get_user_instr(__get_user_inatomic, x, ptr)

#else /* !CONFIG_PPC64 */
#define get_user_instr(x, ptr) \
	get_user((x).val, (u32 __user *)(ptr))

#define __get_user_instr(x, ptr) \
	__get_user_nocheck((x).val, (u32 __user *)(ptr), sizeof(u32), true)

#define __get_user_instr_inatomic(x, ptr) \
	__get_user_nosleep((x).val, (u32 __user *)(ptr), sizeof(u32))

#endif /* CONFIG_PPC64 */
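
/*
 * Illustrative sketch, not part of the original header: on PPC64 a
 * prefixed instruction is fetched as two 32-bit words, but callers
 * just pass a struct ppc_inst destination and a user address, e.g.:
 *
 *	struct ppc_inst inst;
 *
 *	if (get_user_instr(inst, (void __user *)regs->nip))
 *		return -EFAULT;
 */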

extern long __put_user_bad(void);

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"1:	" op " %1,0(%2)	# put_user\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 3b)				\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
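
/*
 * For illustration (approximate, not actual generated output): for a
 * 4-byte store, __put_user_asm(x, p, err, "stw") emits roughly
 *
 *	1:	stw	rX,0(rP)	# the store that may fault
 *	2:	...			# normal fall-through path
 *	.section .fixup
 *	3:	li	rE,-EFAULT	# runs only after a fault
 *		b	2b
 *	.previous
 *
 * plus an exception-table entry mapping a fault at 1: to the fixup at 3:.
 */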

#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval)				\
	__put_user_asm(x, ptr, retval, "std")
#else /* __powerpc64__ */
#define __put_user_asm2(x, addr, err)				\
	__asm__ __volatile__(					\
		"1:	stw %1,0(%2)\n"				\
		"2:	stw %1+1,4(%2)\n"			\
		"3:\n"						\
		".section .fixup,\"ax\"\n"			\
		"4:	li %0,%3\n"				\
		"	b 3b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 4b)				\
		EX_TABLE(2b, 4b)				\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __put_user_size_allowed(x, ptr, size, retval)		\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
	case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
	case 4: __put_user_asm(x, ptr, retval, "stw"); break;	\
	case 8: __put_user_asm2(x, ptr, retval); break;		\
	default: __put_user_bad();				\
	}							\
} while (0)

#define __put_user_size(x, ptr, size, retval)			\
do {								\
	allow_write_to_user(ptr, size);				\
	__put_user_size_allowed(x, ptr, size, retval);		\
	prevent_write_to_user(ptr, size);			\
} while (0)

#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__typeof__(*(ptr)) __pu_val = (x);			\
	__typeof__(size) __pu_size = (size);			\
								\
	if (!is_kernel_addr((unsigned long)__pu_addr))		\
		might_fault();					\
	__chk_user_ptr(__pu_addr);				\
	__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);	\
								\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__typeof__(*(ptr)) __pu_val = (x);			\
	__typeof__(size) __pu_size = (size);			\
								\
	might_fault();						\
	if (access_ok(__pu_addr, __pu_size))			\
		__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);	\
								\
	__pu_err;						\
})

#define __put_user_nosleep(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__typeof__(*(ptr)) __pu_val = (x);			\
	__typeof__(size) __pu_size = (size);			\
								\
	__chk_user_ptr(__pu_addr);				\
	__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);	\
								\
	__pu_err;						\
})

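/*
 * Illustrative sketch, not part of the original header: put_user() may
 * sleep on a fault, so atomic or pagefault-disabled contexts use the
 * _inatomic variant instead. The helper name is made up.
 */
static inline long uaccess_example_put(u32 __user *uptr, u32 val, bool atomic)
{
	if (atomic)
		return __put_user_inatomic(val, uptr); /* no might_fault() */
	return put_user(val, uptr);	/* checks access_ok() itself */
}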

#define __put_user_asm_goto(x, addr, label, op)			\
	asm volatile goto(					\
		"1:	" op "%U1%X1 %0,%1	# put_user\n"	\
		EX_TABLE(1b, %l2)				\
		:						\
		: "r" (x), "m" (*addr)				\
		:						\
		: label)

#ifdef __powerpc64__
#define __put_user_asm2_goto(x, ptr, label)			\
	__put_user_asm_goto(x, ptr, label, "std")
#else /* __powerpc64__ */
#define __put_user_asm2_goto(x, addr, label)			\
	asm volatile goto(					\
		"1:	stw%X1 %0, %1\n"			\
		"2:	stw%X1 %L0, %L1\n"			\
		EX_TABLE(1b, %l2)				\
		EX_TABLE(2b, %l2)				\
		:						\
		: "r" (x), "m" (*addr)				\
		:						\
		: label)
#endif /* __powerpc64__ */

#define __put_user_size_goto(x, ptr, size, label)		\
do {								\
	switch (size) {						\
	case 1: __put_user_asm_goto(x, ptr, label, "stb"); break;	\
	case 2: __put_user_asm_goto(x, ptr, label, "sth"); break;	\
	case 4: __put_user_asm_goto(x, ptr, label, "stw"); break;	\
	case 8: __put_user_asm2_goto(x, ptr, label); break;	\
	default: __put_user_bad();				\
	}							\
} while (0)

#define __put_user_nocheck_goto(x, ptr, size, label)		\
do {								\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	if (!is_kernel_addr((unsigned long)__pu_addr))		\
		might_fault();					\
	__chk_user_ptr(ptr);					\
	__put_user_size_goto((x), __pu_addr, (size), label);	\
} while (0)

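/*
 * Illustrative sketch, not part of the original header: the _goto forms
 * branch straight to a C label on a fault instead of setting an error
 * variable, which can let the compiler drop the error check from the
 * fast path. The caller is expected to have validated the range first.
 * The helper name is made up.
 */
static inline long uaccess_example_put_goto(u32 __user *uptr, u32 val)
{
	__put_user_goto(val, uptr, efault);	/* branches to efault on fault */
	return 0;
efault:
	return -EFAULT;
}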

extern long __get_user_bad(void);

/*
 * This does an atomic 128-byte aligned load from userspace.
 * It is up to the caller to do enable_kernel_vmx() before calling!
 */
#define __get_user_atomic_128_aligned(kaddr, uaddr, err)	\
	__asm__ __volatile__(					\
		"1:	lvx  0,0,%1	# get user\n"		\
		"	stvx 0,0,%2	# put kernel\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 3b)				\
		: "=r" (err)					\
		: "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))
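
/*
 * Illustrative sketch, not part of the original header: a caller must
 * own the VMX unit before using the lvx/stvx pair above, along the
 * lines of (the exact enable/disable helpers depend on the kernel
 * version, as the comment above suggests):
 *
 *	err = 0;
 *	preempt_disable();
 *	enable_kernel_vmx();
 *	__get_user_atomic_128_aligned(kbuf, uaddr, err);
 *	preempt_enable();
 */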

#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0(%2)	# get_user\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz %1,0(%2)\n"			\
		"2:	lwz %1+1,4(%2)\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 4b)			\
		EX_TABLE(2b, 4b)			\
		: "=r" (err), "=&r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size_allowed(x, ptr, size, retval)		\
do {								\
	retval = 0;						\
	__chk_user_ptr(ptr);					\
	if (size > sizeof(x))					\
		(x) = __get_user_bad();				\
	switch (size) {						\
	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, ptr, retval); break;		\
	default: (x) = __get_user_bad();			\
	}							\
} while (0)

#define __get_user_size(x, ptr, size, retval)			\
do {								\
	allow_read_from_user(ptr, size);			\
	__get_user_size_allowed(x, ptr, size, retval);		\
	prevent_read_from_user(ptr, size);			\
} while (0)

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __long_type(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
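
/*
 * For example (illustrative): __long_type(*(u8 __user *)p) is unsigned
 * long, while on 32-bit __long_type(*(u64 __user *)p) is unsigned long
 * long, so the __gu_val temporaries below are always wide enough for
 * the 8-byte case without forcing every access to use long long.
 */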

#define __get_user_nocheck(x, ptr, size, do_allow)		\
({								\
	long __gu_err;						\
	__long_type(*(ptr)) __gu_val;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	__typeof__(size) __gu_size = (size);			\
								\
	__chk_user_ptr(__gu_addr);				\
	if (!is_kernel_addr((unsigned long)__gu_addr))		\
		might_fault();					\
	barrier_nospec();					\
	if (do_allow)						\
		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err);	\
	else							\
		__get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val;			\
								\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)				\
({								\
	long __gu_err = -EFAULT;				\
	__long_type(*(ptr)) __gu_val = 0;			\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	__typeof__(size) __gu_size = (size);			\
								\
	might_fault();						\
	if (access_ok(__gu_addr, __gu_size)) {			\
		barrier_nospec();				\
		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
	}							\
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
								\
	__gu_err;						\
})

#define __get_user_nosleep(x, ptr, size)			\
({								\
	long __gu_err;						\
	__long_type(*(ptr)) __gu_val;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	__typeof__(size) __gu_size = (size);			\
								\
	__chk_user_ptr(__gu_addr);				\
	barrier_nospec();					\
	__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
								\
	__gu_err;						\
})


/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	unsigned long ret;

	barrier_nospec();
	allow_read_write_user(to, from, n);
	ret = __copy_tofrom_user(to, from, n);
	prevent_read_write_user(to, from, n);
	return ret;
}
#endif /* __powerpc64__ */

static inline unsigned long raw_copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long ret;

	if (__builtin_constant_p(n) && (n <= 8)) {
		ret = 1;

		switch (n) {
		case 1:
			barrier_nospec();
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			barrier_nospec();
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			barrier_nospec();
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			barrier_nospec();
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	barrier_nospec();
	allow_read_from_user(from, n);
	ret = __copy_tofrom_user((__force void __user *)to, from, n);
	prevent_read_from_user(from, n);
	return ret;
}
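
/*
 * Illustrative sketch, not part of the original header: like all raw
 * copy routines, raw_copy_from_user() returns the number of bytes that
 * could NOT be copied, so zero means success. The helper name is made up.
 */
static inline int uaccess_example_copy_in(void *dst, const void __user *src,
					  unsigned long len)
{
	if (raw_copy_from_user(dst, src, len))	/* nonzero: partial copy */
		return -EFAULT;
	return 0;
}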

static inline unsigned long
raw_copy_to_user_allowed(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
			__put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
			__put_user_size_allowed(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
			__put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	return __copy_tofrom_user(to, (__force const void __user *)from, n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long ret;

	allow_write_to_user(to, n);
	ret = raw_copy_to_user_allowed(to, from, n);
	prevent_write_to_user(to, n);
	return ret;
}

static __always_inline unsigned long __must_check
copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true))) {
		if (access_ok(to, n)) {
			allow_write_to_user(to, n);
			n = memcpy_mcsafe((void *)to, from, n);
			prevent_write_to_user(to, n);
		}
	}

	return n;
}

unsigned long __arch_clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret = size;

	might_fault();
	if (likely(access_ok(addr, size))) {
		allow_write_to_user(addr, size);
		ret = __arch_clear_user(addr, size);
		prevent_write_to_user(addr, size);
	}
	return ret;
}

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	return clear_user(addr, size);
}

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

extern long __copy_from_user_flushcache(void *dst, const void __user *src,
		unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
			   size_t len);

static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;
	allow_read_write_user((void __user *)ptr, ptr, len);
	return true;
}
#define user_access_begin	user_access_begin
#define user_access_end		prevent_current_access_user
#define user_access_save	prevent_user_access_return
#define user_access_restore	restore_user_access

static __must_check inline bool
user_read_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;
	allow_read_from_user(ptr, len);
	return true;
}
#define user_read_access_begin	user_read_access_begin
#define user_read_access_end	prevent_current_read_from_user

static __must_check inline bool
user_write_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;
	allow_write_to_user((void __user *)ptr, len);
	return true;
}
#define user_write_access_begin	user_write_access_begin
#define user_write_access_end	prevent_current_write_to_user

#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
#define unsafe_put_user(x, p, e) __put_user_goto(x, p, e)

#define unsafe_copy_to_user(d, s, l, e) \
do {									\
	u8 __user *_dst = (u8 __user *)(d);				\
	const u8 *_src = (const u8 *)(s);				\
	size_t _len = (l);						\
	int _i;								\
									\
	for (_i = 0; _i < (_len & ~(sizeof(long) - 1)); _i += sizeof(long))	\
		__put_user_goto(*(long*)(_src + _i), (long __user *)(_dst + _i), e); \
	if (IS_ENABLED(CONFIG_PPC64) && (_len & 4)) {			\
		__put_user_goto(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e); \
		_i += 4;						\
	}								\
	if (_len & 2) {							\
		__put_user_goto(*(u16*)(_src + _i), (u16 __user *)(_dst + _i), e); \
		_i += 2;						\
	}								\
	if (_len & 1)							\
		__put_user_goto(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e); \
} while (0)

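/*
 * Illustrative sketch, not part of the original header: unsafe_get_user()
 * and unsafe_put_user() must run between user_access_begin() and
 * user_access_end(), and the fault label must close the access window
 * as well. The helper name is made up.
 */
static inline long uaccess_example_unsafe_pair(u32 __user *uptr, u32 a, u32 b)
{
	if (!user_access_begin(uptr, 2 * sizeof(u32)))
		return -EFAULT;
	unsafe_put_user(a, uptr, efault);	/* branches to efault on fault */
	unsafe_put_user(b, uptr + 1, efault);
	user_access_end();
	return 0;

efault:
	user_access_end();	/* close the window on the failure path too */
	return -EFAULT;
}
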
#endif	/* _ARCH_POWERPC_UACCESS_H */