Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: 8091/2: add get_user() support for 8 byte types

Recent contributions, including to DRM and binder, introduce 64-bit
values in their interfaces. A common motivation for this is to allow
the same ABI for 32- and 64-bit userspaces (and therefore also a shared
ABI for 32/64 hybrid userspaces). Anyhow, the developers would like to
avoid gotchas like having to use copy_from_user().

This feature is already implemented on x86-32 and the majority of other
32-bit architectures. The current list of get_user_8 hold-out
architectures is: arm, avr32, blackfin, m32r, metag, microblaze,
mn10300, sh.

Credit:

My name sits rather uneasily at the top of this patch. The v1 and
v2 versions of the patch were written by Rob Clark and to produce v4
I mostly copied code from Russell King and H. Peter Anvin. However I
have mangled the patch sufficiently that *blame* is rightfully mine
even if credit should be more widely shared.

Changelog:

v5: updated to use the ret macro (requested by Russell King)
v4: remove an inlined add on big endian systems (spotted by Russell King),
used __ARMEB__ rather than BIG_ENDIAN (to match rest of file),
cleared r3 on EFAULT during __get_user_8.
v3: fix a couple of checkpatch issues
v2: pass correct size to check_uaccess, and better handling of narrowing
double word read with __get_user_xb() (Russell King's suggestion)
v1: original

Signed-off-by: Rob Clark <robdclark@gmail.com>
Signed-off-by: Daniel Thompson <daniel.thompson@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

authored by

Daniel Thompson and committed by
Russell King
e38361d0 bc994c77

+55 -2
+19 -1
arch/arm/include/asm/uaccess.h
··· 107 107 extern int __get_user_1(void *); 108 108 extern int __get_user_2(void *); 109 109 extern int __get_user_4(void *); 110 + extern int __get_user_lo8(void *); 111 + extern int __get_user_8(void *); 110 112 111 113 #define __GUP_CLOBBER_1 "lr", "cc" 112 114 #ifdef CONFIG_CPU_USE_DOMAINS ··· 117 115 #define __GUP_CLOBBER_2 "lr", "cc" 118 116 #endif 119 117 #define __GUP_CLOBBER_4 "lr", "cc" 118 + #define __GUP_CLOBBER_lo8 "lr", "cc" 119 + #define __GUP_CLOBBER_8 "lr", "cc" 120 120 121 121 #define __get_user_x(__r2,__p,__e,__l,__s) \ 122 122 __asm__ __volatile__ ( \ ··· 129 125 : "0" (__p), "r" (__l) \ 130 126 : __GUP_CLOBBER_##__s) 131 127 128 + /* narrowing a double-word get into a single 32bit word register: */ 129 + #ifdef __ARMEB__ 130 + #define __get_user_xb(__r2, __p, __e, __l, __s) \ 131 + __get_user_x(__r2, __p, __e, __l, lo8) 132 + #else 133 + #define __get_user_xb __get_user_x 134 + #endif 135 + 132 136 #define __get_user_check(x,p) \ 133 137 ({ \ 134 138 unsigned long __limit = current_thread_info()->addr_limit - 1; \ 135 139 register const typeof(*(p)) __user *__p asm("r0") = (p);\ 136 - register unsigned long __r2 asm("r2"); \ 140 + register typeof(x) __r2 asm("r2"); \ 137 141 register unsigned long __l asm("r1") = __limit; \ 138 142 register int __e asm("r0"); \ 139 143 switch (sizeof(*(__p))) { \ ··· 153 141 break; \ 154 142 case 4: \ 155 143 __get_user_x(__r2, __p, __e, __l, 4); \ 144 + break; \ 145 + case 8: \ 146 + if (sizeof((x)) < 8) \ 147 + __get_user_xb(__r2, __p, __e, __l, 4); \ 148 + else \ 149 + __get_user_x(__r2, __p, __e, __l, 8); \ 156 150 break; \ 157 151 default: __e = __get_user_bad(); break; \ 158 152 } \
+36 -1
arch/arm/lib/getuser.S
··· 18 18 * Inputs: r0 contains the address 19 19 * r1 contains the address limit, which must be preserved 20 20 * Outputs: r0 is the error code 21 - * r2 contains the zero-extended value 21 + * r2, r3 contains the zero-extended value 22 22 * lr corrupted 23 23 * 24 24 * No other registers must be altered. (see <asm/uaccess.h> ··· 66 66 ret lr 67 67 ENDPROC(__get_user_4) 68 68 69 + ENTRY(__get_user_8) 70 + check_uaccess r0, 8, r1, r2, __get_user_bad 71 + #ifdef CONFIG_THUMB2_KERNEL 72 + 5: TUSER(ldr) r2, [r0] 73 + 6: TUSER(ldr) r3, [r0, #4] 74 + #else 75 + 5: TUSER(ldr) r2, [r0], #4 76 + 6: TUSER(ldr) r3, [r0] 77 + #endif 78 + mov r0, #0 79 + ret lr 80 + ENDPROC(__get_user_8) 81 + 82 + #ifdef __ARMEB__ 83 + ENTRY(__get_user_lo8) 84 + check_uaccess r0, 8, r1, r2, __get_user_bad 85 + #ifdef CONFIG_CPU_USE_DOMAINS 86 + add r0, r0, #4 87 + 7: ldrt r2, [r0] 88 + #else 89 + 7: ldr r2, [r0, #4] 90 + #endif 91 + mov r0, #0 92 + ret lr 93 + ENDPROC(__get_user_lo8) 94 + #endif 95 + 96 + __get_user_bad8: 97 + mov r3, #0 69 98 __get_user_bad: 70 99 mov r2, #0 71 100 mov r0, #-EFAULT 72 101 ret lr 73 102 ENDPROC(__get_user_bad) 103 + ENDPROC(__get_user_bad8) 74 104 75 105 .pushsection __ex_table, "a" 76 106 .long 1b, __get_user_bad 77 107 .long 2b, __get_user_bad 78 108 .long 3b, __get_user_bad 79 109 .long 4b, __get_user_bad 110 + .long 5b, __get_user_bad8 111 + .long 6b, __get_user_bad8 112 + #ifdef __ARMEB__ 113 + .long 7b, __get_user_bad 114 + #endif 80 115 .popsection