Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: 7527/1: uaccess: explicitly check __user pointer when !CPU_USE_DOMAINS

The {get,put}_user macros don't perform range checking on the provided
__user address when !CPU_USE_DOMAINS.

This patch reworks the out-of-line assembly accessors to check the user
address against a specified limit, returning -EFAULT if it is out of
range.

[will: changed get_user register allocation to match put_user]
[rmk: fixed building on older ARM architectures]

Reported-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Cc: stable@vger.kernel.org
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

+56 -21
+8
arch/arm/include/asm/assembler.h
··· 320 320 .size \name , . - \name 321 321 .endm 322 322 323 + .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req 324 + #ifndef CONFIG_CPU_USE_DOMAINS 325 + adds \tmp, \addr, #\size - 1 326 + sbcccs \tmp, \tmp, \limit 327 + bcs \bad 328 + #endif 329 + .endm 330 + 323 331 #endif /* __ASM_ASSEMBLER_H__ */
+27 -13
arch/arm/include/asm/uaccess.h
··· 101 101 extern int __get_user_2(void *); 102 102 extern int __get_user_4(void *); 103 103 104 - #define __get_user_x(__r2,__p,__e,__s,__i...) \ 104 + #define __GUP_CLOBBER_1 "lr", "cc" 105 + #ifdef CONFIG_CPU_USE_DOMAINS 106 + #define __GUP_CLOBBER_2 "ip", "lr", "cc" 107 + #else 108 + #define __GUP_CLOBBER_2 "lr", "cc" 109 + #endif 110 + #define __GUP_CLOBBER_4 "lr", "cc" 111 + 112 + #define __get_user_x(__r2,__p,__e,__l,__s) \ 105 113 __asm__ __volatile__ ( \ 106 114 __asmeq("%0", "r0") __asmeq("%1", "r2") \ 115 + __asmeq("%3", "r1") \ 107 116 "bl __get_user_" #__s \ 108 117 : "=&r" (__e), "=r" (__r2) \ 109 - : "0" (__p) \ 110 - : __i, "cc") 118 + : "0" (__p), "r" (__l) \ 119 + : __GUP_CLOBBER_##__s) 111 120 112 121 #define get_user(x,p) \ 113 122 ({ \ 123 + unsigned long __limit = current_thread_info()->addr_limit - 1; \ 114 124 register const typeof(*(p)) __user *__p asm("r0") = (p);\ 115 125 register unsigned long __r2 asm("r2"); \ 126 + register unsigned long __l asm("r1") = __limit; \ 116 127 register int __e asm("r0"); \ 117 128 switch (sizeof(*(__p))) { \ 118 129 case 1: \ 119 - __get_user_x(__r2, __p, __e, 1, "lr"); \ 120 - break; \ 130 + __get_user_x(__r2, __p, __e, __l, 1); \ 131 + break; \ 121 132 case 2: \ 122 - __get_user_x(__r2, __p, __e, 2, "r3", "lr"); \ 133 + __get_user_x(__r2, __p, __e, __l, 2); \ 123 134 break; \ 124 135 case 4: \ 125 - __get_user_x(__r2, __p, __e, 4, "lr"); \ 136 + __get_user_x(__r2, __p, __e, __l, 4); \ 126 137 break; \ 127 138 default: __e = __get_user_bad(); break; \ 128 139 } \ ··· 146 135 extern int __put_user_4(void *, unsigned int); 147 136 extern int __put_user_8(void *, unsigned long long); 148 137 149 - #define __put_user_x(__r2,__p,__e,__s) \ 138 + #define __put_user_x(__r2,__p,__e,__l,__s) \ 150 139 __asm__ __volatile__ ( \ 151 140 __asmeq("%0", "r0") __asmeq("%2", "r2") \ 141 + __asmeq("%3", "r1") \ 152 142 "bl __put_user_" #__s \ 153 143 : "=&r" (__e) \ 154 - : "0" (__p), "r" (__r2) \ 144 + : "0" (__p), "r" (__r2), "r" (__l) \ 155 145 : "ip", "lr", "cc") 156 146 157 147 #define put_user(x,p) \ 158 148 ({ \ 149 + unsigned long __limit = current_thread_info()->addr_limit - 1; \ 159 150 register const typeof(*(p)) __r2 asm("r2") = (x); \ 160 151 register const typeof(*(p)) __user *__p asm("r0") = (p);\ 152 + register unsigned long __l asm("r1") = __limit; \ 161 153 register int __e asm("r0"); \ 162 154 switch (sizeof(*(__p))) { \ 163 155 case 1: \ 164 - __put_user_x(__r2, __p, __e, 1); \ 156 + __put_user_x(__r2, __p, __e, __l, 1); \ 165 157 break; \ 166 158 case 2: \ 167 - __put_user_x(__r2, __p, __e, 2); \ 159 + __put_user_x(__r2, __p, __e, __l, 2); \ 168 160 break; \ 169 161 case 4: \ 170 - __put_user_x(__r2, __p, __e, 4); \ 162 + __put_user_x(__r2, __p, __e, __l, 4); \ 171 163 break; \ 172 164 case 8: \ 173 - __put_user_x(__r2, __p, __e, 8); \ 165 + __put_user_x(__r2, __p, __e, __l, 8); \ 174 166 break; \ 175 167 default: __e = __put_user_bad(); break; \ 176 168 } \
+15 -8
arch/arm/lib/getuser.S
··· 16 16 * __get_user_X 17 17 * 18 18 * Inputs: r0 contains the address 19 + * r1 contains the address limit, which must be preserved 19 20 * Outputs: r0 is the error code 20 - * r2, r3 contains the zero-extended value 21 + * r2 contains the zero-extended value 21 22 * lr corrupted 22 23 * 23 24 * No other registers must be altered. (see <asm/uaccess.h> ··· 28 27 * Note also that it is intended that __get_user_bad is not global. 29 28 */ 30 29 #include <linux/linkage.h> 30 + #include <asm/assembler.h> 31 31 #include <asm/errno.h> 32 32 #include <asm/domain.h> 33 33 34 34 ENTRY(__get_user_1) 35 + check_uaccess r0, 1, r1, r2, __get_user_bad 35 36 1: TUSER(ldrb) r2, [r0] 36 37 mov r0, #0 37 38 mov pc, lr 38 39 ENDPROC(__get_user_1) 39 40 40 41 ENTRY(__get_user_2) 41 - #ifdef CONFIG_THUMB2_KERNEL 42 - 2: TUSER(ldrb) r2, [r0] 43 - 3: TUSER(ldrb) r3, [r0, #1] 42 + check_uaccess r0, 2, r1, r2, __get_user_bad 43 + #ifdef CONFIG_CPU_USE_DOMAINS 44 + rb .req ip 45 + 2: ldrbt r2, [r0], #1 46 + 3: ldrbt rb, [r0], #0 44 47 #else 45 - 2: TUSER(ldrb) r2, [r0], #1 46 - 3: TUSER(ldrb) r3, [r0] 48 + rb .req r0 49 + 2: ldrb r2, [r0] 50 + 3: ldrb rb, [r0, #1] 47 51 #endif 48 52 #ifndef __ARMEB__ 49 - orr r2, r2, r3, lsl #8 53 + orr r2, r2, rb, lsl #8 50 54 #else 51 - orr r2, r3, r2, lsl #8 55 + orr r2, rb, r2, lsl #8 52 56 #endif 53 57 mov r0, #0 54 58 mov pc, lr 55 59 ENDPROC(__get_user_2) 56 60 57 61 ENTRY(__get_user_4) 62 + check_uaccess r0, 4, r1, r2, __get_user_bad 58 63 4: TUSER(ldr) r2, [r0] 59 64 mov r0, #0 60 65 mov pc, lr
+6
arch/arm/lib/putuser.S
··· 16 16 * __put_user_X 17 17 * 18 18 * Inputs: r0 contains the address 19 + * r1 contains the address limit, which must be preserved 19 20 * r2, r3 contains the value 20 21 * Outputs: r0 is the error code 21 22 * lr corrupted ··· 28 27 * Note also that it is intended that __put_user_bad is not global. 29 28 */ 30 29 #include <linux/linkage.h> 30 + #include <asm/assembler.h> 31 31 #include <asm/errno.h> 32 32 #include <asm/domain.h> 33 33 34 34 ENTRY(__put_user_1) 35 + check_uaccess r0, 1, r1, ip, __put_user_bad 35 36 1: TUSER(strb) r2, [r0] 36 37 mov r0, #0 37 38 mov pc, lr 38 39 ENDPROC(__put_user_1) 39 40 40 41 ENTRY(__put_user_2) 42 + check_uaccess r0, 2, r1, ip, __put_user_bad 41 43 mov ip, r2, lsr #8 42 44 #ifdef CONFIG_THUMB2_KERNEL 43 45 #ifndef __ARMEB__ ··· 64 60 ENDPROC(__put_user_2) 65 61 66 62 ENTRY(__put_user_4) 63 + check_uaccess r0, 4, r1, ip, __put_user_bad 67 64 4: TUSER(str) r2, [r0] 68 65 mov r0, #0 69 66 mov pc, lr 70 67 ENDPROC(__put_user_4) 71 68 72 69 ENTRY(__put_user_8) 70 + check_uaccess r0, 8, r1, ip, __put_user_bad 73 71 #ifdef CONFIG_THUMB2_KERNEL 74 72 5: TUSER(str) r2, [r0] 75 73 6: TUSER(str) r3, [r0, #4]