Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'asm-generic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic

Pull asm-generic uaccess.h cleanup from Arnd Bergmann:
"Like in 3.19, I once more have a multi-stage cleanup for one
asm-generic header file, this time the work was done by Michael
Tsirkin and cleans up the uaccess.h file in asm-generic, as well as
all architectures for which the respective maintainers did not pick up
his patches directly"

* tag 'asm-generic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic: (37 commits)
sparc32: nocheck uaccess coding style tweaks
sparc64: nocheck uaccess coding style tweaks
xtensa: macro whitespace fixes
sh: macro whitespace fixes
parisc: macro whitespace fixes
m68k: macro whitespace fixes
m32r: macro whitespace fixes
frv: macro whitespace fixes
cris: macro whitespace fixes
avr32: macro whitespace fixes
arm64: macro whitespace fixes
arm: macro whitespace fixes
alpha: macro whitespace fixes
blackfin: macro whitespace fixes
sparc64: uaccess_64 macro whitespace fixes
sparc32: uaccess_32 macro whitespace fixes
avr32: whitespace fix
sh: fix put_user sparse errors
metag: fix put_user sparse errors
ia64: fix put_user sparse errors
...
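
Several of the patches above replace plain casts with __force casts to quiet sparse. A minimal standalone sketch of the annotations involved (this toy file only mirrors the kernel's macro names; it is not the kernel's headers). Build with "cc sparse_sketch.c"; check with "sparse sparse_sketch.c":

/* sparse_sketch.c */
#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

#include <stdio.h>

int main(void)
{
        int val = 42;
        /* Moving a pointer into the __user address space needs __force... */
        int __user *uptr = (__force int __user *)&val;
        /* ...and so does moving it back out; without __force, sparse
         * warns that the cast removes the address space of the
         * expression, which is exactly the noise these patches fix. */
        int *kptr = (__force int *)uptr;
        printf("%d\n", *kptr);
        return 0;
}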

+570 -487
+43 -43
arch/alpha/include/asm/uaccess.h
···
27 27 #define get_ds() (KERNEL_DS)
28 28 #define set_fs(x) (current_thread_info()->addr_limit = (x))
29 29
30 - #define segment_eq(a,b) ((a).seg == (b).seg)
30 + #define segment_eq(a, b) ((a).seg == (b).seg)
31 31
32 32 /*
33 33 * Is a address valid? This does a straightforward calculation rather
···
39 39 * - AND "addr+size" doesn't have any high-bits set
40 40 * - OR we are in kernel mode.
41 41 */
42 - #define __access_ok(addr,size,segment) \
42 + #define __access_ok(addr, size, segment) \
43 43 (((segment).seg & (addr | size | (addr+size))) == 0)
44 44
45 - #define access_ok(type,addr,size) \
45 + #define access_ok(type, addr, size) \
46 46 ({ \
47 47 __chk_user_ptr(addr); \
48 - __access_ok(((unsigned long)(addr)),(size),get_fs()); \
48 + __access_ok(((unsigned long)(addr)), (size), get_fs()); \
49 49 })
50 50
51 51 /*
···
60 60 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
61 61 * (b) require any knowledge of processes at this stage
62 62 */
63 - #define put_user(x,ptr) \
64 - __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)),get_fs())
65 - #define get_user(x,ptr) \
66 - __get_user_check((x),(ptr),sizeof(*(ptr)),get_fs())
63 + #define put_user(x, ptr) \
64 + __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), get_fs())
65 + #define get_user(x, ptr) \
66 + __get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())
67 67
68 68 /*
69 69 * The "__xxx" versions do not do address space checking, useful when
70 70 * doing multiple accesses to the same area (the programmer has to do the
71 71 * checks by hand with "access_ok()")
72 72 */
73 - #define __put_user(x,ptr) \
74 - __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
75 - #define __get_user(x,ptr) \
76 - __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
73 + #define __put_user(x, ptr) \
74 + __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
75 + #define __get_user(x, ptr) \
76 + __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
77 77
78 78 /*
79 79 * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
···
84 84
85 85 extern void __get_user_unknown(void);
86 86
87 - #define __get_user_nocheck(x,ptr,size) \
87 + #define __get_user_nocheck(x, ptr, size) \
88 88 ({ \
89 89 long __gu_err = 0; \
90 90 unsigned long __gu_val; \
···
96 96 case 8: __get_user_64(ptr); break; \
97 97 default: __get_user_unknown(); break; \
98 98 } \
99 - (x) = (__typeof__(*(ptr))) __gu_val; \
99 + (x) = (__force __typeof__(*(ptr))) __gu_val; \
100 100 __gu_err; \
101 101 })
102 102
103 - #define __get_user_check(x,ptr,size,segment) \
103 + #define __get_user_check(x, ptr, size, segment) \
104 104 ({ \
105 105 long __gu_err = -EFAULT; \
106 106 unsigned long __gu_val = 0; \
107 107 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
108 - if (__access_ok((unsigned long)__gu_addr,size,segment)) { \
108 + if (__access_ok((unsigned long)__gu_addr, size, segment)) { \
109 109 __gu_err = 0; \
110 110 switch (size) { \
111 111 case 1: __get_user_8(__gu_addr); break; \
···
115 115 default: __get_user_unknown(); break; \
116 116 } \
117 117 } \
118 - (x) = (__typeof__(*(ptr))) __gu_val; \
118 + (x) = (__force __typeof__(*(ptr))) __gu_val; \
119 119 __gu_err; \
120 120 })
121 121
···
201 201
202 202 extern void __put_user_unknown(void);
203 203
204 - #define __put_user_nocheck(x,ptr,size) \
204 + #define __put_user_nocheck(x, ptr, size) \
205 205 ({ \
206 206 long __pu_err = 0; \
207 207 __chk_user_ptr(ptr); \
208 208 switch (size) { \
209 - case 1: __put_user_8(x,ptr); break; \
210 - case 2: __put_user_16(x,ptr); break; \
211 - case 4: __put_user_32(x,ptr); break; \
212 - case 8: __put_user_64(x,ptr); break; \
209 + case 1: __put_user_8(x, ptr); break; \
210 + case 2: __put_user_16(x, ptr); break; \
211 + case 4: __put_user_32(x, ptr); break; \
212 + case 8: __put_user_64(x, ptr); break; \
213 213 default: __put_user_unknown(); break; \
214 214 } \
215 215 __pu_err; \
216 216 })
217 217
218 - #define __put_user_check(x,ptr,size,segment) \
218 + #define __put_user_check(x, ptr, size, segment) \
219 219 ({ \
220 220 long __pu_err = -EFAULT; \
221 221 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
222 - if (__access_ok((unsigned long)__pu_addr,size,segment)) { \
222 + if (__access_ok((unsigned long)__pu_addr, size, segment)) { \
223 223 __pu_err = 0; \
224 224 switch (size) { \
225 - case 1: __put_user_8(x,__pu_addr); break; \
226 - case 2: __put_user_16(x,__pu_addr); break; \
227 - case 4: __put_user_32(x,__pu_addr); break; \
228 - case 8: __put_user_64(x,__pu_addr); break; \
225 + case 1: __put_user_8(x, __pu_addr); break; \
226 + case 2: __put_user_16(x, __pu_addr); break; \
227 + case 4: __put_user_32(x, __pu_addr); break; \
228 + case 8: __put_user_64(x, __pu_addr); break; \
229 229 default: __put_user_unknown(); break; \
230 230 } \
231 231 } \
···
237 237 * instead of writing: this is because they do not write to
238 238 * any memory gcc knows about, so there are no aliasing issues
239 239 */
240 - #define __put_user_64(x,addr) \
240 + #define __put_user_64(x, addr) \
241 241 __asm__ __volatile__("1: stq %r2,%1\n" \
242 242 "2:\n" \
243 243 ".section __ex_table,\"a\"\n" \
···
247 247 : "=r"(__pu_err) \
248 248 : "m" (__m(addr)), "rJ" (x), "0"(__pu_err))
249 249
250 - #define __put_user_32(x,addr) \
250 + #define __put_user_32(x, addr) \
251 251 __asm__ __volatile__("1: stl %r2,%1\n" \
252 252 "2:\n" \
253 253 ".section __ex_table,\"a\"\n" \
···
260 260 #ifdef __alpha_bwx__
261 261 /* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */
262 262
263 - #define __put_user_16(x,addr) \
263 + #define __put_user_16(x, addr) \
264 264 __asm__ __volatile__("1: stw %r2,%1\n" \
265 265 "2:\n" \
266 266 ".section __ex_table,\"a\"\n" \
···
270 270 : "=r"(__pu_err) \
271 271 : "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
272 272
273 - #define __put_user_8(x,addr) \
273 + #define __put_user_8(x, addr) \
274 274 __asm__ __volatile__("1: stb %r2,%1\n" \
275 275 "2:\n" \
276 276 ".section __ex_table,\"a\"\n" \
···
283 283 /* Unfortunately, we can't get an unaligned access trap for the sub-word
284 284 write, so we have to do a general unaligned operation. */
285 285
286 - #define __put_user_16(x,addr) \
286 + #define __put_user_16(x, addr) \
287 287 { \
288 288 long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4; \
289 289 __asm__ __volatile__( \
···
308 308 " .long 4b - .\n" \
309 309 " lda $31, 5b-4b(%0)\n" \
310 310 ".previous" \
311 - : "=r"(__pu_err), "=&r"(__pu_tmp1), \
312 - "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \
311 + : "=r"(__pu_err), "=&r"(__pu_tmp1), \
312 + "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \
313 313 "=&r"(__pu_tmp4) \
314 314 : "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
315 315 }
316 316
317 - #define __put_user_8(x,addr) \
317 + #define __put_user_8(x, addr) \
318 318 { \
319 319 long __pu_tmp1, __pu_tmp2; \
320 320 __asm__ __volatile__( \
···
330 330 " .long 2b - .\n" \
331 331 " lda $31, 3b-2b(%0)\n" \
332 332 ".previous" \
333 - : "=r"(__pu_err), \
333 + : "=r"(__pu_err), \
334 334 "=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \
335 335 : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
336 336 }
···
366 366 : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
367 367 : __module_address(__copy_user)
368 368 "0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
369 - : "$1","$2","$3","$4","$5","$28","memory");
369 + : "$1", "$2", "$3", "$4", "$5", "$28", "memory");
370 370
371 371 return __cu_len;
372 372 }
···
379 379 return len;
380 380 }
381 381
382 - #define __copy_to_user(to,from,n) \
382 + #define __copy_to_user(to, from, n) \
383 383 ({ \
384 384 __chk_user_ptr(to); \
385 - __copy_tofrom_user_nocheck((__force void *)(to),(from),(n)); \
385 + __copy_tofrom_user_nocheck((__force void *)(to), (from), (n)); \
386 386 })
387 - #define __copy_from_user(to,from,n) \
387 + #define __copy_from_user(to, from, n) \
388 388 ({ \
389 389 __chk_user_ptr(from); \
390 - __copy_tofrom_user_nocheck((to),(__force void *)(from),(n)); \
390 + __copy_tofrom_user_nocheck((to), (__force void *)(from), (n)); \
391 391 })
392 392
393 393 #define __copy_to_user_inatomic __copy_to_user
···
418 418 : "=r"(__cl_len), "=r"(__cl_to)
419 419 : __module_address(__do_clear_user)
420 420 "0"(__cl_len), "1"(__cl_to)
421 - : "$1","$2","$3","$4","$5","$28","memory");
421 + : "$1", "$2", "$3", "$4", "$5", "$28", "memory");
422 422 return __cl_len;
423 423 }
424 424
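
A standalone sketch of the alpha __access_ok() test above: the segment limit is a mask of forbidden high address bits, so OR-ing addr, size and addr+size together and AND-ing with that mask rejects both out-of-range pointers and addr+size wrap-around in one branch-free comparison. The 2^40 bound below is invented for the demo (and assumes a 64-bit unsigned long); it is not alpha's real USER_DS value.

#include <stdio.h>

#define SEG_MASK (~((1UL << 40) - 1))   /* hypothetical user-space bound */

static int demo_access_ok(unsigned long addr, unsigned long size)
{
        return (SEG_MASK & (addr | size | (addr + size))) == 0;
}

int main(void)
{
        printf("%d\n", demo_access_ok(0x1000, 64));             /* 1: fits */
        printf("%d\n", demo_access_ok(1UL << 41, 64));          /* 0: high bits set */
        printf("%d\n", demo_access_ok((1UL << 40) - 8, 64));    /* 0: addr+size spills over */
        return 0;
}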
+48 -48
arch/arm/include/asm/uaccess.h
···
73 73 modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
74 74 }
75 75
76 - #define segment_eq(a,b) ((a) == (b))
76 + #define segment_eq(a, b) ((a) == (b))
77 77
78 78 #define __addr_ok(addr) ({ \
79 79 unsigned long flag; \
···
84 84 (flag == 0); })
85 85
86 86 /* We use 33-bit arithmetic here... */
87 - #define __range_ok(addr,size) ({ \
87 + #define __range_ok(addr, size) ({ \
88 88 unsigned long flag, roksum; \
89 89 __chk_user_ptr(addr); \
90 90 __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
···
123 123 #define __GUP_CLOBBER_32t_8 "lr", "cc"
124 124 #define __GUP_CLOBBER_8 "lr", "cc"
125 125
126 - #define __get_user_x(__r2,__p,__e,__l,__s) \
126 + #define __get_user_x(__r2, __p, __e, __l, __s) \
127 127 __asm__ __volatile__ ( \
128 128 __asmeq("%0", "r0") __asmeq("%1", "r2") \
129 129 __asmeq("%3", "r1") \
···
134 134
135 135 /* narrowing a double-word get into a single 32bit word register: */
136 136 #ifdef __ARMEB__
137 - #define __get_user_x_32t(__r2, __p, __e, __l, __s) \
137 + #define __get_user_x_32t(__r2, __p, __e, __l, __s) \
138 138 __get_user_x(__r2, __p, __e, __l, 32t_8)
139 139 #else
140 140 #define __get_user_x_32t __get_user_x
···
158 158 #endif
159 159
160 160
161 - #define __get_user_check(x,p) \
161 + #define __get_user_check(x, p) \
162 162 ({ \
163 163 unsigned long __limit = current_thread_info()->addr_limit - 1; \
164 164 register const typeof(*(p)) __user *__p asm("r0") = (p);\
···
196 196 __e; \
197 197 })
198 198
199 - #define get_user(x,p) \
199 + #define get_user(x, p) \
200 200 ({ \
201 201 might_fault(); \
202 - __get_user_check(x,p); \
202 + __get_user_check(x, p); \
203 203 })
204 204
205 205 extern int __put_user_1(void *, unsigned int);
···
207 207 extern int __put_user_4(void *, unsigned int);
208 208 extern int __put_user_8(void *, unsigned long long);
209 209
210 - #define __put_user_x(__r2,__p,__e,__l,__s) \
210 + #define __put_user_x(__r2, __p, __e, __l, __s) \
211 211 __asm__ __volatile__ ( \
212 212 __asmeq("%0", "r0") __asmeq("%2", "r2") \
213 213 __asmeq("%3", "r1") \
···
216 216 : "0" (__p), "r" (__r2), "r" (__l) \
217 217 : "ip", "lr", "cc")
218 218
219 - #define __put_user_check(x,p) \
219 + #define __put_user_check(x, p) \
220 220 ({ \
221 221 unsigned long __limit = current_thread_info()->addr_limit - 1; \
222 222 const typeof(*(p)) __user *__tmp_p = (p); \
···
242 242 __e; \
243 243 })
244 244
245 - #define put_user(x,p) \
245 + #define put_user(x, p) \
246 246 ({ \
247 247 might_fault(); \
248 - __put_user_check(x,p); \
248 + __put_user_check(x, p); \
249 249 })
250 250
251 251 #else /* CONFIG_MMU */
···
255 255 */
256 256 #define USER_DS KERNEL_DS
257 257
258 - #define segment_eq(a,b) (1)
259 - #define __addr_ok(addr) ((void)(addr),1)
260 - #define __range_ok(addr,size) ((void)(addr),0)
258 + #define segment_eq(a, b) (1)
259 + #define __addr_ok(addr) ((void)(addr), 1)
260 + #define __range_ok(addr, size) ((void)(addr), 0)
261 261 #define get_fs() (KERNEL_DS)
262 262
263 263 static inline void set_fs(mm_segment_t fs)
264 264 {
265 265 }
266 266
267 - #define get_user(x,p) __get_user(x,p)
268 - #define put_user(x,p) __put_user(x,p)
267 + #define get_user(x, p) __get_user(x, p)
268 + #define put_user(x, p) __put_user(x, p)
269 269
270 270 #endif /* CONFIG_MMU */
271 271
272 - #define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
272 + #define access_ok(type, addr, size) (__range_ok(addr, size) == 0)
273 273
274 274 #define user_addr_max() \
275 275 (segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs())
···
283 283 * error occurs, and leave it unchanged on success. Note that these
284 284 * versions are void (ie, don't return a value as such).
285 285 */
286 - #define __get_user(x,ptr) \
286 + #define __get_user(x, ptr) \
287 287 ({ \
288 288 long __gu_err = 0; \
289 - __get_user_err((x),(ptr),__gu_err); \
289 + __get_user_err((x), (ptr), __gu_err); \
290 290 __gu_err; \
291 291 })
292 292
293 - #define __get_user_error(x,ptr,err) \
293 + #define __get_user_error(x, ptr, err) \
294 294 ({ \
295 - __get_user_err((x),(ptr),err); \
295 + __get_user_err((x), (ptr), err); \
296 296 (void) 0; \
297 297 })
298 298
299 - #define __get_user_err(x,ptr,err) \
299 + #define __get_user_err(x, ptr, err) \
300 300 do { \
301 301 unsigned long __gu_addr = (unsigned long)(ptr); \
302 302 unsigned long __gu_val; \
303 303 __chk_user_ptr(ptr); \
304 304 might_fault(); \
305 305 switch (sizeof(*(ptr))) { \
306 - case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \
307 - case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \
308 - case 4: __get_user_asm_word(__gu_val,__gu_addr,err); break; \
306 + case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \
307 + case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \
308 + case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \
309 309 default: (__gu_val) = __get_user_bad(); \
310 310 } \
311 311 (x) = (__typeof__(*(ptr)))__gu_val; \
312 312 } while (0)
313 313
314 - #define __get_user_asm_byte(x,addr,err) \
314 + #define __get_user_asm_byte(x, addr, err) \
315 315 __asm__ __volatile__( \
316 316 "1: " TUSER(ldrb) " %1,[%2],#0\n" \
317 317 "2:\n" \
···
330 330 : "cc")
331 331
332 332 #ifndef __ARMEB__
333 - #define __get_user_asm_half(x,__gu_addr,err) \
333 + #define __get_user_asm_half(x, __gu_addr, err) \
334 334 ({ \
335 335 unsigned long __b1, __b2; \
336 336 __get_user_asm_byte(__b1, __gu_addr, err); \
···
338 338 (x) = __b1 | (__b2 << 8); \
339 339 })
340 340 #else
341 - #define __get_user_asm_half(x,__gu_addr,err) \
341 + #define __get_user_asm_half(x, __gu_addr, err) \
342 342 ({ \
343 343 unsigned long __b1, __b2; \
344 344 __get_user_asm_byte(__b1, __gu_addr, err); \
···
347 347 })
348 348 #endif
349 349
350 - #define __get_user_asm_word(x,addr,err) \
350 + #define __get_user_asm_word(x, addr, err) \
351 351 __asm__ __volatile__( \
352 352 "1: " TUSER(ldr) " %1,[%2],#0\n" \
353 353 "2:\n" \
···
365 365 : "r" (addr), "i" (-EFAULT) \
366 366 : "cc")
367 367
368 - #define __put_user(x,ptr) \
368 + #define __put_user(x, ptr) \
369 369 ({ \
370 370 long __pu_err = 0; \
371 - __put_user_err((x),(ptr),__pu_err); \
371 + __put_user_err((x), (ptr), __pu_err); \
372 372 __pu_err; \
373 373 })
374 374
375 - #define __put_user_error(x,ptr,err) \
375 + #define __put_user_error(x, ptr, err) \
376 376 ({ \
377 - __put_user_err((x),(ptr),err); \
377 + __put_user_err((x), (ptr), err); \
378 378 (void) 0; \
379 379 })
380 380
381 - #define __put_user_err(x,ptr,err) \
381 + #define __put_user_err(x, ptr, err) \
382 382 do { \
383 383 unsigned long __pu_addr = (unsigned long)(ptr); \
384 384 __typeof__(*(ptr)) __pu_val = (x); \
385 385 __chk_user_ptr(ptr); \
386 386 might_fault(); \
387 387 switch (sizeof(*(ptr))) { \
388 - case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \
389 - case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \
390 - case 4: __put_user_asm_word(__pu_val,__pu_addr,err); break; \
391 - case 8: __put_user_asm_dword(__pu_val,__pu_addr,err); break; \
388 + case 1: __put_user_asm_byte(__pu_val, __pu_addr, err); break; \
389 + case 2: __put_user_asm_half(__pu_val, __pu_addr, err); break; \
390 + case 4: __put_user_asm_word(__pu_val, __pu_addr, err); break; \
391 + case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break; \
392 392 default: __put_user_bad(); \
393 393 } \
394 394 } while (0)
395 395
396 - #define __put_user_asm_byte(x,__pu_addr,err) \
396 + #define __put_user_asm_byte(x, __pu_addr, err) \
397 397 __asm__ __volatile__( \
398 398 "1: " TUSER(strb) " %1,[%2],#0\n" \
399 399 "2:\n" \
···
411 411 : "cc")
412 412
413 413 #ifndef __ARMEB__
414 - #define __put_user_asm_half(x,__pu_addr,err) \
414 + #define __put_user_asm_half(x, __pu_addr, err) \
415 415 ({ \
416 - unsigned long __temp = (unsigned long)(x); \
416 + unsigned long __temp = (__force unsigned long)(x); \
417 417 __put_user_asm_byte(__temp, __pu_addr, err); \
418 418 __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \
419 419 })
420 420 #else
421 - #define __put_user_asm_half(x,__pu_addr,err) \
421 + #define __put_user_asm_half(x, __pu_addr, err) \
422 422 ({ \
423 - unsigned long __temp = (unsigned long)(x); \
423 + unsigned long __temp = (__force unsigned long)(x); \
424 424 __put_user_asm_byte(__temp >> 8, __pu_addr, err); \
425 425 __put_user_asm_byte(__temp, __pu_addr + 1, err); \
426 426 })
427 427 #endif
428 428
429 - #define __put_user_asm_word(x,__pu_addr,err) \
429 + #define __put_user_asm_word(x, __pu_addr, err) \
430 430 __asm__ __volatile__( \
431 431 "1: " TUSER(str) " %1,[%2],#0\n" \
432 432 "2:\n" \
···
451 451 #define __reg_oper1 "%R2"
452 452 #endif
453 453
454 - #define __put_user_asm_dword(x,__pu_addr,err) \
454 + #define __put_user_asm_dword(x, __pu_addr, err) \
455 455 __asm__ __volatile__( \
456 456 ARM( "1: " TUSER(str) " " __reg_oper1 ", [%1], #4\n" ) \
457 457 ARM( "2: " TUSER(str) " " __reg_oper0 ", [%1]\n" ) \
···
480 480 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
481 481 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
482 482 #else
483 - #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
484 - #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
485 - #define __clear_user(addr,n) (memset((void __force *)addr, 0, n), 0)
483 + #define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
484 + #define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
485 + #define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0)
486 486 #endif
487 487
488 488 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
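
A standalone sketch of what the __get_user_asm_half variants above do: when the CPU cannot fault-and-fix an unaligned 16-bit access, the halfword is assembled from two independent byte loads, and which byte goes first depends on endianness, hence the two kernel variants guarded by __ARMEB__. This demo selects at runtime instead.

#include <stdio.h>

static unsigned int get_half(const unsigned char *p, int big_endian)
{
        unsigned int b1 = p[0], b2 = p[1];      /* two byte loads */
        return big_endian ? (b1 << 8) | b2 : b1 | (b2 << 8);
}

int main(void)
{
        unsigned char buf[2] = { 0x34, 0x12 };
        printf("LE: 0x%04x\n", get_half(buf, 0));       /* 0x1234 */
        printf("BE: 0x%04x\n", get_half(buf, 1));       /* 0x3412 */
        return 0;
}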
+2 -2
arch/arm64/include/asm/uaccess.h
···
63 63 current_thread_info()->addr_limit = fs;
64 64 }
65 65
66 - #define segment_eq(a,b) ((a) == (b))
66 + #define segment_eq(a, b) ((a) == (b))
67 67
68 68 /*
69 69 * Return 1 if addr < current->addr_limit, 0 otherwise.
···
147 147 default: \
148 148 BUILD_BUG(); \
149 149 } \
150 - (x) = (__typeof__(*(ptr)))__gu_val; \
150 + (x) = (__force __typeof__(*(ptr)))__gu_val; \
151 151 } while (0)
152 152
153 153 #define __get_user(x, ptr) \
+12 -12
arch/avr32/include/asm/uaccess.h
···
26 26 * For historical reasons (Data Segment Register?), these macros are misnamed.
27 27 */
28 28 #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
29 - #define segment_eq(a,b) ((a).is_user_space == (b).is_user_space)
29 + #define segment_eq(a, b) ((a).is_user_space == (b).is_user_space)
30 30
31 31 #define USER_ADDR_LIMIT 0x80000000
32 32
···
108 108 *
109 109 * Returns zero on success, or -EFAULT on error.
110 110 */
111 - #define put_user(x,ptr) \
112 - __put_user_check((x),(ptr),sizeof(*(ptr)))
111 + #define put_user(x, ptr) \
112 + __put_user_check((x), (ptr), sizeof(*(ptr)))
113 113
114 114 /*
115 115 * get_user: - Get a simple variable from user space.
···
128 128 * Returns zero on success, or -EFAULT on error.
129 129 * On error, the variable @x is set to zero.
130 130 */
131 - #define get_user(x,ptr) \
132 - __get_user_check((x),(ptr),sizeof(*(ptr)))
131 + #define get_user(x, ptr) \
132 + __get_user_check((x), (ptr), sizeof(*(ptr)))
133 133
134 134 /*
135 135 * __put_user: - Write a simple value into user space, with less checking.
···
150 150 *
151 151 * Returns zero on success, or -EFAULT on error.
152 152 */
153 - #define __put_user(x,ptr) \
154 - __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
153 + #define __put_user(x, ptr) \
154 + __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
155 155
156 156 /*
157 157 * __get_user: - Get a simple variable from user space, with less checking.
···
173 173 * Returns zero on success, or -EFAULT on error.
174 174 * On error, the variable @x is set to zero.
175 175 */
176 - #define __get_user(x,ptr) \
177 - __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
176 + #define __get_user(x, ptr) \
177 + __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
178 178
179 179 extern int __get_user_bad(void);
180 180 extern int __put_user_bad(void);
···
191 191 default: __gu_err = __get_user_bad(); break; \
192 192 } \
193 193 \
194 - x = (typeof(*(ptr)))__gu_val; \
194 + x = (__force typeof(*(ptr)))__gu_val; \
195 195 __gu_err; \
196 196 })
197 197
···
222 222 } else { \
223 223 __gu_err = -EFAULT; \
224 224 } \
225 - x = (typeof(*(ptr)))__gu_val; \
225 + x = (__force typeof(*(ptr)))__gu_val; \
226 226 __gu_err; \
227 227 })
228 228
···
278 278 __pu_err); \
279 279 break; \
280 280 case 8: \
281 - __put_user_asm("d", __pu_addr, __pu_val, \
281 + __put_user_asm("d", __pu_addr, __pu_val, \
282 282 __pu_err); \
283 283 break; \
284 284 default: \
+16 -16
arch/blackfin/include/asm/uaccess.h
···
27 27 current_thread_info()->addr_limit = fs;
28 28 }
29 29
30 - #define segment_eq(a,b) ((a) == (b))
30 + #define segment_eq(a, b) ((a) == (b))
31 31
32 32 #define VERIFY_READ 0
33 33 #define VERIFY_WRITE 1
···
68 68 * use the right size if we just have the right pointer type.
69 69 */
70 70
71 - #define put_user(x,p) \
71 + #define put_user(x, p) \
72 72 ({ \
73 73 int _err = 0; \
74 74 typeof(*(p)) _x = (x); \
75 - typeof(*(p)) __user *_p = (p); \
75 + typeof(*(p)) __user *_p = (p); \
76 76 if (!access_ok(VERIFY_WRITE, _p, sizeof(*(_p)))) {\
77 77 _err = -EFAULT; \
78 78 } \
···
89 89 break; \
90 90 case 8: { \
91 91 long _xl, _xh; \
92 - _xl = ((long *)&_x)[0]; \
93 - _xh = ((long *)&_x)[1]; \
94 - __put_user_asm(_xl, ((long __user *)_p)+0, ); \
95 - __put_user_asm(_xh, ((long __user *)_p)+1, ); \
92 + _xl = ((__force long *)&_x)[0]; \
93 + _xh = ((__force long *)&_x)[1]; \
94 + __put_user_asm(_xl, ((__force long __user *)_p)+0, );\
95 + __put_user_asm(_xh, ((__force long __user *)_p)+1, );\
96 96 } break; \
97 97 default: \
98 98 _err = __put_user_bad(); \
···
102 102 _err; \
103 103 })
104 104
105 - #define __put_user(x,p) put_user(x,p)
105 + #define __put_user(x, p) put_user(x, p)
106 106 static inline int bad_user_access_length(void)
107 107 {
108 108 panic("bad_user_access_length");
···
121 121
122 122 #define __ptr(x) ((unsigned long __force *)(x))
123 123
124 - #define __put_user_asm(x,p,bhw) \
124 + #define __put_user_asm(x, p, bhw) \
125 125 __asm__ (#bhw"[%1] = %0;\n\t" \
126 126 : /* no outputs */ \
127 - :"d" (x),"a" (__ptr(p)) : "memory")
127 + :"d" (x), "a" (__ptr(p)) : "memory")
128 128
129 129 #define get_user(x, ptr) \
130 130 ({ \
···
136 136 BUILD_BUG_ON(ptr_size >= 8); \
137 137 switch (ptr_size) { \
138 138 case 1: \
139 - __get_user_asm(_val, _p, B,(Z)); \
139 + __get_user_asm(_val, _p, B, (Z)); \
140 140 break; \
141 141 case 2: \
142 - __get_user_asm(_val, _p, W,(Z)); \
142 + __get_user_asm(_val, _p, W, (Z)); \
143 143 break; \
144 144 case 4: \
145 145 __get_user_asm(_val, _p, , ); \
···
147 147 } \
148 148 } else \
149 149 _err = -EFAULT; \
150 - x = (typeof(*(ptr)))_val; \
150 + x = (__force typeof(*(ptr)))_val; \
151 151 _err; \
152 152 })
153 153
154 - #define __get_user(x,p) get_user(x,p)
154 + #define __get_user(x, p) get_user(x, p)
155 155
156 156 #define __get_user_bad() (bad_user_access_length(), (-EFAULT))
157 157
···
168 168 #define __copy_to_user_inatomic __copy_to_user
169 169 #define __copy_from_user_inatomic __copy_from_user
170 170
171 - #define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n))\
171 + #define copy_to_user_ret(to, from, n, retval) ({ if (copy_to_user(to, from, n))\
172 172 return retval; })
173 173
174 - #define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n))\
174 + #define copy_from_user_ret(to, from, n, retval) ({ if (copy_from_user(to, from, n))\
175 175 return retval; })
176 176
177 177 static inline unsigned long __must_check
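
A standalone sketch of the blackfin case-8 path above: the widest hardware store in __put_user_asm is 32 bits, so a 64-bit value goes out as two 32-bit words, low half first on a little-endian part. The union here replaces the kernel's ((long *)&_x)[0] aliasing trick.

#include <stdint.h>
#include <stdio.h>

static void put_u64_as_two_u32(uint64_t x, uint32_t dst[2])
{
        union { uint64_t whole; uint32_t half[2]; } u = { .whole = x };

        dst[0] = u.half[0];     /* low word on little-endian */
        dst[1] = u.half[1];     /* high word on little-endian */
}

int main(void)
{
        uint32_t out[2];

        put_u64_as_two_u32(0x1122334455667788ULL, out);
        printf("0x%08x 0x%08x\n", out[0], out[1]);      /* 55667788 11223344 on LE */
        return 0;
}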
+1 -1
arch/frv/include/asm/segment.h
···
31 31
32 32 #define get_ds() (KERNEL_DS)
33 33 #define get_fs() (__current_thread_info->addr_limit)
34 - #define segment_eq(a,b) ((a).seg == (b).seg)
34 + #define segment_eq(a, b) ((a).seg == (b).seg)
35 35 #define __kernel_ds_p() segment_eq(get_fs(), KERNEL_DS)
36 36 #define get_addr_limit() (get_fs().seg)
37 37
+6 -5
arch/ia64/include/asm/uaccess.h
···
169 169 (err) = ia64_getreg(_IA64_REG_R8); \
170 170 (val) = ia64_getreg(_IA64_REG_R9); \
171 171 } while (0)
172 - # define __put_user_size(val, addr, n, err) \
173 - do { \
174 - __st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, (unsigned long) (val)); \
175 - (err) = ia64_getreg(_IA64_REG_R8); \
172 + # define __put_user_size(val, addr, n, err) \
173 + do { \
174 + __st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, \
175 + (__force unsigned long) (val)); \
176 + (err) = ia64_getreg(_IA64_REG_R8); \
176 177 } while (0)
177 178 #endif /* !ASM_SUPPORTED */
178 179
···
198 197 case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break; \
199 198 default: __get_user_unknown(); break; \
200 199 } \
201 - (x) = (__typeof__(*(__gu_ptr))) __gu_val; \
200 + (x) = (__force __typeof__(*(__gu_ptr))) __gu_val; \
202 201 __gu_err; \
203 202 })
+44 -44
arch/m32r/include/asm/uaccess.h
···
54 54
55 55 #endif /* not CONFIG_MMU */
56 56
57 - #define segment_eq(a,b) ((a).seg == (b).seg)
57 + #define segment_eq(a, b) ((a).seg == (b).seg)
58 58
59 59 #define __addr_ok(addr) \
60 60 ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))
···
68 68 *
69 69 * This needs 33-bit arithmetic. We have a carry...
70 70 */
71 - #define __range_ok(addr,size) ({ \
71 + #define __range_ok(addr, size) ({ \
72 72 unsigned long flag, roksum; \
73 73 __chk_user_ptr(addr); \
74 74 asm ( \
···
103 103 * this function, memory access functions may still return -EFAULT.
104 104 */
105 105 #ifdef CONFIG_MMU
106 - #define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0))
106 + #define access_ok(type, addr, size) (likely(__range_ok(addr, size) == 0))
107 107 #else
108 108 static inline int access_ok(int type, const void *addr, unsigned long size)
109 109 {
···
167 167 * Returns zero on success, or -EFAULT on error.
168 168 * On error, the variable @x is set to zero.
169 169 */
170 - #define get_user(x,ptr) \
171 - __get_user_check((x),(ptr),sizeof(*(ptr)))
170 + #define get_user(x, ptr) \
171 + __get_user_check((x), (ptr), sizeof(*(ptr)))
172 172
173 173 /**
174 174 * put_user: - Write a simple value into user space.
···
186 186 *
187 187 * Returns zero on success, or -EFAULT on error.
188 188 */
189 - #define put_user(x,ptr) \
190 - __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
189 + #define put_user(x, ptr) \
190 + __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
191 191
192 192 /**
193 193 * __get_user: - Get a simple variable from user space, with less checking.
···
209 209 * Returns zero on success, or -EFAULT on error.
210 210 * On error, the variable @x is set to zero.
211 211 */
212 - #define __get_user(x,ptr) \
213 - __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
212 + #define __get_user(x, ptr) \
213 + __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
214 214
215 - #define __get_user_nocheck(x,ptr,size) \
215 + #define __get_user_nocheck(x, ptr, size) \
216 216 ({ \
217 217 long __gu_err = 0; \
218 218 unsigned long __gu_val; \
219 219 might_fault(); \
220 - __get_user_size(__gu_val,(ptr),(size),__gu_err); \
221 - (x) = (__typeof__(*(ptr)))__gu_val; \
220 + __get_user_size(__gu_val, (ptr), (size), __gu_err); \
221 + (x) = (__force __typeof__(*(ptr)))__gu_val; \
222 222 __gu_err; \
223 223 })
224 224
225 - #define __get_user_check(x,ptr,size) \
225 + #define __get_user_check(x, ptr, size) \
226 226 ({ \
227 227 long __gu_err = -EFAULT; \
228 228 unsigned long __gu_val = 0; \
229 229 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
230 230 might_fault(); \
231 - if (access_ok(VERIFY_READ,__gu_addr,size)) \
232 - __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \
233 - (x) = (__typeof__(*(ptr)))__gu_val; \
231 + if (access_ok(VERIFY_READ, __gu_addr, size)) \
232 + __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
233 + (x) = (__force __typeof__(*(ptr)))__gu_val; \
234 234 __gu_err; \
235 235 })
236 236
237 237 extern long __get_user_bad(void);
238 238
239 - #define __get_user_size(x,ptr,size,retval) \
239 + #define __get_user_size(x, ptr, size, retval) \
240 240 do { \
241 241 retval = 0; \
242 242 __chk_user_ptr(ptr); \
243 243 switch (size) { \
244 - case 1: __get_user_asm(x,ptr,retval,"ub"); break; \
245 - case 2: __get_user_asm(x,ptr,retval,"uh"); break; \
246 - case 4: __get_user_asm(x,ptr,retval,""); break; \
244 + case 1: __get_user_asm(x, ptr, retval, "ub"); break; \
245 + case 2: __get_user_asm(x, ptr, retval, "uh"); break; \
246 + case 4: __get_user_asm(x, ptr, retval, ""); break; \
247 247 default: (x) = __get_user_bad(); \
248 248 } \
249 249 } while (0)
···
288 288 *
289 289 * Returns zero on success, or -EFAULT on error.
290 290 */
291 - #define __put_user(x,ptr) \
292 - __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
291 + #define __put_user(x, ptr) \
292 + __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
293 293
294 294
295 - #define __put_user_nocheck(x,ptr,size) \
295 + #define __put_user_nocheck(x, ptr, size) \
296 296 ({ \
297 297 long __pu_err; \
298 298 might_fault(); \
299 - __put_user_size((x),(ptr),(size),__pu_err); \
299 + __put_user_size((x), (ptr), (size), __pu_err); \
300 300 __pu_err; \
301 301 })
302 302
303 303
304 - #define __put_user_check(x,ptr,size) \
304 + #define __put_user_check(x, ptr, size) \
305 305 ({ \
306 306 long __pu_err = -EFAULT; \
307 307 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
308 308 might_fault(); \
309 - if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
310 - __put_user_size((x),__pu_addr,(size),__pu_err); \
309 + if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
310 + __put_user_size((x), __pu_addr, (size), __pu_err); \
311 311 __pu_err; \
312 312 })
313 313
···
366 366
367 367 extern void __put_user_bad(void);
368 368
369 - #define __put_user_size(x,ptr,size,retval) \
369 + #define __put_user_size(x, ptr, size, retval) \
370 370 do { \
371 371 retval = 0; \
372 372 __chk_user_ptr(ptr); \
373 373 switch (size) { \
374 - case 1: __put_user_asm(x,ptr,retval,"b"); break; \
375 - case 2: __put_user_asm(x,ptr,retval,"h"); break; \
376 - case 4: __put_user_asm(x,ptr,retval,""); break; \
377 - case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval); break;\
374 + case 1: __put_user_asm(x, ptr, retval, "b"); break; \
375 + case 2: __put_user_asm(x, ptr, retval, "h"); break; \
376 + case 4: __put_user_asm(x, ptr, retval, ""); break; \
377 + case 8: __put_user_u64((__typeof__(*ptr))(x), ptr, retval); break;\
378 378 default: __put_user_bad(); \
379 379 } \
380 380 } while (0)
···
421 421
422 422 /* Generic arbitrary sized copy. */
423 423 /* Return the number of bytes NOT copied. */
424 - #define __copy_user(to,from,size) \
424 + #define __copy_user(to, from, size) \
425 425 do { \
426 426 unsigned long __dst, __src, __c; \
427 427 __asm__ __volatile__ ( \
···
478 478 : "r14", "memory"); \
479 479 } while (0)
480 480
481 - #define __copy_user_zeroing(to,from,size) \
481 + #define __copy_user_zeroing(to, from, size) \
482 482 do { \
483 483 unsigned long __dst, __src, __c; \
484 484 __asm__ __volatile__ ( \
···
548 548 static inline unsigned long __generic_copy_from_user_nocheck(void *to,
549 549 const void __user *from, unsigned long n)
550 550 {
551 - __copy_user_zeroing(to,from,n);
551 + __copy_user_zeroing(to, from, n);
552 552 return n;
553 553 }
554 554
555 555 static inline unsigned long __generic_copy_to_user_nocheck(void __user *to,
556 556 const void *from, unsigned long n)
557 557 {
558 - __copy_user(to,from,n);
558 + __copy_user(to, from, n);
559 559 return n;
560 560 }
561 561
···
576 576 * Returns number of bytes that could not be copied.
577 577 * On success, this will be zero.
578 578 */
579 - #define __copy_to_user(to,from,n) \
580 - __generic_copy_to_user_nocheck((to),(from),(n))
579 + #define __copy_to_user(to, from, n) \
580 + __generic_copy_to_user_nocheck((to), (from), (n))
581 581
582 582 #define __copy_to_user_inatomic __copy_to_user
583 583 #define __copy_from_user_inatomic __copy_from_user
···
595 595 * Returns number of bytes that could not be copied.
596 596 * On success, this will be zero.
597 597 */
598 - #define copy_to_user(to,from,n) \
598 + #define copy_to_user(to, from, n) \
599 599 ({ \
600 600 might_fault(); \
601 - __generic_copy_to_user((to),(from),(n)); \
601 + __generic_copy_to_user((to), (from), (n)); \
602 602 })
603 603
604 604 /**
···
617 617 * If some data could not be copied, this function will pad the copied
618 618 * data to the requested size using zero bytes.
619 619 */
620 - #define __copy_from_user(to,from,n) \
621 - __generic_copy_from_user_nocheck((to),(from),(n))
620 + #define __copy_from_user(to, from, n) \
621 + __generic_copy_from_user_nocheck((to), (from), (n))
622 622
623 623 /**
624 624 * copy_from_user: - Copy a block of data from user space.
···
636 636 * If some data could not be copied, this function will pad the copied
637 637 * data to the requested size using zero bytes.
638 638 */
639 - #define copy_from_user(to,from,n) \
639 + #define copy_from_user(to, from, n) \
640 640 ({ \
641 641 might_fault(); \
642 - __generic_copy_from_user((to),(from),(n)); \
642 + __generic_copy_from_user((to), (from), (n)); \
643 643 })
644 644
645 645 long __must_check strncpy_from_user(char *dst, const char __user *src,
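
The m32r kernel-doc above states the contract shared by all these architectures: copy_from_user() returns the number of bytes that could NOT be copied (zero on success) and zero-pads the destination on a partial copy. A toy model of the calling pattern, with fake_copy_from_user standing in for the real primitive:

#include <stdio.h>
#include <string.h>

/* pretend only 'limit' source bytes are accessible; the rest "fault" */
static unsigned long fake_copy_from_user(void *to, const void *from,
                                         unsigned long n, unsigned long limit)
{
        unsigned long ok = n < limit ? n : limit;

        memcpy(to, from, ok);
        memset((char *)to + ok, 0, n - ok);     /* zero-pad the tail */
        return n - ok;                          /* bytes not copied */
}

int main(void)
{
        char src[8] = "abcdefg", dst[8];

        if (fake_copy_from_user(dst, src, sizeof(dst), 4) != 0)
                printf("partial copy, zero-padded tail: \"%s\"\n", dst);
        return 0;
}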
+1 -1
arch/m68k/include/asm/segment.h
···
58 58 #define set_fs(x) (current_thread_info()->addr_limit = (x))
59 59 #endif
60 60
61 - #define segment_eq(a,b) ((a).seg == (b).seg)
61 + #define segment_eq(a, b) ((a).seg == (b).seg)
62 62
63 63 #endif /* __ASSEMBLY__ */
64 64
+20 -20
arch/m68k/include/asm/uaccess_mm.h
···
128 128 #define put_user(x, ptr) __put_user(x, ptr)
129 129
130 130
131 - #define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \
132 - type __gu_val; \
133 - asm volatile ("\n" \
134 - "1: "MOVES"."#bwl" %2,%1\n" \
135 - "2:\n" \
136 - " .section .fixup,\"ax\"\n" \
137 - " .even\n" \
138 - "10: move.l %3,%0\n" \
139 - " sub.l %1,%1\n" \
140 - " jra 2b\n" \
141 - " .previous\n" \
142 - "\n" \
143 - " .section __ex_table,\"a\"\n" \
144 - " .align 4\n" \
145 - " .long 1b,10b\n" \
146 - " .previous" \
147 - : "+d" (res), "=&" #reg (__gu_val) \
148 - : "m" (*(ptr)), "i" (err)); \
149 - (x) = (typeof(*(ptr)))(unsigned long)__gu_val; \
131 + #define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \
132 + type __gu_val; \
133 + asm volatile ("\n" \
134 + "1: "MOVES"."#bwl" %2,%1\n" \
135 + "2:\n" \
136 + " .section .fixup,\"ax\"\n" \
137 + " .even\n" \
138 + "10: move.l %3,%0\n" \
139 + " sub.l %1,%1\n" \
140 + " jra 2b\n" \
141 + " .previous\n" \
142 + "\n" \
143 + " .section __ex_table,\"a\"\n" \
144 + " .align 4\n" \
145 + " .long 1b,10b\n" \
146 + " .previous" \
147 + : "+d" (res), "=&" #reg (__gu_val) \
148 + : "m" (*(ptr)), "i" (err)); \
149 + (x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val; \
150 150 })
151 151
152 152 #define __get_user(x, ptr) \
···
188 188 "+a" (__gu_ptr) \
189 189 : "i" (-EFAULT) \
190 190 : "memory"); \
191 - (x) = (typeof(*(ptr)))__gu_val; \
191 + (x) = (__force typeof(*(ptr)))__gu_val; \
192 192 break; \
193 193 } */ \
194 194 default: \
+15 -10
arch/metag/include/asm/uaccess.h
···
107 107 extern long __put_user_asm_d(unsigned int x, void __user *addr);
108 108 extern long __put_user_asm_l(unsigned long long x, void __user *addr);
109 109
110 - #define __put_user_size(x, ptr, size, retval) \
111 - do { \
112 - retval = 0; \
113 - switch (size) { \
110 + #define __put_user_size(x, ptr, size, retval) \
111 + do { \
112 + retval = 0; \
113 + switch (size) { \
114 114 case 1: \
115 - retval = __put_user_asm_b((unsigned int)x, ptr); break; \
115 + retval = __put_user_asm_b((__force unsigned int)x, ptr);\
116 + break; \
116 117 case 2: \
117 - retval = __put_user_asm_w((unsigned int)x, ptr); break; \
118 + retval = __put_user_asm_w((__force unsigned int)x, ptr);\
119 + break; \
118 120 case 4: \
119 - retval = __put_user_asm_d((unsigned int)x, ptr); break; \
121 + retval = __put_user_asm_d((__force unsigned int)x, ptr);\
122 + break; \
120 123 case 8: \
121 - retval = __put_user_asm_l((unsigned long long)x, ptr); break; \
124 + retval = __put_user_asm_l((__force unsigned long long)x,\
125 + ptr); \
126 + break; \
122 127 default: \
123 128 __put_user_bad(); \
124 129 } \
···
140 135 ({ \
141 136 long __gu_err, __gu_val; \
142 137 __get_user_size(__gu_val, (ptr), (size), __gu_err); \
143 - (x) = (__typeof__(*(ptr)))__gu_val; \
138 + (x) = (__force __typeof__(*(ptr)))__gu_val; \
144 139 __gu_err; \
145 140 })
···
150 145 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
151 146 if (access_ok(VERIFY_READ, __gu_addr, size)) \
152 147 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
153 - (x) = (__typeof__(*(ptr)))__gu_val; \
148 + (x) = (__force __typeof__(*(ptr)))__gu_val; \
154 149 __gu_err; \
155 150 })
+2 -2
arch/openrisc/include/asm/uaccess.h
···
192 192 ({ \
193 193 long __gu_err, __gu_val; \
194 194 __get_user_size(__gu_val, (ptr), (size), __gu_err); \
195 - (x) = (__typeof__(*(ptr)))__gu_val; \
195 + (x) = (__force __typeof__(*(ptr)))__gu_val; \
196 196 __gu_err; \
197 197 })
198 198
···
202 202 const __typeof__(*(ptr)) * __gu_addr = (ptr); \
203 203 if (access_ok(VERIFY_READ, __gu_addr, size)) \
204 204 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
205 - (x) = (__typeof__(*(ptr)))__gu_val; \
205 + (x) = (__force __typeof__(*(ptr)))__gu_val; \
206 206 __gu_err; \
207 207 })
208 208
+1 -1
arch/sh/include/asm/segment.h
···
23 23 #define USER_DS KERNEL_DS
24 24 #endif
25 25
26 - #define segment_eq(a,b) ((a).seg == (b).seg)
26 + #define segment_eq(a, b) ((a).seg == (b).seg)
27 27
28 28 #define get_ds() (KERNEL_DS)
29 29
+2 -2
arch/sh/include/asm/uaccess.h
···
60 60 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
61 61 __chk_user_ptr(ptr); \
62 62 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
63 - (x) = (__typeof__(*(ptr)))__gu_val; \
63 + (x) = (__force __typeof__(*(ptr)))__gu_val; \
64 64 __gu_err; \
65 65 })
66 66
···
71 71 const __typeof__(*(ptr)) *__gu_addr = (ptr); \
72 72 if (likely(access_ok(VERIFY_READ, __gu_addr, (size)))) \
73 73 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
74 - (x) = (__typeof__(*(ptr)))__gu_val; \
74 + (x) = (__force __typeof__(*(ptr)))__gu_val; \
75 75 __gu_err; \
76 76 })
+4 -4
arch/sh/include/asm/uaccess_64.h
···
59 59 switch (size) { \
60 60 case 1: \
61 61 retval = __put_user_asm_b((void *)&x, \
62 - (long)ptr); \
62 + (__force long)ptr); \
63 63 break; \
64 64 case 2: \
65 65 retval = __put_user_asm_w((void *)&x, \
66 - (long)ptr); \
66 + (__force long)ptr); \
67 67 break; \
68 68 case 4: \
69 69 retval = __put_user_asm_l((void *)&x, \
70 - (long)ptr); \
70 + (__force long)ptr); \
71 71 break; \
72 72 case 8: \
73 73 retval = __put_user_asm_q((void *)&x, \
74 - (long)ptr); \
74 + (__force long)ptr); \
75 75 break; \
76 76 default: \
77 77 __put_user_unknown(); \
+193 -130
arch/sparc/include/asm/uaccess_32.h
···
37 37 #define get_fs() (current->thread.current_ds)
38 38 #define set_fs(val) ((current->thread.current_ds) = (val))
39 39
40 - #define segment_eq(a,b) ((a).seg == (b).seg)
40 + #define segment_eq(a, b) ((a).seg == (b).seg)
41 41
42 42 /* We have there a nice not-mapped page at PAGE_OFFSET - PAGE_SIZE, so that this test
43 43 * can be fairly lightweight.
···
46 46 */
47 47 #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; })
48 48 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
49 - #define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size)))
50 - #define access_ok(type, addr, size) \
49 + #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
50 + #define access_ok(type, addr, size) \
51 51 ({ (void)(type); __access_ok((unsigned long)(addr), size); })
52 52
53 53 /*
···
91 91 * of a performance impact. Thus we have a few rather ugly macros here,
92 92 * and hide all the ugliness from the user.
93 93 */
94 - #define put_user(x,ptr) ({ \
95 - unsigned long __pu_addr = (unsigned long)(ptr); \
96 - __chk_user_ptr(ptr); \
97 - __put_user_check((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })
94 + #define put_user(x, ptr) ({ \
95 + unsigned long __pu_addr = (unsigned long)(ptr); \
96 + __chk_user_ptr(ptr); \
97 + __put_user_check((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr))); \
98 + })
98 99
99 - #define get_user(x,ptr) ({ \
100 - unsigned long __gu_addr = (unsigned long)(ptr); \
101 - __chk_user_ptr(ptr); \
102 - __get_user_check((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })
100 + #define get_user(x, ptr) ({ \
101 + unsigned long __gu_addr = (unsigned long)(ptr); \
102 + __chk_user_ptr(ptr); \
103 + __get_user_check((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr))); \
104 + })
103 105
104 106 /*
105 107 * The "__xxx" versions do not do address space checking, useful when
106 108 * doing multiple accesses to the same area (the user has to do the
107 109 * checks by hand with "access_ok()")
108 110 */
109 - #define __put_user(x,ptr) __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
110 - #define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)),__typeof__(*(ptr)))
111 + #define __put_user(x, ptr) \
112 + __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
113 + #define __get_user(x, ptr) \
114 + __get_user_nocheck((x), (ptr), sizeof(*(ptr)), __typeof__(*(ptr)))
111 115
112 116 struct __large_struct { unsigned long buf[100]; };
113 117 #define __m(x) ((struct __large_struct __user *)(x))
114 118
115 - #define __put_user_check(x,addr,size) ({ \
116 - register int __pu_ret; \
117 - if (__access_ok(addr,size)) { \
118 - switch (size) { \
119 - case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
120 - case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
121 - case 4: __put_user_asm(x,,addr,__pu_ret); break; \
122 - case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
123 - default: __pu_ret = __put_user_bad(); break; \
124 - } } else { __pu_ret = -EFAULT; } __pu_ret; })
119 + #define __put_user_check(x, addr, size) ({ \
120 + register int __pu_ret; \
121 + if (__access_ok(addr, size)) { \
122 + switch (size) { \
123 + case 1: \
124 + __put_user_asm(x, b, addr, __pu_ret); \
125 + break; \
126 + case 2: \
127 + __put_user_asm(x, h, addr, __pu_ret); \
128 + break; \
129 + case 4: \
130 + __put_user_asm(x, , addr, __pu_ret); \
131 + break; \
132 + case 8: \
133 + __put_user_asm(x, d, addr, __pu_ret); \
134 + break; \
135 + default: \
136 + __pu_ret = __put_user_bad(); \
137 + break; \
138 + } \
139 + } else { \
140 + __pu_ret = -EFAULT; \
141 + } \
142 + __pu_ret; \
143 + })
125 144
126 - #define __put_user_nocheck(x,addr,size) ({ \
127 - register int __pu_ret; \
128 - switch (size) { \
129 - case 1: __put_user_asm(x,b,addr,__pu_ret); break; \
130 - case 2: __put_user_asm(x,h,addr,__pu_ret); break; \
131 - case 4: __put_user_asm(x,,addr,__pu_ret); break; \
132 - case 8: __put_user_asm(x,d,addr,__pu_ret); break; \
133 - default: __pu_ret = __put_user_bad(); break; \
134 - } __pu_ret; })
145 + #define __put_user_nocheck(x, addr, size) ({ \
146 + register int __pu_ret; \
147 + switch (size) { \
148 + case 1: __put_user_asm(x, b, addr, __pu_ret); break; \
149 + case 2: __put_user_asm(x, h, addr, __pu_ret); break; \
150 + case 4: __put_user_asm(x, , addr, __pu_ret); break; \
151 + case 8: __put_user_asm(x, d, addr, __pu_ret); break; \
152 + default: __pu_ret = __put_user_bad(); break; \
153 + } \
154 + __pu_ret; \
155 + })
135 156
136 - #define __put_user_asm(x,size,addr,ret) \
157 + #define __put_user_asm(x, size, addr, ret) \
137 158 __asm__ __volatile__( \
138 - "/* Put user asm, inline. */\n" \
139 - "1:\t" "st"#size " %1, %2\n\t" \
140 - "clr %0\n" \
141 - "2:\n\n\t" \
142 - ".section .fixup,#alloc,#execinstr\n\t" \
143 - ".align 4\n" \
144 - "3:\n\t" \
145 - "b 2b\n\t" \
146 - " mov %3, %0\n\t" \
147 - ".previous\n\n\t" \
148 - ".section __ex_table,#alloc\n\t" \
149 - ".align 4\n\t" \
150 - ".word 1b, 3b\n\t" \
151 - ".previous\n\n\t" \
152 - : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \
153 - "i" (-EFAULT))
159 + "/* Put user asm, inline. */\n" \
160 + "1:\t" "st"#size " %1, %2\n\t" \
161 + "clr %0\n" \
162 + "2:\n\n\t" \
163 + ".section .fixup,#alloc,#execinstr\n\t" \
164 + ".align 4\n" \
165 + "3:\n\t" \
166 + "b 2b\n\t" \
167 + " mov %3, %0\n\t" \
168 + ".previous\n\n\t" \
169 + ".section __ex_table,#alloc\n\t" \
170 + ".align 4\n\t" \
171 + ".word 1b, 3b\n\t" \
172 + ".previous\n\n\t" \
173 + : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \
174 + "i" (-EFAULT))
154 175
155 176 int __put_user_bad(void);
156 177
157 - #define __get_user_check(x,addr,size,type) ({ \
158 - register int __gu_ret; \
159 - register unsigned long __gu_val; \
160 - if (__access_ok(addr,size)) { \
161 - switch (size) { \
162 - case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
163 - case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
164 - case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
165 - case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
166 - default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
167 - } } else { __gu_val = 0; __gu_ret = -EFAULT; } x = (type) __gu_val; __gu_ret; })
178 + #define __get_user_check(x, addr, size, type) ({ \
179 + register int __gu_ret; \
180 + register unsigned long __gu_val; \
181 + if (__access_ok(addr, size)) { \
182 + switch (size) { \
183 + case 1: \
184 + __get_user_asm(__gu_val, ub, addr, __gu_ret); \
185 + break; \
186 + case 2: \
187 + __get_user_asm(__gu_val, uh, addr, __gu_ret); \
188 + break; \
189 + case 4: \
190 + __get_user_asm(__gu_val, , addr, __gu_ret); \
191 + break; \
192 + case 8: \
193 + __get_user_asm(__gu_val, d, addr, __gu_ret); \
194 + break; \
195 + default: \
196 + __gu_val = 0; \
197 + __gu_ret = __get_user_bad(); \
198 + break; \
199 + } \
200 + } else { \
201 + __gu_val = 0; \
202 + __gu_ret = -EFAULT; \
203 + } \
204 + x = (__force type) __gu_val; \
205 + __gu_ret; \
206 + })
168 207
169 - #define __get_user_check_ret(x,addr,size,type,retval) ({ \
170 - register unsigned long __gu_val __asm__ ("l1"); \
171 - if (__access_ok(addr,size)) { \
172 - switch (size) { \
173 - case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
174 - case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
175 - case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
176 - case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
177 - default: if (__get_user_bad()) return retval; \
178 - } x = (type) __gu_val; } else return retval; })
208 + #define __get_user_check_ret(x, addr, size, type, retval) ({ \
209 + register unsigned long __gu_val __asm__ ("l1"); \
210 + if (__access_ok(addr, size)) { \
211 + switch (size) { \
212 + case 1: \
213 + __get_user_asm_ret(__gu_val, ub, addr, retval); \
214 + break; \
215 + case 2: \
216 + __get_user_asm_ret(__gu_val, uh, addr, retval); \
217 + break; \
218 + case 4: \
219 + __get_user_asm_ret(__gu_val, , addr, retval); \
220 + break; \
221 + case 8: \
222 + __get_user_asm_ret(__gu_val, d, addr, retval); \
223 + break; \
224 + default: \
225 + if (__get_user_bad()) \
226 + return retval; \
227 + } \
228 + x = (__force type) __gu_val; \
229 + } else \
230 + return retval; \
231 + })
179 232
180 - #define __get_user_nocheck(x,addr,size,type) ({ \
181 - register int __gu_ret; \
182 - register unsigned long __gu_val; \
183 - switch (size) { \
184 - case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
185 - case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
186 - case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \
187 - case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \
188 - default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
189 - } x = (type) __gu_val; __gu_ret; })
233 + #define __get_user_nocheck(x, addr, size, type) ({ \
234 + register int __gu_ret; \
235 + register unsigned long __gu_val; \
236 + switch (size) { \
237 + case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
238 + case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
239 + case 4: __get_user_asm(__gu_val, , addr, __gu_ret); break; \
240 + case 8: __get_user_asm(__gu_val, d, addr, __gu_ret); break; \
241 + default: \
242 + __gu_val = 0; \
243 + __gu_ret = __get_user_bad(); \
244 + break; \
245 + } \
246 + x = (__force type) __gu_val; \
247 + __gu_ret; \
248 + })
190 249
191 - #define __get_user_nocheck_ret(x,addr,size,type,retval) ({ \
192 - register unsigned long __gu_val __asm__ ("l1"); \
193 - switch (size) { \
194 - case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
195 - case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
196 - case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \
197 - case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \
198 - default: if (__get_user_bad()) return retval; \
199 - } x = (type) __gu_val; })
250 + #define __get_user_nocheck_ret(x, addr, size, type, retval) ({ \
251 + register unsigned long __gu_val __asm__ ("l1"); \
252 + switch (size) { \
253 + case 1: __get_user_asm_ret(__gu_val, ub, addr, retval); break; \
254 + case 2: __get_user_asm_ret(__gu_val, uh, addr, retval); break; \
255 + case 4: __get_user_asm_ret(__gu_val, , addr, retval); break; \
256 + case 8: __get_user_asm_ret(__gu_val, d, addr, retval); break; \
257 + default: \
258 + if (__get_user_bad()) \
259 + return retval; \
260 + } \
261 + x = (__force type) __gu_val; \
262 + })
200 263
201 - #define __get_user_asm(x,size,addr,ret) \
264 + #define __get_user_asm(x, size, addr, ret) \
202 265 __asm__ __volatile__( \
203 - "/* Get user asm, inline. */\n" \
204 - "1:\t" "ld"#size " %2, %1\n\t" \
205 - "clr %0\n" \
206 - "2:\n\n\t" \
207 - ".section .fixup,#alloc,#execinstr\n\t" \
208 - ".align 4\n" \
209 - "3:\n\t" \
210 - "clr %1\n\t" \
211 - "b 2b\n\t" \
212 - " mov %3, %0\n\n\t" \
213 - ".previous\n\t" \
214 - ".section __ex_table,#alloc\n\t" \
215 - ".align 4\n\t" \
216 - ".word 1b, 3b\n\n\t" \
217 - ".previous\n\t" \
218 - : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)), \
219 - "i" (-EFAULT))
266 + "/* Get user asm, inline. */\n" \
267 + "1:\t" "ld"#size " %2, %1\n\t" \
268 + "clr %0\n" \
269 + "2:\n\n\t" \
270 + ".section .fixup,#alloc,#execinstr\n\t" \
271 + ".align 4\n" \
272 + "3:\n\t" \
273 + "clr %1\n\t" \
274 + "b 2b\n\t" \
275 + " mov %3, %0\n\n\t" \
276 + ".previous\n\t" \
277 + ".section __ex_table,#alloc\n\t" \
278 + ".align 4\n\t" \
279 + ".word 1b, 3b\n\n\t" \
280 + ".previous\n\t" \
281 + : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)), \
282 + "i" (-EFAULT))
220 283
221 - #define __get_user_asm_ret(x,size,addr,retval) \
284 + #define __get_user_asm_ret(x, size, addr, retval) \
222 285 if (__builtin_constant_p(retval) && retval == -EFAULT) \
223 - __asm__ __volatile__( \
224 - "/* Get user asm ret, inline. */\n" \
225 - "1:\t" "ld"#size " %1, %0\n\n\t" \
226 - ".section __ex_table,#alloc\n\t" \
227 - ".align 4\n\t" \
228 - ".word 1b,__ret_efault\n\n\t" \
229 - ".previous\n\t" \
230 - : "=&r" (x) : "m" (*__m(addr))); \
286 + __asm__ __volatile__( \
287 + "/* Get user asm ret, inline. */\n" \
288 + "1:\t" "ld"#size " %1, %0\n\n\t" \
289 + ".section __ex_table,#alloc\n\t" \
290 + ".align 4\n\t" \
291 + ".word 1b,__ret_efault\n\n\t" \
292 + ".previous\n\t" \
293 + : "=&r" (x) : "m" (*__m(addr))); \
231 294 else \
232 - __asm__ __volatile__( \
233 - "/* Get user asm ret, inline. */\n" \
234 - "1:\t" "ld"#size " %1, %0\n\n\t" \
235 - ".section .fixup,#alloc,#execinstr\n\t" \
236 - ".align 4\n" \
237 - "3:\n\t" \
238 - "ret\n\t" \
239 - " restore %%g0, %2, %%o0\n\n\t" \
240 - ".previous\n\t" \
241 - ".section __ex_table,#alloc\n\t" \
242 - ".align 4\n\t" \
243 - ".word 1b, 3b\n\n\t" \
244 - ".previous\n\t" \
245 - : "=&r" (x) : "m" (*__m(addr)), "i" (retval))
295 + __asm__ __volatile__( \
296 + "/* Get user asm ret, inline. */\n" \
297 + "1:\t" "ld"#size " %1, %0\n\n\t" \
298 + ".section .fixup,#alloc,#execinstr\n\t" \
299 + ".align 4\n" \
300 + "3:\n\t" \
301 + "ret\n\t" \
302 + " restore %%g0, %2, %%o0\n\n\t" \
303 + ".previous\n\t" \
304 + ".section __ex_table,#alloc\n\t" \
305 + ".align 4\n\t" \
306 + ".word 1b, 3b\n\n\t" \
307 + ".previous\n\t" \
308 + : "=&r" (x) : "m" (*__m(addr)), "i" (retval))
246 309
247 310 int __get_user_bad(void);
248 311
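
A standalone sketch of the __builtin_constant_p() dispatch that __get_user_asm_ret above relies on: when the error value is the compile-time constant -EFAULT, the macro can emit a shorter fixup that reuses the shared __ret_efault stub; any other value needs a private fixup sequence. The same test, outside of inline asm:

#include <stdio.h>

#define EFAULT 14       /* mirrors the kernel errno value */

#define describe_ret(retval)                                    \
        ((__builtin_constant_p(retval) && (retval) == -EFAULT)  \
                ? "constant -EFAULT: shared __ret_efault fixup" \
                : "other value: private fixup sequence")

int main(void)
{
        int err = -5;

        puts(describe_ret(-EFAULT));    /* takes the constant branch */
        puts(describe_ret(err));        /* takes the generic branch */
        return 0;
}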
+114 -100
arch/sparc/include/asm/uaccess_64.h
··· 41 41 #define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)})
42 42 #define get_ds() (KERNEL_DS)
43 43 
44 - #define segment_eq(a,b) ((a).seg == (b).seg)
44 + #define segment_eq(a, b) ((a).seg == (b).seg)
45 45 
46 46 #define set_fs(val) \
47 47 do { \
48 - current_thread_info()->current_ds =(val).seg; \
48 + current_thread_info()->current_ds = (val).seg; \
49 49 __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \
50 50 } while(0)
51 51 
··· 88 88 * of a performance impact. Thus we have a few rather ugly macros here,
89 89 * and hide all the ugliness from the user.
90 90 */
91 - #define put_user(x,ptr) ({ \
92 - unsigned long __pu_addr = (unsigned long)(ptr); \
93 - __chk_user_ptr(ptr); \
94 - __put_user_nocheck((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); })
91 + #define put_user(x, ptr) ({ \
92 + unsigned long __pu_addr = (unsigned long)(ptr); \
93 + __chk_user_ptr(ptr); \
94 + __put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
95 + })
95 96 
96 - #define get_user(x,ptr) ({ \
97 - unsigned long __gu_addr = (unsigned long)(ptr); \
98 - __chk_user_ptr(ptr); \
99 - __get_user_nocheck((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); })
97 + #define get_user(x, ptr) ({ \
98 + unsigned long __gu_addr = (unsigned long)(ptr); \
99 + __chk_user_ptr(ptr); \
100 + __get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
101 + })
100 102 
101 - #define __put_user(x,ptr) put_user(x,ptr)
102 - #define __get_user(x,ptr) get_user(x,ptr)
103 + #define __put_user(x, ptr) put_user(x, ptr)
104 + #define __get_user(x, ptr) get_user(x, ptr)
103 105 
104 106 struct __large_struct { unsigned long buf[100]; };
105 107 #define __m(x) ((struct __large_struct *)(x))
106 108 
107 - #define __put_user_nocheck(data,addr,size) ({ \
108 - register int __pu_ret; \
109 - switch (size) { \
110 - case 1: __put_user_asm(data,b,addr,__pu_ret); break; \
111 - case 2: __put_user_asm(data,h,addr,__pu_ret); break; \
112 - case 4: __put_user_asm(data,w,addr,__pu_ret); break; \
113 - case 8: __put_user_asm(data,x,addr,__pu_ret); break; \
114 - default: __pu_ret = __put_user_bad(); break; \
115 - } __pu_ret; })
109 + #define __put_user_nocheck(data, addr, size) ({ \
110 + register int __pu_ret; \
111 + switch (size) { \
112 + case 1: __put_user_asm(data, b, addr, __pu_ret); break; \
113 + case 2: __put_user_asm(data, h, addr, __pu_ret); break; \
114 + case 4: __put_user_asm(data, w, addr, __pu_ret); break; \
115 + case 8: __put_user_asm(data, x, addr, __pu_ret); break; \
116 + default: __pu_ret = __put_user_bad(); break; \
117 + } \
118 + __pu_ret; \
119 + })
116 120 
117 - #define __put_user_asm(x,size,addr,ret) \
121 + #define __put_user_asm(x, size, addr, ret) \
118 122 __asm__ __volatile__( \
119 - "/* Put user asm, inline. */\n" \
120 - "1:\t" "st"#size "a %1, [%2] %%asi\n\t" \
121 - "clr %0\n" \
122 - "2:\n\n\t" \
123 - ".section .fixup,#alloc,#execinstr\n\t" \
124 - ".align 4\n" \
125 - "3:\n\t" \
126 - "sethi %%hi(2b), %0\n\t" \
127 - "jmpl %0 + %%lo(2b), %%g0\n\t" \
128 - " mov %3, %0\n\n\t" \
129 - ".previous\n\t" \
130 - ".section __ex_table,\"a\"\n\t" \
131 - ".align 4\n\t" \
132 - ".word 1b, 3b\n\t" \
133 - ".previous\n\n\t" \
134 - : "=r" (ret) : "r" (x), "r" (__m(addr)), \
135 - "i" (-EFAULT))
123 + "/* Put user asm, inline. */\n" \
124 + "1:\t" "st"#size "a %1, [%2] %%asi\n\t" \
125 + "clr %0\n" \
126 + "2:\n\n\t" \
127 + ".section .fixup,#alloc,#execinstr\n\t" \
128 + ".align 4\n" \
129 + "3:\n\t" \
130 + "sethi %%hi(2b), %0\n\t" \
131 + "jmpl %0 + %%lo(2b), %%g0\n\t" \
132 + " mov %3, %0\n\n\t" \
133 + ".previous\n\t" \
134 + ".section __ex_table,\"a\"\n\t" \
135 + ".align 4\n\t" \
136 + ".word 1b, 3b\n\t" \
137 + ".previous\n\n\t" \
138 + : "=r" (ret) : "r" (x), "r" (__m(addr)), \
139 + "i" (-EFAULT))
136 140 
137 141 int __put_user_bad(void);
138 142 
139 - #define __get_user_nocheck(data,addr,size,type) ({ \
140 - register int __gu_ret; \
141 - register unsigned long __gu_val; \
142 - switch (size) { \
143 - case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \
144 - case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \
145 - case 4: __get_user_asm(__gu_val,uw,addr,__gu_ret); break; \
146 - case 8: __get_user_asm(__gu_val,x,addr,__gu_ret); break; \
147 - default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \
148 - } data = (type) __gu_val; __gu_ret; })
143 + #define __get_user_nocheck(data, addr, size, type) ({ \
144 + register int __gu_ret; \
145 + register unsigned long __gu_val; \
146 + switch (size) { \
147 + case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \
148 + case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \
149 + case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \
150 + case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break; \
151 + default: \
152 + __gu_val = 0; \
153 + __gu_ret = __get_user_bad(); \
154 + break; \
155 + } \
156 + data = (__force type) __gu_val; \
157 + __gu_ret; \
158 + })
149 159 
150 - #define __get_user_nocheck_ret(data,addr,size,type,retval) ({ \
151 - register unsigned long __gu_val __asm__ ("l1"); \
152 - switch (size) { \
153 - case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \
154 - case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \
155 - case 4: __get_user_asm_ret(__gu_val,uw,addr,retval); break; \
156 - case 8: __get_user_asm_ret(__gu_val,x,addr,retval); break; \
157 - default: if (__get_user_bad()) return retval; \
158 - } data = (type) __gu_val; })
160 + #define __get_user_nocheck_ret(data, addr, size, type, retval) ({ \
161 + register unsigned long __gu_val __asm__ ("l1"); \
162 + switch (size) { \
163 + case 1: __get_user_asm_ret(__gu_val, ub, addr, retval); break; \
164 + case 2: __get_user_asm_ret(__gu_val, uh, addr, retval); break; \
165 + case 4: __get_user_asm_ret(__gu_val, uw, addr, retval); break; \
166 + case 8: __get_user_asm_ret(__gu_val, x, addr, retval); break; \
167 + default: \
168 + if (__get_user_bad()) \
169 + return retval; \
170 + } \
171 + data = (__force type) __gu_val; \
172 + })
159 173 
160 - #define __get_user_asm(x,size,addr,ret) \
174 + #define __get_user_asm(x, size, addr, ret) \
161 175 __asm__ __volatile__( \
162 - "/* Get user asm, inline. */\n" \
163 - "1:\t" "ld"#size "a [%2] %%asi, %1\n\t" \
164 - "clr %0\n" \
165 - "2:\n\n\t" \
166 - ".section .fixup,#alloc,#execinstr\n\t" \
167 - ".align 4\n" \
168 - "3:\n\t" \
169 - "sethi %%hi(2b), %0\n\t" \
170 - "clr %1\n\t" \
171 - "jmpl %0 + %%lo(2b), %%g0\n\t" \
172 - " mov %3, %0\n\n\t" \
173 - ".previous\n\t" \
174 - ".section __ex_table,\"a\"\n\t" \
175 - ".align 4\n\t" \
176 - ".word 1b, 3b\n\n\t" \
177 - ".previous\n\t" \
178 - : "=r" (ret), "=r" (x) : "r" (__m(addr)), \
179 - "i" (-EFAULT))
176 + "/* Get user asm, inline. */\n" \
177 + "1:\t" "ld"#size "a [%2] %%asi, %1\n\t" \
178 + "clr %0\n" \
179 + "2:\n\n\t" \
180 + ".section .fixup,#alloc,#execinstr\n\t" \
181 + ".align 4\n" \
182 + "3:\n\t" \
183 + "sethi %%hi(2b), %0\n\t" \
184 + "clr %1\n\t" \
185 + "jmpl %0 + %%lo(2b), %%g0\n\t" \
186 + " mov %3, %0\n\n\t" \
187 + ".previous\n\t" \
188 + ".section __ex_table,\"a\"\n\t" \
189 + ".align 4\n\t" \
190 + ".word 1b, 3b\n\n\t" \
191 + ".previous\n\t" \
192 + : "=r" (ret), "=r" (x) : "r" (__m(addr)), \
193 + "i" (-EFAULT))
180 194 
181 - #define __get_user_asm_ret(x,size,addr,retval) \
195 + #define __get_user_asm_ret(x, size, addr, retval) \
182 196 if (__builtin_constant_p(retval) && retval == -EFAULT) \
183 - __asm__ __volatile__( \
184 - "/* Get user asm ret, inline. */\n" \
185 - "1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \
186 - ".section __ex_table,\"a\"\n\t" \
187 - ".align 4\n\t" \
188 - ".word 1b,__ret_efault\n\n\t" \
189 - ".previous\n\t" \
190 - : "=r" (x) : "r" (__m(addr))); \
197 + __asm__ __volatile__( \
198 + "/* Get user asm ret, inline. */\n" \
199 + "1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \
200 + ".section __ex_table,\"a\"\n\t" \
201 + ".align 4\n\t" \
202 + ".word 1b,__ret_efault\n\n\t" \
203 + ".previous\n\t" \
204 + : "=r" (x) : "r" (__m(addr))); \
191 205 else \
192 - __asm__ __volatile__( \
193 - "/* Get user asm ret, inline. */\n" \
194 - "1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \
195 - ".section .fixup,#alloc,#execinstr\n\t" \
196 - ".align 4\n" \
197 - "3:\n\t" \
198 - "ret\n\t" \
199 - " restore %%g0, %2, %%o0\n\n\t" \
200 - ".previous\n\t" \
201 - ".section __ex_table,\"a\"\n\t" \
202 - ".align 4\n\t" \
203 - ".word 1b, 3b\n\n\t" \
204 - ".previous\n\t" \
205 - : "=r" (x) : "r" (__m(addr)), "i" (retval))
206 + __asm__ __volatile__( \
207 + "/* Get user asm ret, inline. */\n" \
208 + "1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \
209 + ".section .fixup,#alloc,#execinstr\n\t" \
210 + ".align 4\n" \
211 + "3:\n\t" \
212 + "ret\n\t" \
213 + " restore %%g0, %2, %%o0\n\n\t" \
214 + ".previous\n\t" \
215 + ".section __ex_table,\"a\"\n\t" \
216 + ".align 4\n\t" \
217 + ".word 1b, 3b\n\n\t" \
218 + ".previous\n\t" \
219 + : "=r" (x) : "r" (__m(addr)), "i" (retval))
206 220 
207 221 int __get_user_bad(void);
208 222 
+1 -1
arch/x86/include/asm/uaccess.h
··· 179 179 asm volatile("call __get_user_%P3" \
180 180 : "=a" (__ret_gu), "=r" (__val_gu) \
181 181 : "0" (ptr), "i" (sizeof(*(ptr)))); \
182 - (x) = (__typeof__(*(ptr))) __val_gu; \
182 + (x) = (__force __typeof__(*(ptr))) __val_gu; \
183 183 __ret_gu; \
184 184 })
185 185 
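The `%P3` operand here is the constant `sizeof(*(ptr))`, so every `get_user()` site resolves to exactly one size-specialized helper at compile time. A user-space analogue of that constant-folded dispatch (assumed sketch for a little-endian host; `fetch_user` is an invented name, not a kernel API):

#include <stdint.h>
#include <string.h>

#define fetch_user(x, ptr) ({					\
	uint64_t __val = 0;					\
	int __err = 0;						\
	switch (sizeof(*(ptr))) {	/* compile-time constant */ \
	case 1: case 2: case 4: case 8:				\
		memcpy(&__val, (ptr), sizeof(*(ptr)));		\
		break;						\
	default:						\
		__err = -1;	/* no helper for this size */	\
	}							\
	(x) = (__typeof__(*(ptr)))__val;			\
	__err;							\
})

At -O2 the switch folds to the single matching case, mirroring how the kernel macro emits one `call __get_user_N`; e.g. `uint16_t v; int err = fetch_user(v, p);` compiles to a plain 2-byte copy.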
+45 -45
arch/xtensa/include/asm/uaccess.h
··· 182 182 #define get_fs() (current->thread.current_ds)
183 183 #define set_fs(val) (current->thread.current_ds = (val))
184 184 
185 - #define segment_eq(a,b) ((a).seg == (b).seg)
185 + #define segment_eq(a, b) ((a).seg == (b).seg)
186 186 
187 187 #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
188 - #define __user_ok(addr,size) \
188 + #define __user_ok(addr, size) \
189 189 (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
190 - #define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
191 - #define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
190 + #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
191 + #define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))
192 192 
193 193 /*
194 194 * These are the main single-value transfer routines. They
··· 204 204 * (a) re-use the arguments for side effects (sizeof is ok)
205 205 * (b) require any knowledge of processes at this stage
206 206 */
207 - #define put_user(x,ptr) __put_user_check((x),(ptr),sizeof(*(ptr)))
208 - #define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr)))
207 + #define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr)))
208 + #define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))
209 209 
210 210 /*
211 211 * The "__xxx" versions of the user access functions are versions that
··· 213 213 * with a separate "access_ok()" call (this is used when we do multiple
214 214 * accesses to the same area of user memory).
215 215 */
216 - #define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
217 - #define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
216 + #define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
217 + #define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
218 218 
219 219 
220 220 extern long __put_user_bad(void);
221 221 
222 - #define __put_user_nocheck(x,ptr,size) \
222 + #define __put_user_nocheck(x, ptr, size) \
223 223 ({ \
224 224 long __pu_err; \
225 - __put_user_size((x),(ptr),(size),__pu_err); \
225 + __put_user_size((x), (ptr), (size), __pu_err); \
226 226 __pu_err; \
227 227 })
228 228 
229 - #define __put_user_check(x,ptr,size) \
230 - ({ \
231 - long __pu_err = -EFAULT; \
232 - __typeof__(*(ptr)) *__pu_addr = (ptr); \
233 - if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
234 - __put_user_size((x),__pu_addr,(size),__pu_err); \
235 - __pu_err; \
229 + #define __put_user_check(x, ptr, size) \
230 + ({ \
231 + long __pu_err = -EFAULT; \
232 + __typeof__(*(ptr)) *__pu_addr = (ptr); \
233 + if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
234 + __put_user_size((x), __pu_addr, (size), __pu_err); \
235 + __pu_err; \
236 236 })
237 237 
238 - #define __put_user_size(x,ptr,size,retval) \
238 + #define __put_user_size(x, ptr, size, retval) \
239 239 do { \
240 240 int __cb; \
241 241 retval = 0; \
242 242 switch (size) { \
243 - case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb); break; \
244 - case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break; \
245 - case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break; \
243 + case 1: __put_user_asm(x, ptr, retval, 1, "s8i", __cb); break; \
244 + case 2: __put_user_asm(x, ptr, retval, 2, "s16i", __cb); break; \
245 + case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break; \
246 246 case 8: { \
247 247 __typeof__(*ptr) __v64 = x; \
248 - retval = __copy_to_user(ptr,&__v64,8); \
248 + retval = __copy_to_user(ptr, &__v64, 8); \
249 249 break; \
250 250 } \
251 251 default: __put_user_bad(); \
··· 316 316 :"=r" (err), "=r" (cb) \
317 317 :"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))
318 318 
319 - #define __get_user_nocheck(x,ptr,size) \
319 + #define __get_user_nocheck(x, ptr, size) \
320 320 ({ \
321 321 long __gu_err, __gu_val; \
322 - __get_user_size(__gu_val,(ptr),(size),__gu_err); \
323 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
322 + __get_user_size(__gu_val, (ptr), (size), __gu_err); \
323 + (x) = (__force __typeof__(*(ptr)))__gu_val; \
324 324 __gu_err; \
325 325 })
326 326 
327 - #define __get_user_check(x,ptr,size) \
327 + #define __get_user_check(x, ptr, size) \
328 328 ({ \
329 329 long __gu_err = -EFAULT, __gu_val = 0; \
330 330 const __typeof__(*(ptr)) *__gu_addr = (ptr); \
331 - if (access_ok(VERIFY_READ,__gu_addr,size)) \
332 - __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \
333 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
331 + if (access_ok(VERIFY_READ, __gu_addr, size)) \
332 + __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
333 + (x) = (__force __typeof__(*(ptr)))__gu_val; \
334 334 __gu_err; \
335 335 })
336 336 
337 337 extern long __get_user_bad(void);
338 338 
339 - #define __get_user_size(x,ptr,size,retval) \
339 + #define __get_user_size(x, ptr, size, retval) \
340 340 do { \
341 341 int __cb; \
342 342 retval = 0; \
343 343 switch (size) { \
344 - case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb); break; \
345 - case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break; \
346 - case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb); break; \
347 - case 8: retval = __copy_from_user(&x,ptr,8); break; \
344 + case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb); break;\
345 + case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
346 + case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb); break;\
347 + case 8: retval = __copy_from_user(&x, ptr, 8); break; \
348 348 default: (x) = __get_user_bad(); \
349 349 } \
350 350 } while (0)
··· 390 390 */
391 391 
392 392 extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n);
393 - #define __copy_user(to,from,size) __xtensa_copy_user(to,from,size)
393 + #define __copy_user(to, from, size) __xtensa_copy_user(to, from, size)
394 394 
395 395 
396 396 static inline unsigned long
397 397 __generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
398 398 {
399 - return __copy_user(to,from,n);
399 + return __copy_user(to, from, n);
400 400 }
401 401 
402 402 static inline unsigned long
403 403 __generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
404 404 {
405 - return __copy_user(to,from,n);
405 + return __copy_user(to, from, n);
406 406 }
407 407 
408 408 static inline unsigned long
··· 410 410 {
411 411 prefetch(from);
412 412 if (access_ok(VERIFY_WRITE, to, n))
413 - return __copy_user(to,from,n);
413 + return __copy_user(to, from, n);
414 414 return n;
415 415 }
416 416 
··· 419 419 {
420 420 prefetchw(to);
421 421 if (access_ok(VERIFY_READ, from, n))
422 - return __copy_user(to,from,n);
422 + return __copy_user(to, from, n);
423 423 else
424 424 memset(to, 0, n);
425 425 return n;
426 426 }
427 427 
428 - #define copy_to_user(to,from,n) __generic_copy_to_user((to),(from),(n))
429 - #define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n))
430 - #define __copy_to_user(to,from,n) \
431 - __generic_copy_to_user_nocheck((to),(from),(n))
432 - #define __copy_from_user(to,from,n) \
433 - __generic_copy_from_user_nocheck((to),(from),(n))
428 + #define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n))
429 + #define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))
430 + #define __copy_to_user(to, from, n) \
431 + __generic_copy_to_user_nocheck((to), (from), (n))
432 + #define __copy_from_user(to, from, n) \
433 + __generic_copy_from_user_nocheck((to), (from), (n))
434 434 #define __copy_to_user_inatomic __copy_to_user
435 435 #define __copy_from_user_inatomic __copy_from_user
436 436 
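Note the `memset(to, 0, n)` on the failure path of the read direction above: a caller that ignores the return value still never observes stale kernel memory in the destination buffer. A hypothetical caller sketch (invented for illustration, not from the patch; `example_read_config` and its parameters are assumptions):

#include <linux/errno.h>
#include <linux/uaccess.h>

static int example_read_config(char *kbuf, unsigned long klen,
			       const void __user *ubuf, unsigned long ulen)
{
	if (ulen > klen)
		return -EINVAL;
	/* A non-zero return is the number of bytes NOT copied. */
	if (copy_from_user(kbuf, ubuf, ulen))
		return -EFAULT;
	return 0;
}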