Merge branch 'work.uaccess-unaligned' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull uaccess-unaligned removal from Al Viro:
"That stuff had just one user, and an exotic one, at that - binfmt_flat
on arm and m68k"

* 'work.uaccess-unaligned' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
kill {__,}{get,put}_user_unaligned()
binfmt_flat: flat_{get,put}_addr_from_rp() should be able to fail
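
In other words, each per-arch hook now returns 0 or -EFAULT and hands the
value back through a pointer, so binfmt_flat can propagate faults instead of
silently relocating with a garbage address. A minimal sketch of the new
contract (the signature and the caller-side check are taken from the arm and
fs/binfmt_flat.c hunks below):

	/* Per-arch hook: returns 0 on success, -EFAULT on a faulting access. */
	static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval,
						u32 flags, u32 *addr,
						u32 *persistent);

	/* Caller in fs/binfmt_flat.c now checks the result: */
	ret = flat_get_addr_from_rp(rp, relval, flags, &addr, &persistent);
	if (unlikely(ret))
		goto err;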

Changed files
+168 -450
+22 -3
arch/arm/include/asm/flat.h
···
 #ifndef __ARM_FLAT_H__
 #define __ARM_FLAT_H__
 
+#include <linux/uaccess.h>
+
 #define	flat_argvp_envp_on_stack()		1
 #define	flat_old_ram_flag(flags)		(flags)
 #define	flat_reloc_valid(reloc, size)		((reloc) <= (size))
-#define	flat_get_addr_from_rp(rp, relval, flags, persistent) \
-	({ unsigned long __val; __get_user_unaligned(__val, rp); __val; })
-#define	flat_put_addr_at_rp(rp, val, relval)	__put_user_unaligned(val, rp)
+
+static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
+					u32 *addr, u32 *persistent)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+	return copy_from_user(addr, rp, 4) ? -EFAULT : 0;
+#else
+	return get_user(*addr, rp);
+#endif
+}
+
+static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+	return copy_to_user(rp, &addr, 4) ? -EFAULT : 0;
+#else
+	return put_user(addr, rp);
+#endif
+}
+
 #define	flat_get_relocate_addr(rel)		(rel)
 #define	flat_set_persistent(relval, p)		0
-7
arch/arm/include/asm/uaccess.h
···
 #include <asm/unified.h>
 #include <asm/compiler.h>
 
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#include <asm-generic/uaccess-unaligned.h>
-#else
-#define __get_user_unaligned __get_user
-#define __put_user_unaligned __put_user
-#endif
-
 #include <asm/extable.h>
 
 /*
-4
arch/arm64/include/asm/uaccess.h
···
 	(void)0;						\
 })
 
-#define __get_user_unaligned __get_user
-
 #define get_user(x, ptr)					\
 ({								\
 	__typeof__(*(ptr)) __user *__p = (ptr);			\
···
 	__put_user_err((x), (ptr), (err));			\
 	(void)0;						\
 })
-
-#define __put_user_unaligned __put_user
 
 #define put_user(x, ptr)					\
 ({								\
+15 -10
arch/blackfin/include/asm/flat.h
···
 #define	flat_argvp_envp_on_stack()		0
 #define	flat_old_ram_flag(flags)		(flags)
 
-extern unsigned long bfin_get_addr_from_rp (unsigned long *ptr,
-					unsigned long relval,
-					unsigned long flags,
-					unsigned long *persistent);
+extern unsigned long bfin_get_addr_from_rp (u32 *ptr, u32 relval,
+					u32 flags, u32 *persistent);
 
-extern void bfin_put_addr_at_rp(unsigned long *ptr, unsigned long addr,
-				unsigned long relval);
+extern void bfin_put_addr_at_rp(u32 *ptr, u32 addr, u32 relval);
 
 /* The amount by which a relocation can exceed the program image limits
    without being regarded as an error.  */
 
 #define	flat_reloc_valid(reloc, size)	((reloc) <= (size))
 
-#define	flat_get_addr_from_rp(rp, relval, flags, persistent)	\
-	bfin_get_addr_from_rp(rp, relval, flags, persistent)
-#define	flat_put_addr_at_rp(rp, val, relval)	\
-	bfin_put_addr_at_rp(rp, val, relval)
+static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
+					u32 *addr, u32 *persistent)
+{
+	*addr = bfin_get_addr_from_rp(rp, relval, flags, persistent);
+	return 0;
+}
+
+static inline int flat_put_addr_at_rp(u32 __user *rp, u32 val, u32 relval)
+{
+	bfin_put_addr_at_rp(rp, val, relval);
+	return 0;
+}
 
 /* Convert a relocation entry into an address.  */
 static inline unsigned long
+6 -7
arch/blackfin/kernel/flat.c
···
 #define FLAT_BFIN_RELOC_TYPE_16H_BIT 1
 #define FLAT_BFIN_RELOC_TYPE_32_BIT 2
 
-unsigned long bfin_get_addr_from_rp(unsigned long *ptr,
-		unsigned long relval,
-		unsigned long flags,
-		unsigned long *persistent)
+unsigned long bfin_get_addr_from_rp(u32 *ptr,
+		u32 relval,
+		u32 flags,
+		u32 *persistent)
 {
 	unsigned short *usptr = (unsigned short *)ptr;
 	int type = (relval >> 26) & 7;
-	unsigned long val;
+	u32 val;
 
 	switch (type) {
 	case FLAT_BFIN_RELOC_TYPE_16_BIT:
···
 * Insert the address ADDR into the symbol reference at RP;
 * RELVAL is the raw relocation-table entry from which RP is derived
 */
-void bfin_put_addr_at_rp(unsigned long *ptr, unsigned long addr,
-		unsigned long relval)
+void bfin_put_addr_at_rp(u32 *ptr, u32 addr, u32 relval)
 {
 	unsigned short *usptr = (unsigned short *)ptr;
 	int type = (relval >> 26) & 7;
+13 -2
arch/c6x/include/asm/flat.h
···
 #ifndef __ASM_C6X_FLAT_H
 #define __ASM_C6X_FLAT_H
 
+#include <asm/unaligned.h>
+
 #define flat_argvp_envp_on_stack()			0
 #define flat_old_ram_flag(flags)			(flags)
 #define flat_reloc_valid(reloc, size)			((reloc) <= (size))
-#define flat_get_addr_from_rp(rp, relval, flags, p)	get_unaligned(rp)
-#define flat_put_addr_at_rp(rp, val, relval)		put_unaligned(val, rp)
+static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
+					u32 *addr, u32 *persistent)
+{
+	*addr = get_unaligned((__force u32 *)rp);
+	return 0;
+}
+static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
+{
+	put_unaligned(addr, (__force u32 *)rp);
+	return 0;
+}
 #define flat_get_relocate_addr(rel)			(rel)
 #define flat_set_persistent(relval, p)			0
+18 -6
arch/h8300/include/asm/flat.h
···
 #ifndef __H8300_FLAT_H__
 #define __H8300_FLAT_H__
 
+#include <asm/unaligned.h>
+
 #define	flat_argvp_envp_on_stack()		1
 #define	flat_old_ram_flag(flags)		1
 #define	flat_reloc_valid(reloc, size)		((reloc) <= (size))
···
 */
 
 #define	flat_get_relocate_addr(rel)		(rel & ~0x00000001)
-#define flat_get_addr_from_rp(rp, relval, flags, persistent) \
-	({(void)persistent; \
-		get_unaligned(rp) & (((flags) & FLAT_FLAG_GOTPIC) ? \
-				     0xffffffff : 0x00ffffff); })
-#define flat_put_addr_at_rp(rp, addr, rel) \
-	put_unaligned(((*(char *)(rp)) << 24) | ((addr) & 0x00ffffff), (rp))
+static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
+					u32 *addr, u32 *persistent)
+{
+	u32 val = get_unaligned((__force u32 *)rp);
+	if (!(flags & FLAT_FLAG_GOTPIC))
+		val &= 0x00ffffff;
+	*addr = val;
+	return 0;
+}
+
+static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
+{
+	u32 *p = (__force u32 *)rp;
+	put_unaligned((addr & 0x00ffffff) | (*(char *)p << 24), p);
+	return 0;
+}
 
 #endif /* __H8300_FLAT_H__ */
-36
arch/ia64/include/asm/uaccess.h
···
 #define __put_user(x, ptr)	__put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
 #define __get_user(x, ptr)	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
 
-extern long __put_user_unaligned_unknown (void);
-
-#define __put_user_unaligned(x, ptr) \
-({ \
-	long __ret; \
-	switch (sizeof(*(ptr))) { \
-		case 1: __ret = __put_user((x), (ptr)); break; \
-		case 2: __ret = (__put_user((x), (u8 __user *)(ptr))) \
-			| (__put_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break; \
-		case 4: __ret = (__put_user((x), (u16 __user *)(ptr))) \
-			| (__put_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break; \
-		case 8: __ret = (__put_user((x), (u32 __user *)(ptr))) \
-			| (__put_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break; \
-		default: __ret = __put_user_unaligned_unknown(); \
-	} \
-	__ret; \
-})
-
-extern long __get_user_unaligned_unknown (void);
-
-#define __get_user_unaligned(x, ptr) \
-({ \
-	long __ret; \
-	switch (sizeof(*(ptr))) { \
-		case 1: __ret = __get_user((x), (ptr)); break; \
-		case 2: __ret = (__get_user((x), (u8 __user *)(ptr))) \
-			| (__get_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break; \
-		case 4: __ret = (__get_user((x), (u16 __user *)(ptr))) \
-			| (__get_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break; \
-		case 8: __ret = (__get_user((x), (u32 __user *)(ptr))) \
-			| (__get_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break; \
-		default: __ret = __get_user_unaligned_unknown(); \
-	} \
-	__ret; \
-})
-
 #ifdef ASM_SUPPORTED
 struct __large_struct { unsigned long buf[100]; };
 # define __m(x) (*(struct __large_struct __user *)(x))
+8 -11
arch/m32r/include/asm/flat.h
···
 #define	flat_set_persistent(relval, p)		0
 #define	flat_reloc_valid(reloc, size)		\
 	(((reloc) - textlen_for_m32r_lo16_data) <= (size))
-#define flat_get_addr_from_rp(rp, relval, flags, persistent) \
-	m32r_flat_get_addr_from_rp(rp, relval, (text_len) )
-
-#define flat_put_addr_at_rp(rp, addr, relval) \
-	m32r_flat_put_addr_at_rp(rp, addr, relval)
 
 /* Convert a relocation entry into an address.  */
 static inline unsigned long
···
 
 static unsigned long textlen_for_m32r_lo16_data = 0;
 
-static inline unsigned long m32r_flat_get_addr_from_rp (unsigned long *rp,
-							unsigned long relval,
-							unsigned long textlen)
+static inline unsigned long m32r_flat_get_addr_from_rp (u32 *rp,
+							u32 relval,
+							u32 textlen)
 {
 	unsigned int reloc = flat_m32r_get_reloc_type (relval);
 	textlen_for_m32r_lo16_data = 0;
···
 	return ~0;      /* bogus value */
 }
 
-static inline void m32r_flat_put_addr_at_rp (unsigned long *rp,
-					     unsigned long addr,
-					     unsigned long relval)
+static inline int flat_put_addr_at_rp(u32 *rp, u32 addr, u32 relval)
 {
 	unsigned int reloc = flat_m32r_get_reloc_type (relval);
 	if (reloc & 0xf0) {
···
 		}
 	}
+	return 0;
 }
+
+// kludge - text_len is a local variable in the only user.
+#define flat_get_addr_from_rp(rp, relval, flags, addr, persistent) \
+	(m32r_flat_get_addr_from_rp(rp, relval, text_len), 0)
 
 #endif  /* __ASM_M32R_FLAT_H */
+20 -3
arch/m68k/include/asm/flat.h
···
 #ifndef __M68KNOMMU_FLAT_H__
 #define __M68KNOMMU_FLAT_H__
 
+#include <linux/uaccess.h>
+
 #define	flat_argvp_envp_on_stack()		1
 #define	flat_old_ram_flag(flags)		(flags)
 #define	flat_reloc_valid(reloc, size)		((reloc) <= (size))
-#define	flat_get_addr_from_rp(rp, relval, flags, p) \
-	({ unsigned long __val; __get_user_unaligned(__val, rp); __val; })
-#define	flat_put_addr_at_rp(rp, val, relval)	__put_user_unaligned(val, rp)
+static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
+					u32 *addr, u32 *persistent)
+{
+#ifdef CONFIG_CPU_HAS_NO_UNALIGNED
+	return copy_from_user(addr, rp, 4) ? -EFAULT : 0;
+#else
+	return get_user(*addr, rp);
+#endif
+}
+
+static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
+{
+#ifdef CONFIG_CPU_HAS_NO_UNALIGNED
+	return copy_to_user(rp, &addr, 4) ? -EFAULT : 0;
+#else
+	return put_user(addr, rp);
+#endif
+}
 #define	flat_get_relocate_addr(rel)		(rel)
 
 static inline int flat_set_persistent(unsigned long relval,
-7
arch/m68k/include/asm/uaccess.h
···
 #else
 #include <asm/uaccess_mm.h>
 #endif
-
 #include <asm/extable.h>
-#ifdef CONFIG_CPU_HAS_NO_UNALIGNED
-#include <asm-generic/uaccess-unaligned.h>
-#else
-#define __get_user_unaligned(x, ptr)	__get_user((x), (ptr))
-#define __put_user_unaligned(x, ptr)	__put_user((x), (ptr))
-#endif
+17 -17
arch/microblaze/include/asm/flat.h
···
 * reference
 */
 
-static inline unsigned long
-flat_get_addr_from_rp(unsigned long *rp, unsigned long relval,
-		      unsigned long flags, unsigned long *persistent)
+static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
+					u32 *addr, u32 *persistent)
 {
-	unsigned long addr;
-	(void)flags;
+	u32 *p = (__force u32 *)rp;
 
 	/* Is it a split 64/32 reference? */
 	if (relval & 0x80000000) {
 		/* Grab the two halves of the reference */
-		unsigned long val_hi, val_lo;
+		u32 val_hi, val_lo;
 
-		val_hi = get_unaligned(rp);
-		val_lo = get_unaligned(rp+1);
+		val_hi = get_unaligned(p);
+		val_lo = get_unaligned(p+1);
 
 		/* Crack the address out */
-		addr = ((val_hi & 0xffff) << 16) + (val_lo & 0xffff);
+		*addr = ((val_hi & 0xffff) << 16) + (val_lo & 0xffff);
 	} else {
 		/* Get the address straight out */
-		addr = get_unaligned(rp);
+		*addr = get_unaligned(p);
 	}
 
-	return addr;
+	return 0;
 }
 
 /*
···
 */
 
-static inline void
-flat_put_addr_at_rp(unsigned long *rp, unsigned long addr, unsigned long relval)
+static inline int
+flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 relval)
 {
+	u32 *p = (__force u32 *)rp;
 	/* Is this a split 64/32 reloc? */
 	if (relval & 0x80000000) {
 		/* Get the two "halves" */
-		unsigned long val_hi = get_unaligned(rp);
-		unsigned long val_lo = get_unaligned(rp + 1);
+		unsigned long val_hi = get_unaligned(p);
+		unsigned long val_lo = get_unaligned(p + 1);
 
 		/* insert the address */
 		val_hi = (val_hi & 0xffff0000) | addr >> 16;
 		val_lo = (val_lo & 0xffff0000) | (addr & 0xffff);
 
 		/* store the two halves back into memory */
-		put_unaligned(val_hi, rp);
-		put_unaligned(val_lo, rp+1);
+		put_unaligned(val_hi, p);
+		put_unaligned(val_lo, p+1);
 	} else {
 		/* Put it straight in, no messing around */
-		put_unaligned(addr, rp);
+		put_unaligned(addr, p);
 	}
+	return 0;
 }
 
 #define	flat_get_relocate_addr(rel)	(rel & 0x7fffffff)
-277
arch/mips/include/asm/uaccess.h
···
 extern void __put_user_unknown(void);
 
 /*
- * ul{b,h,w} are macros and there are no equivalent macros for EVA.
- * EVA unaligned access is handled in the ADE exception handler.
- */
-#ifndef CONFIG_EVA
-/*
- * put_user_unaligned: - Write a simple value into user space.
- * @x:	 Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space.  It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and @x must be assignable
- * to the result of dereferencing @ptr.
- *
- * Returns zero on success, or -EFAULT on error.
- */
-#define put_user_unaligned(x,ptr) \
-	__put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
-
-/*
- * get_user_unaligned: - Get a simple variable from user space.
- * @x:	 Variable to store result.
- * @ptr: Source address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space.  It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and the result of
- * dereferencing @ptr must be assignable to @x without a cast.
- *
- * Returns zero on success, or -EFAULT on error.
- * On error, the variable @x is set to zero.
- */
-#define get_user_unaligned(x,ptr) \
-	__get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
-
-/*
- * __put_user_unaligned: - Write a simple value into user space, with less checking.
- * @x:	 Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * This macro copies a single simple value from kernel space to user
- * space.  It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and @x must be assignable
- * to the result of dereferencing @ptr.
- *
- * Caller must check the pointer with access_ok() before calling this
- * function.
- *
- * Returns zero on success, or -EFAULT on error.
- */
-#define __put_user_unaligned(x,ptr) \
-	__put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
-
-/*
- * __get_user_unaligned: - Get a simple variable from user space, with less checking.
- * @x:	 Variable to store result.
- * @ptr: Source address, in user space.
- *
- * Context: User context only. This function may sleep if pagefaults are
- *          enabled.
- *
- * This macro copies a single simple variable from user space to kernel
- * space.  It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and the result of
- * dereferencing @ptr must be assignable to @x without a cast.
- *
- * Caller must check the pointer with access_ok() before calling this
- * function.
- *
- * Returns zero on success, or -EFAULT on error.
- * On error, the variable @x is set to zero.
- */
-#define __get_user_unaligned(x,ptr) \
-	__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
-
-/*
- * Yuck.  We need two variants, one for 64bit operation and one
- * for 32 bit mode and old iron.
- */
-#ifdef CONFIG_32BIT
-#define __GET_USER_UNALIGNED_DW(val, ptr) \
-	__get_user_unaligned_asm_ll32(val, ptr)
-#endif
-#ifdef CONFIG_64BIT
-#define __GET_USER_UNALIGNED_DW(val, ptr) \
-	__get_user_unaligned_asm(val, "uld", ptr)
-#endif
-
-extern void __get_user_unaligned_unknown(void);
-
-#define __get_user_unaligned_common(val, size, ptr) \
-do { \
-	switch (size) { \
-	case 1: __get_data_asm(val, "lb", ptr); break; \
-	case 2: __get_data_unaligned_asm(val, "ulh", ptr); break; \
-	case 4: __get_data_unaligned_asm(val, "ulw", ptr); break; \
-	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \
-	default: __get_user_unaligned_unknown(); break; \
-	} \
-} while (0)
-
-#define __get_user_unaligned_nocheck(x,ptr,size) \
-({ \
-	int __gu_err; \
- \
-	__get_user_unaligned_common((x), size, ptr); \
-	__gu_err; \
-})
-
-#define __get_user_unaligned_check(x,ptr,size) \
-({ \
-	int __gu_err = -EFAULT; \
-	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
- \
-	if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
-		__get_user_unaligned_common((x), size, __gu_ptr); \
- \
-	__gu_err; \
-})
-
-#define __get_data_unaligned_asm(val, insn, addr) \
-{ \
-	long __gu_tmp; \
- \
-	__asm__ __volatile__( \
-	"1:	" insn "	%1, %3				\n" \
-	"2:							\n" \
-	"	.insn						\n" \
-	"	.section .fixup,\"ax\"				\n" \
-	"3:	li	%0, %4					\n" \
-	"	move	%1, $0					\n" \
-	"	j	2b					\n" \
-	"	.previous					\n" \
-	"	.section __ex_table,\"a\"			\n" \
-	"	"__UA_ADDR "\t1b, 3b				\n" \
-	"	"__UA_ADDR "\t1b + 4, 3b			\n" \
-	"	.previous					\n" \
-	: "=r" (__gu_err), "=r" (__gu_tmp) \
-	: "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \
- \
-	(val) = (__typeof__(*(addr))) __gu_tmp; \
-}
-
-/*
- * Get a long long 64 using 32 bit registers.
- */
-#define __get_user_unaligned_asm_ll32(val, addr) \
-{ \
-	unsigned long long __gu_tmp; \
- \
-	__asm__ __volatile__( \
-	"1:	ulw	%1, (%3)				\n" \
-	"2:	ulw	%D1, 4(%3)				\n" \
-	"	move	%0, $0					\n" \
-	"3:							\n" \
-	"	.insn						\n" \
-	"	.section	.fixup,\"ax\"			\n" \
-	"4:	li	%0, %4					\n" \
-	"	move	%1, $0					\n" \
-	"	move	%D1, $0					\n" \
-	"	j	3b					\n" \
-	"	.previous					\n" \
-	"	.section	__ex_table,\"a\"		\n" \
-	"	" __UA_ADDR "	1b, 4b				\n" \
-	"	" __UA_ADDR "	1b + 4, 4b			\n" \
-	"	" __UA_ADDR "	2b, 4b				\n" \
-	"	" __UA_ADDR "	2b + 4, 4b			\n" \
-	"	.previous					\n" \
-	: "=r" (__gu_err), "=&r" (__gu_tmp) \
-	: "0" (0), "r" (addr), "i" (-EFAULT)); \
-	(val) = (__typeof__(*(addr))) __gu_tmp; \
-}
-
-/*
- * Yuck.  We need two variants, one for 64bit operation and one
- * for 32 bit mode and old iron.
- */
-#ifdef CONFIG_32BIT
-#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
-#endif
-#ifdef CONFIG_64BIT
-#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
-#endif
-
-#define __put_user_unaligned_common(ptr, size) \
-do { \
-	switch (size) { \
-	case 1: __put_data_asm("sb", ptr); break; \
-	case 2: __put_user_unaligned_asm("ush", ptr); break; \
-	case 4: __put_user_unaligned_asm("usw", ptr); break; \
-	case 8: __PUT_USER_UNALIGNED_DW(ptr); break; \
-	default: __put_user_unaligned_unknown(); break; \
-} while (0)
-
-#define __put_user_unaligned_nocheck(x,ptr,size) \
-({ \
-	__typeof__(*(ptr)) __pu_val; \
-	int __pu_err = 0; \
- \
-	__pu_val = (x); \
-	__put_user_unaligned_common(ptr, size); \
-	__pu_err; \
-})
-
-#define __put_user_unaligned_check(x,ptr,size) \
-({ \
-	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
-	__typeof__(*(ptr)) __pu_val = (x); \
-	int __pu_err = -EFAULT; \
- \
-	if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) \
-		__put_user_unaligned_common(__pu_addr, size); \
- \
-	__pu_err; \
-})
-
-#define __put_user_unaligned_asm(insn, ptr) \
-{ \
-	__asm__ __volatile__( \
-	"1:	" insn "	%z2, %3		# __put_user_unaligned_asm\n" \
-	"2:							\n" \
-	"	.insn						\n" \
-	"	.section	.fixup,\"ax\"			\n" \
-	"3:	li	%0, %4					\n" \
-	"	j	2b					\n" \
-	"	.previous					\n" \
-	"	.section	__ex_table,\"a\"		\n" \
-	"	" __UA_ADDR "	1b, 3b				\n" \
-	"	.previous					\n" \
-	: "=r" (__pu_err) \
-	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \
-	  "i" (-EFAULT)); \
-}
-
-#define __put_user_unaligned_asm_ll32(ptr) \
-{ \
-	__asm__ __volatile__( \
-	"1:	sw	%2, (%3)	# __put_user_unaligned_asm_ll32	\n" \
-	"2:	sw	%D2, 4(%3)				\n" \
-	"3:							\n" \
-	"	.insn						\n" \
-	"	.section	.fixup,\"ax\"			\n" \
-	"4:	li	%0, %4					\n" \
-	"	j	3b					\n" \
-	"	.previous					\n" \
-	"	.section	__ex_table,\"a\"		\n" \
-	"	" __UA_ADDR "	1b, 4b				\n" \
-	"	" __UA_ADDR "	1b + 4, 4b			\n" \
-	"	" __UA_ADDR "	2b, 4b				\n" \
-	"	" __UA_ADDR "	2b + 4, 4b			\n" \
-	"	.previous" \
-	: "=r" (__pu_err) \
-	: "0" (0), "r" (__pu_val), "r" (ptr), \
-	  "i" (-EFAULT)); \
-}
-
-extern void __put_user_unaligned_unknown(void);
-#endif
-
-/*
 * We're generating jump to subroutines which will be outside the range of
 * jump instructions
 */
-1
arch/parisc/include/asm/uaccess.h
···
 */
 #include <asm/page.h>
 #include <asm/cache.h>
-#include <asm-generic/uaccess-unaligned.h>
 
 #include <linux/bug.h>
 #include <linux/string.h>
-3
arch/powerpc/include/asm/uaccess.h
···
 #define __put_user_inatomic(x, ptr) \
 	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
 
-#define __get_user_unaligned __get_user
-#define __put_user_unaligned __put_user
-
 extern long __put_user_bad(void);
 
 /*
-3
arch/s390/include/asm/uaccess.h
···
 
 int __get_user_bad(void) __attribute__((noreturn));
 
-#define __put_user_unaligned __put_user
-#define __get_user_unaligned __get_user
-
 unsigned long __must_check
 raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);
 
+13 -2
arch/sh/include/asm/flat.h
···
 #ifndef __ASM_SH_FLAT_H
 #define __ASM_SH_FLAT_H
 
+#include <asm/unaligned.h>
+
 #define flat_argvp_envp_on_stack()		0
 #define flat_old_ram_flag(flags)		(flags)
 #define flat_reloc_valid(reloc, size)		((reloc) <= (size))
-#define flat_get_addr_from_rp(rp, relval, flags, p)	get_unaligned(rp)
-#define flat_put_addr_at_rp(rp, val, relval)	put_unaligned(val,rp)
+static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
+					u32 *addr, u32 *persistent)
+{
+	*addr = get_unaligned((__force u32 *)rp);
+	return 0;
+}
+static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
+{
+	put_unaligned(addr, (__force u32 *)rp);
+	return 0;
+}
 #define flat_get_relocate_addr(rel)		(rel)
 #define flat_set_persistent(relval, p)		({ (void)p; 0; })
 
-1
arch/sparc/include/asm/uaccess_64.h
···
 #include <linux/string.h>
 #include <asm/asi.h>
 #include <asm/spitfire.h>
-#include <asm-generic/uaccess-unaligned.h>
 #include <asm/extable_64.h>
 
 #include <asm/processor.h>
-1
arch/tile/include/asm/uaccess.h
···
 * User space memory access functions
 */
 #include <linux/mm.h>
-#include <asm-generic/uaccess-unaligned.h>
 #include <asm/processor.h>
 #include <asm/page.h>
 
-3
arch/x86/include/asm/uaccess.h
···
 #define __put_user(x, ptr) \
 	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
 
-#define __get_user_unaligned __get_user
-#define __put_user_unaligned __put_user
-
 /*
 * {get|put}_user_try and catch
 *
+13 -2
arch/xtensa/include/asm/flat.h
···
 #ifndef __ASM_XTENSA_FLAT_H
 #define __ASM_XTENSA_FLAT_H
 
+#include <asm/unaligned.h>
+
 #define flat_argvp_envp_on_stack()			0
 #define flat_old_ram_flag(flags)			(flags)
 #define flat_reloc_valid(reloc, size)			((reloc) <= (size))
-#define flat_get_addr_from_rp(rp, relval, flags, p)	get_unaligned(rp)
-#define flat_put_addr_at_rp(rp, val, relval)		put_unaligned(val, rp)
+static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
+					u32 *addr, u32 *persistent)
+{
+	*addr = get_unaligned((__force u32 *)rp);
+	return 0;
+}
+static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
+{
+	put_unaligned(addr, (__force u32 *)rp);
+	return 0;
+}
 #define flat_get_relocate_addr(rel)			(rel)
 #define flat_set_persistent(relval, p)			0
 
+22 -17
fs/binfmt_flat.c
···
 {
 	struct flat_hdr *hdr;
 	unsigned long textpos, datapos, realdatastart;
-	unsigned long text_len, data_len, bss_len, stack_len, full_data, flags;
+	u32 text_len, data_len, bss_len, stack_len, full_data, flags;
 	unsigned long len, memp, memp_size, extra, rlim;
-	unsigned long __user *reloc, *rp;
+	u32 __user *reloc, *rp;
 	struct inode *inode;
 	int i, rev, relocs;
 	loff_t fpos;
···
 			goto err;
 		}
 
-		reloc = (unsigned long __user *)
+		reloc = (u32 __user *)
 			(datapos + (ntohl(hdr->reloc_start) - text_len));
 		memp = realdatastart;
 		memp_size = len;
 	} else {
 
-		len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long);
+		len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(u32);
 		len = PAGE_ALIGN(len);
 		textpos = vm_mmap(NULL, 0, len,
 			PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0);
···
 
 		realdatastart = textpos + ntohl(hdr->data_start);
 		datapos = ALIGN(realdatastart +
-				MAX_SHARED_LIBS * sizeof(unsigned long),
+				MAX_SHARED_LIBS * sizeof(u32),
 				FLAT_DATA_ALIGN);
 
-		reloc = (unsigned long __user *)
+		reloc = (u32 __user *)
 			(datapos + (ntohl(hdr->reloc_start) - text_len));
 		memp = textpos;
 		memp_size = len;
···
 			ret = result;
 			pr_err("Unable to read code+data+bss, errno %d\n", ret);
 			vm_munmap(textpos, text_len + data_len + extra +
-				MAX_SHARED_LIBS * sizeof(unsigned long));
+				MAX_SHARED_LIBS * sizeof(u32));
 			goto err;
 		}
 	}
···
 	 * image.
 	 */
 	if (flags & FLAT_FLAG_GOTPIC) {
-		for (rp = (unsigned long __user *)datapos; ; rp++) {
-			unsigned long addr, rp_val;
+		for (rp = (u32 __user *)datapos; ; rp++) {
+			u32 addr, rp_val;
 			if (get_user(rp_val, rp))
 				return -EFAULT;
 			if (rp_val == 0xffffffff)
···
 	 * __start to address 4 so that is okay).
 	 */
 	if (rev > OLD_FLAT_VERSION) {
-		unsigned long __maybe_unused persistent = 0;
+		u32 __maybe_unused persistent = 0;
 		for (i = 0; i < relocs; i++) {
-			unsigned long addr, relval;
+			u32 addr, relval;
 
 			/*
 			 * Get the address of the pointer to be
···
 			if (flat_set_persistent(relval, &persistent))
 				continue;
 			addr = flat_get_relocate_addr(relval);
-			rp = (unsigned long __user *)calc_reloc(addr, libinfo, id, 1);
-			if (rp == (unsigned long __user *)RELOC_FAILED) {
+			rp = (u32 __user *)calc_reloc(addr, libinfo, id, 1);
+			if (rp == (u32 __user *)RELOC_FAILED) {
 				ret = -ENOEXEC;
 				goto err;
 			}
 
 			/* Get the pointer's value.  */
-			addr = flat_get_addr_from_rp(rp, relval, flags,
-							&persistent);
+			ret = flat_get_addr_from_rp(rp, relval, flags,
+							&addr, &persistent);
+			if (unlikely(ret))
+				goto err;
+
 			if (addr != 0) {
 				/*
 				 * Do the relocation.  PIC relocs in the data section are
···
 			}
 
 			/* Write back the relocated pointer.  */
-			flat_put_addr_at_rp(rp, addr, relval);
+			ret = flat_put_addr_at_rp(rp, addr, relval);
+			if (unlikely(ret))
+				goto err;
 		}
 	} else {
 		for (i = 0; i < relocs; i++) {
-			unsigned long relval;
+			u32 relval;
 			if (get_user(relval, reloc + i))
 				return -EFAULT;
 			relval = ntohl(relval);
-26
include/asm-generic/uaccess-unaligned.h
···
-#ifndef __ASM_GENERIC_UACCESS_UNALIGNED_H
-#define __ASM_GENERIC_UACCESS_UNALIGNED_H
-
-/*
- * This macro should be used instead of __get_user() when accessing
- * values at locations that are not known to be aligned.
- */
-#define __get_user_unaligned(x, ptr) \
-({ \
-	__typeof__ (*(ptr)) __x; \
-	__copy_from_user(&__x, (ptr), sizeof(*(ptr))) ? -EFAULT : 0; \
-	(x) = __x; \
-})
-
-
-/*
- * This macro should be used instead of __put_user() when accessing
- * values at locations that are not known to be aligned.
- */
-#define __put_user_unaligned(x, ptr) \
-({ \
-	__typeof__ (*(ptr)) __x = (x); \
-	__copy_to_user((ptr), &__x, sizeof(*(ptr))) ? -EFAULT : 0; \
-})
-
-#endif /* __ASM_GENERIC_UACCESS_UNALIGNED_H */
+1 -1
include/linux/flat.h
···
 #ifndef _LINUX_FLAT_H
 #define _LINUX_FLAT_H
 
-#include <asm/flat.h>
 #include <uapi/linux/flat.h>
+#include <asm/flat.h>
 
 /*
 * While it would be nice to keep this header clean, users of older