Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic:
add generic lib/checksum.c
asm-generic: add a generic uaccess.h
asm-generic: add generic NOMMU versions of some headers
asm-generic: add generic atomic.h and io.h
asm-generic: add legacy I/O header files
asm-generic: add generic versions of common headers
asm-generic: make bitops.h usable
asm-generic: make pci.h usable directly
asm-generic: make get_rtc_time overridable
asm-generic: rename page.h and uaccess.h
asm-generic: rename atomic.h to atomic-long.h
asm-generic: add a generic unistd.h
asm-generic: add generic ABI headers
asm-generic: add generic sysv ipc headers
asm-generic: introduce asm/bitsperlong.h
asm-generic: rename termios.h, signal.h and mman.h

+4727 -469
+1 -1
arch/alpha/include/asm/atomic.h
··· 256 #define smp_mb__before_atomic_inc() smp_mb() 257 #define smp_mb__after_atomic_inc() smp_mb() 258 259 - #include <asm-generic/atomic.h> 260 #endif /* _ALPHA_ATOMIC_H */
··· 256 #define smp_mb__before_atomic_inc() smp_mb() 257 #define smp_mb__after_atomic_inc() smp_mb() 258 259 + #include <asm-generic/atomic-long.h> 260 #endif /* _ALPHA_ATOMIC_H */
+8
arch/alpha/include/asm/bitsperlong.h
···
··· 1 + #ifndef __ASM_ALPHA_BITSPERLONG_H 2 + #define __ASM_ALPHA_BITSPERLONG_H 3 + 4 + #define __BITS_PER_LONG 64 5 + 6 + #include <asm-generic/bitsperlong.h> 7 + 8 + #endif /* __ASM_ALPHA_BITSPERLONG_H */
+1 -1
arch/alpha/include/asm/page.h
··· 93 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 94 95 #include <asm-generic/memory_model.h> 96 - #include <asm-generic/page.h> 97 98 #endif /* _ALPHA_PAGE_H */
··· 93 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 94 95 #include <asm-generic/memory_model.h> 96 + #include <asm-generic/getorder.h> 97 98 #endif /* _ALPHA_PAGE_H */
+1 -1
arch/alpha/include/asm/signal.h
··· 111 #define SIG_UNBLOCK 2 /* for unblocking signals */ 112 #define SIG_SETMASK 3 /* for setting the signal mask */ 113 114 - #include <asm-generic/signal.h> 115 116 #ifdef __KERNEL__ 117 struct osf_sigaction {
··· 111 #define SIG_UNBLOCK 2 /* for unblocking signals */ 112 #define SIG_SETMASK 3 /* for setting the signal mask */ 113 114 + #include <asm-generic/signal-defs.h> 115 116 #ifdef __KERNEL__ 117 struct osf_sigaction {
-3
arch/alpha/include/asm/types.h
··· 25 * These aren't exported outside the kernel to avoid name space clashes 26 */ 27 #ifdef __KERNEL__ 28 - 29 - #define BITS_PER_LONG 64 30 - 31 #ifndef __ASSEMBLY__ 32 33 typedef u64 dma_addr_t;
··· 25 * These aren't exported outside the kernel to avoid name space clashes 26 */ 27 #ifdef __KERNEL__ 28 #ifndef __ASSEMBLY__ 29 30 typedef u64 dma_addr_t;
+1 -1
arch/arm/include/asm/atomic.h
··· 249 #define smp_mb__before_atomic_inc() smp_mb() 250 #define smp_mb__after_atomic_inc() smp_mb() 251 252 - #include <asm-generic/atomic.h> 253 #endif 254 #endif
··· 249 #define smp_mb__before_atomic_inc() smp_mb() 250 #define smp_mb__after_atomic_inc() smp_mb() 251 252 + #include <asm-generic/atomic-long.h> 253 #endif 254 #endif
+1
arch/arm/include/asm/bitsperlong.h
···
··· 1 + #include <asm-generic/bitsperlong.h>
+1 -1
arch/arm/include/asm/mman.h
··· 1 #ifndef __ARM_MMAN_H__ 2 #define __ARM_MMAN_H__ 3 4 - #include <asm-generic/mman.h> 5 6 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 7 #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
··· 1 #ifndef __ARM_MMAN_H__ 2 #define __ARM_MMAN_H__ 3 4 + #include <asm-generic/mman-common.h> 5 6 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 7 #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+1 -1
arch/arm/include/asm/page.h
··· 202 (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ 203 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 204 205 - #include <asm-generic/page.h> 206 207 #endif
··· 202 (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ 203 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 204 205 + #include <asm-generic/getorder.h> 206 207 #endif
+1 -1
arch/arm/include/asm/signal.h
··· 111 #define MINSIGSTKSZ 2048 112 #define SIGSTKSZ 8192 113 114 - #include <asm-generic/signal.h> 115 116 #ifdef __KERNEL__ 117 struct old_sigaction {
··· 111 #define MINSIGSTKSZ 2048 112 #define SIGSTKSZ 8192 113 114 + #include <asm-generic/signal-defs.h> 115 116 #ifdef __KERNEL__ 117 struct old_sigaction {
+1 -1
arch/avr32/include/asm/atomic.h
··· 196 #define smp_mb__before_atomic_inc() barrier() 197 #define smp_mb__after_atomic_inc() barrier() 198 199 - #include <asm-generic/atomic.h> 200 201 #endif /* __ASM_AVR32_ATOMIC_H */
··· 196 #define smp_mb__before_atomic_inc() barrier() 197 #define smp_mb__after_atomic_inc() barrier() 198 199 + #include <asm-generic/atomic-long.h> 200 201 #endif /* __ASM_AVR32_ATOMIC_H */
+1
arch/avr32/include/asm/bitsperlong.h
···
··· 1 + #include <asm-generic/bitsperlong.h>
+1 -1
arch/avr32/include/asm/mman.h
··· 1 #ifndef __ASM_AVR32_MMAN_H__ 2 #define __ASM_AVR32_MMAN_H__ 3 4 - #include <asm-generic/mman.h> 5 6 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 7 #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
··· 1 #ifndef __ASM_AVR32_MMAN_H__ 2 #define __ASM_AVR32_MMAN_H__ 3 4 + #include <asm-generic/mman-common.h> 5 6 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 7 #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+1 -1
arch/avr32/include/asm/signal.h
··· 112 #define MINSIGSTKSZ 2048 113 #define SIGSTKSZ 8192 114 115 - #include <asm-generic/signal.h> 116 117 #ifdef __KERNEL__ 118 struct old_sigaction {
··· 112 #define MINSIGSTKSZ 2048 113 #define SIGSTKSZ 8192 114 115 + #include <asm-generic/signal-defs.h> 116 117 #ifdef __KERNEL__ 118 struct old_sigaction {
+1 -1
arch/avr32/include/asm/termios.h
··· 55 */ 56 #define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" 57 58 - #include <asm-generic/termios.h> 59 60 #endif /* __KERNEL__ */ 61
··· 55 */ 56 #define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" 57 58 + #include <asm-generic/termios-base.h> 59 60 #endif /* __KERNEL__ */ 61
+1 -1
arch/blackfin/include/asm/atomic.h
··· 208 #define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) 209 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) 210 211 - #include <asm-generic/atomic.h> 212 213 #endif /* __ARCH_BLACKFIN_ATOMIC __ */
··· 208 #define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0) 209 #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) 210 211 + #include <asm-generic/atomic-long.h> 212 213 #endif /* __ARCH_BLACKFIN_ATOMIC __ */
+1
arch/blackfin/include/asm/bitsperlong.h
···
··· 1 + #include <asm-generic/bitsperlong.h>
+1 -1
arch/blackfin/include/asm/page.h
··· 81 #define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \ 82 ((void *)(kaddr) < (void *)memory_end)) 83 84 - #include <asm-generic/page.h> 85 86 #endif /* __ASSEMBLY__ */ 87
··· 81 #define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \ 82 ((void *)(kaddr) < (void *)memory_end)) 83 84 + #include <asm-generic/getorder.h> 85 86 #endif /* __ASSEMBLY__ */ 87
+1 -1
arch/blackfin/include/asm/signal.h
··· 104 #define MINSIGSTKSZ 2048 105 #define SIGSTKSZ 8192 106 107 - #include <asm-generic/signal.h> 108 109 #ifdef __KERNEL__ 110 struct old_sigaction {
··· 104 #define MINSIGSTKSZ 2048 105 #define SIGSTKSZ 8192 106 107 + #include <asm-generic/signal-defs.h> 108 109 #ifdef __KERNEL__ 110 struct old_sigaction {
+1 -1
arch/cris/include/asm/atomic.h
··· 158 #define smp_mb__before_atomic_inc() barrier() 159 #define smp_mb__after_atomic_inc() barrier() 160 161 - #include <asm-generic/atomic.h> 162 #endif
··· 158 #define smp_mb__before_atomic_inc() barrier() 159 #define smp_mb__after_atomic_inc() barrier() 160 161 + #include <asm-generic/atomic-long.h> 162 #endif
+1
arch/cris/include/asm/bitsperlong.h
···
··· 1 + #include <asm-generic/bitsperlong.h>
+1 -1
arch/cris/include/asm/mman.h
··· 3 4 /* verbatim copy of asm-i386/ version */ 5 6 - #include <asm-generic/mman.h> 7 8 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 9 #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
··· 3 4 /* verbatim copy of asm-i386/ version */ 5 6 + #include <asm-generic/mman-common.h> 7 8 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 9 #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+1 -1
arch/cris/include/asm/page.h
··· 68 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 69 70 #include <asm-generic/memory_model.h> 71 - #include <asm-generic/page.h> 72 73 #endif /* _CRIS_PAGE_H */ 74
··· 68 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 69 70 #include <asm-generic/memory_model.h> 71 + #include <asm-generic/getorder.h> 72 73 #endif /* _CRIS_PAGE_H */ 74
+1 -1
arch/cris/include/asm/signal.h
··· 106 #define MINSIGSTKSZ 2048 107 #define SIGSTKSZ 8192 108 109 - #include <asm-generic/signal.h> 110 111 #ifdef __KERNEL__ 112 struct old_sigaction {
··· 106 #define MINSIGSTKSZ 2048 107 #define SIGSTKSZ 8192 108 109 + #include <asm-generic/signal-defs.h> 110 111 #ifdef __KERNEL__ 112 struct old_sigaction {
+1 -1
arch/frv/include/asm/atomic.h
··· 194 195 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 196 197 - #include <asm-generic/atomic.h> 198 #endif /* _ASM_ATOMIC_H */
··· 194 195 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 196 197 + #include <asm-generic/atomic-long.h> 198 #endif /* _ASM_ATOMIC_H */
+1
arch/frv/include/asm/bitsperlong.h
···
··· 1 + #include <asm-generic/bitsperlong.h>
+1 -1
arch/frv/include/asm/mman.h
··· 1 #ifndef __ASM_MMAN_H__ 2 #define __ASM_MMAN_H__ 3 4 - #include <asm-generic/mman.h> 5 6 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 7 #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
··· 1 #ifndef __ASM_MMAN_H__ 2 #define __ASM_MMAN_H__ 3 4 + #include <asm-generic/mman-common.h> 5 6 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 7 #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+1 -1
arch/frv/include/asm/page.h
··· 73 #endif /* __ASSEMBLY__ */ 74 75 #include <asm-generic/memory_model.h> 76 - #include <asm-generic/page.h> 77 78 #endif /* _ASM_PAGE_H */
··· 73 #endif /* __ASSEMBLY__ */ 74 75 #include <asm-generic/memory_model.h> 76 + #include <asm-generic/getorder.h> 77 78 #endif /* _ASM_PAGE_H */
+3 -10
arch/frv/include/asm/pci.h
··· 10 * 2 of the License, or (at your option) any later version. 11 */ 12 13 - #ifndef ASM_PCI_H 14 - #define ASM_PCI_H 15 16 #include <linux/mm.h> 17 #include <asm/scatterlist.h> ··· 42 43 /* Return the index of the PCI controller for device PDEV. */ 44 #define pci_controller_num(PDEV) (0) 45 - 46 - /* The PCI address space does equal the physical memory 47 - * address space. The networking and block device layers use 48 - * this boolean for bounce buffer decisions. 49 - */ 50 - #define PCI_DMA_BUS_IS_PHYS (1) 51 52 /* pci_unmap_{page,single} is a nop so... */ 53 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) ··· 105 sg_dma_address(&sg[i])+sg_dma_len(&sg[i])); 106 } 107 108 - 109 - #endif
··· 10 * 2 of the License, or (at your option) any later version. 11 */ 12 13 + #ifndef _ASM_FRV_PCI_H 14 + #define _ASM_FRV_PCI_H 15 16 #include <linux/mm.h> 17 #include <asm/scatterlist.h> ··· 42 43 /* Return the index of the PCI controller for device PDEV. */ 44 #define pci_controller_num(PDEV) (0) 45 46 /* pci_unmap_{page,single} is a nop so... */ 47 #define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) ··· 111 sg_dma_address(&sg[i])+sg_dma_len(&sg[i])); 112 } 113 114 + #endif /* _ASM_FRV_PCI_H */
+1 -1
arch/frv/include/asm/termios.h
··· 52 /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ 53 54 #ifdef __KERNEL__ 55 - #include <asm-generic/termios.h> 56 #endif 57 58 #endif /* _ASM_TERMIOS_H */
··· 52 /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ 53 54 #ifdef __KERNEL__ 55 + #include <asm-generic/termios-base.h> 56 #endif 57 58 #endif /* _ASM_TERMIOS_H */
+1 -1
arch/h8300/include/asm/atomic.h
··· 141 #define smp_mb__before_atomic_inc() barrier() 142 #define smp_mb__after_atomic_inc() barrier() 143 144 - #include <asm-generic/atomic.h> 145 #endif /* __ARCH_H8300_ATOMIC __ */
··· 141 #define smp_mb__before_atomic_inc() barrier() 142 #define smp_mb__after_atomic_inc() barrier() 143 144 + #include <asm-generic/atomic-long.h> 145 #endif /* __ARCH_H8300_ATOMIC __ */
+1
arch/h8300/include/asm/bitsperlong.h
···
··· 1 + #include <asm-generic/bitsperlong.h>
+1 -1
arch/h8300/include/asm/mman.h
··· 1 #ifndef __H8300_MMAN_H__ 2 #define __H8300_MMAN_H__ 3 4 - #include <asm-generic/mman.h> 5 6 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 7 #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
··· 1 #ifndef __H8300_MMAN_H__ 2 #define __H8300_MMAN_H__ 3 4 + #include <asm-generic/mman-common.h> 5 6 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 7 #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+1 -1
arch/h8300/include/asm/page.h
··· 73 #endif /* __ASSEMBLY__ */ 74 75 #include <asm-generic/memory_model.h> 76 - #include <asm-generic/page.h> 77 78 #endif /* _H8300_PAGE_H */
··· 73 #endif /* __ASSEMBLY__ */ 74 75 #include <asm-generic/memory_model.h> 76 + #include <asm-generic/getorder.h> 77 78 #endif /* _H8300_PAGE_H */
+1 -1
arch/h8300/include/asm/signal.h
··· 105 #define MINSIGSTKSZ 2048 106 #define SIGSTKSZ 8192 107 108 - #include <asm-generic/signal.h> 109 110 #ifdef __KERNEL__ 111 struct old_sigaction {
··· 105 #define MINSIGSTKSZ 2048 106 #define SIGSTKSZ 8192 107 108 + #include <asm-generic/signal-defs.h> 109 110 #ifdef __KERNEL__ 111 struct old_sigaction {
+1 -1
arch/ia64/include/asm/atomic.h
··· 216 #define smp_mb__before_atomic_inc() barrier() 217 #define smp_mb__after_atomic_inc() barrier() 218 219 - #include <asm-generic/atomic.h> 220 #endif /* _ASM_IA64_ATOMIC_H */
··· 216 #define smp_mb__before_atomic_inc() barrier() 217 #define smp_mb__after_atomic_inc() barrier() 218 219 + #include <asm-generic/atomic-long.h> 220 #endif /* _ASM_IA64_ATOMIC_H */
+8
arch/ia64/include/asm/bitsperlong.h
···
··· 1 + #ifndef __ASM_IA64_BITSPERLONG_H 2 + #define __ASM_IA64_BITSPERLONG_H 3 + 4 + #define __BITS_PER_LONG 64 5 + 6 + #include <asm-generic/bitsperlong.h> 7 + 8 + #endif /* __ASM_IA64_BITSPERLONG_H */
+1 -1
arch/ia64/include/asm/mman.h
··· 8 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co 9 */ 10 11 - #include <asm-generic/mman.h> 12 13 #define MAP_GROWSDOWN 0x00100 /* stack-like segment */ 14 #define MAP_GROWSUP 0x00200 /* register stack-like segment */
··· 8 * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co 9 */ 10 11 + #include <asm-generic/mman-common.h> 12 13 #define MAP_GROWSDOWN 0x00100 /* stack-like segment */ 14 #define MAP_GROWSUP 0x00200 /* register stack-like segment */
+1 -1
arch/ia64/include/asm/signal.h
··· 114 115 #endif /* __KERNEL__ */ 116 117 - #include <asm-generic/signal.h> 118 119 # ifndef __ASSEMBLY__ 120
··· 114 115 #endif /* __KERNEL__ */ 116 117 + #include <asm-generic/signal-defs.h> 118 119 # ifndef __ASSEMBLY__ 120
-7
arch/ia64/include/asm/types.h
··· 19 # define __IA64_UL(x) (x) 20 # define __IA64_UL_CONST(x) x 21 22 - # ifdef __KERNEL__ 23 - # define BITS_PER_LONG 64 24 - # endif 25 - 26 #else 27 # define __IA64_UL(x) ((unsigned long)(x)) 28 # define __IA64_UL_CONST(x) x##UL ··· 30 */ 31 # ifdef __KERNEL__ 32 33 - #define BITS_PER_LONG 64 34 - 35 /* DMA addresses are 64-bits wide, in general. */ 36 - 37 typedef u64 dma_addr_t; 38 39 # endif /* __KERNEL__ */
··· 19 # define __IA64_UL(x) (x) 20 # define __IA64_UL_CONST(x) x 21 22 #else 23 # define __IA64_UL(x) ((unsigned long)(x)) 24 # define __IA64_UL_CONST(x) x##UL ··· 34 */ 35 # ifdef __KERNEL__ 36 37 /* DMA addresses are 64-bits wide, in general. */ 38 typedef u64 dma_addr_t; 39 40 # endif /* __KERNEL__ */
+1 -1
arch/m32r/include/asm/atomic.h
··· 314 #define smp_mb__before_atomic_inc() barrier() 315 #define smp_mb__after_atomic_inc() barrier() 316 317 - #include <asm-generic/atomic.h> 318 #endif /* _ASM_M32R_ATOMIC_H */
··· 314 #define smp_mb__before_atomic_inc() barrier() 315 #define smp_mb__after_atomic_inc() barrier() 316 317 + #include <asm-generic/atomic-long.h> 318 #endif /* _ASM_M32R_ATOMIC_H */
+1
arch/m32r/include/asm/bitsperlong.h
···
··· 1 + #include <asm-generic/bitsperlong.h>
+1 -1
arch/m32r/include/asm/mman.h
··· 1 #ifndef __M32R_MMAN_H__ 2 #define __M32R_MMAN_H__ 3 4 - #include <asm-generic/mman.h> 5 6 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 7 #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
··· 1 #ifndef __M32R_MMAN_H__ 2 #define __M32R_MMAN_H__ 3 4 + #include <asm-generic/mman-common.h> 5 6 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 7 #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+1 -1
arch/m32r/include/asm/page.h
··· 82 #define devmem_is_allowed(x) 1 83 84 #include <asm-generic/memory_model.h> 85 - #include <asm-generic/page.h> 86 87 #endif /* _ASM_M32R_PAGE_H */
··· 82 #define devmem_is_allowed(x) 1 83 84 #include <asm-generic/memory_model.h> 85 + #include <asm-generic/getorder.h> 86 87 #endif /* _ASM_M32R_PAGE_H */
-2
arch/m32r/include/asm/pci.h
··· 3 4 #include <asm-generic/pci.h> 5 6 - #define PCI_DMA_BUS_IS_PHYS (1) 7 - 8 #endif /* _ASM_M32R_PCI_H */
··· 3 4 #include <asm-generic/pci.h> 5 6 #endif /* _ASM_M32R_PCI_H */
+1 -1
arch/m32r/include/asm/signal.h
··· 107 #define MINSIGSTKSZ 2048 108 #define SIGSTKSZ 8192 109 110 - #include <asm-generic/signal.h> 111 112 #ifdef __KERNEL__ 113 struct old_sigaction {
··· 107 #define MINSIGSTKSZ 2048 108 #define SIGSTKSZ 8192 109 110 + #include <asm-generic/signal-defs.h> 111 112 #ifdef __KERNEL__ 113 struct old_sigaction {
+1 -1
arch/m68k/include/asm/atomic_mm.h
··· 192 #define smp_mb__before_atomic_inc() barrier() 193 #define smp_mb__after_atomic_inc() barrier() 194 195 - #include <asm-generic/atomic.h> 196 #endif /* __ARCH_M68K_ATOMIC __ */
··· 192 #define smp_mb__before_atomic_inc() barrier() 193 #define smp_mb__after_atomic_inc() barrier() 194 195 + #include <asm-generic/atomic-long.h> 196 #endif /* __ARCH_M68K_ATOMIC __ */
+1 -1
arch/m68k/include/asm/atomic_no.h
··· 151 #define atomic_dec_return(v) atomic_sub_return(1,(v)) 152 #define atomic_inc_return(v) atomic_add_return(1,(v)) 153 154 - #include <asm-generic/atomic.h> 155 #endif /* __ARCH_M68KNOMMU_ATOMIC __ */
··· 151 #define atomic_dec_return(v) atomic_sub_return(1,(v)) 152 #define atomic_inc_return(v) atomic_add_return(1,(v)) 153 154 + #include <asm-generic/atomic-long.h> 155 #endif /* __ARCH_M68KNOMMU_ATOMIC __ */
+1
arch/m68k/include/asm/bitsperlong.h
···
··· 1 + #include <asm-generic/bitsperlong.h>
+1 -1
arch/m68k/include/asm/mman.h
··· 1 #ifndef __M68K_MMAN_H__ 2 #define __M68K_MMAN_H__ 3 4 - #include <asm-generic/mman.h> 5 6 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 7 #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
··· 1 #ifndef __M68K_MMAN_H__ 2 #define __M68K_MMAN_H__ 3 4 + #include <asm-generic/mman-common.h> 5 6 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 7 #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+1 -1
arch/m68k/include/asm/page_mm.h
··· 223 #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ 224 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 225 226 - #include <asm-generic/page.h> 227 228 #endif /* _M68K_PAGE_H */
··· 223 #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ 224 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 225 226 + #include <asm-generic/getorder.h> 227 228 #endif /* _M68K_PAGE_H */
+1 -1
arch/m68k/include/asm/page_no.h
··· 72 73 #endif /* __ASSEMBLY__ */ 74 75 - #include <asm-generic/page.h> 76 77 #endif /* _M68KNOMMU_PAGE_H */
··· 72 73 #endif /* __ASSEMBLY__ */ 74 75 + #include <asm-generic/getorder.h> 76 77 #endif /* _M68KNOMMU_PAGE_H */
+1 -1
arch/m68k/include/asm/signal.h
··· 103 #define MINSIGSTKSZ 2048 104 #define SIGSTKSZ 8192 105 106 - #include <asm-generic/signal.h> 107 108 #ifdef __KERNEL__ 109 struct old_sigaction {
··· 103 #define MINSIGSTKSZ 2048 104 #define SIGSTKSZ 8192 105 106 + #include <asm-generic/signal-defs.h> 107 108 #ifdef __KERNEL__ 109 struct old_sigaction {
+1 -1
arch/microblaze/include/asm/atomic.h
··· 118 #define smp_mb__before_atomic_inc() barrier() 119 #define smp_mb__after_atomic_inc() barrier() 120 121 - #include <asm-generic/atomic.h> 122 123 #endif /* _ASM_MICROBLAZE_ATOMIC_H */
··· 118 #define smp_mb__before_atomic_inc() barrier() 119 #define smp_mb__after_atomic_inc() barrier() 120 121 + #include <asm-generic/atomic-long.h> 122 123 #endif /* _ASM_MICROBLAZE_ATOMIC_H */
+1
arch/microblaze/include/asm/bitsperlong.h
···
··· 1 + #include <asm-generic/bitsperlong.h>
+1 -1
arch/microblaze/include/asm/page.h
··· 215 #endif /* __KERNEL__ */ 216 217 #include <asm-generic/memory_model.h> 218 - #include <asm-generic/page.h> 219 220 #endif /* _ASM_MICROBLAZE_PAGE_H */
··· 215 #endif /* __KERNEL__ */ 216 217 #include <asm-generic/memory_model.h> 218 + #include <asm-generic/getorder.h> 219 220 #endif /* _ASM_MICROBLAZE_PAGE_H */
+1 -1
arch/microblaze/include/asm/signal.h
··· 90 91 # ifndef __ASSEMBLY__ 92 # include <linux/types.h> 93 - # include <asm-generic/signal.h> 94 95 /* Avoid too many header ordering problems. */ 96 struct siginfo;
··· 90 91 # ifndef __ASSEMBLY__ 92 # include <linux/types.h> 93 + # include <asm-generic/signal-defs.h> 94 95 /* Avoid too many header ordering problems. */ 96 struct siginfo;
+1 -1
arch/microblaze/include/asm/termios.h
··· 81 82 #ifdef __KERNEL__ 83 84 - #include <asm-generic/termios.h> 85 86 #endif /* __KERNEL__ */ 87
··· 81 82 #ifdef __KERNEL__ 83 84 + #include <asm-generic/termios-base.h> 85 86 #endif /* __KERNEL__ */ 87
+1 -1
arch/mips/include/asm/atomic.h
··· 793 #define smp_mb__before_atomic_inc() smp_llsc_mb() 794 #define smp_mb__after_atomic_inc() smp_llsc_mb() 795 796 - #include <asm-generic/atomic.h> 797 798 #endif /* _ASM_ATOMIC_H */
··· 793 #define smp_mb__before_atomic_inc() smp_llsc_mb() 794 #define smp_mb__after_atomic_inc() smp_llsc_mb() 795 796 + #include <asm-generic/atomic-long.h> 797 798 #endif /* _ASM_ATOMIC_H */
+8
arch/mips/include/asm/bitsperlong.h
···
··· 1 + #ifndef __ASM_MIPS_BITSPERLONG_H 2 + #define __ASM_MIPS_BITSPERLONG_H 3 + 4 + #define __BITS_PER_LONG _MIPS_SZLONG 5 + 6 + #include <asm-generic/bitsperlong.h> 7 + 8 + #endif /* __ASM_MIPS_BITSPERLONG_H */
+1 -1
arch/mips/include/asm/page.h
··· 189 #define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET) 190 191 #include <asm-generic/memory_model.h> 192 - #include <asm-generic/page.h> 193 194 #endif /* _ASM_PAGE_H */
··· 189 #define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET) 190 191 #include <asm-generic/memory_model.h> 192 + #include <asm-generic/getorder.h> 193 194 #endif /* _ASM_PAGE_H */
+1 -1
arch/mips/include/asm/signal.h
··· 109 #define SIG_UNBLOCK 2 /* for unblocking signals */ 110 #define SIG_SETMASK 3 /* for setting the signal mask */ 111 112 - #include <asm-generic/signal.h> 113 114 struct sigaction { 115 unsigned int sa_flags;
··· 109 #define SIG_UNBLOCK 2 /* for unblocking signals */ 110 #define SIG_SETMASK 3 /* for setting the signal mask */ 111 112 + #include <asm-generic/signal-defs.h> 113 114 struct sigaction { 115 unsigned int sa_flags;
-3
arch/mips/include/asm/types.h
··· 31 * These aren't exported outside the kernel to avoid name space clashes 32 */ 33 #ifdef __KERNEL__ 34 - 35 - #define BITS_PER_LONG _MIPS_SZLONG 36 - 37 #ifndef __ASSEMBLY__ 38 39 #if (defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) \
··· 31 * These aren't exported outside the kernel to avoid name space clashes 32 */ 33 #ifdef __KERNEL__ 34 #ifndef __ASSEMBLY__ 35 36 #if (defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) \
+1 -1
arch/mn10300/include/asm/atomic.h
··· 151 #define smp_mb__before_atomic_inc() barrier() 152 #define smp_mb__after_atomic_inc() barrier() 153 154 - #include <asm-generic/atomic.h> 155 156 #endif /* __KERNEL__ */ 157 #endif /* _ASM_ATOMIC_H */
··· 151 #define smp_mb__before_atomic_inc() barrier() 152 #define smp_mb__after_atomic_inc() barrier() 153 154 + #include <asm-generic/atomic-long.h> 155 156 #endif /* __KERNEL__ */ 157 #endif /* _ASM_ATOMIC_H */
+1
arch/mn10300/include/asm/bitsperlong.h
···
··· 1 + #include <asm-generic/bitsperlong.h>
+1 -1
arch/mn10300/include/asm/mman.h
··· 12 #ifndef _ASM_MMAN_H 13 #define _ASM_MMAN_H 14 15 - #include <asm-generic/mman.h> 16 17 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 18 #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
··· 12 #ifndef _ASM_MMAN_H 13 #define _ASM_MMAN_H 14 15 + #include <asm-generic/mman-common.h> 16 17 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 18 #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+1 -1
arch/mn10300/include/asm/signal.h
··· 115 #define MINSIGSTKSZ 2048 116 #define SIGSTKSZ 8192 117 118 - #include <asm-generic/signal.h> 119 120 #ifdef __KERNEL__ 121 struct old_sigaction {
··· 115 #define MINSIGSTKSZ 2048 116 #define SIGSTKSZ 8192 117 118 + #include <asm-generic/signal-defs.h> 119 120 #ifdef __KERNEL__ 121 struct old_sigaction {
+1 -1
arch/parisc/include/asm/atomic.h
··· 338 339 #endif /* CONFIG_64BIT */ 340 341 - #include <asm-generic/atomic.h> 342 343 #endif /* _ASM_PARISC_ATOMIC_H_ */
··· 338 339 #endif /* CONFIG_64BIT */ 340 341 + #include <asm-generic/atomic-long.h> 342 343 #endif /* _ASM_PARISC_ATOMIC_H_ */
+20
arch/parisc/include/asm/bitsperlong.h
···
··· 1 + #ifndef __ASM_PARISC_BITSPERLONG_H 2 + #define __ASM_PARISC_BITSPERLONG_H 3 + 4 + /* 5 + * using CONFIG_* outside of __KERNEL__ is wrong, 6 + * __LP64__ was also removed from headers, so what 7 + * is the right approach on parisc? 8 + * -arnd 9 + */ 10 + #if (defined(__KERNEL__) && defined(CONFIG_64BIT)) || defined (__LP64__) 11 + #define __BITS_PER_LONG 64 12 + #define SHIFT_PER_LONG 6 13 + #else 14 + #define __BITS_PER_LONG 32 15 + #define SHIFT_PER_LONG 5 16 + #endif 17 + 18 + #include <asm-generic/bitsperlong.h> 19 + 20 + #endif /* __ASM_PARISC_BITSPERLONG_H */
+1 -1
arch/parisc/include/asm/page.h
··· 159 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 160 161 #include <asm-generic/memory_model.h> 162 - #include <asm-generic/page.h> 163 164 #endif /* _PARISC_PAGE_H */
··· 159 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 160 161 #include <asm-generic/memory_model.h> 162 + #include <asm-generic/getorder.h> 163 164 #endif /* _PARISC_PAGE_H */
-8
arch/parisc/include/asm/types.h
··· 14 */ 15 #ifdef __KERNEL__ 16 17 - #ifdef CONFIG_64BIT 18 - #define BITS_PER_LONG 64 19 - #define SHIFT_PER_LONG 6 20 - #else 21 - #define BITS_PER_LONG 32 22 - #define SHIFT_PER_LONG 5 23 - #endif 24 - 25 #ifndef __ASSEMBLY__ 26 27 /* Dma addresses are 32-bits wide. */
··· 14 */ 15 #ifdef __KERNEL__ 16 17 #ifndef __ASSEMBLY__ 18 19 /* Dma addresses are 32-bits wide. */
+1 -1
arch/parisc/include/asm/uaccess.h
··· 7 #include <asm/page.h> 8 #include <asm/system.h> 9 #include <asm/cache.h> 10 - #include <asm-generic/uaccess.h> 11 12 #define VERIFY_READ 0 13 #define VERIFY_WRITE 1
··· 7 #include <asm/page.h> 8 #include <asm/system.h> 9 #include <asm/cache.h> 10 + #include <asm-generic/uaccess-unaligned.h> 11 12 #define VERIFY_READ 0 13 #define VERIFY_WRITE 1
+1 -1
arch/powerpc/include/asm/atomic.h
··· 472 473 #endif /* __powerpc64__ */ 474 475 - #include <asm-generic/atomic.h> 476 #endif /* __KERNEL__ */ 477 #endif /* _ASM_POWERPC_ATOMIC_H_ */
··· 472 473 #endif /* __powerpc64__ */ 474 475 + #include <asm-generic/atomic-long.h> 476 #endif /* __KERNEL__ */ 477 #endif /* _ASM_POWERPC_ATOMIC_H_ */
+12
arch/powerpc/include/asm/bitsperlong.h
···
··· 1 + #ifndef __ASM_POWERPC_BITSPERLONG_H 2 + #define __ASM_POWERPC_BITSPERLONG_H 3 + 4 + #if defined(__powerpc64__) 5 + # define __BITS_PER_LONG 64 6 + #else 7 + # define __BITS_PER_LONG 32 8 + #endif 9 + 10 + #include <asm-generic/bitsperlong.h> 11 + 12 + #endif /* __ASM_POWERPC_BITSPERLONG_H */
+1 -1
arch/powerpc/include/asm/mman.h
··· 1 #ifndef _ASM_POWERPC_MMAN_H 2 #define _ASM_POWERPC_MMAN_H 3 4 - #include <asm-generic/mman.h> 5 6 /* 7 * This program is free software; you can redistribute it and/or
··· 1 #ifndef _ASM_POWERPC_MMAN_H 2 #define _ASM_POWERPC_MMAN_H 3 4 + #include <asm-generic/mman-common.h> 5 6 /* 7 * This program is free software; you can redistribute it and/or
+1 -1
arch/powerpc/include/asm/page_32.h
··· 41 static inline void clear_page(void *page) { clear_pages(page, 0); } 42 extern void copy_page(void *to, void *from); 43 44 - #include <asm-generic/page.h> 45 46 #define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1) 47 #define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1)
··· 41 static inline void clear_page(void *page) { clear_pages(page, 0); } 42 extern void copy_page(void *to, void *from); 43 44 + #include <asm-generic/getorder.h> 45 46 #define PGD_T_LOG2 (__builtin_ffs(sizeof(pgd_t)) - 1) 47 #define PTE_T_LOG2 (__builtin_ffs(sizeof(pte_t)) - 1)
+1 -1
arch/powerpc/include/asm/page_64.h
··· 180 (test_thread_flag(TIF_32BIT) ? \ 181 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64) 182 183 - #include <asm-generic/page.h> 184 185 #endif /* _ASM_POWERPC_PAGE_64_H */
··· 180 (test_thread_flag(TIF_32BIT) ? \ 181 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64) 182 183 + #include <asm-generic/getorder.h> 184 185 #endif /* _ASM_POWERPC_PAGE_64_H */
+1 -1
arch/powerpc/include/asm/signal.h
··· 94 #define MINSIGSTKSZ 2048 95 #define SIGSTKSZ 8192 96 97 - #include <asm-generic/signal.h> 98 99 struct old_sigaction { 100 __sighandler_t sa_handler;
··· 94 #define MINSIGSTKSZ 2048 95 #define SIGSTKSZ 8192 96 97 + #include <asm-generic/signal-defs.h> 98 99 struct old_sigaction { 100 __sighandler_t sa_handler;
+1 -1
arch/powerpc/include/asm/termios.h
··· 78 79 #ifdef __KERNEL__ 80 81 - #include <asm-generic/termios.h> 82 83 #endif /* __KERNEL__ */ 84
··· 78 79 #ifdef __KERNEL__ 80 81 + #include <asm-generic/termios-base.h> 82 83 #endif /* __KERNEL__ */ 84
-9
arch/powerpc/include/asm/types.h
··· 40 #endif /* __ASSEMBLY__ */ 41 42 #ifdef __KERNEL__ 43 - /* 44 - * These aren't exported outside the kernel to avoid name space clashes 45 - */ 46 - #ifdef __powerpc64__ 47 - #define BITS_PER_LONG 64 48 - #else 49 - #define BITS_PER_LONG 32 50 - #endif 51 - 52 #ifndef __ASSEMBLY__ 53 54 typedef __vector128 vector128;
··· 40 #endif /* __ASSEMBLY__ */ 41 42 #ifdef __KERNEL__ 43 #ifndef __ASSEMBLY__ 44 45 typedef __vector128 vector128;
+1 -1
arch/s390/include/asm/atomic.h
··· 275 #define smp_mb__before_atomic_inc() smp_mb() 276 #define smp_mb__after_atomic_inc() smp_mb() 277 278 - #include <asm-generic/atomic.h> 279 #endif /* __KERNEL__ */ 280 #endif /* __ARCH_S390_ATOMIC__ */
··· 275 #define smp_mb__before_atomic_inc() smp_mb() 276 #define smp_mb__after_atomic_inc() smp_mb() 277 278 + #include <asm-generic/atomic-long.h> 279 #endif /* __KERNEL__ */ 280 #endif /* __ARCH_S390_ATOMIC__ */
+13
arch/s390/include/asm/bitsperlong.h
···
··· 1 + #ifndef __ASM_S390_BITSPERLONG_H 2 + #define __ASM_S390_BITSPERLONG_H 3 + 4 + #ifndef __s390x__ 5 + #define __BITS_PER_LONG 32 6 + #else 7 + #define __BITS_PER_LONG 64 8 + #endif 9 + 10 + #include <asm-generic/bitsperlong.h> 11 + 12 + #endif /* __ASM_S390_BITSPERLONG_H */ 13 +
+1 -1
arch/s390/include/asm/mman.h
··· 9 #ifndef __S390_MMAN_H__ 10 #define __S390_MMAN_H__ 11 12 - #include <asm-generic/mman.h> 13 14 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 15 #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
··· 9 #ifndef __S390_MMAN_H__ 10 #define __S390_MMAN_H__ 11 12 + #include <asm-generic/mman-common.h> 13 14 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 15 #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+1 -1
arch/s390/include/asm/page.h
··· 150 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 151 152 #include <asm-generic/memory_model.h> 153 - #include <asm-generic/page.h> 154 155 #define __HAVE_ARCH_GATE_AREA 1 156
··· 150 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 151 152 #include <asm-generic/memory_model.h> 153 + #include <asm-generic/getorder.h> 154 155 #define __HAVE_ARCH_GATE_AREA 1 156
+1 -1
arch/s390/include/asm/signal.h
··· 115 #define MINSIGSTKSZ 2048 116 #define SIGSTKSZ 8192 117 118 - #include <asm-generic/signal.h> 119 120 #ifdef __KERNEL__ 121 struct old_sigaction {
··· 115 #define MINSIGSTKSZ 2048 116 #define SIGSTKSZ 8192 117 118 + #include <asm-generic/signal-defs.h> 119 120 #ifdef __KERNEL__ 121 struct old_sigaction {
+1 -1
arch/s390/include/asm/termios.h
··· 60 #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2)) 61 #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2)) 62 63 - #include <asm-generic/termios.h> 64 65 #endif /* __KERNEL__ */ 66
··· 60 #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2)) 61 #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2)) 62 63 + #include <asm-generic/termios-base.h> 64 65 #endif /* __KERNEL__ */ 66
-6
arch/s390/include/asm/types.h
··· 28 */ 29 #ifdef __KERNEL__ 30 31 - #ifndef __s390x__ 32 - #define BITS_PER_LONG 32 33 - #else 34 - #define BITS_PER_LONG 64 35 - #endif 36 - 37 #ifndef __ASSEMBLY__ 38 39 typedef u64 dma64_addr_t;
··· 28 */ 29 #ifdef __KERNEL__ 30 31 #ifndef __ASSEMBLY__ 32 33 typedef u64 dma64_addr_t;
+1 -1
arch/sh/include/asm/atomic.h
··· 84 #define smp_mb__before_atomic_inc() barrier() 85 #define smp_mb__after_atomic_inc() barrier() 86 87 - #include <asm-generic/atomic.h> 88 #endif /* __ASM_SH_ATOMIC_H */
··· 84 #define smp_mb__before_atomic_inc() barrier() 85 #define smp_mb__after_atomic_inc() barrier() 86 87 + #include <asm-generic/atomic-long.h> 88 #endif /* __ASM_SH_ATOMIC_H */
+1
arch/sh/include/asm/bitsperlong.h
···
··· 1 + #include <asm-generic/bitsperlong.h>
+1 -1
arch/sh/include/asm/mman.h
··· 1 #ifndef __ASM_SH_MMAN_H 2 #define __ASM_SH_MMAN_H 3 4 - #include <asm-generic/mman.h> 5 6 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 7 #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
··· 1 #ifndef __ASM_SH_MMAN_H 2 #define __ASM_SH_MMAN_H 3 4 + #include <asm-generic/mman-common.h> 5 6 #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 7 #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+1 -1
arch/sh/include/asm/page.h
··· 163 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 164 165 #include <asm-generic/memory_model.h> 166 - #include <asm-generic/page.h> 167 168 /* vDSO support */ 169 #ifdef CONFIG_VSYSCALL
··· 163 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 164 165 #include <asm-generic/memory_model.h> 166 + #include <asm-generic/getorder.h> 167 168 /* vDSO support */ 169 #ifdef CONFIG_VSYSCALL
+1 -1
arch/sh/include/asm/signal.h
··· 106 #define MINSIGSTKSZ 2048 107 #define SIGSTKSZ 8192 108 109 - #include <asm-generic/signal.h> 110 111 #ifdef __KERNEL__ 112 struct old_sigaction {
··· 106 #define MINSIGSTKSZ 2048 107 #define SIGSTKSZ 8192 108 109 + #include <asm-generic/signal-defs.h> 110 111 #ifdef __KERNEL__ 112 struct old_sigaction {
+1 -1
arch/sparc/include/asm/atomic_32.h
··· 161 162 #endif /* !(__KERNEL__) */ 163 164 - #include <asm-generic/atomic.h> 165 #endif /* !(__ARCH_SPARC_ATOMIC__) */
··· 161 162 #endif /* !(__KERNEL__) */ 163 164 + #include <asm-generic/atomic-long.h> 165 #endif /* !(__ARCH_SPARC_ATOMIC__) */
+1 -1
arch/sparc/include/asm/atomic_64.h
··· 114 #define smp_mb__before_atomic_inc() barrier() 115 #define smp_mb__after_atomic_inc() barrier() 116 117 - #include <asm-generic/atomic.h> 118 #endif /* !(__ARCH_SPARC64_ATOMIC__) */
··· 114 #define smp_mb__before_atomic_inc() barrier() 115 #define smp_mb__after_atomic_inc() barrier() 116 117 + #include <asm-generic/atomic-long.h> 118 #endif /* !(__ARCH_SPARC64_ATOMIC__) */
+13
arch/sparc/include/asm/bitsperlong.h
···
··· 1 + #ifndef __ASM_ALPHA_BITSPERLONG_H 2 + #define __ASM_ALPHA_BITSPERLONG_H 3 + 4 + #if defined(__sparc__) && defined(__arch64__) 5 + #define __BITS_PER_LONG 64 6 + #else 7 + #define __BITS_PER_LONG 32 8 + #endif 9 + 10 + #include <asm-generic/bitsperlong.h> 11 + 12 + #endif /* __ASM_ALPHA_BITSPERLONG_H */ 13 +
+1 -1
arch/sparc/include/asm/mman.h
··· 1 #ifndef __SPARC_MMAN_H__ 2 #define __SPARC_MMAN_H__ 3 4 - #include <asm-generic/mman.h> 5 6 /* SunOS'ified... */ 7
··· 1 #ifndef __SPARC_MMAN_H__ 2 #define __SPARC_MMAN_H__ 3 4 + #include <asm-generic/mman-common.h> 5 6 /* SunOS'ified... */ 7
+1 -1
arch/sparc/include/asm/page_32.h
··· 152 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 153 154 #include <asm-generic/memory_model.h> 155 - #include <asm-generic/page.h> 156 157 #endif /* _SPARC_PAGE_H */
··· 152 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 153 154 #include <asm-generic/memory_model.h> 155 + #include <asm-generic/getorder.h> 156 157 #endif /* _SPARC_PAGE_H */
+1 -1
arch/sparc/include/asm/page_64.h
··· 132 #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ 133 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 134 135 - #include <asm-generic/page.h> 136 137 #endif /* _SPARC64_PAGE_H */
··· 132 #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ 133 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 134 135 + #include <asm-generic/getorder.h> 136 137 #endif /* _SPARC64_PAGE_H */
+1 -1
arch/sparc/include/asm/signal.h
··· 176 #define SA_STATIC_ALLOC 0x8000 177 #endif 178 179 - #include <asm-generic/signal.h> 180 181 struct __new_sigaction { 182 __sighandler_t sa_handler;
··· 176 #define SA_STATIC_ALLOC 0x8000 177 #endif 178 179 + #include <asm-generic/signal-defs.h> 180 181 struct __new_sigaction { 182 __sighandler_t sa_handler;
-4
arch/sparc/include/asm/types.h
··· 21 22 #ifdef __KERNEL__ 23 24 - #define BITS_PER_LONG 64 25 - 26 #ifndef __ASSEMBLY__ 27 28 /* Dma addresses come in generic and 64-bit flavours. */ ··· 43 #endif /* __ASSEMBLY__ */ 44 45 #ifdef __KERNEL__ 46 - 47 - #define BITS_PER_LONG 32 48 49 #ifndef __ASSEMBLY__ 50
··· 21 22 #ifdef __KERNEL__ 23 24 #ifndef __ASSEMBLY__ 25 26 /* Dma addresses come in generic and 64-bit flavours. */ ··· 45 #endif /* __ASSEMBLY__ */ 46 47 #ifdef __KERNEL__ 48 49 #ifndef __ASSEMBLY__ 50
+1 -1
arch/sparc/include/asm/uaccess_64.h
··· 12 #include <asm/asi.h> 13 #include <asm/system.h> 14 #include <asm/spitfire.h> 15 - #include <asm-generic/uaccess.h> 16 #endif 17 18 #ifndef __ASSEMBLY__
··· 12 #include <asm/asi.h> 13 #include <asm/system.h> 14 #include <asm/spitfire.h> 15 + #include <asm-generic/uaccess-unaligned.h> 16 #endif 17 18 #ifndef __ASSEMBLY__
+1 -1
arch/um/include/asm/page.h
··· 116 #define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v))) 117 118 #include <asm-generic/memory_model.h> 119 - #include <asm-generic/page.h> 120 121 #endif /* __ASSEMBLY__ */ 122 #endif /* __UM_PAGE_H */
··· 116 #define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v))) 117 118 #include <asm-generic/memory_model.h> 119 + #include <asm-generic/getorder.h> 120 121 #endif /* __ASSEMBLY__ */ 122 #endif /* __UM_PAGE_H */
+1 -1
arch/x86/include/asm/atomic_32.h
··· 483 return old_val < 0; 484 } 485 486 - #include <asm-generic/atomic.h> 487 #endif /* _ASM_X86_ATOMIC_32_H */
··· 483 return old_val < 0; 484 } 485 486 + #include <asm-generic/atomic-long.h> 487 #endif /* _ASM_X86_ATOMIC_32_H */
+1 -1
arch/x86/include/asm/atomic_64.h
··· 455 #define smp_mb__before_atomic_inc() barrier() 456 #define smp_mb__after_atomic_inc() barrier() 457 458 - #include <asm-generic/atomic.h> 459 #endif /* _ASM_X86_ATOMIC_64_H */
··· 455 #define smp_mb__before_atomic_inc() barrier() 456 #define smp_mb__after_atomic_inc() barrier() 457 458 + #include <asm-generic/atomic-long.h> 459 #endif /* _ASM_X86_ATOMIC_64_H */
+13
arch/x86/include/asm/bitsperlong.h
···
··· 1 + #ifndef __ASM_X86_BITSPERLONG_H 2 + #define __ASM_X86_BITSPERLONG_H 3 + 4 + #ifdef __x86_64__ 5 + # define __BITS_PER_LONG 64 6 + #else 7 + # define __BITS_PER_LONG 32 8 + #endif 9 + 10 + #include <asm-generic/bitsperlong.h> 11 + 12 + #endif /* __ASM_X86_BITSPERLONG_H */ 13 +
+1 -1
arch/x86/include/asm/mman.h
··· 1 #ifndef _ASM_X86_MMAN_H 2 #define _ASM_X86_MMAN_H 3 4 - #include <asm-generic/mman.h> 5 6 #define MAP_32BIT 0x40 /* only give out 32bit addresses */ 7
··· 1 #ifndef _ASM_X86_MMAN_H 2 #define _ASM_X86_MMAN_H 3 4 + #include <asm-generic/mman-common.h> 5 6 #define MAP_32BIT 0x40 /* only give out 32bit addresses */ 7
+1 -1
arch/x86/include/asm/page.h
··· 56 #endif /* __ASSEMBLY__ */ 57 58 #include <asm-generic/memory_model.h> 59 - #include <asm-generic/page.h> 60 61 #define __HAVE_ARCH_GATE_AREA 1 62
··· 56 #endif /* __ASSEMBLY__ */ 57 58 #include <asm-generic/memory_model.h> 59 + #include <asm-generic/getorder.h> 60 61 #define __HAVE_ARCH_GATE_AREA 1 62
+1 -1
arch/x86/include/asm/signal.h
··· 117 #define MINSIGSTKSZ 2048 118 #define SIGSTKSZ 8192 119 120 - #include <asm-generic/signal.h> 121 122 #ifndef __ASSEMBLY__ 123
··· 117 #define MINSIGSTKSZ 2048 118 #define SIGSTKSZ 8192 119 120 + #include <asm-generic/signal-defs.h> 121 122 #ifndef __ASSEMBLY__ 123
-6
arch/x86/include/asm/types.h
··· 14 */ 15 #ifdef __KERNEL__ 16 17 - #ifdef CONFIG_X86_32 18 - # define BITS_PER_LONG 32 19 - #else 20 - # define BITS_PER_LONG 64 21 - #endif 22 - 23 #ifndef __ASSEMBLY__ 24 25 typedef u64 dma64_addr_t;
··· 14 */ 15 #ifdef __KERNEL__ 16 17 #ifndef __ASSEMBLY__ 18 19 typedef u64 dma64_addr_t;
+1 -1
arch/xtensa/include/asm/atomic.h
··· 292 #define smp_mb__before_atomic_inc() barrier() 293 #define smp_mb__after_atomic_inc() barrier() 294 295 - #include <asm-generic/atomic.h> 296 #endif /* __KERNEL__ */ 297 298 #endif /* _XTENSA_ATOMIC_H */
··· 292 #define smp_mb__before_atomic_inc() barrier() 293 #define smp_mb__after_atomic_inc() barrier() 294 295 + #include <asm-generic/atomic-long.h> 296 #endif /* __KERNEL__ */ 297 298 #endif /* _XTENSA_ATOMIC_H */
+1
arch/xtensa/include/asm/bitsperlong.h
···
··· 1 + #include <asm-generic/bitsperlong.h>
+1 -1
arch/xtensa/include/asm/page.h
··· 129 130 #else 131 132 - # include <asm-generic/page.h> 133 134 #endif 135
··· 129 130 #else 131 132 + # include <asm-generic/getorder.h> 133 134 #endif 135
+22
include/asm-generic/Kbuild
··· 1 header-y += errno-base.h 2 header-y += errno.h 3 header-y += fcntl.h 4 header-y += ioctl.h 5 header-y += mman.h 6 header-y += poll.h 7 header-y += signal.h 8 header-y += statfs.h 9 10 unifdef-y += int-l64.h 11 unifdef-y += int-ll64.h
··· 1 + header-y += auxvec.h 2 + header-y += bitsperlong.h 3 header-y += errno-base.h 4 header-y += errno.h 5 header-y += fcntl.h 6 header-y += ioctl.h 7 + header-y += ioctls.h 8 + header-y += ipcbuf.h 9 + header-y += mman-common.h 10 header-y += mman.h 11 + header-y += msgbuf.h 12 + header-y += param.h 13 header-y += poll.h 14 + header-y += posix_types.h 15 + header-y += sembuf.h 16 + header-y += setup.h 17 + header-y += shmbuf.h 18 + header-y += shmparam.h 19 + header-y += signal-defs.h 20 header-y += signal.h 21 + header-y += socket.h 22 + header-y += sockios.h 23 + header-y += stat.h 24 header-y += statfs.h 25 + header-y += swab.h 26 + header-y += termbits.h 27 + header-y += termios.h 28 + header-y += types.h 29 + header-y += ucontext.h 30 + header-y += unistd.h 31 32 unifdef-y += int-l64.h 33 unifdef-y += int-ll64.h
+1
include/asm-generic/Kbuild.asm
··· 9 endif 10 unifdef-y += auxvec.h 11 unifdef-y += byteorder.h 12 unifdef-y += errno.h 13 unifdef-y += fcntl.h 14 unifdef-y += ioctl.h
··· 9 endif 10 unifdef-y += auxvec.h 11 unifdef-y += byteorder.h 12 + unifdef-y += bitsperlong.h 13 unifdef-y += errno.h 14 unifdef-y += fcntl.h 15 unifdef-y += ioctl.h
+258
include/asm-generic/atomic-long.h
···
··· 1 + #ifndef _ASM_GENERIC_ATOMIC_LONG_H 2 + #define _ASM_GENERIC_ATOMIC_LONG_H 3 + /* 4 + * Copyright (C) 2005 Silicon Graphics, Inc. 5 + * Christoph Lameter 6 + * 7 + * Allows to provide arch independent atomic definitions without the need to 8 + * edit all arch specific atomic.h files. 9 + */ 10 + 11 + #include <asm/types.h> 12 + 13 + /* 14 + * Suppport for atomic_long_t 15 + * 16 + * Casts for parameters are avoided for existing atomic functions in order to 17 + * avoid issues with cast-as-lval under gcc 4.x and other limitations that the 18 + * macros of a platform may have. 19 + */ 20 + 21 + #if BITS_PER_LONG == 64 22 + 23 + typedef atomic64_t atomic_long_t; 24 + 25 + #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) 26 + 27 + static inline long atomic_long_read(atomic_long_t *l) 28 + { 29 + atomic64_t *v = (atomic64_t *)l; 30 + 31 + return (long)atomic64_read(v); 32 + } 33 + 34 + static inline void atomic_long_set(atomic_long_t *l, long i) 35 + { 36 + atomic64_t *v = (atomic64_t *)l; 37 + 38 + atomic64_set(v, i); 39 + } 40 + 41 + static inline void atomic_long_inc(atomic_long_t *l) 42 + { 43 + atomic64_t *v = (atomic64_t *)l; 44 + 45 + atomic64_inc(v); 46 + } 47 + 48 + static inline void atomic_long_dec(atomic_long_t *l) 49 + { 50 + atomic64_t *v = (atomic64_t *)l; 51 + 52 + atomic64_dec(v); 53 + } 54 + 55 + static inline void atomic_long_add(long i, atomic_long_t *l) 56 + { 57 + atomic64_t *v = (atomic64_t *)l; 58 + 59 + atomic64_add(i, v); 60 + } 61 + 62 + static inline void atomic_long_sub(long i, atomic_long_t *l) 63 + { 64 + atomic64_t *v = (atomic64_t *)l; 65 + 66 + atomic64_sub(i, v); 67 + } 68 + 69 + static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) 70 + { 71 + atomic64_t *v = (atomic64_t *)l; 72 + 73 + return atomic64_sub_and_test(i, v); 74 + } 75 + 76 + static inline int atomic_long_dec_and_test(atomic_long_t *l) 77 + { 78 + atomic64_t *v = (atomic64_t *)l; 79 + 80 + return atomic64_dec_and_test(v); 81 + } 82 + 83 + static inline 
int atomic_long_inc_and_test(atomic_long_t *l) 84 + { 85 + atomic64_t *v = (atomic64_t *)l; 86 + 87 + return atomic64_inc_and_test(v); 88 + } 89 + 90 + static inline int atomic_long_add_negative(long i, atomic_long_t *l) 91 + { 92 + atomic64_t *v = (atomic64_t *)l; 93 + 94 + return atomic64_add_negative(i, v); 95 + } 96 + 97 + static inline long atomic_long_add_return(long i, atomic_long_t *l) 98 + { 99 + atomic64_t *v = (atomic64_t *)l; 100 + 101 + return (long)atomic64_add_return(i, v); 102 + } 103 + 104 + static inline long atomic_long_sub_return(long i, atomic_long_t *l) 105 + { 106 + atomic64_t *v = (atomic64_t *)l; 107 + 108 + return (long)atomic64_sub_return(i, v); 109 + } 110 + 111 + static inline long atomic_long_inc_return(atomic_long_t *l) 112 + { 113 + atomic64_t *v = (atomic64_t *)l; 114 + 115 + return (long)atomic64_inc_return(v); 116 + } 117 + 118 + static inline long atomic_long_dec_return(atomic_long_t *l) 119 + { 120 + atomic64_t *v = (atomic64_t *)l; 121 + 122 + return (long)atomic64_dec_return(v); 123 + } 124 + 125 + static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) 126 + { 127 + atomic64_t *v = (atomic64_t *)l; 128 + 129 + return (long)atomic64_add_unless(v, a, u); 130 + } 131 + 132 + #define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l)) 133 + 134 + #define atomic_long_cmpxchg(l, old, new) \ 135 + (atomic64_cmpxchg((atomic64_t *)(l), (old), (new))) 136 + #define atomic_long_xchg(v, new) \ 137 + (atomic64_xchg((atomic64_t *)(v), (new))) 138 + 139 + #else /* BITS_PER_LONG == 64 */ 140 + 141 + typedef atomic_t atomic_long_t; 142 + 143 + #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) 144 + static inline long atomic_long_read(atomic_long_t *l) 145 + { 146 + atomic_t *v = (atomic_t *)l; 147 + 148 + return (long)atomic_read(v); 149 + } 150 + 151 + static inline void atomic_long_set(atomic_long_t *l, long i) 152 + { 153 + atomic_t *v = (atomic_t *)l; 154 + 155 + atomic_set(v, i); 156 + } 157 + 158 + static 
inline void atomic_long_inc(atomic_long_t *l) 159 + { 160 + atomic_t *v = (atomic_t *)l; 161 + 162 + atomic_inc(v); 163 + } 164 + 165 + static inline void atomic_long_dec(atomic_long_t *l) 166 + { 167 + atomic_t *v = (atomic_t *)l; 168 + 169 + atomic_dec(v); 170 + } 171 + 172 + static inline void atomic_long_add(long i, atomic_long_t *l) 173 + { 174 + atomic_t *v = (atomic_t *)l; 175 + 176 + atomic_add(i, v); 177 + } 178 + 179 + static inline void atomic_long_sub(long i, atomic_long_t *l) 180 + { 181 + atomic_t *v = (atomic_t *)l; 182 + 183 + atomic_sub(i, v); 184 + } 185 + 186 + static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) 187 + { 188 + atomic_t *v = (atomic_t *)l; 189 + 190 + return atomic_sub_and_test(i, v); 191 + } 192 + 193 + static inline int atomic_long_dec_and_test(atomic_long_t *l) 194 + { 195 + atomic_t *v = (atomic_t *)l; 196 + 197 + return atomic_dec_and_test(v); 198 + } 199 + 200 + static inline int atomic_long_inc_and_test(atomic_long_t *l) 201 + { 202 + atomic_t *v = (atomic_t *)l; 203 + 204 + return atomic_inc_and_test(v); 205 + } 206 + 207 + static inline int atomic_long_add_negative(long i, atomic_long_t *l) 208 + { 209 + atomic_t *v = (atomic_t *)l; 210 + 211 + return atomic_add_negative(i, v); 212 + } 213 + 214 + static inline long atomic_long_add_return(long i, atomic_long_t *l) 215 + { 216 + atomic_t *v = (atomic_t *)l; 217 + 218 + return (long)atomic_add_return(i, v); 219 + } 220 + 221 + static inline long atomic_long_sub_return(long i, atomic_long_t *l) 222 + { 223 + atomic_t *v = (atomic_t *)l; 224 + 225 + return (long)atomic_sub_return(i, v); 226 + } 227 + 228 + static inline long atomic_long_inc_return(atomic_long_t *l) 229 + { 230 + atomic_t *v = (atomic_t *)l; 231 + 232 + return (long)atomic_inc_return(v); 233 + } 234 + 235 + static inline long atomic_long_dec_return(atomic_long_t *l) 236 + { 237 + atomic_t *v = (atomic_t *)l; 238 + 239 + return (long)atomic_dec_return(v); 240 + } 241 + 242 + static inline long 
atomic_long_add_unless(atomic_long_t *l, long a, long u) 243 + { 244 + atomic_t *v = (atomic_t *)l; 245 + 246 + return (long)atomic_add_unless(v, a, u); 247 + } 248 + 249 + #define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l)) 250 + 251 + #define atomic_long_cmpxchg(l, old, new) \ 252 + (atomic_cmpxchg((atomic_t *)(l), (old), (new))) 253 + #define atomic_long_xchg(v, new) \ 254 + (atomic_xchg((atomic_t *)(v), (new))) 255 + 256 + #endif /* BITS_PER_LONG == 64 */ 257 + 258 + #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
+124 -217
include/asm-generic/atomic.h
··· 1 - #ifndef _ASM_GENERIC_ATOMIC_H 2 - #define _ASM_GENERIC_ATOMIC_H 3 /* 4 - * Copyright (C) 2005 Silicon Graphics, Inc. 5 - * Christoph Lameter 6 * 7 - * Allows to provide arch independent atomic definitions without the need to 8 - * edit all arch specific atomic.h files. 9 */ 10 11 - #include <asm/types.h> 12 13 - /* 14 - * Suppport for atomic_long_t 15 * 16 - * Casts for parameters are avoided for existing atomic functions in order to 17 - * avoid issues with cast-as-lval under gcc 4.x and other limitations that the 18 - * macros of a platform may have. 19 */ 20 21 - #if BITS_PER_LONG == 64 22 23 - typedef atomic64_t atomic_long_t; 24 25 - #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) 26 - 27 - static inline long atomic_long_read(atomic_long_t *l) 28 { 29 - atomic64_t *v = (atomic64_t *)l; 30 31 - return (long)atomic64_read(v); 32 } 33 34 - static inline void atomic_long_set(atomic_long_t *l, long i) 35 { 36 - atomic64_t *v = (atomic64_t *)l; 37 38 - atomic64_set(v, i); 39 } 40 41 - static inline void atomic_long_inc(atomic_long_t *l) 42 { 43 - atomic64_t *v = (atomic64_t *)l; 44 - 45 - atomic64_inc(v); 46 } 47 48 - static inline void atomic_long_dec(atomic_long_t *l) 49 { 50 - atomic64_t *v = (atomic64_t *)l; 51 - 52 - atomic64_dec(v); 53 } 54 55 - static inline void atomic_long_add(long i, atomic_long_t *l) 56 { 57 - atomic64_t *v = (atomic64_t *)l; 58 - 59 - atomic64_add(i, v); 60 } 61 62 - static inline void atomic_long_sub(long i, atomic_long_t *l) 63 { 64 - atomic64_t *v = (atomic64_t *)l; 65 - 66 - atomic64_sub(i, v); 67 } 68 69 - static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) 70 { 71 - atomic64_t *v = (atomic64_t *)l; 72 - 73 - return atomic64_sub_and_test(i, v); 74 } 75 76 - static inline int atomic_long_dec_and_test(atomic_long_t *l) 77 - { 78 - atomic64_t *v = (atomic64_t *)l; 79 80 - return atomic64_dec_and_test(v); 81 } 82 83 - static inline int atomic_long_inc_and_test(atomic_long_t *l) 84 - { 85 - atomic64_t *v = 
(atomic64_t *)l; 86 87 - return atomic64_inc_and_test(v); 88 - } 89 90 - static inline int atomic_long_add_negative(long i, atomic_long_t *l) 91 - { 92 - atomic64_t *v = (atomic64_t *)l; 93 94 - return atomic64_add_negative(i, v); 95 - } 96 97 - static inline long atomic_long_add_return(long i, atomic_long_t *l) 98 - { 99 - atomic64_t *v = (atomic64_t *)l; 100 101 - return (long)atomic64_add_return(i, v); 102 - } 103 - 104 - static inline long atomic_long_sub_return(long i, atomic_long_t *l) 105 - { 106 - atomic64_t *v = (atomic64_t *)l; 107 - 108 - return (long)atomic64_sub_return(i, v); 109 - } 110 - 111 - static inline long atomic_long_inc_return(atomic_long_t *l) 112 - { 113 - atomic64_t *v = (atomic64_t *)l; 114 - 115 - return (long)atomic64_inc_return(v); 116 - } 117 - 118 - static inline long atomic_long_dec_return(atomic_long_t *l) 119 - { 120 - atomic64_t *v = (atomic64_t *)l; 121 - 122 - return (long)atomic64_dec_return(v); 123 - } 124 - 125 - static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) 126 - { 127 - atomic64_t *v = (atomic64_t *)l; 128 - 129 - return (long)atomic64_add_unless(v, a, u); 130 - } 131 - 132 - #define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l)) 133 - 134 - #define atomic_long_cmpxchg(l, old, new) \ 135 - (atomic64_cmpxchg((atomic64_t *)(l), (old), (new))) 136 - #define atomic_long_xchg(v, new) \ 137 - (atomic64_xchg((atomic64_t *)(v), (new))) 138 - 139 - #else /* BITS_PER_LONG == 64 */ 140 - 141 - typedef atomic_t atomic_long_t; 142 - 143 - #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) 144 - static inline long atomic_long_read(atomic_long_t *l) 145 - { 146 - atomic_t *v = (atomic_t *)l; 147 - 148 - return (long)atomic_read(v); 149 - } 150 - 151 - static inline void atomic_long_set(atomic_long_t *l, long i) 152 - { 153 - atomic_t *v = (atomic_t *)l; 154 - 155 - atomic_set(v, i); 156 - } 157 - 158 - static inline void atomic_long_inc(atomic_long_t *l) 159 - { 160 - atomic_t *v = (atomic_t 
*)l; 161 - 162 - atomic_inc(v); 163 - } 164 - 165 - static inline void atomic_long_dec(atomic_long_t *l) 166 - { 167 - atomic_t *v = (atomic_t *)l; 168 - 169 - atomic_dec(v); 170 - } 171 - 172 - static inline void atomic_long_add(long i, atomic_long_t *l) 173 - { 174 - atomic_t *v = (atomic_t *)l; 175 - 176 - atomic_add(i, v); 177 - } 178 - 179 - static inline void atomic_long_sub(long i, atomic_long_t *l) 180 - { 181 - atomic_t *v = (atomic_t *)l; 182 - 183 - atomic_sub(i, v); 184 - } 185 - 186 - static inline int atomic_long_sub_and_test(long i, atomic_long_t *l) 187 - { 188 - atomic_t *v = (atomic_t *)l; 189 - 190 - return atomic_sub_and_test(i, v); 191 - } 192 - 193 - static inline int atomic_long_dec_and_test(atomic_long_t *l) 194 - { 195 - atomic_t *v = (atomic_t *)l; 196 - 197 - return atomic_dec_and_test(v); 198 - } 199 - 200 - static inline int atomic_long_inc_and_test(atomic_long_t *l) 201 - { 202 - atomic_t *v = (atomic_t *)l; 203 - 204 - return atomic_inc_and_test(v); 205 - } 206 - 207 - static inline int atomic_long_add_negative(long i, atomic_long_t *l) 208 - { 209 - atomic_t *v = (atomic_t *)l; 210 - 211 - return atomic_add_negative(i, v); 212 - } 213 - 214 - static inline long atomic_long_add_return(long i, atomic_long_t *l) 215 - { 216 - atomic_t *v = (atomic_t *)l; 217 - 218 - return (long)atomic_add_return(i, v); 219 - } 220 - 221 - static inline long atomic_long_sub_return(long i, atomic_long_t *l) 222 - { 223 - atomic_t *v = (atomic_t *)l; 224 - 225 - return (long)atomic_sub_return(i, v); 226 - } 227 - 228 - static inline long atomic_long_inc_return(atomic_long_t *l) 229 - { 230 - atomic_t *v = (atomic_t *)l; 231 - 232 - return (long)atomic_inc_return(v); 233 - } 234 - 235 - static inline long atomic_long_dec_return(atomic_long_t *l) 236 - { 237 - atomic_t *v = (atomic_t *)l; 238 - 239 - return (long)atomic_dec_return(v); 240 - } 241 - 242 - static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) 243 - { 244 - atomic_t *v = 
(atomic_t *)l; 245 - 246 - return (long)atomic_add_unless(v, a, u); 247 - } 248 - 249 - #define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l)) 250 - 251 - #define atomic_long_cmpxchg(l, old, new) \ 252 - (atomic_cmpxchg((atomic_t *)(l), (old), (new))) 253 - #define atomic_long_xchg(v, new) \ 254 - (atomic_xchg((atomic_t *)(v), (new))) 255 - 256 - #endif /* BITS_PER_LONG == 64 */ 257 - 258 - #endif /* _ASM_GENERIC_ATOMIC_H */
··· 1 /* 2 + * Generic C implementation of atomic counter operations 3 + * Originally implemented for MN10300. 4 * 5 + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 6 + * Written by David Howells (dhowells@redhat.com) 7 + * 8 + * This program is free software; you can redistribute it and/or 9 + * modify it under the terms of the GNU General Public Licence 10 + * as published by the Free Software Foundation; either version 11 + * 2 of the Licence, or (at your option) any later version. 12 + */ 13 + #ifndef __ASM_GENERIC_ATOMIC_H 14 + #define __ASM_GENERIC_ATOMIC_H 15 + 16 + #ifdef CONFIG_SMP 17 + #error not SMP safe 18 + #endif 19 + 20 + /* 21 + * Atomic operations that C can't guarantee us. Useful for 22 + * resource counting etc.. 23 */ 24 25 + #define ATOMIC_INIT(i) { (i) } 26 27 + #ifdef __KERNEL__ 28 + 29 + /** 30 + * atomic_read - read atomic variable 31 + * @v: pointer of type atomic_t 32 * 33 + * Atomically reads the value of @v. Note that the guaranteed 34 + * useful range of an atomic_t is only 24 bits. 35 */ 36 + #define atomic_read(v) ((v)->counter) 37 38 + /** 39 + * atomic_set - set atomic variable 40 + * @v: pointer of type atomic_t 41 + * @i: required value 42 + * 43 + * Atomically sets the value of @v to @i. Note that the guaranteed 44 + * useful range of an atomic_t is only 24 bits. 45 + */ 46 + #define atomic_set(v, i) (((v)->counter) = (i)) 47 48 + #include <asm/system.h> 49 50 + /** 51 + * atomic_add_return - add integer to atomic variable 52 + * @i: integer value to add 53 + * @v: pointer of type atomic_t 54 + * 55 + * Atomically adds @i to @v and returns the result 56 + * Note that the guaranteed useful range of an atomic_t is only 24 bits. 
57 + */ 58 + static inline int atomic_add_return(int i, atomic_t *v) 59 { 60 + unsigned long flags; 61 + int temp; 62 63 + local_irq_save(flags); 64 + temp = v->counter; 65 + temp += i; 66 + v->counter = temp; 67 + local_irq_restore(flags); 68 + 69 + return temp; 70 } 71 72 + /** 73 + * atomic_sub_return - subtract integer from atomic variable 74 + * @i: integer value to subtract 75 + * @v: pointer of type atomic_t 76 + * 77 + * Atomically subtracts @i from @v and returns the result 78 + * Note that the guaranteed useful range of an atomic_t is only 24 bits. 79 + */ 80 + static inline int atomic_sub_return(int i, atomic_t *v) 81 { 82 + unsigned long flags; 83 + int temp; 84 85 + local_irq_save(flags); 86 + temp = v->counter; 87 + temp -= i; 88 + v->counter = temp; 89 + local_irq_restore(flags); 90 + 91 + return temp; 92 } 93 94 + static inline int atomic_add_negative(int i, atomic_t *v) 95 { 96 + return atomic_add_return(i, v) < 0; 97 } 98 99 + static inline void atomic_add(int i, atomic_t *v) 100 { 101 + atomic_add_return(i, v); 102 } 103 104 + static inline void atomic_sub(int i, atomic_t *v) 105 { 106 + atomic_sub_return(i, v); 107 } 108 109 + static inline void atomic_inc(atomic_t *v) 110 { 111 + atomic_add_return(1, v); 112 } 113 114 + static inline void atomic_dec(atomic_t *v) 115 { 116 + atomic_sub_return(1, v); 117 } 118 119 + #define atomic_dec_return(v) atomic_sub_return(1, (v)) 120 + #define atomic_inc_return(v) atomic_add_return(1, (v)) 121 122 + #define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0) 123 + #define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0) 124 + #define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0) 125 + 126 + #define atomic_add_unless(v, a, u) \ 127 + ({ \ 128 + int c, old; \ 129 + c = atomic_read(v); \ 130 + while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ 131 + c = old; \ 132 + c != (u); \ 133 + }) 134 + 135 + #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) 136 + 137 
+ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr) 138 + { 139 + unsigned long flags; 140 + 141 + mask = ~mask; 142 + local_irq_save(flags); 143 + *addr &= mask; 144 + local_irq_restore(flags); 145 } 146 147 + #define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v))) 148 + #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new))) 149 150 + #define cmpxchg_local(ptr, o, n) \ 151 + ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ 152 + (unsigned long)(n), sizeof(*(ptr)))) 153 154 + #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) 155 156 + /* Assume that atomic operations are already serializing */ 157 + #define smp_mb__before_atomic_dec() barrier() 158 + #define smp_mb__after_atomic_dec() barrier() 159 + #define smp_mb__before_atomic_inc() barrier() 160 + #define smp_mb__after_atomic_inc() barrier() 161 162 + #include <asm-generic/atomic-long.h> 163 164 + #endif /* __KERNEL__ */ 165 + #endif /* __ASM_GENERIC_ATOMIC_H */
+8
include/asm-generic/auxvec.h
···
··· 1 + #ifndef __ASM_GENERIC_AUXVEC_H 2 + #define __ASM_GENERIC_AUXVEC_H 3 + /* 4 + * Not all architectures need their own auxvec.h, the most 5 + * common definitions are already in linux/auxvec.h. 6 + */ 7 + 8 + #endif /* __ASM_GENERIC_AUXVEC_H */
+18 -6
include/asm-generic/bitops.h
··· 1 - #ifndef _ASM_GENERIC_BITOPS_H_ 2 - #define _ASM_GENERIC_BITOPS_H_ 3 4 /* 5 * For the benefit of those who are trying to port Linux to another 6 * architecture, here are some C-language equivalents. You should 7 * recode these in the native assembly language, if at all possible. 8 - * 9 * C language equivalents written by Theodore Ts'o, 9/26/92 10 */ 11 12 - #include <asm-generic/bitops/atomic.h> 13 - #include <asm-generic/bitops/non-atomic.h> 14 #include <asm-generic/bitops/__ffs.h> 15 #include <asm-generic/bitops/ffz.h> 16 #include <asm-generic/bitops/fls.h> 17 #include <asm-generic/bitops/fls64.h> 18 #include <asm-generic/bitops/find.h> 19 ··· 36 #include <asm-generic/bitops/hweight.h> 37 #include <asm-generic/bitops/lock.h> 38 39 #include <asm-generic/bitops/ext2-non-atomic.h> 40 #include <asm-generic/bitops/ext2-atomic.h> 41 #include <asm-generic/bitops/minix.h> 42 43 - #endif /* _ASM_GENERIC_BITOPS_H */
··· 1 + #ifndef __ASM_GENERIC_BITOPS_H 2 + #define __ASM_GENERIC_BITOPS_H 3 4 /* 5 * For the benefit of those who are trying to port Linux to another 6 * architecture, here are some C-language equivalents. You should 7 * recode these in the native assembly language, if at all possible. 8 + * 9 * C language equivalents written by Theodore Ts'o, 9/26/92 10 */ 11 12 + #include <linux/irqflags.h> 13 + #include <linux/compiler.h> 14 + 15 + /* 16 + * clear_bit may not imply a memory barrier 17 + */ 18 + #ifndef smp_mb__before_clear_bit 19 + #define smp_mb__before_clear_bit() smp_mb() 20 + #define smp_mb__after_clear_bit() smp_mb() 21 + #endif 22 + 23 #include <asm-generic/bitops/__ffs.h> 24 #include <asm-generic/bitops/ffz.h> 25 #include <asm-generic/bitops/fls.h> 26 + #include <asm-generic/bitops/__fls.h> 27 #include <asm-generic/bitops/fls64.h> 28 #include <asm-generic/bitops/find.h> 29 ··· 26 #include <asm-generic/bitops/hweight.h> 27 #include <asm-generic/bitops/lock.h> 28 29 + #include <asm-generic/bitops/atomic.h> 30 + #include <asm-generic/bitops/non-atomic.h> 31 #include <asm-generic/bitops/ext2-non-atomic.h> 32 #include <asm-generic/bitops/ext2-atomic.h> 33 #include <asm-generic/bitops/minix.h> 34 35 + #endif /* __ASM_GENERIC_BITOPS_H */
+1
include/asm-generic/bitops/atomic.h
··· 2 #define _ASM_GENERIC_BITOPS_ATOMIC_H_ 3 4 #include <asm/types.h> 5 6 #ifdef CONFIG_SMP 7 #include <asm/spinlock.h>
··· 2 #define _ASM_GENERIC_BITOPS_ATOMIC_H_ 3 4 #include <asm/types.h> 5 + #include <asm/system.h> 6 7 #ifdef CONFIG_SMP 8 #include <asm/spinlock.h>
+32
include/asm-generic/bitsperlong.h
···
··· 1 + #ifndef __ASM_GENERIC_BITS_PER_LONG 2 + #define __ASM_GENERIC_BITS_PER_LONG 3 + 4 + /* 5 + * There seems to be no way of detecting this automatically from user 6 + * space, so 64 bit architectures should override this in their 7 + * bitsperlong.h. In particular, an architecture that supports 8 + * both 32 and 64 bit user space must not rely on CONFIG_64BIT 9 + * to decide it, but rather check a compiler provided macro. 10 + */ 11 + #ifndef __BITS_PER_LONG 12 + #define __BITS_PER_LONG 32 13 + #endif 14 + 15 + #ifdef __KERNEL__ 16 + 17 + #ifdef CONFIG_64BIT 18 + #define BITS_PER_LONG 64 19 + #else 20 + #define BITS_PER_LONG 32 21 + #endif /* CONFIG_64BIT */ 22 + 23 + /* 24 + * FIXME: The check currently breaks x86-64 build, so it's 25 + * temporarily disabled. Please fix x86-64 and reenable 26 + */ 27 + #if 0 && BITS_PER_LONG != __BITS_PER_LONG 28 + #error Inconsistent word size. Check asm/bitsperlong.h 29 + #endif 30 + 31 + #endif /* __KERNEL__ */ 32 + #endif /* __ASM_GENERIC_BITS_PER_LONG */
+10
include/asm-generic/bugs.h
···
··· 1 + #ifndef __ASM_GENERIC_BUGS_H 2 + #define __ASM_GENERIC_BUGS_H 3 + /* 4 + * This file is included by 'init/main.c' to check for 5 + * architecture-dependent bugs. 6 + */ 7 + 8 + static inline void check_bugs(void) { } 9 + 10 + #endif /* __ASM_GENERIC_BUGS_H */
+12
include/asm-generic/cache.h
···
··· 1 + #ifndef __ASM_GENERIC_CACHE_H 2 + #define __ASM_GENERIC_CACHE_H 3 + /* 4 + * 32 bytes appears to be the most common cache line size, 5 + * so make that the default here. Architectures with larger 6 + * cache lines need to provide their own cache.h. 7 + */ 8 + 9 + #define L1_CACHE_SHIFT 5 10 + #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) 11 + 12 + #endif /* __ASM_GENERIC_CACHE_H */
+30
include/asm-generic/cacheflush.h
···
··· 1 + #ifndef __ASM_CACHEFLUSH_H 2 + #define __ASM_CACHEFLUSH_H 3 + 4 + /* Keep includes the same across arches. */ 5 + #include <linux/mm.h> 6 + 7 + /* 8 + * The cache doesn't need to be flushed when TLB entries change when 9 + * the cache is mapped to physical memory, not virtual memory 10 + */ 11 + #define flush_cache_all() do { } while (0) 12 + #define flush_cache_mm(mm) do { } while (0) 13 + #define flush_cache_dup_mm(mm) do { } while (0) 14 + #define flush_cache_range(vma, start, end) do { } while (0) 15 + #define flush_cache_page(vma, vmaddr, pfn) do { } while (0) 16 + #define flush_dcache_page(page) do { } while (0) 17 + #define flush_dcache_mmap_lock(mapping) do { } while (0) 18 + #define flush_dcache_mmap_unlock(mapping) do { } while (0) 19 + #define flush_icache_range(start, end) do { } while (0) 20 + #define flush_icache_page(vma,pg) do { } while (0) 21 + #define flush_icache_user_range(vma,pg,adr,len) do { } while (0) 22 + #define flush_cache_vmap(start, end) do { } while (0) 23 + #define flush_cache_vunmap(start, end) do { } while (0) 24 + 25 + #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ 26 + memcpy(dst, src, len) 27 + #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ 28 + memcpy(dst, src, len) 29 + 30 + #endif /* __ASM_CACHEFLUSH_H */
+79
include/asm-generic/checksum.h
···
··· 1 + #ifndef __ASM_GENERIC_CHECKSUM_H 2 + #define __ASM_GENERIC_CHECKSUM_H 3 + 4 + /* 5 + * computes the checksum of a memory block at buff, length len, 6 + * and adds in "sum" (32-bit) 7 + * 8 + * returns a 32-bit number suitable for feeding into itself 9 + * or csum_tcpudp_magic 10 + * 11 + * this function must be called with even lengths, except 12 + * for the last fragment, which may be odd 13 + * 14 + * it's best to have buff aligned on a 32-bit boundary 15 + */ 16 + extern __wsum csum_partial(const void *buff, int len, __wsum sum); 17 + 18 + /* 19 + * the same as csum_partial, but copies from src while it 20 + * checksums 21 + * 22 + * here even more important to align src and dst on a 32-bit (or even 23 + * better 64-bit) boundary 24 + */ 25 + extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum); 26 + 27 + /* 28 + * the same as csum_partial_copy, but copies from user space. 29 + * 30 + * here even more important to align src and dst on a 32-bit (or even 31 + * better 64-bit) boundary 32 + */ 33 + extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst, 34 + int len, __wsum sum, int *csum_err); 35 + 36 + #define csum_partial_copy_nocheck(src, dst, len, sum) \ 37 + csum_partial_copy((src), (dst), (len), (sum)) 38 + 39 + /* 40 + * This is a version of ip_compute_csum() optimized for IP headers, 41 + * which always checksum on 4 octet boundaries. 
42 + */ 43 + extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); 44 + 45 + /* 46 + * Fold a partial checksum 47 + */ 48 + static inline __sum16 csum_fold(__wsum csum) 49 + { 50 + u32 sum = (__force u32)csum; 51 + sum = (sum & 0xffff) + (sum >> 16); 52 + sum = (sum & 0xffff) + (sum >> 16); 53 + return (__force __sum16)~sum; 54 + } 55 + 56 + #ifndef csum_tcpudp_nofold 57 + /* 58 + * computes the checksum of the TCP/UDP pseudo-header 59 + * returns a 16-bit checksum, already complemented 60 + */ 61 + extern __wsum 62 + csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, 63 + unsigned short proto, __wsum sum); 64 + #endif 65 + 66 + static inline __sum16 67 + csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len, 68 + unsigned short proto, __wsum sum) 69 + { 70 + return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); 71 + } 72 + 73 + /* 74 + * this routine is used for miscellaneous IP-like checksums, mainly 75 + * in icmp.c 76 + */ 77 + extern __sum16 ip_compute_csum(const void *buff, int len); 78 + 79 + #endif /* __ASM_GENERIC_CHECKSUM_H */
+9
include/asm-generic/current.h
···
··· 1 + #ifndef __ASM_GENERIC_CURRENT_H 2 + #define __ASM_GENERIC_CURRENT_H 3 + 4 + #include <linux/thread_info.h> 5 + 6 + #define get_current() (current_thread_info()->task) 7 + #define current get_current() 8 + 9 + #endif /* __ASM_GENERIC_CURRENT_H */
+9
include/asm-generic/delay.h
···
··· 1 + #ifndef __ASM_GENERIC_DELAY_H 2 + #define __ASM_GENERIC_DELAY_H 3 + 4 + extern void __udelay(unsigned long usecs); 5 + extern void __delay(unsigned long loops); 6 + 7 + #define udelay(n) __udelay(n) 8 + 9 + #endif /* __ASM_GENERIC_DELAY_H */
+15
include/asm-generic/dma.h
···
··· 1 + #ifndef __ASM_GENERIC_DMA_H 2 + #define __ASM_GENERIC_DMA_H 3 + /* 4 + * This file traditionally describes the i8237 PC style DMA controller. 5 + * Most architectures don't have these any more and can get the minimal 6 + * implementation from kernel/dma.c by not defining MAX_DMA_CHANNELS. 7 + * 8 + * Some code relies on seeing MAX_DMA_ADDRESS though. 9 + */ 10 + #define MAX_DMA_ADDRESS PAGE_OFFSET 11 + 12 + extern int request_dma(unsigned int dmanr, const char *device_id); 13 + extern void free_dma(unsigned int dmanr); 14 + 15 + #endif /* __ASM_GENERIC_DMA_H */
+12
include/asm-generic/fb.h
···
··· 1 + #ifndef __ASM_GENERIC_FB_H_ 2 + #define __ASM_GENERIC_FB_H_ 3 + #include <linux/fb.h> 4 + 5 + #define fb_pgprotect(...) do {} while (0) 6 + 7 + static inline int fb_is_primary_device(struct fb_info *info) 8 + { 9 + return 0; 10 + } 11 + 12 + #endif /* __ASM_GENERIC_FB_H_ */
+24
include/asm-generic/getorder.h
···
··· 1 + #ifndef __ASM_GENERIC_GETORDER_H 2 + #define __ASM_GENERIC_GETORDER_H 3 + 4 + #ifndef __ASSEMBLY__ 5 + 6 + #include <linux/compiler.h> 7 + 8 + /* Pure 2^n version of get_order */ 9 + static inline __attribute_const__ int get_order(unsigned long size) 10 + { 11 + int order; 12 + 13 + size = (size - 1) >> (PAGE_SHIFT - 1); 14 + order = -1; 15 + do { 16 + size >>= 1; 17 + order++; 18 + } while (size); 19 + return order; 20 + } 21 + 22 + #endif /* __ASSEMBLY__ */ 23 + 24 + #endif /* __ASM_GENERIC_GETORDER_H */
+34
include/asm-generic/hardirq.h
···
··· 1 + #ifndef __ASM_GENERIC_HARDIRQ_H 2 + #define __ASM_GENERIC_HARDIRQ_H 3 + 4 + #include <linux/cache.h> 5 + #include <linux/threads.h> 6 + #include <linux/irq.h> 7 + 8 + typedef struct { 9 + unsigned long __softirq_pending; 10 + } ____cacheline_aligned irq_cpustat_t; 11 + 12 + #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ 13 + 14 + #ifndef HARDIRQ_BITS 15 + #define HARDIRQ_BITS 8 16 + #endif 17 + 18 + /* 19 + * The hardirq mask has to be large enough to have 20 + * space for potentially all IRQ sources in the system 21 + * nesting on a single CPU: 22 + */ 23 + #if (1 << HARDIRQ_BITS) < NR_IRQS 24 + # error HARDIRQ_BITS is too low! 25 + #endif 26 + 27 + #ifndef ack_bad_irq 28 + static inline void ack_bad_irq(unsigned int irq) 29 + { 30 + printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq); 31 + } 32 + #endif 33 + 34 + #endif /* __ASM_GENERIC_HARDIRQ_H */
+9
include/asm-generic/hw_irq.h
···
··· 1 + #ifndef __ASM_GENERIC_HW_IRQ_H 2 + #define __ASM_GENERIC_HW_IRQ_H 3 + /* 4 + * hw_irq.h has internal declarations for the low-level interrupt 5 + * controller, like the original i8259A. 6 + * In general, this is not needed for new architectures. 7 + */ 8 + 9 + #endif /* __ASM_GENERIC_HW_IRQ_H */
+2
include/asm-generic/int-l64.h
··· 8 #ifndef _ASM_GENERIC_INT_L64_H 9 #define _ASM_GENERIC_INT_L64_H 10 11 #ifndef __ASSEMBLY__ 12 /* 13 * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
··· 8 #ifndef _ASM_GENERIC_INT_L64_H 9 #define _ASM_GENERIC_INT_L64_H 10 11 + #include <asm/bitsperlong.h> 12 + 13 #ifndef __ASSEMBLY__ 14 /* 15 * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+2
include/asm-generic/int-ll64.h
··· 8 #ifndef _ASM_GENERIC_INT_LL64_H 9 #define _ASM_GENERIC_INT_LL64_H 10 11 #ifndef __ASSEMBLY__ 12 /* 13 * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
··· 8 #ifndef _ASM_GENERIC_INT_LL64_H 9 #define _ASM_GENERIC_INT_LL64_H 10 11 + #include <asm/bitsperlong.h> 12 + 13 #ifndef __ASSEMBLY__ 14 /* 15 * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+300
include/asm-generic/io.h
···
··· 1 + /* Generic I/O port emulation, based on MN10300 code 2 + * 3 + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 4 + * Written by David Howells (dhowells@redhat.com) 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public Licence 8 + * as published by the Free Software Foundation; either version 9 + * 2 of the Licence, or (at your option) any later version. 10 + */ 11 + #ifndef __ASM_GENERIC_IO_H 12 + #define __ASM_GENERIC_IO_H 13 + 14 + #include <asm/page.h> /* I/O is all done through memory accesses */ 15 + #include <asm/cacheflush.h> 16 + #include <linux/types.h> 17 + 18 + #ifdef CONFIG_GENERIC_IOMAP 19 + #include <asm-generic/iomap.h> 20 + #endif 21 + 22 + #define mmiowb() do {} while (0) 23 + 24 + /*****************************************************************************/ 25 + /* 26 + * readX/writeX() are used to access memory mapped devices. On some 27 + * architectures the memory mapped IO stuff needs to be accessed 28 + * differently. On the simple architectures, we just read/write the 29 + * memory location directly. 
30 + */ 31 + static inline u8 __raw_readb(const volatile void __iomem *addr) 32 + { 33 + return *(const volatile u8 __force *) addr; 34 + } 35 + 36 + static inline u16 __raw_readw(const volatile void __iomem *addr) 37 + { 38 + return *(const volatile u16 __force *) addr; 39 + } 40 + 41 + static inline u32 __raw_readl(const volatile void __iomem *addr) 42 + { 43 + return *(const volatile u32 __force *) addr; 44 + } 45 + 46 + #define readb __raw_readb 47 + #define readw(addr) __le16_to_cpu(__raw_readw(addr)) 48 + #define readl(addr) __le32_to_cpu(__raw_readl(addr)) 49 + 50 + static inline void __raw_writeb(u8 b, volatile void __iomem *addr) 51 + { 52 + *(volatile u8 __force *) addr = b; 53 + } 54 + 55 + static inline void __raw_writew(u16 b, volatile void __iomem *addr) 56 + { 57 + *(volatile u16 __force *) addr = b; 58 + } 59 + 60 + static inline void __raw_writel(u32 b, volatile void __iomem *addr) 61 + { 62 + *(volatile u32 __force *) addr = b; 63 + } 64 + 65 + #define writeb __raw_writeb 66 + #define writew(b,addr) __raw_writew(__cpu_to_le16(b),addr) 67 + #define writel(b,addr) __raw_writel(__cpu_to_le32(b),addr) 68 + 69 + #ifdef CONFIG_64BIT 70 + static inline u64 __raw_readq(const volatile void __iomem *addr) 71 + { 72 + return *(const volatile u64 __force *) addr; 73 + } 74 + #define readq(addr) __le64_to_cpu(__raw_readq(addr)) 75 + 76 + static inline void __raw_writeq(u64 b, volatile void __iomem *addr) 77 + { 78 + *(volatile u64 __force *) addr = b; 79 + } 80 + #define writeq(b,addr) __raw_writeq(__cpu_to_le64(b),addr) 81 + #endif 82 + 83 + /*****************************************************************************/ 84 + /* 85 + * traditional input/output functions 86 + */ 87 + 88 + static inline u8 inb(unsigned long addr) 89 + { 90 + return readb((volatile void __iomem *) addr); 91 + } 92 + 93 + static inline u16 inw(unsigned long addr) 94 + { 95 + return readw((volatile void __iomem *) addr); 96 + } 97 + 98 + static inline u32 inl(unsigned long addr) 99 
+ { 100 + return readl((volatile void __iomem *) addr); 101 + } 102 + 103 + static inline void outb(u8 b, unsigned long addr) 104 + { 105 + writeb(b, (volatile void __iomem *) addr); 106 + } 107 + 108 + static inline void outw(u16 b, unsigned long addr) 109 + { 110 + writew(b, (volatile void __iomem *) addr); 111 + } 112 + 113 + static inline void outl(u32 b, unsigned long addr) 114 + { 115 + writel(b, (volatile void __iomem *) addr); 116 + } 117 + 118 + #define inb_p(addr) inb(addr) 119 + #define inw_p(addr) inw(addr) 120 + #define inl_p(addr) inl(addr) 121 + #define outb_p(x, addr) outb((x), (addr)) 122 + #define outw_p(x, addr) outw((x), (addr)) 123 + #define outl_p(x, addr) outl((x), (addr)) 124 + 125 + static inline void insb(unsigned long addr, void *buffer, int count) 126 + { 127 + if (count) { 128 + u8 *buf = buffer; 129 + do { 130 + u8 x = inb(addr); 131 + *buf++ = x; 132 + } while (--count); 133 + } 134 + } 135 + 136 + static inline void insw(unsigned long addr, void *buffer, int count) 137 + { 138 + if (count) { 139 + u16 *buf = buffer; 140 + do { 141 + u16 x = inw(addr); 142 + *buf++ = x; 143 + } while (--count); 144 + } 145 + } 146 + 147 + static inline void insl(unsigned long addr, void *buffer, int count) 148 + { 149 + if (count) { 150 + u32 *buf = buffer; 151 + do { 152 + u32 x = inl(addr); 153 + *buf++ = x; 154 + } while (--count); 155 + } 156 + } 157 + 158 + static inline void outsb(unsigned long addr, const void *buffer, int count) 159 + { 160 + if (count) { 161 + const u8 *buf = buffer; 162 + do { 163 + outb(*buf++, addr); 164 + } while (--count); 165 + } 166 + } 167 + 168 + static inline void outsw(unsigned long addr, const void *buffer, int count) 169 + { 170 + if (count) { 171 + const u16 *buf = buffer; 172 + do { 173 + outw(*buf++, addr); 174 + } while (--count); 175 + } 176 + } 177 + 178 + static inline void outsl(unsigned long addr, const void *buffer, int count) 179 + { 180 + if (count) { 181 + const u32 *buf = buffer; 182 + do { 183 + 
outl(*buf++, addr); 184 + } while (--count); 185 + } 186 + } 187 + 188 + #ifndef CONFIG_GENERIC_IOMAP 189 + #define ioread8(addr) readb(addr) 190 + #define ioread16(addr) readw(addr) 191 + #define ioread32(addr) readl(addr) 192 + 193 + #define iowrite8(v, addr) writeb((v), (addr)) 194 + #define iowrite16(v, addr) writew((v), (addr)) 195 + #define iowrite32(v, addr) writel((v), (addr)) 196 + 197 + #define ioread8_rep(p, dst, count) \ 198 + insb((unsigned long) (p), (dst), (count)) 199 + #define ioread16_rep(p, dst, count) \ 200 + insw((unsigned long) (p), (dst), (count)) 201 + #define ioread32_rep(p, dst, count) \ 202 + insl((unsigned long) (p), (dst), (count)) 203 + 204 + #define iowrite8_rep(p, src, count) \ 205 + outsb((unsigned long) (p), (src), (count)) 206 + #define iowrite16_rep(p, src, count) \ 207 + outsw((unsigned long) (p), (src), (count)) 208 + #define iowrite32_rep(p, src, count) \ 209 + outsl((unsigned long) (p), (src), (count)) 210 + #endif /* CONFIG_GENERIC_IOMAP */ 211 + 212 + 213 + #define IO_SPACE_LIMIT 0xffffffff 214 + 215 + #ifdef __KERNEL__ 216 + 217 + #include <linux/vmalloc.h> 218 + #define __io_virt(x) ((void __force *) (x)) 219 + 220 + #ifndef CONFIG_GENERIC_IOMAP 221 + /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */ 222 + struct pci_dev; 223 + extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); 224 + static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p) 225 + { 226 + } 227 + #endif /* CONFIG_GENERIC_IOMAP */ 228 + 229 + /* 230 + * Change virtual addresses to physical addresses and vv. 231 + * These are pretty trivial 232 + */ 233 + static inline unsigned long virt_to_phys(volatile void *address) 234 + { 235 + return __pa((unsigned long)address); 236 + } 237 + 238 + static inline void *phys_to_virt(unsigned long address) 239 + { 240 + return __va(address); 241 + } 242 + 243 + /* 244 + * Change "struct page" to physical address. 
245 + */ 246 + static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size) 247 + { 248 + return (void __iomem*) (unsigned long)offset; 249 + } 250 + 251 + #define __ioremap(offset, size, flags) ioremap(offset, size) 252 + 253 + #ifndef ioremap_nocache 254 + #define ioremap_nocache ioremap 255 + #endif 256 + 257 + #ifndef ioremap_wc 258 + #define ioremap_wc ioremap_nocache 259 + #endif 260 + 261 + static inline void iounmap(void *addr) 262 + { 263 + } 264 + 265 + #ifndef CONFIG_GENERIC_IOMAP 266 + static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) 267 + { 268 + return (void __iomem *) port; 269 + } 270 + 271 + static inline void ioport_unmap(void __iomem *p) 272 + { 273 + } 274 + #else /* CONFIG_GENERIC_IOMAP */ 275 + extern void __iomem *ioport_map(unsigned long port, unsigned int nr); 276 + extern void ioport_unmap(void __iomem *p); 277 + #endif /* CONFIG_GENERIC_IOMAP */ 278 + 279 + #define xlate_dev_kmem_ptr(p) p 280 + #define xlate_dev_mem_ptr(p) ((void *) (p)) 281 + 282 + #ifndef virt_to_bus 283 + static inline unsigned long virt_to_bus(volatile void *address) 284 + { 285 + return ((unsigned long) address); 286 + } 287 + 288 + static inline void *bus_to_virt(unsigned long address) 289 + { 290 + return (void *) address; 291 + } 292 + #endif 293 + 294 + #define memset_io(a, b, c) memset(__io_virt(a), (b), (c)) 295 + #define memcpy_fromio(a, b, c) memcpy((a), __io_virt(b), (c)) 296 + #define memcpy_toio(a, b, c) memcpy(__io_virt(a), (b), (c)) 297 + 298 + #endif /* __KERNEL__ */ 299 + 300 + #endif /* __ASM_GENERIC_IO_H */
+110
include/asm-generic/ioctls.h
···
··· 1 + #ifndef __ASM_GENERIC_IOCTLS_H 2 + #define __ASM_GENERIC_IOCTLS_H 3 + 4 + #include <linux/ioctl.h> 5 + 6 + /* 7 + * These are the most common definitions for tty ioctl numbers. 8 + * Most of them do not use the recommended _IOC(), but there is 9 + * probably some source code out there hardcoding the number, 10 + * so we might as well use them for all new platforms. 11 + * 12 + * The architectures that use different values here typically 13 + * try to be compatible with some Unix variants for the same 14 + * architecture. 15 + */ 16 + 17 + /* 0x54 is just a magic number to make these relatively unique ('T') */ 18 + 19 + #define TCGETS 0x5401 20 + #define TCSETS 0x5402 21 + #define TCSETSW 0x5403 22 + #define TCSETSF 0x5404 23 + #define TCGETA 0x5405 24 + #define TCSETA 0x5406 25 + #define TCSETAW 0x5407 26 + #define TCSETAF 0x5408 27 + #define TCSBRK 0x5409 28 + #define TCXONC 0x540A 29 + #define TCFLSH 0x540B 30 + #define TIOCEXCL 0x540C 31 + #define TIOCNXCL 0x540D 32 + #define TIOCSCTTY 0x540E 33 + #define TIOCGPGRP 0x540F 34 + #define TIOCSPGRP 0x5410 35 + #define TIOCOUTQ 0x5411 36 + #define TIOCSTI 0x5412 37 + #define TIOCGWINSZ 0x5413 38 + #define TIOCSWINSZ 0x5414 39 + #define TIOCMGET 0x5415 40 + #define TIOCMBIS 0x5416 41 + #define TIOCMBIC 0x5417 42 + #define TIOCMSET 0x5418 43 + #define TIOCGSOFTCAR 0x5419 44 + #define TIOCSSOFTCAR 0x541A 45 + #define FIONREAD 0x541B 46 + #define TIOCINQ FIONREAD 47 + #define TIOCLINUX 0x541C 48 + #define TIOCCONS 0x541D 49 + #define TIOCGSERIAL 0x541E 50 + #define TIOCSSERIAL 0x541F 51 + #define TIOCPKT 0x5420 52 + #define FIONBIO 0x5421 53 + #define TIOCNOTTY 0x5422 54 + #define TIOCSETD 0x5423 55 + #define TIOCGETD 0x5424 56 + #define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ 57 + #define TIOCSBRK 0x5427 /* BSD compatibility */ 58 + #define TIOCCBRK 0x5428 /* BSD compatibility */ 59 + #define TIOCGSID 0x5429 /* Return the session ID of FD */ 60 + #define TCGETS2 _IOR('T', 0x2A, struct termios2) 61 + 
#define TCSETS2 _IOW('T', 0x2B, struct termios2) 62 + #define TCSETSW2 _IOW('T', 0x2C, struct termios2) 63 + #define TCSETSF2 _IOW('T', 0x2D, struct termios2) 64 + #define TIOCGRS485 0x542E 65 + #define TIOCSRS485 0x542F 66 + #define TIOCGPTN _IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ 67 + #define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */ 68 + #define TCGETX 0x5432 /* SYS5 TCGETX compatibility */ 69 + #define TCSETX 0x5433 70 + #define TCSETXF 0x5434 71 + #define TCSETXW 0x5435 72 + 73 + #define FIONCLEX 0x5450 74 + #define FIOCLEX 0x5451 75 + #define FIOASYNC 0x5452 76 + #define TIOCSERCONFIG 0x5453 77 + #define TIOCSERGWILD 0x5454 78 + #define TIOCSERSWILD 0x5455 79 + #define TIOCGLCKTRMIOS 0x5456 80 + #define TIOCSLCKTRMIOS 0x5457 81 + #define TIOCSERGSTRUCT 0x5458 /* For debugging only */ 82 + #define TIOCSERGETLSR 0x5459 /* Get line status register */ 83 + #define TIOCSERGETMULTI 0x545A /* Get multiport config */ 84 + #define TIOCSERSETMULTI 0x545B /* Set multiport config */ 85 + 86 + #define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ 87 + #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ 88 + 89 + /* 90 + * some architectures define FIOQSIZE as 0x545E, which is used for 91 + * TIOCGHAYESESP on others 92 + */ 93 + #ifndef FIOQSIZE 94 + # define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ 95 + # define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ 96 + # define FIOQSIZE 0x5460 97 + #endif 98 + 99 + /* Used for packet mode */ 100 + #define TIOCPKT_DATA 0 101 + #define TIOCPKT_FLUSHREAD 1 102 + #define TIOCPKT_FLUSHWRITE 2 103 + #define TIOCPKT_STOP 4 104 + #define TIOCPKT_START 8 105 + #define TIOCPKT_NOSTOP 16 106 + #define TIOCPKT_DOSTOP 32 107 + 108 + #define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ 109 + 110 + #endif /* __ASM_GENERIC_IOCTLS_H */
+34
include/asm-generic/ipcbuf.h
···
··· 1 + #ifndef __ASM_GENERIC_IPCBUF_H 2 + #define __ASM_GENERIC_IPCBUF_H 3 + 4 + /* 5 + * The generic ipc64_perm structure: 6 + * Note extra padding because this structure is passed back and forth 7 + * between kernel and user space. 8 + * 9 + * ipc64_perm was originally meant to be architecture specific, but 10 + * everyone just ended up making identical copies without specific 11 + * optimizations, so we may just as well all use the same one. 12 + * 13 + * Pad space is left for: 14 + * - 32-bit mode_t on architectures that only had 16 bit 15 + * - 32-bit seq 16 + * - 2 miscellaneous 32-bit values 17 + */ 18 + 19 + struct ipc64_perm { 20 + __kernel_key_t key; 21 + __kernel_uid32_t uid; 22 + __kernel_gid32_t gid; 23 + __kernel_uid32_t cuid; 24 + __kernel_gid32_t cgid; 25 + __kernel_mode_t mode; 26 + /* pad if mode_t is u16: */ 27 + unsigned char __pad1[4 - sizeof(__kernel_mode_t)]; 28 + unsigned short seq; 29 + unsigned short __pad2; 30 + unsigned long __unused1; 31 + unsigned long __unused2; 32 + }; 33 + 34 + #endif /* __ASM_GENERIC_IPCBUF_H */
+18
include/asm-generic/irq.h
···
··· 1 + #ifndef __ASM_GENERIC_IRQ_H 2 + #define __ASM_GENERIC_IRQ_H 3 + 4 + /* 5 + * NR_IRQS is the upper bound of how many interrupts can be handled 6 + * in the platform. It is used to size the static irq_map array, 7 + * so don't make it too big. 8 + */ 9 + #ifndef NR_IRQS 10 + #define NR_IRQS 64 11 + #endif 12 + 13 + static inline int irq_canonicalize(int irq) 14 + { 15 + return irq; 16 + } 17 + 18 + #endif /* __ASM_GENERIC_IRQ_H */
+72
include/asm-generic/irqflags.h
···
··· 1 + #ifndef __ASM_GENERIC_IRQFLAGS_H 2 + #define __ASM_GENERIC_IRQFLAGS_H 3 + 4 + /* 5 + * All architectures should implement at least the first two functions, 6 + * usually inline assembly will be the best way. 7 + */ 8 + #ifndef RAW_IRQ_DISABLED 9 + #define RAW_IRQ_DISABLED 0 10 + #define RAW_IRQ_ENABLED 1 11 + #endif 12 + 13 + /* read interrupt enabled status */ 14 + #ifndef __raw_local_save_flags 15 + unsigned long __raw_local_save_flags(void); 16 + #endif 17 + 18 + /* set interrupt enabled status */ 19 + #ifndef raw_local_irq_restore 20 + void raw_local_irq_restore(unsigned long flags); 21 + #endif 22 + 23 + /* get status and disable interrupts */ 24 + #ifndef __raw_local_irq_save 25 + static inline unsigned long __raw_local_irq_save(void) 26 + { 27 + unsigned long flags; 28 + flags = __raw_local_save_flags(); 29 + raw_local_irq_restore(RAW_IRQ_DISABLED); 30 + return flags; 31 + } 32 + #endif 33 + 34 + /* test flags */ 35 + #ifndef raw_irqs_disabled_flags 36 + static inline int raw_irqs_disabled_flags(unsigned long flags) 37 + { 38 + return flags == RAW_IRQ_DISABLED; 39 + } 40 + #endif 41 + 42 + /* unconditionally enable interrupts */ 43 + #ifndef raw_local_irq_enable 44 + static inline void raw_local_irq_enable(void) 45 + { 46 + raw_local_irq_restore(RAW_IRQ_ENABLED); 47 + } 48 + #endif 49 + 50 + /* unconditionally disable interrupts */ 51 + #ifndef raw_local_irq_disable 52 + static inline void raw_local_irq_disable(void) 53 + { 54 + raw_local_irq_restore(RAW_IRQ_DISABLED); 55 + } 56 + #endif 57 + 58 + /* test hardware interrupt enable bit */ 59 + #ifndef raw_irqs_disabled 60 + static inline int raw_irqs_disabled(void) 61 + { 62 + return raw_irqs_disabled_flags(__raw_local_save_flags()); 63 + } 64 + #endif 65 + 66 + #define raw_local_save_flags(flags) \ 67 + do { (flags) = __raw_local_save_flags(); } while (0) 68 + 69 + #define raw_local_irq_save(flags) \ 70 + do { (flags) = __raw_local_irq_save(); } while (0) 71 + 72 + #endif /* __ASM_GENERIC_IRQFLAGS_H 
*/
+32
include/asm-generic/kmap_types.h
···
··· 1 + #ifndef _ASM_GENERIC_KMAP_TYPES_H 2 + #define _ASM_GENERIC_KMAP_TYPES_H 3 + 4 + #ifdef CONFIG_DEBUG_HIGHMEM 5 + # define D(n) __KM_FENCE_##n , 6 + #else 7 + # define D(n) 8 + #endif 9 + 10 + enum km_type { 11 + D(0) KM_BOUNCE_READ, 12 + D(1) KM_SKB_SUNRPC_DATA, 13 + D(2) KM_SKB_DATA_SOFTIRQ, 14 + D(3) KM_USER0, 15 + D(4) KM_USER1, 16 + D(5) KM_BIO_SRC_IRQ, 17 + D(6) KM_BIO_DST_IRQ, 18 + D(7) KM_PTE0, 19 + D(8) KM_PTE1, 20 + D(9) KM_IRQ0, 21 + D(10) KM_IRQ1, 22 + D(11) KM_SOFTIRQ0, 23 + D(12) KM_SOFTIRQ1, 24 + D(13) KM_SYNC_ICACHE, 25 + D(14) KM_SYNC_DCACHE, 26 + D(15) KM_UML_USERCOPY, /* UML specific, for copy_*_user - used in do_op_one_page */ 27 + D(16) KM_TYPE_NR 28 + }; 29 + 30 + #undef D 31 + 32 + #endif
+8
include/asm-generic/linkage.h
···
··· 1 + #ifndef __ASM_GENERIC_LINKAGE_H 2 + #define __ASM_GENERIC_LINKAGE_H 3 + /* 4 + * linux/linkage.h provides reasonable defaults. 5 + * an architecture can override them by providing its own version. 6 + */ 7 + 8 + #endif /* __ASM_GENERIC_LINKAGE_H */
+41
include/asm-generic/mman-common.h
···
··· 1 + #ifndef __ASM_GENERIC_MMAN_COMMON_H 2 + #define __ASM_GENERIC_MMAN_COMMON_H 3 + 4 + /* 5 + Author: Michael S. Tsirkin <mst@mellanox.co.il>, Mellanox Technologies Ltd. 6 + Based on: asm-xxx/mman.h 7 + */ 8 + 9 + #define PROT_READ 0x1 /* page can be read */ 10 + #define PROT_WRITE 0x2 /* page can be written */ 11 + #define PROT_EXEC 0x4 /* page can be executed */ 12 + #define PROT_SEM 0x8 /* page may be used for atomic ops */ 13 + #define PROT_NONE 0x0 /* page can not be accessed */ 14 + #define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ 15 + #define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ 16 + 17 + #define MAP_SHARED 0x01 /* Share changes */ 18 + #define MAP_PRIVATE 0x02 /* Changes are private */ 19 + #define MAP_TYPE 0x0f /* Mask for type of mapping */ 20 + #define MAP_FIXED 0x10 /* Interpret addr exactly */ 21 + #define MAP_ANONYMOUS 0x20 /* don't use a file */ 22 + 23 + #define MS_ASYNC 1 /* sync memory asynchronously */ 24 + #define MS_INVALIDATE 2 /* invalidate the caches */ 25 + #define MS_SYNC 4 /* synchronous memory sync */ 26 + 27 + #define MADV_NORMAL 0 /* no further special treatment */ 28 + #define MADV_RANDOM 1 /* expect random page references */ 29 + #define MADV_SEQUENTIAL 2 /* expect sequential page references */ 30 + #define MADV_WILLNEED 3 /* will need these pages */ 31 + #define MADV_DONTNEED 4 /* don't need these pages */ 32 + 33 + /* common parameters: try to keep these consistent across architectures */ 34 + #define MADV_REMOVE 9 /* remove these pages & resources */ 35 + #define MADV_DONTFORK 10 /* don't inherit across fork */ 36 + #define MADV_DOFORK 11 /* do inherit across fork */ 37 + 38 + /* compatibility flags */ 39 + #define MAP_FILE 0 40 + 41 + #endif /* __ASM_GENERIC_MMAN_COMMON_H */
+14 -37
include/asm-generic/mman.h
··· 1 - #ifndef _ASM_GENERIC_MMAN_H 2 - #define _ASM_GENERIC_MMAN_H 3 4 - /* 5 - Author: Michael S. Tsirkin <mst@mellanox.co.il>, Mellanox Technologies Ltd. 6 - Based on: asm-xxx/mman.h 7 - */ 8 9 - #define PROT_READ 0x1 /* page can be read */ 10 - #define PROT_WRITE 0x2 /* page can be written */ 11 - #define PROT_EXEC 0x4 /* page can be executed */ 12 - #define PROT_SEM 0x8 /* page may be used for atomic ops */ 13 - #define PROT_NONE 0x0 /* page can not be accessed */ 14 - #define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ 15 - #define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ 16 17 - #define MAP_SHARED 0x01 /* Share changes */ 18 - #define MAP_PRIVATE 0x02 /* Changes are private */ 19 - #define MAP_TYPE 0x0f /* Mask for type of mapping */ 20 - #define MAP_FIXED 0x10 /* Interpret addr exactly */ 21 - #define MAP_ANONYMOUS 0x20 /* don't use a file */ 22 23 - #define MS_ASYNC 1 /* sync memory asynchronously */ 24 - #define MS_INVALIDATE 2 /* invalidate the caches */ 25 - #define MS_SYNC 4 /* synchronous memory sync */ 26 - 27 - #define MADV_NORMAL 0 /* no further special treatment */ 28 - #define MADV_RANDOM 1 /* expect random page references */ 29 - #define MADV_SEQUENTIAL 2 /* expect sequential page references */ 30 - #define MADV_WILLNEED 3 /* will need these pages */ 31 - #define MADV_DONTNEED 4 /* don't need these pages */ 32 - 33 - /* common parameters: try to keep these consistent across architectures */ 34 - #define MADV_REMOVE 9 /* remove these pages & resources */ 35 - #define MADV_DONTFORK 10 /* don't inherit across fork */ 36 - #define MADV_DOFORK 11 /* do inherit across fork */ 37 - 38 - /* compatibility flags */ 39 - #define MAP_FILE 0 40 - 41 - #endif
··· 1 + #ifndef __ASM_GENERIC_MMAN_H 2 + #define __ASM_GENERIC_MMAN_H 3 4 + #include <asm-generic/mman-common.h> 5 6 + #define MAP_GROWSDOWN 0x0100 /* stack-like segment */ 7 + #define MAP_DENYWRITE 0x0800 /* ETXTBSY */ 8 + #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */ 9 + #define MAP_LOCKED 0x2000 /* pages are locked */ 10 + #define MAP_NORESERVE 0x4000 /* don't check for reservations */ 11 + #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */ 12 + #define MAP_NONBLOCK 0x10000 /* do not block on IO */ 13 + #define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */ 14 15 + #define MCL_CURRENT 1 /* lock all current mappings */ 16 + #define MCL_FUTURE 2 /* lock all future mappings */ 17 18 + #endif /* __ASM_GENERIC_MMAN_H */
+15
include/asm-generic/mmu.h
···
··· 1 + #ifndef __ASM_GENERIC_MMU_H 2 + #define __ASM_GENERIC_MMU_H 3 + 4 + /* 5 + * This is the mmu.h header for nommu implementations. 6 + * Architectures with an MMU need something more complex. 7 + */ 8 + #ifndef __ASSEMBLY__ 9 + typedef struct { 10 + struct vm_list_struct *vmlist; 11 + unsigned long end_brk; 12 + } mm_context_t; 13 + #endif 14 + 15 + #endif /* __ASM_GENERIC_MMU_H */
+45
include/asm-generic/mmu_context.h
···
··· 1 + #ifndef __ASM_GENERIC_MMU_CONTEXT_H 2 + #define __ASM_GENERIC_MMU_CONTEXT_H 3 + 4 + /* 5 + * Generic hooks for NOMMU architectures, which do not need to do 6 + * anything special here. 7 + */ 8 + 9 + #include <asm-generic/mm_hooks.h> 10 + 11 + struct task_struct; 12 + struct mm_struct; 13 + 14 + static inline void enter_lazy_tlb(struct mm_struct *mm, 15 + struct task_struct *tsk) 16 + { 17 + } 18 + 19 + static inline int init_new_context(struct task_struct *tsk, 20 + struct mm_struct *mm) 21 + { 22 + return 0; 23 + } 24 + 25 + static inline void destroy_context(struct mm_struct *mm) 26 + { 27 + } 28 + 29 + static inline void deactivate_mm(struct task_struct *task, 30 + struct mm_struct *mm) 31 + { 32 + } 33 + 34 + static inline void switch_mm(struct mm_struct *prev, 35 + struct mm_struct *next, 36 + struct task_struct *tsk) 37 + { 38 + } 39 + 40 + static inline void activate_mm(struct mm_struct *prev_mm, 41 + struct mm_struct *next_mm) 42 + { 43 + } 44 + 45 + #endif /* __ASM_GENERIC_MMU_CONTEXT_H */
+22
include/asm-generic/module.h
···
··· 1 + #ifndef __ASM_GENERIC_MODULE_H 2 + #define __ASM_GENERIC_MODULE_H 3 + 4 + /* 5 + * Many architectures just need a simple module 6 + * loader without arch specific data. 7 + */ 8 + struct mod_arch_specific 9 + { 10 + }; 11 + 12 + #ifdef CONFIG_64BIT 13 + #define Elf_Shdr Elf64_Shdr 14 + #define Elf_Sym Elf64_Sym 15 + #define Elf_Ehdr Elf64_Ehdr 16 + #else 17 + #define Elf_Shdr Elf32_Shdr 18 + #define Elf_Sym Elf32_Sym 19 + #define Elf_Ehdr Elf32_Ehdr 20 + #endif 21 + 22 + #endif /* __ASM_GENERIC_MODULE_H */
+47
include/asm-generic/msgbuf.h
···
··· 1 + #ifndef __ASM_GENERIC_MSGBUF_H 2 + #define __ASM_GENERIC_MSGBUF_H 3 + 4 + #include <asm/bitsperlong.h> 5 + /* 6 + * generic msqid64_ds structure. 7 + * 8 + * Note extra padding because this structure is passed back and forth 9 + * between kernel and user space. 10 + * 11 + * msqid64_ds was originally meant to be architecture specific, but 12 + * everyone just ended up making identical copies without specific 13 + * optimizations, so we may just as well all use the same one. 14 + * 15 + * 64 bit architectures typically define a 64 bit __kernel_time_t, 16 + * so they do not need the first three padding words. 17 + * On big-endian systems, the padding is in the wrong place. 18 + * 19 + * Pad space is left for: 20 + * - 64-bit time_t to solve y2038 problem 21 + * - 2 miscellaneous 32-bit values 22 + */ 23 + 24 + struct msqid64_ds { 25 + struct ipc64_perm msg_perm; 26 + __kernel_time_t msg_stime; /* last msgsnd time */ 27 + #if __BITS_PER_LONG != 64 28 + unsigned long __unused1; 29 + #endif 30 + __kernel_time_t msg_rtime; /* last msgrcv time */ 31 + #if __BITS_PER_LONG != 64 32 + unsigned long __unused2; 33 + #endif 34 + __kernel_time_t msg_ctime; /* last change time */ 35 + #if __BITS_PER_LONG != 64 36 + unsigned long __unused3; 37 + #endif 38 + unsigned long msg_cbytes; /* current number of bytes on queue */ 39 + unsigned long msg_qnum; /* number of messages in queue */ 40 + unsigned long msg_qbytes; /* max number of bytes on queue */ 41 + __kernel_pid_t msg_lspid; /* pid of last msgsnd */ 42 + __kernel_pid_t msg_lrpid; /* last receive pid */ 43 + unsigned long __unused4; 44 + unsigned long __unused5; 45 + }; 46 + 47 + #endif /* __ASM_GENERIC_MSGBUF_H */
+9
include/asm-generic/mutex.h
···
··· 1 + #ifndef __ASM_GENERIC_MUTEX_H 2 + #define __ASM_GENERIC_MUTEX_H 3 + /* 4 + * Pull in the generic implementation for the mutex fastpath, 5 + * which is a reasonable default on many architectures. 6 + */ 7 + 8 + #include <asm-generic/mutex-dec.h> 9 + #endif /* __ASM_GENERIC_MUTEX_H */
+92 -17
include/asm-generic/page.h
··· 1 - #ifndef _ASM_GENERIC_PAGE_H 2 - #define _ASM_GENERIC_PAGE_H 3 4 #ifndef __ASSEMBLY__ 5 6 - #include <linux/compiler.h> 7 8 - /* Pure 2^n version of get_order */ 9 - static __inline__ __attribute_const__ int get_order(unsigned long size) 10 - { 11 - int order; 12 13 - size = (size - 1) >> (PAGE_SHIFT - 1); 14 - order = -1; 15 - do { 16 - size >>= 1; 17 - order++; 18 - } while (size); 19 - return order; 20 - } 21 22 - #endif /* __ASSEMBLY__ */ 23 24 - #endif /* _ASM_GENERIC_PAGE_H */
··· 1 + #ifndef __ASM_GENERIC_PAGE_H 2 + #define __ASM_GENERIC_PAGE_H 3 + /* 4 + * Generic page.h implementation, for NOMMU architectures. 5 + * This provides the dummy definitions for the memory management. 6 + */ 7 + 8 + #ifdef CONFIG_MMU 9 + #error need to provide a real asm/page.h 10 + #endif 11 + 12 + 13 + /* PAGE_SHIFT determines the page size */ 14 + 15 + #define PAGE_SHIFT 12 16 + #ifdef __ASSEMBLY__ 17 + #define PAGE_SIZE (1 << PAGE_SHIFT) 18 + #else 19 + #define PAGE_SIZE (1UL << PAGE_SHIFT) 20 + #endif 21 + #define PAGE_MASK (~(PAGE_SIZE-1)) 22 + 23 + #include <asm/setup.h> 24 25 #ifndef __ASSEMBLY__ 26 27 + #define get_user_page(vaddr) __get_free_page(GFP_KERNEL) 28 + #define free_user_page(page, addr) free_page(addr) 29 30 + #define clear_page(page) memset((page), 0, PAGE_SIZE) 31 + #define copy_page(to,from) memcpy((to), (from), PAGE_SIZE) 32 33 + #define clear_user_page(page, vaddr, pg) clear_page(page) 34 + #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) 35 36 + /*
 * These are used to make use of C type-checking.. 
38 + */ 39 + typedef struct { 40 + unsigned long pte; 41 + } pte_t; 42 + typedef struct { 43 + unsigned long pmd[16]; 44 + } pmd_t; 45 + typedef struct { 46 + unsigned long pgd; 47 + } pgd_t; 48 + typedef struct { 49 + unsigned long pgprot; 50 + } pgprot_t; 51 + typedef struct page *pgtable_t; 52 53 + #define pte_val(x) ((x).pte) 54 + #define pmd_val(x) ((&x)->pmd[0]) 55 + #define pgd_val(x) ((x).pgd) 56 + #define pgprot_val(x) ((x).pgprot) 57 + 58 + #define __pte(x) ((pte_t) { (x) } ) 59 + #define __pmd(x) ((pmd_t) { (x) } ) 60 + #define __pgd(x) ((pgd_t) { (x) } ) 61 + #define __pgprot(x) ((pgprot_t) { (x) } ) 62 + 63 + extern unsigned long memory_start; 64 + extern unsigned long memory_end; 65 + 66 + #endif /* !__ASSEMBLY__ */ 67 + 68 + #ifdef CONFIG_KERNEL_RAM_BASE_ADDRESS 69 + #define PAGE_OFFSET (CONFIG_KERNEL_RAM_BASE_ADDRESS) 70 + #else 71 + #define PAGE_OFFSET (0) 72 + #endif 73 + 74 + #ifndef __ASSEMBLY__ 75 + 76 + #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET)) 77 + #define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) 78 + 79 + #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) 80 + #define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT) 81 + 82 + #define virt_to_page(addr) (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT)) 83 + #define page_to_virt(page) ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET) 84 + 85 + #ifndef page_to_phys 86 + #define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) 87 + #endif 88 + 89 + #define pfn_valid(pfn) ((pfn) < max_mapnr) 90 + 91 + #define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \ 92 + ((void *)(kaddr) < (void *)memory_end)) 93 + 94 + #endif /* __ASSEMBLY__ */ 95 + 96 + #include <asm-generic/memory_model.h> 97 + #include <asm-generic/getorder.h> 98 + 99 + #endif /* __ASM_GENERIC_PAGE_H */
+24
include/asm-generic/param.h
···
··· 1 + #ifndef __ASM_GENERIC_PARAM_H 2 + #define __ASM_GENERIC_PARAM_H 3 + 4 + #ifdef __KERNEL__ 5 + # define HZ CONFIG_HZ /* Internal kernel timer frequency */ 6 + # define USER_HZ 100 /* some user interfaces are */ 7 + # define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */ 8 + #endif 9 + 10 + #ifndef HZ 11 + #define HZ 100 12 + #endif 13 + 14 + #ifndef EXEC_PAGESIZE 15 + #define EXEC_PAGESIZE 4096 16 + #endif 17 + 18 + #ifndef NOGROUP 19 + #define NOGROUP (-1) 20 + #endif 21 + 22 + #define MAXHOSTNAMELEN 64 /* max length of hostname */ 23 + 24 + #endif /* __ASM_GENERIC_PARAM_H */
+23
include/asm-generic/parport.h
···
··· 1 + #ifndef __ASM_GENERIC_PARPORT_H 2 + #define __ASM_GENERIC_PARPORT_H 3 + 4 + /* 5 + * An ISA bus may have i8255 parallel ports at well-known 6 + * locations in the I/O space, which are scanned by 7 + * parport_pc_find_isa_ports. 8 + * 9 + * Without ISA support, the driver will only attach 10 + * to devices on the PCI bus. 11 + */ 12 + 13 + static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma); 14 + static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma) 15 + { 16 + #ifdef CONFIG_ISA 17 + return parport_pc_find_isa_ports(autoirq, autodma); 18 + #else 19 + return 0; 20 + #endif 21 + } 22 + 23 + #endif /* __ASM_GENERIC_PARPORT_H */
+8
include/asm-generic/pci.h
··· 52 } 53 #endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */ 54 55 #endif
··· 52 } 53 #endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */ 54 55 + /* 56 + * By default, assume that no iommu is in use and that the PCI 57 + * space is mapped to address physical 0. 58 + */ 59 + #ifndef PCI_DMA_BUS_IS_PHYS 60 + #define PCI_DMA_BUS_IS_PHYS (1) 61 #endif 62 + 63 + #endif /* _ASM_GENERIC_PCI_H */
+12
include/asm-generic/pgalloc.h
···
··· 1 + #ifndef __ASM_GENERIC_PGALLOC_H 2 + #define __ASM_GENERIC_PGALLOC_H 3 + /* 4 + * an empty file is enough for a nommu architecture 5 + */ 6 + #ifdef CONFIG_MMU 7 + #error need to implement an architecture specific asm/pgalloc.h 8 + #endif 9 + 10 + #define check_pgt_cache() do { } while (0) 11 + 12 + #endif /* __ASM_GENERIC_PGALLOC_H */
+165
include/asm-generic/posix_types.h
···
··· 1 + #ifndef __ASM_GENERIC_POSIX_TYPES_H 2 + #define __ASM_GENERIC_POSIX_TYPES_H 3 + 4 + #include <asm/bitsperlong.h> 5 + /* 6 + * This file is generally used by user-level software, so you need to 7 + * be a little careful about namespace pollution etc. 8 + * 9 + * First the types that are often defined in different ways across 10 + * architectures, so that you can override them. 11 + */ 12 + 13 + #ifndef __kernel_ino_t 14 + typedef unsigned long __kernel_ino_t; 15 + #endif 16 + 17 + #ifndef __kernel_mode_t 18 + typedef unsigned int __kernel_mode_t; 19 + #endif 20 + 21 + #ifndef __kernel_nlink_t 22 + typedef unsigned long __kernel_nlink_t; 23 + #endif 24 + 25 + #ifndef __kernel_pid_t 26 + typedef int __kernel_pid_t; 27 + #endif 28 + 29 + #ifndef __kernel_ipc_pid_t 30 + typedef int __kernel_ipc_pid_t; 31 + #endif 32 + 33 + #ifndef __kernel_uid_t 34 + typedef unsigned int __kernel_uid_t; 35 + typedef unsigned int __kernel_gid_t; 36 + #endif 37 + 38 + #ifndef __kernel_suseconds_t 39 + typedef long __kernel_suseconds_t; 40 + #endif 41 + 42 + #ifndef __kernel_daddr_t 43 + typedef int __kernel_daddr_t; 44 + #endif 45 + 46 + #ifndef __kernel_uid32_t 47 + typedef __kernel_uid_t __kernel_uid32_t; 48 + typedef __kernel_gid_t __kernel_gid32_t; 49 + #endif 50 + 51 + #ifndef __kernel_old_uid_t 52 + typedef __kernel_uid_t __kernel_old_uid_t; 53 + typedef __kernel_gid_t __kernel_old_gid_t; 54 + #endif 55 + 56 + #ifndef __kernel_old_dev_t 57 + typedef unsigned int __kernel_old_dev_t; 58 + #endif 59 + 60 + /* 61 + * Most 32 bit architectures use "unsigned int" size_t, 62 + * and all 64 bit architectures use "unsigned long" size_t. 
63 + */ 64 + #ifndef __kernel_size_t 65 + #if __BITS_PER_LONG != 64 66 + typedef unsigned int __kernel_size_t; 67 + typedef int __kernel_ssize_t; 68 + typedef int __kernel_ptrdiff_t; 69 + #else 70 + typedef unsigned long __kernel_size_t; 71 + typedef long __kernel_ssize_t; 72 + typedef long __kernel_ptrdiff_t; 73 + #endif 74 + #endif 75 + 76 + /* 77 + * anything below here should be completely generic 78 + */ 79 + typedef long __kernel_off_t; 80 + typedef long long __kernel_loff_t; 81 + typedef long __kernel_time_t; 82 + typedef long __kernel_clock_t; 83 + typedef int __kernel_timer_t; 84 + typedef int __kernel_clockid_t; 85 + typedef char * __kernel_caddr_t; 86 + typedef unsigned short __kernel_uid16_t; 87 + typedef unsigned short __kernel_gid16_t; 88 + 89 + typedef struct { 90 + int val[2]; 91 + } __kernel_fsid_t; 92 + 93 + #ifdef __KERNEL__ 94 + 95 + #undef __FD_SET 96 + static inline void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp) 97 + { 98 + unsigned long __tmp = __fd / __NFDBITS; 99 + unsigned long __rem = __fd % __NFDBITS; 100 + __fdsetp->fds_bits[__tmp] |= (1UL<<__rem); 101 + } 102 + 103 + #undef __FD_CLR 104 + static inline void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp) 105 + { 106 + unsigned long __tmp = __fd / __NFDBITS; 107 + unsigned long __rem = __fd % __NFDBITS; 108 + __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem); 109 + } 110 + 111 + #undef __FD_ISSET 112 + static inline int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p) 113 + { 114 + unsigned long __tmp = __fd / __NFDBITS; 115 + unsigned long __rem = __fd % __NFDBITS; 116 + return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0; 117 + } 118 + 119 + /* 120 + * This will unroll the loop for the normal constant case (8 ints, 121 + * for a 256-bit fd_set) 122 + */ 123 + #undef __FD_ZERO 124 + static inline void __FD_ZERO(__kernel_fd_set *__p) 125 + { 126 + unsigned long *__tmp = __p->fds_bits; 127 + int __i; 128 + 129 + if (__builtin_constant_p(__FDSET_LONGS)) { 130 + 
switch (__FDSET_LONGS) { 131 + case 16: 132 + __tmp[ 0] = 0; __tmp[ 1] = 0; 133 + __tmp[ 2] = 0; __tmp[ 3] = 0; 134 + __tmp[ 4] = 0; __tmp[ 5] = 0; 135 + __tmp[ 6] = 0; __tmp[ 7] = 0; 136 + __tmp[ 8] = 0; __tmp[ 9] = 0; 137 + __tmp[10] = 0; __tmp[11] = 0; 138 + __tmp[12] = 0; __tmp[13] = 0; 139 + __tmp[14] = 0; __tmp[15] = 0; 140 + return; 141 + 142 + case 8: 143 + __tmp[ 0] = 0; __tmp[ 1] = 0; 144 + __tmp[ 2] = 0; __tmp[ 3] = 0; 145 + __tmp[ 4] = 0; __tmp[ 5] = 0; 146 + __tmp[ 6] = 0; __tmp[ 7] = 0; 147 + return; 148 + 149 + case 4: 150 + __tmp[ 0] = 0; __tmp[ 1] = 0; 151 + __tmp[ 2] = 0; __tmp[ 3] = 0; 152 + return; 153 + } 154 + } 155 + __i = __FDSET_LONGS; 156 + while (__i) { 157 + __i--; 158 + *__tmp = 0; 159 + __tmp++; 160 + } 161 + } 162 + 163 + #endif /* __KERNEL__ */ 164 + 165 + #endif /* __ASM_GENERIC_POSIX_TYPES_H */
+1 -1
include/asm-generic/rtc.h
··· 202 { 203 struct rtc_time h; 204 205 - __get_rtc_time(&h); 206 return h.tm_sec; 207 } 208
··· 202 { 203 struct rtc_time h; 204 205 + get_rtc_time(&h); 206 return h.tm_sec; 207 } 208
+43
include/asm-generic/scatterlist.h
···
··· 1 + #ifndef __ASM_GENERIC_SCATTERLIST_H 2 + #define __ASM_GENERIC_SCATTERLIST_H 3 + 4 + #include <linux/types.h> 5 + 6 + struct scatterlist { 7 + #ifdef CONFIG_DEBUG_SG 8 + unsigned long sg_magic; 9 + #endif 10 + unsigned long page_link; 11 + unsigned int offset; 12 + unsigned int length; 13 + dma_addr_t dma_address; 14 + unsigned int dma_length; 15 + }; 16 + 17 + /* 18 + * These macros should be used after a dma_map_sg call has been done 19 + * to get bus addresses of each of the SG entries and their lengths. 20 + * You should only work with the number of sg entries pci_map_sg 21 + * returns, or alternatively stop on the first sg_dma_len(sg) which 22 + * is 0. 23 + */ 24 + #define sg_dma_address(sg) ((sg)->dma_address) 25 + #ifndef sg_dma_len 26 + /* 27 + * Normally, you have an iommu on 64 bit machines, but not on 32 bit 28 + * machines. Architectures that are different should override this. 29 + */ 30 + #if __BITS_PER_LONG == 64 31 + #define sg_dma_len(sg) ((sg)->dma_length) 32 + #else 33 + #define sg_dma_len(sg) ((sg)->length) 34 + #endif /* 64 bit */ 35 + #endif /* sg_dma_len */ 36 + 37 + #ifndef ISA_DMA_THRESHOLD 38 + #define ISA_DMA_THRESHOLD (~0UL) 39 + #endif 40 + 41 + #define ARCH_HAS_SG_CHAIN 42 + 43 + #endif /* __ASM_GENERIC_SCATTERLIST_H */
+9
include/asm-generic/segment.h
···
··· 1 + #ifndef __ASM_GENERIC_SEGMENT_H 2 + #define __ASM_GENERIC_SEGMENT_H 3 + /* 4 + * Only here because we have some old header files that expect it... 5 + * 6 + * New architectures probably don't want to have their own version. 7 + */ 8 + 9 + #endif /* __ASM_GENERIC_SEGMENT_H */
+38
include/asm-generic/sembuf.h
···
··· 1 + #ifndef __ASM_GENERIC_SEMBUF_H 2 + #define __ASM_GENERIC_SEMBUF_H 3 + 4 + #include <asm/bitsperlong.h> 5 + 6 + /* 7 + * The semid64_ds structure for x86 architecture. 8 + * Note extra padding because this structure is passed back and forth 9 + * between kernel and user space. 10 + * 11 + * semid64_ds was originally meant to be architecture specific, but 12 + * everyone just ended up making identical copies without specific 13 + * optimizations, so we may just as well all use the same one. 14 + * 15 + * 64 bit architectures typically define a 64 bit __kernel_time_t, 16 + * so they do not need the first two padding words. 17 + * On big-endian systems, the padding is in the wrong place. 18 + * 19 + * Pad space is left for: 20 + * - 64-bit time_t to solve y2038 problem 21 + * - 2 miscellaneous 32-bit values 22 + */ 23 + struct semid64_ds { 24 + struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ 25 + __kernel_time_t sem_otime; /* last semop time */ 26 + #if __BITS_PER_LONG != 64 27 + unsigned long __unused1; 28 + #endif 29 + __kernel_time_t sem_ctime; /* last change time */ 30 + #if __BITS_PER_LONG != 64 31 + unsigned long __unused2; 32 + #endif 33 + unsigned long sem_nsems; /* no. of semaphores in array */ 34 + unsigned long __unused3; 35 + unsigned long __unused4; 36 + }; 37 + 38 + #endif /* __ASM_GENERIC_SEMBUF_H */
+13
include/asm-generic/serial.h
···
··· 1 + #ifndef __ASM_GENERIC_SERIAL_H 2 + #define __ASM_GENERIC_SERIAL_H 3 + 4 + /* 5 + * This should not be an architecture specific #define, oh well. 6 + * 7 + * Traditionally, it just describes i8250 and related serial ports 8 + * that have this clock rate. 9 + */ 10 + 11 + #define BASE_BAUD (1843200 / 16) 12 + 13 + #endif /* __ASM_GENERIC_SERIAL_H */
+6
include/asm-generic/setup.h
···
··· 1 + #ifndef __ASM_GENERIC_SETUP_H 2 + #define __ASM_GENERIC_SETUP_H 3 + 4 + #define COMMAND_LINE_SIZE 512 5 + 6 + #endif /* __ASM_GENERIC_SETUP_H */
+59
include/asm-generic/shmbuf.h
···
··· 1 + #ifndef __ASM_GENERIC_SHMBUF_H 2 + #define __ASM_GENERIC_SHMBUF_H 3 + 4 + #include <asm/bitsperlong.h> 5 + 6 + /* 7 + * The shmid64_ds structure for x86 architecture. 8 + * Note extra padding because this structure is passed back and forth 9 + * between kernel and user space. 10 + * 11 + * shmid64_ds was originally meant to be architecture specific, but 12 + * everyone just ended up making identical copies without specific 13 + * optimizations, so we may just as well all use the same one. 14 + * 15 + * 64 bit architectures typically define a 64 bit __kernel_time_t, 16 + * so they do not need the first two padding words. 17 + * On big-endian systems, the padding is in the wrong place. 18 + * 19 + * 20 + * Pad space is left for: 21 + * - 64-bit time_t to solve y2038 problem 22 + * - 2 miscellaneous 32-bit values 23 + */ 24 + 25 + struct shmid64_ds { 26 + struct ipc64_perm shm_perm; /* operation perms */ 27 + size_t shm_segsz; /* size of segment (bytes) */ 28 + __kernel_time_t shm_atime; /* last attach time */ 29 + #if __BITS_PER_LONG != 64 30 + unsigned long __unused1; 31 + #endif 32 + __kernel_time_t shm_dtime; /* last detach time */ 33 + #if __BITS_PER_LONG != 64 34 + unsigned long __unused2; 35 + #endif 36 + __kernel_time_t shm_ctime; /* last change time */ 37 + #if __BITS_PER_LONG != 64 38 + unsigned long __unused3; 39 + #endif 40 + __kernel_pid_t shm_cpid; /* pid of creator */ 41 + __kernel_pid_t shm_lpid; /* pid of last operator */ 42 + unsigned long shm_nattch; /* no. of current attaches */ 43 + unsigned long __unused4; 44 + unsigned long __unused5; 45 + }; 46 + 47 + struct shminfo64 { 48 + unsigned long shmmax; 49 + unsigned long shmmin; 50 + unsigned long shmmni; 51 + unsigned long shmseg; 52 + unsigned long shmall; 53 + unsigned long __unused1; 54 + unsigned long __unused2; 55 + unsigned long __unused3; 56 + unsigned long __unused4; 57 + }; 58 + 59 + #endif /* __ASM_GENERIC_SHMBUF_H */
+6
include/asm-generic/shmparam.h
···
··· 1 + #ifndef __ASM_GENERIC_SHMPARAM_H 2 + #define __ASM_GENERIC_SHMPARAM_H 3 + 4 + #define SHMLBA PAGE_SIZE /* attach addr a multiple of this */ 5 + 6 + #endif /* __ASM_GENERIC_SHMPARAM_H */
+28
include/asm-generic/signal-defs.h
···
··· 1 + #ifndef __ASM_GENERIC_SIGNAL_DEFS_H 2 + #define __ASM_GENERIC_SIGNAL_DEFS_H 3 + 4 + #include <linux/compiler.h> 5 + 6 + #ifndef SIG_BLOCK 7 + #define SIG_BLOCK 0 /* for blocking signals */ 8 + #endif 9 + #ifndef SIG_UNBLOCK 10 + #define SIG_UNBLOCK 1 /* for unblocking signals */ 11 + #endif 12 + #ifndef SIG_SETMASK 13 + #define SIG_SETMASK 2 /* for setting the signal mask */ 14 + #endif 15 + 16 + #ifndef __ASSEMBLY__ 17 + typedef void __signalfn_t(int); 18 + typedef __signalfn_t __user *__sighandler_t; 19 + 20 + typedef void __restorefn_t(void); 21 + typedef __restorefn_t __user *__sigrestore_t; 22 + 23 + #define SIG_DFL ((__force __sighandler_t)0) /* default signal handling */ 24 + #define SIG_IGN ((__force __sighandler_t)1) /* ignore signal */ 25 + #define SIG_ERR ((__force __sighandler_t)-1) /* error return from signal */ 26 + #endif 27 + 28 + #endif /* __ASM_GENERIC_SIGNAL_DEFS_H */
+120 -17
include/asm-generic/signal.h
··· 1 #ifndef __ASM_GENERIC_SIGNAL_H 2 #define __ASM_GENERIC_SIGNAL_H 3 4 - #include <linux/compiler.h> 5 6 - #ifndef SIG_BLOCK 7 - #define SIG_BLOCK 0 /* for blocking signals */ 8 #endif 9 - #ifndef SIG_UNBLOCK 10 - #define SIG_UNBLOCK 1 /* for unblocking signals */ 11 - #endif 12 - #ifndef SIG_SETMASK 13 - #define SIG_SETMASK 2 /* for setting the signal mask */ 14 - #endif 15 16 #ifndef __ASSEMBLY__ 17 - typedef void __signalfn_t(int); 18 - typedef __signalfn_t __user *__sighandler_t; 19 20 - typedef void __restorefn_t(void); 21 - typedef __restorefn_t __user *__sigrestore_t; 22 23 - #define SIG_DFL ((__force __sighandler_t)0) /* default signal handling */ 24 - #define SIG_IGN ((__force __sighandler_t)1) /* ignore signal */ 25 - #define SIG_ERR ((__force __sighandler_t)-1) /* error return from signal */ 26 #endif 27 28 - #endif /* __ASM_GENERIC_SIGNAL_H */
··· 1 #ifndef __ASM_GENERIC_SIGNAL_H 2 #define __ASM_GENERIC_SIGNAL_H 3 4 + #include <linux/types.h> 5 6 + #define _NSIG 64 7 + #define _NSIG_BPW __BITS_PER_LONG 8 + #define _NSIG_WORDS (_NSIG / _NSIG_BPW) 9 + 10 + #define SIGHUP 1 11 + #define SIGINT 2 12 + #define SIGQUIT 3 13 + #define SIGILL 4 14 + #define SIGTRAP 5 15 + #define SIGABRT 6 16 + #define SIGIOT 6 17 + #define SIGBUS 7 18 + #define SIGFPE 8 19 + #define SIGKILL 9 20 + #define SIGUSR1 10 21 + #define SIGSEGV 11 22 + #define SIGUSR2 12 23 + #define SIGPIPE 13 24 + #define SIGALRM 14 25 + #define SIGTERM 15 26 + #define SIGSTKFLT 16 27 + #define SIGCHLD 17 28 + #define SIGCONT 18 29 + #define SIGSTOP 19 30 + #define SIGTSTP 20 31 + #define SIGTTIN 21 32 + #define SIGTTOU 22 33 + #define SIGURG 23 34 + #define SIGXCPU 24 35 + #define SIGXFSZ 25 36 + #define SIGVTALRM 26 37 + #define SIGPROF 27 38 + #define SIGWINCH 28 39 + #define SIGIO 29 40 + #define SIGPOLL SIGIO 41 + /* 42 + #define SIGLOST 29 43 + */ 44 + #define SIGPWR 30 45 + #define SIGSYS 31 46 + #define SIGUNUSED 31 47 + 48 + /* These should not be considered constants from userland. */ 49 + #define SIGRTMIN 32 50 + #ifndef SIGRTMAX 51 + #define SIGRTMAX _NSIG 52 #endif 53 + 54 + /* 55 + * SA_FLAGS values: 56 + * 57 + * SA_ONSTACK indicates that a registered stack_t will be used. 58 + * SA_RESTART flag to get restarting signals (which were the default long ago) 59 + * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop. 60 + * SA_RESETHAND clears the handler when the signal is delivered. 61 + * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies. 62 + * SA_NODEFER prevents the current signal from being masked in the handler. 63 + * 64 + * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single 65 + * Unix names RESETHAND and NODEFER respectively. 
66 + */ 67 + #define SA_NOCLDSTOP 0x00000001 68 + #define SA_NOCLDWAIT 0x00000002 69 + #define SA_SIGINFO 0x00000004 70 + #define SA_ONSTACK 0x08000000 71 + #define SA_RESTART 0x10000000 72 + #define SA_NODEFER 0x40000000 73 + #define SA_RESETHAND 0x80000000 74 + 75 + #define SA_NOMASK SA_NODEFER 76 + #define SA_ONESHOT SA_RESETHAND 77 + 78 + /* 79 + * New architectures should not define the obsolete 80 + * SA_RESTORER 0x04000000 81 + */ 82 + 83 + /* 84 + * sigaltstack controls 85 + */ 86 + #define SS_ONSTACK 1 87 + #define SS_DISABLE 2 88 + 89 + #define MINSIGSTKSZ 2048 90 + #define SIGSTKSZ 8192 91 92 #ifndef __ASSEMBLY__ 93 + typedef struct { 94 + unsigned long sig[_NSIG_WORDS]; 95 + } sigset_t; 96 97 + /* not actually used, but required for linux/syscalls.h */ 98 + typedef unsigned long old_sigset_t; 99 100 + #include <asm-generic/signal-defs.h> 101 + 102 + struct sigaction { 103 + __sighandler_t sa_handler; 104 + unsigned long sa_flags; 105 + #ifdef SA_RESTORER 106 + __sigrestore_t sa_restorer; 107 #endif 108 + sigset_t sa_mask; /* mask last for extensibility */ 109 + }; 110 111 + struct k_sigaction { 112 + struct sigaction sa; 113 + }; 114 + 115 + typedef struct sigaltstack { 116 + void __user *ss_sp; 117 + int ss_flags; 118 + size_t ss_size; 119 + } stack_t; 120 + 121 + #ifdef __KERNEL__ 122 + 123 + #include <asm/sigcontext.h> 124 + #undef __HAVE_ARCH_SIG_BITOPS 125 + 126 + #define ptrace_signal_deliver(regs, cookie) do { } while (0) 127 + 128 + #endif /* __KERNEL__ */ 129 + #endif /* __ASSEMBLY__ */ 130 + 131 + #endif /* _ASM_GENERIC_SIGNAL_H */
+63
include/asm-generic/socket.h
···
··· 1 + #ifndef __ASM_GENERIC_SOCKET_H 2 + #define __ASM_GENERIC_SOCKET_H 3 + 4 + #include <asm/sockios.h> 5 + 6 + /* For setsockopt(2) */ 7 + #define SOL_SOCKET 1 8 + 9 + #define SO_DEBUG 1 10 + #define SO_REUSEADDR 2 11 + #define SO_TYPE 3 12 + #define SO_ERROR 4 13 + #define SO_DONTROUTE 5 14 + #define SO_BROADCAST 6 15 + #define SO_SNDBUF 7 16 + #define SO_RCVBUF 8 17 + #define SO_SNDBUFFORCE 32 18 + #define SO_RCVBUFFORCE 33 19 + #define SO_KEEPALIVE 9 20 + #define SO_OOBINLINE 10 21 + #define SO_NO_CHECK 11 22 + #define SO_PRIORITY 12 23 + #define SO_LINGER 13 24 + #define SO_BSDCOMPAT 14 25 + /* To add :#define SO_REUSEPORT 15 */ 26 + 27 + #ifndef SO_PASSCRED /* powerpc only differs in these */ 28 + #define SO_PASSCRED 16 29 + #define SO_PEERCRED 17 30 + #define SO_RCVLOWAT 18 31 + #define SO_SNDLOWAT 19 32 + #define SO_RCVTIMEO 20 33 + #define SO_SNDTIMEO 21 34 + #endif 35 + 36 + /* Security levels - as per NRL IPv6 - don't actually do anything */ 37 + #define SO_SECURITY_AUTHENTICATION 22 38 + #define SO_SECURITY_ENCRYPTION_TRANSPORT 23 39 + #define SO_SECURITY_ENCRYPTION_NETWORK 24 40 + 41 + #define SO_BINDTODEVICE 25 42 + 43 + /* Socket filtering */ 44 + #define SO_ATTACH_FILTER 26 45 + #define SO_DETACH_FILTER 27 46 + 47 + #define SO_PEERNAME 28 48 + #define SO_TIMESTAMP 29 49 + #define SCM_TIMESTAMP SO_TIMESTAMP 50 + 51 + #define SO_ACCEPTCONN 30 52 + 53 + #define SO_PEERSEC 31 54 + #define SO_PASSSEC 34 55 + #define SO_TIMESTAMPNS 35 56 + #define SCM_TIMESTAMPNS SO_TIMESTAMPNS 57 + 58 + #define SO_MARK 36 59 + 60 + #define SO_TIMESTAMPING 37 61 + #define SCM_TIMESTAMPING SO_TIMESTAMPING 62 + 63 + #endif /* __ASM_GENERIC_SOCKET_H */
+13
include/asm-generic/sockios.h
···
··· 1 + #ifndef __ASM_GENERIC_SOCKIOS_H 2 + #define __ASM_GENERIC_SOCKIOS_H 3 + 4 + /* Socket-level I/O control calls. */ 5 + #define FIOSETOWN 0x8901 6 + #define SIOCSPGRP 0x8902 7 + #define FIOGETOWN 0x8903 8 + #define SIOCGPGRP 0x8904 9 + #define SIOCATMARK 0x8905 10 + #define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */ 11 + #define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */ 12 + 13 + #endif /* __ASM_GENERIC_SOCKIOS_H */
+11
include/asm-generic/spinlock.h
···
··· 1 + #ifndef __ASM_GENERIC_SPINLOCK_H 2 + #define __ASM_GENERIC_SPINLOCK_H 3 + /* 4 + * You need to implement asm/spinlock.h for SMP support. The generic 5 + * version does not handle SMP. 6 + */ 7 + #ifdef CONFIG_SMP 8 + #error need an architecture specific asm/spinlock.h 9 + #endif 10 + 11 + #endif /* __ASM_GENERIC_SPINLOCK_H */
+72
include/asm-generic/stat.h
···
··· 1 + #ifndef __ASM_GENERIC_STAT_H 2 + #define __ASM_GENERIC_STAT_H 3 + 4 + /* 5 + * Everybody gets this wrong and has to stick with it for all 6 + * eternity. Hopefully, this version gets used by new architectures 7 + * so they don't fall into the same traps. 8 + * 9 + * stat64 is copied from powerpc64, with explicit padding added. 10 + * stat is the same structure layout on 64-bit, without the 'long long' 11 + * types. 12 + * 13 + * By convention, 64 bit architectures use the stat interface, while 14 + * 32 bit architectures use the stat64 interface. Note that we don't 15 + * provide an __old_kernel_stat here, which new architecture should 16 + * not have to start with. 17 + */ 18 + 19 + #include <asm/bitsperlong.h> 20 + 21 + #define STAT_HAVE_NSEC 1 22 + 23 + struct stat { 24 + unsigned long st_dev; /* Device. */ 25 + unsigned long st_ino; /* File serial number. */ 26 + unsigned int st_mode; /* File mode. */ 27 + unsigned int st_nlink; /* Link count. */ 28 + unsigned int st_uid; /* User ID of the file's owner. */ 29 + unsigned int st_gid; /* Group ID of the file's group. */ 30 + unsigned long st_rdev; /* Device number, if device. */ 31 + unsigned long __pad1; 32 + long st_size; /* Size of file, in bytes. */ 33 + int st_blksize; /* Optimal block size for I/O. */ 34 + int __pad2; 35 + long st_blocks; /* Number 512-byte blocks allocated. */ 36 + int st_atime; /* Time of last access. */ 37 + unsigned int st_atime_nsec; 38 + int st_mtime; /* Time of last modification. */ 39 + unsigned int st_mtime_nsec; 40 + int st_ctime; /* Time of last status change. */ 41 + unsigned int st_ctime_nsec; 42 + unsigned int __unused4; 43 + unsigned int __unused5; 44 + }; 45 + 46 + #if __BITS_PER_LONG != 64 47 + /* This matches struct stat64 in glibc2.1. Only used for 32 bit. */ 48 + struct stat64 { 49 + unsigned long long st_dev; /* Device. */ 50 + unsigned long long st_ino; /* File serial number. */ 51 + unsigned int st_mode; /* File mode. 
*/ 52 + unsigned int st_nlink; /* Link count. */ 53 + unsigned int st_uid; /* User ID of the file's owner. */ 54 + unsigned int st_gid; /* Group ID of the file's group. */ 55 + unsigned long long st_rdev; /* Device number, if device. */ 56 + unsigned long long __pad1; 57 + long long st_size; /* Size of file, in bytes. */ 58 + int st_blksize; /* Optimal block size for I/O. */ 59 + int __pad2; 60 + long long st_blocks; /* Number 512-byte blocks allocated. */ 61 + int st_atime; /* Time of last access. */ 62 + unsigned int st_atime_nsec; 63 + int st_mtime; /* Time of last modification. */ 64 + unsigned int st_mtime_nsec; 65 + int st_ctime; /* Time of last status change. */ 66 + unsigned int st_ctime_nsec; 67 + unsigned int __unused4; 68 + unsigned int __unused5; 69 + }; 70 + #endif 71 + 72 + #endif /* __ASM_GENERIC_STAT_H */
+10
include/asm-generic/string.h
···
··· 1 + #ifndef __ASM_GENERIC_STRING_H 2 + #define __ASM_GENERIC_STRING_H 3 + /* 4 + * The kernel provides all required functions in lib/string.c 5 + * 6 + * Architectures probably want to provide at least their own optimized 7 + * memcpy and memset functions though. 8 + */ 9 + 10 + #endif /* __ASM_GENERIC_STRING_H */
+18
include/asm-generic/swab.h
···
··· 1 + #ifndef _ASM_GENERIC_SWAB_H 2 + #define _ASM_GENERIC_SWAB_H 3 + 4 + #include <asm/bitsperlong.h> 5 + 6 + /* 7 + * 32 bit architectures typically (but not always) want to 8 + * set __SWAB_64_THRU_32__. In user space, this is only 9 + * valid if the compiler supports 64 bit data types. 10 + */ 11 + 12 + #if __BITS_PER_LONG == 32 13 + #if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__) 14 + #define __SWAB_64_THRU_32__ 15 + #endif 16 + #endif 17 + 18 + #endif /* _ASM_GENERIC_SWAB_H */
+60
include/asm-generic/syscalls.h
···
··· 1 + #ifndef __ASM_GENERIC_SYSCALLS_H 2 + #define __ASM_GENERIC_SYSCALLS_H 3 + 4 + #include <linux/compiler.h> 5 + #include <linux/linkage.h> 6 + 7 + /* 8 + * Calling conventions for these system calls can differ, so 9 + * it's possible to override them. 10 + */ 11 + #ifndef sys_clone 12 + asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp, 13 + void __user *parent_tid, void __user *child_tid, 14 + struct pt_regs *regs); 15 + #endif 16 + 17 + #ifndef sys_fork 18 + asmlinkage long sys_fork(struct pt_regs *regs); 19 + #endif 20 + 21 + #ifndef sys_vfork 22 + asmlinkage long sys_vfork(struct pt_regs *regs); 23 + #endif 24 + 25 + #ifndef sys_execve 26 + asmlinkage long sys_execve(char __user *filename, char __user * __user *argv, 27 + char __user * __user *envp, struct pt_regs *regs); 28 + #endif 29 + 30 + #ifndef sys_mmap2 31 + asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, 32 + unsigned long prot, unsigned long flags, 33 + unsigned long fd, unsigned long pgoff); 34 + #endif 35 + 36 + #ifndef sys_mmap 37 + asmlinkage long sys_mmap(unsigned long addr, unsigned long len, 38 + unsigned long prot, unsigned long flags, 39 + unsigned long fd, off_t pgoff); 40 + #endif 41 + 42 + #ifndef sys_sigaltstack 43 + asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *, 44 + struct pt_regs *); 45 + #endif 46 + 47 + #ifndef sys_rt_sigreturn 48 + asmlinkage long sys_rt_sigreturn(struct pt_regs *regs); 49 + #endif 50 + 51 + #ifndef sys_rt_sigsuspend 52 + asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize); 53 + #endif 54 + 55 + #ifndef sys_rt_sigaction 56 + asmlinkage long sys_rt_sigaction(int sig, const struct sigaction __user *act, 57 + struct sigaction __user *oact, size_t sigsetsize); 58 + #endif 59 + 60 + #endif /* __ASM_GENERIC_SYSCALLS_H */
+161
include/asm-generic/system.h
···
··· 1 + /* Generic system definitions, based on MN10300 definitions. 2 + * 3 + * It should be possible to use these on really simple architectures, 4 + * but it serves more as a starting point for new ports. 5 + * 6 + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 7 + * Written by David Howells (dhowells@redhat.com) 8 + * 9 + * This program is free software; you can redistribute it and/or 10 + * modify it under the terms of the GNU General Public Licence 11 + * as published by the Free Software Foundation; either version 12 + * 2 of the Licence, or (at your option) any later version. 13 + */ 14 + #ifndef __ASM_GENERIC_SYSTEM_H 15 + #define __ASM_GENERIC_SYSTEM_H 16 + 17 + #ifdef __KERNEL__ 18 + #ifndef __ASSEMBLY__ 19 + 20 + #include <linux/types.h> 21 + #include <linux/irqflags.h> 22 + 23 + #include <asm/cmpxchg-local.h> 24 + 25 + struct task_struct; 26 + 27 + /* context switching is now performed out-of-line in switch_to.S */ 28 + extern struct task_struct *__switch_to(struct task_struct *, 29 + struct task_struct *); 30 + #define switch_to(prev, next, last) \ 31 + do { \ 32 + ((last) = __switch_to((prev), (next))); \ 33 + } while (0) 34 + 35 + #define arch_align_stack(x) (x) 36 + 37 + #define nop() asm volatile ("nop") 38 + 39 + #endif /* !__ASSEMBLY__ */ 40 + 41 + /* 42 + * Force strict CPU ordering. 43 + * And yes, this is required on UP too when we're talking 44 + * to devices. 45 + * 46 + * This implementation only contains a compiler barrier. 
47 + */ 48 + 49 + #define mb() asm volatile ("": : :"memory") 50 + #define rmb() mb() 51 + #define wmb() asm volatile ("": : :"memory") 52 + 53 + #ifdef CONFIG_SMP 54 + #define smp_mb() mb() 55 + #define smp_rmb() rmb() 56 + #define smp_wmb() wmb() 57 + #else 58 + #define smp_mb() barrier() 59 + #define smp_rmb() barrier() 60 + #define smp_wmb() barrier() 61 + #endif 62 + 63 + #define set_mb(var, value) do { var = value; mb(); } while (0) 64 + #define set_wmb(var, value) do { var = value; wmb(); } while (0) 65 + 66 + #define read_barrier_depends() do {} while (0) 67 + #define smp_read_barrier_depends() do {} while (0) 68 + 69 + /* 70 + * we make sure local_irq_enable() doesn't cause priority inversion 71 + */ 72 + #ifndef __ASSEMBLY__ 73 + 74 + /* This function doesn't exist, so you'll get a linker error 75 + * if something tries to do an invalid xchg(). */ 76 + extern void __xchg_called_with_bad_pointer(void); 77 + 78 + static inline 79 + unsigned long __xchg(unsigned long x, volatile void *ptr, int size) 80 + { 81 + unsigned long ret, flags; 82 + 83 + switch (size) { 84 + case 1: 85 + #ifdef __xchg_u8 86 + return __xchg_u8(x, ptr); 87 + #else 88 + local_irq_save(flags); 89 + ret = *(volatile u8 *)ptr; 90 + *(volatile u8 *)ptr = x; 91 + local_irq_restore(flags); 92 + return ret; 93 + #endif /* __xchg_u8 */ 94 + 95 + case 2: 96 + #ifdef __xchg_u16 97 + return __xchg_u16(x, ptr); 98 + #else 99 + local_irq_save(flags); 100 + ret = *(volatile u16 *)ptr; 101 + *(volatile u16 *)ptr = x; 102 + local_irq_restore(flags); 103 + return ret; 104 + #endif /* __xchg_u16 */ 105 + 106 + case 4: 107 + #ifdef __xchg_u32 108 + return __xchg_u32(x, ptr); 109 + #else 110 + local_irq_save(flags); 111 + ret = *(volatile u32 *)ptr; 112 + *(volatile u32 *)ptr = x; 113 + local_irq_restore(flags); 114 + return ret; 115 + #endif /* __xchg_u32 */ 116 + 117 + #ifdef CONFIG_64BIT 118 + case 8: 119 + #ifdef __xchg_u64 120 + return __xchg_u64(x, ptr); 121 + #else 122 + local_irq_save(flags); 123 
+ ret = *(volatile u64 *)ptr; 124 + *(volatile u64 *)ptr = x; 125 + local_irq_restore(flags); 126 + return ret; 127 + #endif /* __xchg_u64 */ 128 + #endif /* CONFIG_64BIT */ 129 + 130 + default: 131 + __xchg_called_with_bad_pointer(); 132 + return x; 133 + } 134 + } 135 + 136 + #define xchg(ptr, x) \ 137 + ((__typeof__(*(ptr))) __xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))) 138 + 139 + static inline unsigned long __cmpxchg(volatile unsigned long *m, 140 + unsigned long old, unsigned long new) 141 + { 142 + unsigned long retval; 143 + unsigned long flags; 144 + 145 + local_irq_save(flags); 146 + retval = *m; 147 + if (retval == old) 148 + *m = new; 149 + local_irq_restore(flags); 150 + return retval; 151 + } 152 + 153 + #define cmpxchg(ptr, o, n) \ 154 + ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \ 155 + (unsigned long)(o), \ 156 + (unsigned long)(n))) 157 + 158 + #endif /* !__ASSEMBLY__ */ 159 + 160 + #endif /* __KERNEL__ */ 161 + #endif /* __ASM_GENERIC_SYSTEM_H */
+198
include/asm-generic/termbits.h
···
··· 1 + #ifndef __ASM_GENERIC_TERMBITS_H 2 + #define __ASM_GENERIC_TERMBITS_H 3 + 4 + #include <linux/posix_types.h> 5 + 6 + typedef unsigned char cc_t; 7 + typedef unsigned int speed_t; 8 + typedef unsigned int tcflag_t; 9 + 10 + #define NCCS 19 11 + struct termios { 12 + tcflag_t c_iflag; /* input mode flags */ 13 + tcflag_t c_oflag; /* output mode flags */ 14 + tcflag_t c_cflag; /* control mode flags */ 15 + tcflag_t c_lflag; /* local mode flags */ 16 + cc_t c_line; /* line discipline */ 17 + cc_t c_cc[NCCS]; /* control characters */ 18 + }; 19 + 20 + struct termios2 { 21 + tcflag_t c_iflag; /* input mode flags */ 22 + tcflag_t c_oflag; /* output mode flags */ 23 + tcflag_t c_cflag; /* control mode flags */ 24 + tcflag_t c_lflag; /* local mode flags */ 25 + cc_t c_line; /* line discipline */ 26 + cc_t c_cc[NCCS]; /* control characters */ 27 + speed_t c_ispeed; /* input speed */ 28 + speed_t c_ospeed; /* output speed */ 29 + }; 30 + 31 + struct ktermios { 32 + tcflag_t c_iflag; /* input mode flags */ 33 + tcflag_t c_oflag; /* output mode flags */ 34 + tcflag_t c_cflag; /* control mode flags */ 35 + tcflag_t c_lflag; /* local mode flags */ 36 + cc_t c_line; /* line discipline */ 37 + cc_t c_cc[NCCS]; /* control characters */ 38 + speed_t c_ispeed; /* input speed */ 39 + speed_t c_ospeed; /* output speed */ 40 + }; 41 + 42 + /* c_cc characters */ 43 + #define VINTR 0 44 + #define VQUIT 1 45 + #define VERASE 2 46 + #define VKILL 3 47 + #define VEOF 4 48 + #define VTIME 5 49 + #define VMIN 6 50 + #define VSWTC 7 51 + #define VSTART 8 52 + #define VSTOP 9 53 + #define VSUSP 10 54 + #define VEOL 11 55 + #define VREPRINT 12 56 + #define VDISCARD 13 57 + #define VWERASE 14 58 + #define VLNEXT 15 59 + #define VEOL2 16 60 + 61 + /* c_iflag bits */ 62 + #define IGNBRK 0000001 63 + #define BRKINT 0000002 64 + #define IGNPAR 0000004 65 + #define PARMRK 0000010 66 + #define INPCK 0000020 67 + #define ISTRIP 0000040 68 + #define INLCR 0000100 69 + #define IGNCR 0000200 70 + 
#define ICRNL 0000400 71 + #define IUCLC 0001000 72 + #define IXON 0002000 73 + #define IXANY 0004000 74 + #define IXOFF 0010000 75 + #define IMAXBEL 0020000 76 + #define IUTF8 0040000 77 + 78 + /* c_oflag bits */ 79 + #define OPOST 0000001 80 + #define OLCUC 0000002 81 + #define ONLCR 0000004 82 + #define OCRNL 0000010 83 + #define ONOCR 0000020 84 + #define ONLRET 0000040 85 + #define OFILL 0000100 86 + #define OFDEL 0000200 87 + #define NLDLY 0000400 88 + #define NL0 0000000 89 + #define NL1 0000400 90 + #define CRDLY 0003000 91 + #define CR0 0000000 92 + #define CR1 0001000 93 + #define CR2 0002000 94 + #define CR3 0003000 95 + #define TABDLY 0014000 96 + #define TAB0 0000000 97 + #define TAB1 0004000 98 + #define TAB2 0010000 99 + #define TAB3 0014000 100 + #define XTABS 0014000 101 + #define BSDLY 0020000 102 + #define BS0 0000000 103 + #define BS1 0020000 104 + #define VTDLY 0040000 105 + #define VT0 0000000 106 + #define VT1 0040000 107 + #define FFDLY 0100000 108 + #define FF0 0000000 109 + #define FF1 0100000 110 + 111 + /* c_cflag bit meaning */ 112 + #define CBAUD 0010017 113 + #define B0 0000000 /* hang up */ 114 + #define B50 0000001 115 + #define B75 0000002 116 + #define B110 0000003 117 + #define B134 0000004 118 + #define B150 0000005 119 + #define B200 0000006 120 + #define B300 0000007 121 + #define B600 0000010 122 + #define B1200 0000011 123 + #define B1800 0000012 124 + #define B2400 0000013 125 + #define B4800 0000014 126 + #define B9600 0000015 127 + #define B19200 0000016 128 + #define B38400 0000017 129 + #define EXTA B19200 130 + #define EXTB B38400 131 + #define CSIZE 0000060 132 + #define CS5 0000000 133 + #define CS6 0000020 134 + #define CS7 0000040 135 + #define CS8 0000060 136 + #define CSTOPB 0000100 137 + #define CREAD 0000200 138 + #define PARENB 0000400 139 + #define PARODD 0001000 140 + #define HUPCL 0002000 141 + #define CLOCAL 0004000 142 + #define CBAUDEX 0010000 143 + #define BOTHER 0010000 144 + #define B57600 0010001 145 
+ #define B115200 0010002 146 + #define B230400 0010003 147 + #define B460800 0010004 148 + #define B500000 0010005 149 + #define B576000 0010006 150 + #define B921600 0010007 151 + #define B1000000 0010010 152 + #define B1152000 0010011 153 + #define B1500000 0010012 154 + #define B2000000 0010013 155 + #define B2500000 0010014 156 + #define B3000000 0010015 157 + #define B3500000 0010016 158 + #define B4000000 0010017 159 + #define CIBAUD 002003600000 /* input baud rate */ 160 + #define CMSPAR 010000000000 /* mark or space (stick) parity */ 161 + #define CRTSCTS 020000000000 /* flow control */ 162 + 163 + #define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */ 164 + 165 + /* c_lflag bits */ 166 + #define ISIG 0000001 167 + #define ICANON 0000002 168 + #define XCASE 0000004 169 + #define ECHO 0000010 170 + #define ECHOE 0000020 171 + #define ECHOK 0000040 172 + #define ECHONL 0000100 173 + #define NOFLSH 0000200 174 + #define TOSTOP 0000400 175 + #define ECHOCTL 0001000 176 + #define ECHOPRT 0002000 177 + #define ECHOKE 0004000 178 + #define FLUSHO 0010000 179 + #define PENDIN 0040000 180 + #define IEXTEN 0100000 181 + 182 + /* tcflow() and TCXONC use these */ 183 + #define TCOOFF 0 184 + #define TCOON 1 185 + #define TCIOFF 2 186 + #define TCION 3 187 + 188 + /* tcflush() and TCFLSH use these */ 189 + #define TCIFLUSH 0 190 + #define TCOFLUSH 1 191 + #define TCIOFLUSH 2 192 + 193 + /* tcsetattr uses these */ 194 + #define TCSANOW 0 195 + #define TCSADRAIN 1 196 + #define TCSAFLUSH 2 197 + 198 + #endif /* __ASM_GENERIC_TERMBITS_H */
+77
include/asm-generic/termios-base.h
···
··· 1 + /* termios.h: generic termios/termio user copying/translation 2 + */ 3 + 4 + #ifndef _ASM_GENERIC_TERMIOS_BASE_H 5 + #define _ASM_GENERIC_TERMIOS_BASE_H 6 + 7 + #include <asm/uaccess.h> 8 + 9 + #ifndef __ARCH_TERMIO_GETPUT 10 + 11 + /* 12 + * Translate a "termio" structure into a "termios". Ugh. 13 + */ 14 + static inline int user_termio_to_kernel_termios(struct ktermios *termios, 15 + struct termio __user *termio) 16 + { 17 + unsigned short tmp; 18 + 19 + if (get_user(tmp, &termio->c_iflag) < 0) 20 + goto fault; 21 + termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp; 22 + 23 + if (get_user(tmp, &termio->c_oflag) < 0) 24 + goto fault; 25 + termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp; 26 + 27 + if (get_user(tmp, &termio->c_cflag) < 0) 28 + goto fault; 29 + termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp; 30 + 31 + if (get_user(tmp, &termio->c_lflag) < 0) 32 + goto fault; 33 + termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp; 34 + 35 + if (get_user(termios->c_line, &termio->c_line) < 0) 36 + goto fault; 37 + 38 + if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0) 39 + goto fault; 40 + 41 + return 0; 42 + 43 + fault: 44 + return -EFAULT; 45 + } 46 + 47 + /* 48 + * Translate a "termios" structure into a "termio". Ugh. 
49 + */ 50 + static inline int kernel_termios_to_user_termio(struct termio __user *termio, 51 + struct ktermios *termios) 52 + { 53 + if (put_user(termios->c_iflag, &termio->c_iflag) < 0 || 54 + put_user(termios->c_oflag, &termio->c_oflag) < 0 || 55 + put_user(termios->c_cflag, &termio->c_cflag) < 0 || 56 + put_user(termios->c_lflag, &termio->c_lflag) < 0 || 57 + put_user(termios->c_line, &termio->c_line) < 0 || 58 + copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0) 59 + return -EFAULT; 60 + 61 + return 0; 62 + } 63 + 64 + #ifndef user_termios_to_kernel_termios 65 + #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) 66 + #endif 67 + 68 + #ifndef kernel_termios_to_user_termios 69 + #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) 70 + #endif 71 + 72 + #define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios)) 73 + #define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios)) 74 + 75 + #endif /* __ARCH_TERMIO_GETPUT */ 76 + 77 + #endif /* _ASM_GENERIC_TERMIOS_BASE_H */
+91 -14
include/asm-generic/termios.h
··· 1 - /* termios.h: generic termios/termio user copying/translation 2 - */ 3 - 4 #ifndef _ASM_GENERIC_TERMIOS_H 5 #define _ASM_GENERIC_TERMIOS_H 6 7 #include <asm/uaccess.h> 8 9 - #ifndef __ARCH_TERMIO_GETPUT 10 11 /* 12 * Translate a "termio" structure into a "termios". Ugh. 13 */ 14 static inline int user_termio_to_kernel_termios(struct ktermios *termios, 15 - struct termio __user *termio) 16 { 17 unsigned short tmp; 18 ··· 111 return 0; 112 } 113 114 - #ifndef user_termios_to_kernel_termios 115 - #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios)) 116 - #endif 117 118 - #ifndef kernel_termios_to_user_termios 119 - #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios)) 120 - #endif 121 122 - #define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios)) 123 - #define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios)) 124 125 - #endif /* __ARCH_TERMIO_GETPUT */ 126 127 #endif /* _ASM_GENERIC_TERMIOS_H */
··· 1 #ifndef _ASM_GENERIC_TERMIOS_H 2 #define _ASM_GENERIC_TERMIOS_H 3 + /* 4 + * Most architectures have straight copies of the x86 code, with 5 + * varying levels of bug fixes on top. Usually it's a good idea 6 + * to use this generic version instead, but be careful to avoid 7 + * ABI changes. 8 + * New architectures should not provide their own version. 9 + */ 10 + 11 + #include <asm/termbits.h> 12 + #include <asm/ioctls.h> 13 + 14 + struct winsize { 15 + unsigned short ws_row; 16 + unsigned short ws_col; 17 + unsigned short ws_xpixel; 18 + unsigned short ws_ypixel; 19 + }; 20 + 21 + #define NCC 8 22 + struct termio { 23 + unsigned short c_iflag; /* input mode flags */ 24 + unsigned short c_oflag; /* output mode flags */ 25 + unsigned short c_cflag; /* control mode flags */ 26 + unsigned short c_lflag; /* local mode flags */ 27 + unsigned char c_line; /* line discipline */ 28 + unsigned char c_cc[NCC]; /* control characters */ 29 + }; 30 + 31 + /* modem lines */ 32 + #define TIOCM_LE 0x001 33 + #define TIOCM_DTR 0x002 34 + #define TIOCM_RTS 0x004 35 + #define TIOCM_ST 0x008 36 + #define TIOCM_SR 0x010 37 + #define TIOCM_CTS 0x020 38 + #define TIOCM_CAR 0x040 39 + #define TIOCM_RNG 0x080 40 + #define TIOCM_DSR 0x100 41 + #define TIOCM_CD TIOCM_CAR 42 + #define TIOCM_RI TIOCM_RNG 43 + #define TIOCM_OUT1 0x2000 44 + #define TIOCM_OUT2 0x4000 45 + #define TIOCM_LOOP 0x8000 46 + 47 + /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ 48 + 49 + #ifdef __KERNEL__ 50 51 #include <asm/uaccess.h> 52 53 + /* intr=^C quit=^\ erase=del kill=^U 54 + eof=^D vtime=\0 vmin=\1 sxtc=\0 55 + start=^Q stop=^S susp=^Z eol=\0 56 + reprint=^R discard=^U werase=^W lnext=^V 57 + eol2=\0 58 + */ 59 + #define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0" 60 61 /* 62 * Translate a "termio" structure into a "termios". Ugh. 
63 */ 64 static inline int user_termio_to_kernel_termios(struct ktermios *termios, 65 + const struct termio __user *termio) 66 { 67 unsigned short tmp; 68 ··· 61 return 0; 62 } 63 64 + #ifdef TCGETS2 65 + static inline int user_termios_to_kernel_termios(struct ktermios *k, 66 + struct termios2 __user *u) 67 + { 68 + return copy_from_user(k, u, sizeof(struct termios2)); 69 + } 70 71 + static inline int kernel_termios_to_user_termios(struct termios2 __user *u, 72 + struct ktermios *k) 73 + { 74 + return copy_to_user(u, k, sizeof(struct termios2)); 75 + } 76 77 + static inline int user_termios_to_kernel_termios_1(struct ktermios *k, 78 + struct termios __user *u) 79 + { 80 + return copy_from_user(k, u, sizeof(struct termios)); 81 + } 82 83 + static inline int kernel_termios_to_user_termios_1(struct termios __user *u, 84 + struct ktermios *k) 85 + { 86 + return copy_to_user(u, k, sizeof(struct termios)); 87 + } 88 + #else /* TCGETS2 */ 89 + static inline int user_termios_to_kernel_termios(struct ktermios *k, 90 + struct termios __user *u) 91 + { 92 + return copy_from_user(k, u, sizeof(struct termios)); 93 + } 94 + 95 + static inline int kernel_termios_to_user_termios(struct termios __user *u, 96 + struct ktermios *k) 97 + { 98 + return copy_to_user(u, k, sizeof(struct termios)); 99 + } 100 + #endif /* TCGETS2 */ 101 + 102 + #endif /* __KERNEL__ */ 103 104 #endif /* _ASM_GENERIC_TERMIOS_H */
+22
include/asm-generic/timex.h
···
··· 1 + #ifndef __ASM_GENERIC_TIMEX_H 2 + #define __ASM_GENERIC_TIMEX_H 3 + 4 + /* 5 + * If you have a cycle counter, return the value here. 6 + */ 7 + typedef unsigned long cycles_t; 8 + #ifndef get_cycles 9 + static inline cycles_t get_cycles(void) 10 + { 11 + return 0; 12 + } 13 + #endif 14 + 15 + /* 16 + * Architectures are encouraged to implement read_current_timer 17 + * and define this in order to avoid the expensive delay loop 18 + * calibration during boot. 19 + */ 20 + #undef ARCH_HAS_READ_CURRENT_TIMER 21 + 22 + #endif /* __ASM_GENERIC_TIMEX_H */
+18
include/asm-generic/tlbflush.h
···
··· 1 + #ifndef __ASM_GENERIC_TLBFLUSH_H 2 + #define __ASM_GENERIC_TLBFLUSH_H 3 + /* 4 + * This is a dummy tlbflush implementation that can be used on all 5 + * nommu architectures. 6 + * If you have an MMU, you need to write your own functions. 7 + */ 8 + #ifdef CONFIG_MMU 9 + #error need to implement an architecture specific asm/tlbflush.h 10 + #endif 11 + 12 + static inline void flush_tlb_mm(struct mm_struct *mm) 13 + { 14 + BUG(); 15 + } 16 + 17 + 18 + #endif /* __ASM_GENERIC_TLBFLUSH_H */
+42
include/asm-generic/types.h
···
··· 1 + #ifndef _ASM_GENERIC_TYPES_H 2 + #define _ASM_GENERIC_TYPES_H 3 + /* 4 + * int-ll64 is used practically everywhere now, 5 + * so use it as a reasonable default. 6 + */ 7 + #include <asm-generic/int-ll64.h> 8 + 9 + #ifndef __ASSEMBLY__ 10 + 11 + typedef unsigned short umode_t; 12 + 13 + #endif /* __ASSEMBLY__ */ 14 + 15 + /* 16 + * These aren't exported outside the kernel to avoid name space clashes 17 + */ 18 + #ifdef __KERNEL__ 19 + #ifndef __ASSEMBLY__ 20 + /* 21 + * DMA addresses may be very different from physical addresses 22 + * and pointers. i386 and powerpc may have 64 bit DMA on 32 bit 23 + * systems, while sparc64 uses 32 bit DMA addresses for 64 bit 24 + * physical addresses. 25 + * This default defines dma_addr_t to have the same size as 26 + * phys_addr_t, which is the most common way. 27 + * Do not define the dma64_addr_t type, which never really 28 + * worked. 29 + */ 30 + #ifndef dma_addr_t 31 + #ifdef CONFIG_PHYS_ADDR_T_64BIT 32 + typedef u64 dma_addr_t; 33 + #else 34 + typedef u32 dma_addr_t; 35 + #endif /* CONFIG_PHYS_ADDR_T_64BIT */ 36 + #endif /* dma_addr_t */ 37 + 38 + #endif /* __ASSEMBLY__ */ 39 + 40 + #endif /* __KERNEL__ */ 41 + 42 + #endif /* _ASM_GENERIC_TYPES_H */
+26
include/asm-generic/uaccess-unaligned.h
···
··· 1 + #ifndef __ASM_GENERIC_UACCESS_UNALIGNED_H 2 + #define __ASM_GENERIC_UACCESS_UNALIGNED_H 3 + 4 + /* 5 + * This macro should be used instead of __get_user() when accessing 6 + * values at locations that are not known to be aligned. 7 + */ 8 + #define __get_user_unaligned(x, ptr) \ 9 + ({ \ 10 + __typeof__ (*(ptr)) __x; \ 11 + __copy_from_user(&__x, (ptr), sizeof(*(ptr))) ? -EFAULT : 0; \ 12 + (x) = __x; \ 13 + }) 14 + 15 + 16 + /* 17 + * This macro should be used instead of __put_user() when accessing 18 + * values at locations that are not known to be aligned. 19 + */ 20 + #define __put_user_unaligned(x, ptr) \ 21 + ({ \ 22 + __typeof__ (*(ptr)) __x = (x); \ 23 + __copy_to_user((ptr), &__x, sizeof(*(ptr))) ? -EFAULT : 0; \ 24 + }) 25 + 26 + #endif /* __ASM_GENERIC_UACCESS_UNALIGNED_H */
+316 -17
include/asm-generic/uaccess.h
··· 1 - #ifndef _ASM_GENERIC_UACCESS_H_ 2 - #define _ASM_GENERIC_UACCESS_H_ 3 4 /* 5 - * This macro should be used instead of __get_user() when accessing 6 - * values at locations that are not known to be aligned. 7 */ 8 - #define __get_user_unaligned(x, ptr) \ 9 - ({ \ 10 - __typeof__ (*(ptr)) __x; \ 11 - __copy_from_user(&__x, (ptr), sizeof(*(ptr))) ? -EFAULT : 0; \ 12 - (x) = __x; \ 13 - }) 14 15 16 /* 17 - * This macro should be used instead of __put_user() when accessing 18 - * values at locations that are not known to be aligned. 19 */ 20 - #define __put_user_unaligned(x, ptr) \ 21 - ({ \ 22 - __typeof__ (*(ptr)) __x = (x); \ 23 - __copy_to_user((ptr), &__x, sizeof(*(ptr))) ? -EFAULT : 0; \ 24 }) 25 26 - #endif /* _ASM_GENERIC_UACCESS_H */
··· 1 + #ifndef __ASM_GENERIC_UACCESS_H 2 + #define __ASM_GENERIC_UACCESS_H 3 4 /* 5 + * User space memory access functions, these should work 6 + * on any machine that has kernel and user data in the same 7 + * address space, e.g. all NOMMU machines. 8 + */ 9 + #include <linux/sched.h> 10 + #include <linux/mm.h> 11 + #include <linux/string.h> 12 13 + #include <asm/segment.h> 14 + 15 + #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) 16 + 17 + #ifndef KERNEL_DS 18 + #define KERNEL_DS MAKE_MM_SEG(~0UL) 19 + #endif 20 + 21 + #ifndef USER_DS 22 + #define USER_DS MAKE_MM_SEG(TASK_SIZE - 1) 23 + #endif 24 + 25 + #ifndef get_fs 26 + #define get_ds() (KERNEL_DS) 27 + #define get_fs() (current_thread_info()->addr_limit) 28 + 29 + static inline void set_fs(mm_segment_t fs) 30 + { 31 + current_thread_info()->addr_limit = fs; 32 + } 33 + #endif 34 + 35 + #define segment_eq(a, b) ((a).seg == (b).seg) 36 + 37 + #define VERIFY_READ 0 38 + #define VERIFY_WRITE 1 39 + 40 + #define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size)) 41 42 /* 43 + * The architecture should really override this if possible, at least 44 + * doing a check on the get_fs() 45 */ 46 + #ifndef __access_ok 47 + static inline int __access_ok(unsigned long addr, unsigned long size) 48 + { 49 + return 1; 50 + } 51 + #endif 52 + 53 + /* 54 + * The exception table consists of pairs of addresses: the first is the 55 + * address of an instruction that is allowed to fault, and the second is 56 + * the address at which the program should continue. No registers are 57 + * modified, so it is entirely up to the continuation code to figure out 58 + * what to do. 59 + * 60 + * All the routines below use bits of fixup code that are out of line 61 + * with the main instruction path. This means when everything is well, 62 + * we don't even have to jump over them. Further, they do not intrude 63 + * on our cache or tlb entries. 
64 + */ 65 + 66 + struct exception_table_entry 67 + { 68 + unsigned long insn, fixup; 69 + }; 70 + 71 + /* Returns 0 if exception not found and fixup otherwise. */ 72 + extern unsigned long search_exception_table(unsigned long); 73 + 74 + /* 75 + * architectures with an MMU should override these two 76 + */ 77 + #ifndef __copy_from_user 78 + static inline __must_check long __copy_from_user(void *to, 79 + const void __user * from, unsigned long n) 80 + { 81 + if (__builtin_constant_p(n)) { 82 + switch(n) { 83 + case 1: 84 + *(u8 *)to = *(u8 __force *)from; 85 + return 0; 86 + case 2: 87 + *(u16 *)to = *(u16 __force *)from; 88 + return 0; 89 + case 4: 90 + *(u32 *)to = *(u32 __force *)from; 91 + return 0; 92 + #ifdef CONFIG_64BIT 93 + case 8: 94 + *(u64 *)to = *(u64 __force *)from; 95 + return 0; 96 + #endif 97 + default: 98 + break; 99 + } 100 + } 101 + 102 + memcpy(to, (const void __force *)from, n); 103 + return 0; 104 + } 105 + #endif 106 + 107 + #ifndef __copy_to_user 108 + static inline __must_check long __copy_to_user(void __user *to, 109 + const void *from, unsigned long n) 110 + { 111 + if (__builtin_constant_p(n)) { 112 + switch(n) { 113 + case 1: 114 + *(u8 __force *)to = *(u8 *)from; 115 + return 0; 116 + case 2: 117 + *(u16 __force *)to = *(u16 *)from; 118 + return 0; 119 + case 4: 120 + *(u32 __force *)to = *(u32 *)from; 121 + return 0; 122 + #ifdef CONFIG_64BIT 123 + case 8: 124 + *(u64 __force *)to = *(u64 *)from; 125 + return 0; 126 + #endif 127 + default: 128 + break; 129 + } 130 + } 131 + 132 + memcpy((void __force *)to, from, n); 133 + return 0; 134 + } 135 + #endif 136 + 137 + /* 138 + * These are the main single-value transfer routines. They automatically 139 + * use the right size if we just have the right pointer type. 140 + * This version just falls back to copy_{from,to}_user, which should 141 + * provide a fast-path for small values. 
142 + */ 143 + #define __put_user(x, ptr) \ 144 + ({ \ 145 + __typeof__(*(ptr)) __x = (x); \ 146 + int __pu_err = -EFAULT; \ 147 + __chk_user_ptr(ptr); \ 148 + switch (sizeof (*(ptr))) { \ 149 + case 1: \ 150 + case 2: \ 151 + case 4: \ 152 + case 8: \ 153 + __pu_err = __put_user_fn(sizeof (*(ptr)), \ 154 + ptr, &__x); \ 155 + break; \ 156 + default: \ 157 + __put_user_bad(); \ 158 + break; \ 159 + } \ 160 + __pu_err; \ 161 }) 162 163 + #define put_user(x, ptr) \ 164 + ({ \ 165 + might_sleep(); \ 166 + __access_ok(ptr, sizeof (*ptr)) ? \ 167 + __put_user(x, ptr) : \ 168 + -EFAULT; \ 169 + }) 170 + 171 + static inline int __put_user_fn(size_t size, void __user *ptr, void *x) 172 + { 173 + size = __copy_to_user(ptr, x, size); 174 + return size ? -EFAULT : size; 175 + } 176 + 177 + extern int __put_user_bad(void) __attribute__((noreturn)); 178 + 179 + #define __get_user(x, ptr) \ 180 + ({ \ 181 + int __gu_err = -EFAULT; \ 182 + __chk_user_ptr(ptr); \ 183 + switch (sizeof(*(ptr))) { \ 184 + case 1: { \ 185 + unsigned char __x; \ 186 + __gu_err = __get_user_fn(sizeof (*(ptr)), \ 187 + ptr, &__x); \ 188 + (x) = *(__force __typeof__(*(ptr)) *) &__x; \ 189 + break; \ 190 + }; \ 191 + case 2: { \ 192 + unsigned short __x; \ 193 + __gu_err = __get_user_fn(sizeof (*(ptr)), \ 194 + ptr, &__x); \ 195 + (x) = *(__force __typeof__(*(ptr)) *) &__x; \ 196 + break; \ 197 + }; \ 198 + case 4: { \ 199 + unsigned int __x; \ 200 + __gu_err = __get_user_fn(sizeof (*(ptr)), \ 201 + ptr, &__x); \ 202 + (x) = *(__force __typeof__(*(ptr)) *) &__x; \ 203 + break; \ 204 + }; \ 205 + case 8: { \ 206 + unsigned long long __x; \ 207 + __gu_err = __get_user_fn(sizeof (*(ptr)), \ 208 + ptr, &__x); \ 209 + (x) = *(__force __typeof__(*(ptr)) *) &__x; \ 210 + break; \ 211 + }; \ 212 + default: \ 213 + __get_user_bad(); \ 214 + break; \ 215 + } \ 216 + __gu_err; \ 217 + }) 218 + 219 + #define get_user(x, ptr) \ 220 + ({ \ 221 + might_sleep(); \ 222 + __access_ok(ptr, sizeof (*ptr)) ? 
\ 223 + __get_user(x, ptr) : \ 224 + -EFAULT; \ 225 + }) 226 + 227 + static inline int __get_user_fn(size_t size, const void __user *ptr, void *x) 228 + { 229 + size = __copy_from_user(x, ptr, size); 230 + return size ? -EFAULT : size; 231 + } 232 + 233 + extern int __get_user_bad(void) __attribute__((noreturn)); 234 + 235 + #ifndef __copy_from_user_inatomic 236 + #define __copy_from_user_inatomic __copy_from_user 237 + #endif 238 + 239 + #ifndef __copy_to_user_inatomic 240 + #define __copy_to_user_inatomic __copy_to_user 241 + #endif 242 + 243 + static inline long copy_from_user(void *to, 244 + const void __user * from, unsigned long n) 245 + { 246 + might_sleep(); 247 + if (__access_ok(from, n)) 248 + return __copy_from_user(to, from, n); 249 + else 250 + return n; 251 + } 252 + 253 + static inline long copy_to_user(void __user *to, 254 + const void *from, unsigned long n) 255 + { 256 + might_sleep(); 257 + if (__access_ok(to, n)) 258 + return __copy_to_user(to, from, n); 259 + else 260 + return n; 261 + } 262 + 263 + /* 264 + * Copy a null terminated string from userspace. 
265 + */ 266 + #ifndef __strncpy_from_user 267 + static inline long 268 + __strncpy_from_user(char *dst, const char __user *src, long count) 269 + { 270 + char *tmp; 271 + strncpy(dst, (const char __force *)src, count); 272 + for (tmp = dst; *tmp && count > 0; tmp++, count--) 273 + ; 274 + return (tmp - dst); 275 + } 276 + #endif 277 + 278 + static inline long 279 + strncpy_from_user(char *dst, const char __user *src, long count) 280 + { 281 + if (!__access_ok(src, 1)) 282 + return -EFAULT; 283 + return __strncpy_from_user(dst, src, count); 284 + } 285 + 286 + /* 287 + * Return the size of a string (including the ending 0) 288 + * 289 + * Return 0 on exception, a value greater than N if too long 290 + */ 291 + #ifndef strnlen_user 292 + static inline long strnlen_user(const char __user *src, long n) 293 + { 294 + return strlen((void * __force)src) + 1; 295 + } 296 + #endif 297 + 298 + static inline long strlen_user(const char __user *src) 299 + { 300 + return strnlen_user(src, 32767); 301 + } 302 + 303 + /* 304 + * Zero Userspace 305 + */ 306 + #ifndef __clear_user 307 + static inline __must_check unsigned long 308 + __clear_user(void __user *to, unsigned long n) 309 + { 310 + memset((void __force *)to, 0, n); 311 + return 0; 312 + } 313 + #endif 314 + 315 + static inline __must_check unsigned long 316 + clear_user(void __user *to, unsigned long n) 317 + { 318 + might_sleep(); 319 + if (!__access_ok(to, n)) 320 + return n; 321 + 322 + return __clear_user(to, n); 323 + } 324 + 325 + #endif /* __ASM_GENERIC_UACCESS_H */
+12
include/asm-generic/ucontext.h
···
··· 1 + #ifndef __ASM_GENERIC_UCONTEXT_H 2 + #define __ASM_GENERIC_UCONTEXT_H 3 + 4 + struct ucontext { 5 + unsigned long uc_flags; 6 + struct ucontext *uc_link; 7 + stack_t uc_stack; 8 + struct sigcontext uc_mcontext; 9 + sigset_t uc_sigmask; /* mask last for extensibility */ 10 + }; 11 + 12 + #endif /* __ASM_GENERIC_UCONTEXT_H */
+30
include/asm-generic/unaligned.h
···
··· 1 + #ifndef __ASM_GENERIC_UNALIGNED_H 2 + #define __ASM_GENERIC_UNALIGNED_H 3 + 4 + /* 5 + * This is the most generic implementation of unaligned accesses 6 + * and should work almost anywhere. 7 + * 8 + * If an architecture can handle unaligned accesses in hardware, 9 + * it may want to use the linux/unaligned/access_ok.h implementation 10 + * instead. 11 + */ 12 + #include <asm/byteorder.h> 13 + 14 + #if defined(__LITTLE_ENDIAN) 15 + # include <linux/unaligned/le_struct.h> 16 + # include <linux/unaligned/be_byteshift.h> 17 + # include <linux/unaligned/generic.h> 18 + # define get_unaligned __get_unaligned_le 19 + # define put_unaligned __put_unaligned_le 20 + #elif defined(__BIG_ENDIAN) 21 + # include <linux/unaligned/be_struct.h> 22 + # include <linux/unaligned/le_byteshift.h> 23 + # include <linux/unaligned/generic.h> 24 + # define get_unaligned __get_unaligned_be 25 + # define put_unaligned __put_unaligned_be 26 + #else 27 + # error need to define endianess 28 + #endif 29 + 30 + #endif /* __ASM_GENERIC_UNALIGNED_H */
+854
include/asm-generic/unistd.h
···
··· 1 + #if !defined(_ASM_GENERIC_UNISTD_H) || defined(__SYSCALL) 2 + #define _ASM_GENERIC_UNISTD_H 3 + 4 + #include <asm/bitsperlong.h> 5 + 6 + /* 7 + * This file contains the system call numbers, based on the 8 + * layout of the x86-64 architecture, which embeds the 9 + * pointer to the syscall in the table. 10 + * 11 + * As a basic principle, no duplication of functionality 12 + * should be added, e.g. we don't use lseek when llseek 13 + * is present. New architectures should use this file 14 + * and implement the less feature-full calls in user space. 15 + */ 16 + 17 + #ifndef __SYSCALL 18 + #define __SYSCALL(x, y) 19 + #endif 20 + 21 + #if __BITS_PER_LONG == 32 22 + #define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _32) 23 + #else 24 + #define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _64) 25 + #endif 26 + 27 + #define __NR_io_setup 0 28 + __SYSCALL(__NR_io_setup, sys_io_setup) 29 + #define __NR_io_destroy 1 30 + __SYSCALL(__NR_io_destroy, sys_io_destroy) 31 + #define __NR_io_submit 2 32 + __SYSCALL(__NR_io_submit, sys_io_submit) 33 + #define __NR_io_cancel 3 34 + __SYSCALL(__NR_io_cancel, sys_io_cancel) 35 + #define __NR_io_getevents 4 36 + __SYSCALL(__NR_io_getevents, sys_io_getevents) 37 + 38 + /* fs/xattr.c */ 39 + #define __NR_setxattr 5 40 + __SYSCALL(__NR_setxattr, sys_setxattr) 41 + #define __NR_lsetxattr 6 42 + __SYSCALL(__NR_lsetxattr, sys_lsetxattr) 43 + #define __NR_fsetxattr 7 44 + __SYSCALL(__NR_fsetxattr, sys_fsetxattr) 45 + #define __NR_getxattr 8 46 + __SYSCALL(__NR_getxattr, sys_getxattr) 47 + #define __NR_lgetxattr 9 48 + __SYSCALL(__NR_lgetxattr, sys_lgetxattr) 49 + #define __NR_fgetxattr 10 50 + __SYSCALL(__NR_fgetxattr, sys_fgetxattr) 51 + #define __NR_listxattr 11 52 + __SYSCALL(__NR_listxattr, sys_listxattr) 53 + #define __NR_llistxattr 12 54 + __SYSCALL(__NR_llistxattr, sys_llistxattr) 55 + #define __NR_flistxattr 13 56 + __SYSCALL(__NR_flistxattr, sys_flistxattr) 57 + #define __NR_removexattr 14 58 + __SYSCALL(__NR_removexattr, 
sys_removexattr) 59 + #define __NR_lremovexattr 15 60 + __SYSCALL(__NR_lremovexattr, sys_lremovexattr) 61 + #define __NR_fremovexattr 16 62 + __SYSCALL(__NR_fremovexattr, sys_fremovexattr) 63 + 64 + /* fs/dcache.c */ 65 + #define __NR_getcwd 17 66 + __SYSCALL(__NR_getcwd, sys_getcwd) 67 + 68 + /* fs/cookies.c */ 69 + #define __NR_lookup_dcookie 18 70 + __SYSCALL(__NR_lookup_dcookie, sys_lookup_dcookie) 71 + 72 + /* fs/eventfd.c */ 73 + #define __NR_eventfd2 19 74 + __SYSCALL(__NR_eventfd2, sys_eventfd2) 75 + 76 + /* fs/eventpoll.c */ 77 + #define __NR_epoll_create1 20 78 + __SYSCALL(__NR_epoll_create1, sys_epoll_create1) 79 + #define __NR_epoll_ctl 21 80 + __SYSCALL(__NR_epoll_ctl, sys_epoll_ctl) 81 + #define __NR_epoll_pwait 22 82 + __SYSCALL(__NR_epoll_pwait, sys_epoll_pwait) 83 + 84 + /* fs/fcntl.c */ 85 + #define __NR_dup 23 86 + __SYSCALL(__NR_dup, sys_dup) 87 + #define __NR_dup3 24 88 + __SYSCALL(__NR_dup3, sys_dup3) 89 + #define __NR3264_fcntl 25 90 + __SC_3264(__NR3264_fcntl, sys_fcntl64, sys_fcntl) 91 + 92 + /* fs/inotify_user.c */ 93 + #define __NR_inotify_init1 26 94 + __SYSCALL(__NR_inotify_init1, sys_inotify_init1) 95 + #define __NR_inotify_add_watch 27 96 + __SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch) 97 + #define __NR_inotify_rm_watch 28 98 + __SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch) 99 + 100 + /* fs/ioctl.c */ 101 + #define __NR_ioctl 29 102 + __SYSCALL(__NR_ioctl, sys_ioctl) 103 + 104 + /* fs/ioprio.c */ 105 + #define __NR_ioprio_set 30 106 + __SYSCALL(__NR_ioprio_set, sys_ioprio_set) 107 + #define __NR_ioprio_get 31 108 + __SYSCALL(__NR_ioprio_get, sys_ioprio_get) 109 + 110 + /* fs/locks.c */ 111 + #define __NR_flock 32 112 + __SYSCALL(__NR_flock, sys_flock) 113 + 114 + /* fs/namei.c */ 115 + #define __NR_mknodat 33 116 + __SYSCALL(__NR_mknodat, sys_mknodat) 117 + #define __NR_mkdirat 34 118 + __SYSCALL(__NR_mkdirat, sys_mkdirat) 119 + #define __NR_unlinkat 35 120 + __SYSCALL(__NR_unlinkat, sys_unlinkat) 121 + #define 
__NR_symlinkat 36 122 + __SYSCALL(__NR_symlinkat, sys_symlinkat) 123 + #define __NR_linkat 37 124 + __SYSCALL(__NR_linkat, sys_linkat) 125 + #define __NR_renameat 38 126 + __SYSCALL(__NR_renameat, sys_renameat) 127 + 128 + /* fs/namespace.c */ 129 + #define __NR_umount2 39 130 + __SYSCALL(__NR_umount2, sys_umount) 131 + #define __NR_mount 40 132 + __SYSCALL(__NR_mount, sys_mount) 133 + #define __NR_pivot_root 41 134 + __SYSCALL(__NR_pivot_root, sys_pivot_root) 135 + 136 + /* fs/nfsctl.c */ 137 + #define __NR_nfsservctl 42 138 + __SYSCALL(__NR_nfsservctl, sys_nfsservctl) 139 + 140 + /* fs/open.c */ 141 + #define __NR3264_statfs 43 142 + __SC_3264(__NR3264_statfs, sys_statfs64, sys_statfs) 143 + #define __NR3264_fstatfs 44 144 + __SC_3264(__NR3264_fstatfs, sys_fstatfs64, sys_fstatfs) 145 + #define __NR3264_truncate 45 146 + __SC_3264(__NR3264_truncate, sys_truncate64, sys_truncate) 147 + #define __NR3264_ftruncate 46 148 + __SC_3264(__NR3264_ftruncate, sys_ftruncate64, sys_ftruncate) 149 + 150 + #define __NR_fallocate 47 151 + __SYSCALL(__NR_fallocate, sys_fallocate) 152 + #define __NR_faccessat 48 153 + __SYSCALL(__NR_faccessat, sys_faccessat) 154 + #define __NR_chdir 49 155 + __SYSCALL(__NR_chdir, sys_chdir) 156 + #define __NR_fchdir 50 157 + __SYSCALL(__NR_fchdir, sys_fchdir) 158 + #define __NR_chroot 51 159 + __SYSCALL(__NR_chroot, sys_chroot) 160 + #define __NR_fchmod 52 161 + __SYSCALL(__NR_fchmod, sys_fchmod) 162 + #define __NR_fchmodat 53 163 + __SYSCALL(__NR_fchmodat, sys_fchmodat) 164 + #define __NR_fchownat 54 165 + __SYSCALL(__NR_fchownat, sys_fchownat) 166 + #define __NR_fchown 55 167 + __SYSCALL(__NR_fchown, sys_fchown) 168 + #define __NR_openat 56 169 + __SYSCALL(__NR_openat, sys_openat) 170 + #define __NR_close 57 171 + __SYSCALL(__NR_close, sys_close) 172 + #define __NR_vhangup 58 173 + __SYSCALL(__NR_vhangup, sys_vhangup) 174 + 175 + /* fs/pipe.c */ 176 + #define __NR_pipe2 59 177 + __SYSCALL(__NR_pipe2, sys_pipe2) 178 + 179 + /* fs/quota.c */ 180 + 
#define __NR_quotactl 60 181 + __SYSCALL(__NR_quotactl, sys_quotactl) 182 + 183 + /* fs/readdir.c */ 184 + #define __NR_getdents64 61 185 + __SYSCALL(__NR_getdents64, sys_getdents64) 186 + 187 + /* fs/read_write.c */ 188 + #define __NR3264_lseek 62 189 + __SC_3264(__NR3264_lseek, sys_llseek, sys_lseek) 190 + #define __NR_read 63 191 + __SYSCALL(__NR_read, sys_read) 192 + #define __NR_write 64 193 + __SYSCALL(__NR_write, sys_write) 194 + #define __NR_readv 65 195 + __SYSCALL(__NR_readv, sys_readv) 196 + #define __NR_writev 66 197 + __SYSCALL(__NR_writev, sys_writev) 198 + #define __NR_pread64 67 199 + __SYSCALL(__NR_pread64, sys_pread64) 200 + #define __NR_pwrite64 68 201 + __SYSCALL(__NR_pwrite64, sys_pwrite64) 202 + #define __NR_preadv 69 203 + __SYSCALL(__NR_preadv, sys_preadv) 204 + #define __NR_pwritev 70 205 + __SYSCALL(__NR_pwritev, sys_pwritev) 206 + 207 + /* fs/sendfile.c */ 208 + #define __NR3264_sendfile 71 209 + __SC_3264(__NR3264_sendfile, sys_sendfile64, sys_sendfile) 210 + 211 + /* fs/select.c */ 212 + #define __NR_pselect6 72 213 + __SYSCALL(__NR_pselect6, sys_pselect6) 214 + #define __NR_ppoll 73 215 + __SYSCALL(__NR_ppoll, sys_ppoll) 216 + 217 + /* fs/signalfd.c */ 218 + #define __NR_signalfd4 74 219 + __SYSCALL(__NR_signalfd4, sys_signalfd4) 220 + 221 + /* fs/splice.c */ 222 + #define __NR_vmsplice 75 223 + __SYSCALL(__NR_vmsplice, sys_vmsplice) 224 + #define __NR_splice 76 225 + __SYSCALL(__NR_splice, sys_splice) 226 + #define __NR_tee 77 227 + __SYSCALL(__NR_tee, sys_tee) 228 + 229 + /* fs/stat.c */ 230 + #define __NR_readlinkat 78 231 + __SYSCALL(__NR_readlinkat, sys_readlinkat) 232 + #define __NR3264_fstatat 79 233 + __SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat) 234 + #define __NR3264_fstat 80 235 + __SC_3264(__NR3264_fstat, sys_fstat64, sys_newfstat) 236 + 237 + /* fs/sync.c */ 238 + #define __NR_sync 81 239 + __SYSCALL(__NR_sync, sys_sync) 240 + #define __NR_fsync 82 241 + __SYSCALL(__NR_fsync, sys_fsync) 242 + #define 
__NR_fdatasync 83 243 + __SYSCALL(__NR_fdatasync, sys_fdatasync) 244 + #define __NR_sync_file_range 84 245 + __SYSCALL(__NR_sync_file_range, sys_sync_file_range) /* .long sys_sync_file_range2, */ 246 + 247 + /* fs/timerfd.c */ 248 + #define __NR_timerfd_create 85 249 + __SYSCALL(__NR_timerfd_create, sys_timerfd_create) 250 + #define __NR_timerfd_settime 86 251 + __SYSCALL(__NR_timerfd_settime, sys_timerfd_settime) 252 + #define __NR_timerfd_gettime 87 253 + __SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime) 254 + 255 + /* fs/utimes.c */ 256 + #define __NR_utimensat 88 257 + __SYSCALL(__NR_utimensat, sys_utimensat) 258 + 259 + /* kernel/acct.c */ 260 + #define __NR_acct 89 261 + __SYSCALL(__NR_acct, sys_acct) 262 + 263 + /* kernel/capability.c */ 264 + #define __NR_capget 90 265 + __SYSCALL(__NR_capget, sys_capget) 266 + #define __NR_capset 91 267 + __SYSCALL(__NR_capset, sys_capset) 268 + 269 + /* kernel/exec_domain.c */ 270 + #define __NR_personality 92 271 + __SYSCALL(__NR_personality, sys_personality) 272 + 273 + /* kernel/exit.c */ 274 + #define __NR_exit 93 275 + __SYSCALL(__NR_exit, sys_exit) 276 + #define __NR_exit_group 94 277 + __SYSCALL(__NR_exit_group, sys_exit_group) 278 + #define __NR_waitid 95 279 + __SYSCALL(__NR_waitid, sys_waitid) 280 + 281 + /* kernel/fork.c */ 282 + #define __NR_set_tid_address 96 283 + __SYSCALL(__NR_set_tid_address, sys_set_tid_address) 284 + #define __NR_unshare 97 285 + __SYSCALL(__NR_unshare, sys_unshare) 286 + 287 + /* kernel/futex.c */ 288 + #define __NR_futex 98 289 + __SYSCALL(__NR_futex, sys_futex) 290 + #define __NR_set_robust_list 99 291 + __SYSCALL(__NR_set_robust_list, sys_set_robust_list) 292 + #define __NR_get_robust_list 100 293 + __SYSCALL(__NR_get_robust_list, sys_get_robust_list) 294 + 295 + /* kernel/hrtimer.c */ 296 + #define __NR_nanosleep 101 297 + __SYSCALL(__NR_nanosleep, sys_nanosleep) 298 + 299 + /* kernel/itimer.c */ 300 + #define __NR_getitimer 102 301 + __SYSCALL(__NR_getitimer, sys_getitimer) 302 
+ #define __NR_setitimer 103 303 + __SYSCALL(__NR_setitimer, sys_setitimer) 304 + 305 + /* kernel/kexec.c */ 306 + #define __NR_kexec_load 104 307 + __SYSCALL(__NR_kexec_load, sys_kexec_load) 308 + 309 + /* kernel/module.c */ 310 + #define __NR_init_module 105 311 + __SYSCALL(__NR_init_module, sys_init_module) 312 + #define __NR_delete_module 106 313 + __SYSCALL(__NR_delete_module, sys_delete_module) 314 + 315 + /* kernel/posix-timers.c */ 316 + #define __NR_timer_create 107 317 + __SYSCALL(__NR_timer_create, sys_timer_create) 318 + #define __NR_timer_gettime 108 319 + __SYSCALL(__NR_timer_gettime, sys_timer_gettime) 320 + #define __NR_timer_getoverrun 109 321 + __SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun) 322 + #define __NR_timer_settime 110 323 + __SYSCALL(__NR_timer_settime, sys_timer_settime) 324 + #define __NR_timer_delete 111 325 + __SYSCALL(__NR_timer_delete, sys_timer_delete) 326 + #define __NR_clock_settime 112 327 + __SYSCALL(__NR_clock_settime, sys_clock_settime) 328 + #define __NR_clock_gettime 113 329 + __SYSCALL(__NR_clock_gettime, sys_clock_gettime) 330 + #define __NR_clock_getres 114 331 + __SYSCALL(__NR_clock_getres, sys_clock_getres) 332 + #define __NR_clock_nanosleep 115 333 + __SYSCALL(__NR_clock_nanosleep, sys_clock_nanosleep) 334 + 335 + /* kernel/printk.c */ 336 + #define __NR_syslog 116 337 + __SYSCALL(__NR_syslog, sys_syslog) 338 + 339 + /* kernel/ptrace.c */ 340 + #define __NR_ptrace 117 341 + __SYSCALL(__NR_ptrace, sys_ptrace) 342 + 343 + /* kernel/sched.c */ 344 + #define __NR_sched_setparam 118 345 + __SYSCALL(__NR_sched_setparam, sys_sched_setparam) 346 + #define __NR_sched_setscheduler 119 347 + __SYSCALL(__NR_sched_setscheduler, sys_sched_setscheduler) 348 + #define __NR_sched_getscheduler 120 349 + __SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler) 350 + #define __NR_sched_getparam 121 351 + __SYSCALL(__NR_sched_getparam, sys_sched_getparam) 352 + #define __NR_sched_setaffinity 122 353 + 
__SYSCALL(__NR_sched_setaffinity, sys_sched_setaffinity) 354 + #define __NR_sched_getaffinity 123 355 + __SYSCALL(__NR_sched_getaffinity, sys_sched_getaffinity) 356 + #define __NR_sched_yield 124 357 + __SYSCALL(__NR_sched_yield, sys_sched_yield) 358 + #define __NR_sched_get_priority_max 125 359 + __SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max) 360 + #define __NR_sched_get_priority_min 126 361 + __SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min) 362 + #define __NR_sched_rr_get_interval 127 363 + __SYSCALL(__NR_sched_rr_get_interval, sys_sched_rr_get_interval) 364 + 365 + /* kernel/signal.c */ 366 + #define __NR_restart_syscall 128 367 + __SYSCALL(__NR_restart_syscall, sys_restart_syscall) 368 + #define __NR_kill 129 369 + __SYSCALL(__NR_kill, sys_kill) 370 + #define __NR_tkill 130 371 + __SYSCALL(__NR_tkill, sys_tkill) 372 + #define __NR_tgkill 131 373 + __SYSCALL(__NR_tgkill, sys_tgkill) 374 + #define __NR_sigaltstack 132 375 + __SYSCALL(__NR_sigaltstack, sys_sigaltstack) 376 + #define __NR_rt_sigsuspend 133 377 + __SYSCALL(__NR_rt_sigsuspend, sys_rt_sigsuspend) /* __ARCH_WANT_SYS_RT_SIGSUSPEND */ 378 + #define __NR_rt_sigaction 134 379 + __SYSCALL(__NR_rt_sigaction, sys_rt_sigaction) /* __ARCH_WANT_SYS_RT_SIGACTION */ 380 + #define __NR_rt_sigprocmask 135 381 + __SYSCALL(__NR_rt_sigprocmask, sys_rt_sigprocmask) 382 + #define __NR_rt_sigpending 136 383 + __SYSCALL(__NR_rt_sigpending, sys_rt_sigpending) 384 + #define __NR_rt_sigtimedwait 137 385 + __SYSCALL(__NR_rt_sigtimedwait, sys_rt_sigtimedwait) 386 + #define __NR_rt_sigqueueinfo 138 387 + __SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo) 388 + #define __NR_rt_sigreturn 139 389 + __SYSCALL(__NR_rt_sigreturn, sys_rt_sigreturn) /* sys_rt_sigreturn_wrapper, */ 390 + 391 + /* kernel/sys.c */ 392 + #define __NR_setpriority 140 393 + __SYSCALL(__NR_setpriority, sys_setpriority) 394 + #define __NR_getpriority 141 395 + __SYSCALL(__NR_getpriority, sys_getpriority) 396 + #define 
__NR_reboot 142 397 + __SYSCALL(__NR_reboot, sys_reboot) 398 + #define __NR_setregid 143 399 + __SYSCALL(__NR_setregid, sys_setregid) 400 + #define __NR_setgid 144 401 + __SYSCALL(__NR_setgid, sys_setgid) 402 + #define __NR_setreuid 145 403 + __SYSCALL(__NR_setreuid, sys_setreuid) 404 + #define __NR_setuid 146 405 + __SYSCALL(__NR_setuid, sys_setuid) 406 + #define __NR_setresuid 147 407 + __SYSCALL(__NR_setresuid, sys_setresuid) 408 + #define __NR_getresuid 148 409 + __SYSCALL(__NR_getresuid, sys_getresuid) 410 + #define __NR_setresgid 149 411 + __SYSCALL(__NR_setresgid, sys_setresgid) 412 + #define __NR_getresgid 150 413 + __SYSCALL(__NR_getresgid, sys_getresgid) 414 + #define __NR_setfsuid 151 415 + __SYSCALL(__NR_setfsuid, sys_setfsuid) 416 + #define __NR_setfsgid 152 417 + __SYSCALL(__NR_setfsgid, sys_setfsgid) 418 + #define __NR_times 153 419 + __SYSCALL(__NR_times, sys_times) 420 + #define __NR_setpgid 154 421 + __SYSCALL(__NR_setpgid, sys_setpgid) 422 + #define __NR_getpgid 155 423 + __SYSCALL(__NR_getpgid, sys_getpgid) 424 + #define __NR_getsid 156 425 + __SYSCALL(__NR_getsid, sys_getsid) 426 + #define __NR_setsid 157 427 + __SYSCALL(__NR_setsid, sys_setsid) 428 + #define __NR_getgroups 158 429 + __SYSCALL(__NR_getgroups, sys_getgroups) 430 + #define __NR_setgroups 159 431 + __SYSCALL(__NR_setgroups, sys_setgroups) 432 + #define __NR_uname 160 433 + __SYSCALL(__NR_uname, sys_newuname) 434 + #define __NR_sethostname 161 435 + __SYSCALL(__NR_sethostname, sys_sethostname) 436 + #define __NR_setdomainname 162 437 + __SYSCALL(__NR_setdomainname, sys_setdomainname) 438 + #define __NR_getrlimit 163 439 + __SYSCALL(__NR_getrlimit, sys_getrlimit) 440 + #define __NR_setrlimit 164 441 + __SYSCALL(__NR_setrlimit, sys_setrlimit) 442 + #define __NR_getrusage 165 443 + __SYSCALL(__NR_getrusage, sys_getrusage) 444 + #define __NR_umask 166 445 + __SYSCALL(__NR_umask, sys_umask) 446 + #define __NR_prctl 167 447 + __SYSCALL(__NR_prctl, sys_prctl) 448 + #define __NR_getcpu 168 
449 + __SYSCALL(__NR_getcpu, sys_getcpu) 450 + 451 + /* kernel/time.c */ 452 + #define __NR_gettimeofday 169 453 + __SYSCALL(__NR_gettimeofday, sys_gettimeofday) 454 + #define __NR_settimeofday 170 455 + __SYSCALL(__NR_settimeofday, sys_settimeofday) 456 + #define __NR_adjtimex 171 457 + __SYSCALL(__NR_adjtimex, sys_adjtimex) 458 + 459 + /* kernel/timer.c */ 460 + #define __NR_getpid 172 461 + __SYSCALL(__NR_getpid, sys_getpid) 462 + #define __NR_getppid 173 463 + __SYSCALL(__NR_getppid, sys_getppid) 464 + #define __NR_getuid 174 465 + __SYSCALL(__NR_getuid, sys_getuid) 466 + #define __NR_geteuid 175 467 + __SYSCALL(__NR_geteuid, sys_geteuid) 468 + #define __NR_getgid 176 469 + __SYSCALL(__NR_getgid, sys_getgid) 470 + #define __NR_getegid 177 471 + __SYSCALL(__NR_getegid, sys_getegid) 472 + #define __NR_gettid 178 473 + __SYSCALL(__NR_gettid, sys_gettid) 474 + #define __NR_sysinfo 179 475 + __SYSCALL(__NR_sysinfo, sys_sysinfo) 476 + 477 + /* ipc/mqueue.c */ 478 + #define __NR_mq_open 180 479 + __SYSCALL(__NR_mq_open, sys_mq_open) 480 + #define __NR_mq_unlink 181 481 + __SYSCALL(__NR_mq_unlink, sys_mq_unlink) 482 + #define __NR_mq_timedsend 182 483 + __SYSCALL(__NR_mq_timedsend, sys_mq_timedsend) 484 + #define __NR_mq_timedreceive 183 485 + __SYSCALL(__NR_mq_timedreceive, sys_mq_timedreceive) 486 + #define __NR_mq_notify 184 487 + __SYSCALL(__NR_mq_notify, sys_mq_notify) 488 + #define __NR_mq_getsetattr 185 489 + __SYSCALL(__NR_mq_getsetattr, sys_mq_getsetattr) 490 + 491 + /* ipc/msg.c */ 492 + #define __NR_msgget 186 493 + __SYSCALL(__NR_msgget, sys_msgget) 494 + #define __NR_msgctl 187 495 + __SYSCALL(__NR_msgctl, sys_msgctl) 496 + #define __NR_msgrcv 188 497 + __SYSCALL(__NR_msgrcv, sys_msgrcv) 498 + #define __NR_msgsnd 189 499 + __SYSCALL(__NR_msgsnd, sys_msgsnd) 500 + 501 + /* ipc/sem.c */ 502 + #define __NR_semget 190 503 + __SYSCALL(__NR_semget, sys_semget) 504 + #define __NR_semctl 191 505 + __SYSCALL(__NR_semctl, sys_semctl) 506 + #define __NR_semtimedop 
192 507 + __SYSCALL(__NR_semtimedop, sys_semtimedop) 508 + #define __NR_semop 193 509 + __SYSCALL(__NR_semop, sys_semop) 510 + 511 + /* ipc/shm.c */ 512 + #define __NR_shmget 194 513 + __SYSCALL(__NR_shmget, sys_shmget) 514 + #define __NR_shmctl 195 515 + __SYSCALL(__NR_shmctl, sys_shmctl) 516 + #define __NR_shmat 196 517 + __SYSCALL(__NR_shmat, sys_shmat) 518 + #define __NR_shmdt 197 519 + __SYSCALL(__NR_shmdt, sys_shmdt) 520 + 521 + /* net/socket.c */ 522 + #define __NR_socket 198 523 + __SYSCALL(__NR_socket, sys_socket) 524 + #define __NR_socketpair 199 525 + __SYSCALL(__NR_socketpair, sys_socketpair) 526 + #define __NR_bind 200 527 + __SYSCALL(__NR_bind, sys_bind) 528 + #define __NR_listen 201 529 + __SYSCALL(__NR_listen, sys_listen) 530 + #define __NR_accept 202 531 + __SYSCALL(__NR_accept, sys_accept) 532 + #define __NR_connect 203 533 + __SYSCALL(__NR_connect, sys_connect) 534 + #define __NR_getsockname 204 535 + __SYSCALL(__NR_getsockname, sys_getsockname) 536 + #define __NR_getpeername 205 537 + __SYSCALL(__NR_getpeername, sys_getpeername) 538 + #define __NR_sendto 206 539 + __SYSCALL(__NR_sendto, sys_sendto) 540 + #define __NR_recvfrom 207 541 + __SYSCALL(__NR_recvfrom, sys_recvfrom) 542 + #define __NR_setsockopt 208 543 + __SYSCALL(__NR_setsockopt, sys_setsockopt) 544 + #define __NR_getsockopt 209 545 + __SYSCALL(__NR_getsockopt, sys_getsockopt) 546 + #define __NR_shutdown 210 547 + __SYSCALL(__NR_shutdown, sys_shutdown) 548 + #define __NR_sendmsg 211 549 + __SYSCALL(__NR_sendmsg, sys_sendmsg) 550 + #define __NR_recvmsg 212 551 + __SYSCALL(__NR_recvmsg, sys_recvmsg) 552 + 553 + /* mm/filemap.c */ 554 + #define __NR_readahead 213 555 + __SYSCALL(__NR_readahead, sys_readahead) 556 + 557 + /* mm/nommu.c, also with MMU */ 558 + #define __NR_brk 214 559 + __SYSCALL(__NR_brk, sys_brk) 560 + #define __NR_munmap 215 561 + __SYSCALL(__NR_munmap, sys_munmap) 562 + #define __NR_mremap 216 563 + __SYSCALL(__NR_mremap, sys_mremap) 564 + 565 + /* 
security/keys/keyctl.c */ 566 + #define __NR_add_key 217 567 + __SYSCALL(__NR_add_key, sys_add_key) 568 + #define __NR_request_key 218 569 + __SYSCALL(__NR_request_key, sys_request_key) 570 + #define __NR_keyctl 219 571 + __SYSCALL(__NR_keyctl, sys_keyctl) 572 + 573 + /* arch/example/kernel/sys_example.c */ 574 + #define __NR_clone 220 575 + __SYSCALL(__NR_clone, sys_clone) /* .long sys_clone_wrapper */ 576 + #define __NR_execve 221 577 + __SYSCALL(__NR_execve, sys_execve) /* .long sys_execve_wrapper */ 578 + 579 + #define __NR3264_mmap 222 580 + __SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap) 581 + /* mm/fadvise.c */ 582 + #define __NR3264_fadvise64 223 583 + __SC_3264(__NR3264_fadvise64, sys_fadvise64_64, sys_fadvise64) 584 + 585 + /* mm/, CONFIG_MMU only */ 586 + #ifndef __ARCH_NOMMU 587 + #define __NR_swapon 224 588 + __SYSCALL(__NR_swapon, sys_swapon) 589 + #define __NR_swapoff 225 590 + __SYSCALL(__NR_swapoff, sys_swapoff) 591 + #define __NR_mprotect 226 592 + __SYSCALL(__NR_mprotect, sys_mprotect) 593 + #define __NR_msync 227 594 + __SYSCALL(__NR_msync, sys_msync) 595 + #define __NR_mlock 228 596 + __SYSCALL(__NR_mlock, sys_mlock) 597 + #define __NR_munlock 229 598 + __SYSCALL(__NR_munlock, sys_munlock) 599 + #define __NR_mlockall 230 600 + __SYSCALL(__NR_mlockall, sys_mlockall) 601 + #define __NR_munlockall 231 602 + __SYSCALL(__NR_munlockall, sys_munlockall) 603 + #define __NR_mincore 232 604 + __SYSCALL(__NR_mincore, sys_mincore) 605 + #define __NR_madvise 233 606 + __SYSCALL(__NR_madvise, sys_madvise) 607 + #define __NR_remap_file_pages 234 608 + __SYSCALL(__NR_remap_file_pages, sys_remap_file_pages) 609 + #define __NR_mbind 235 610 + __SYSCALL(__NR_mbind, sys_mbind) 611 + #define __NR_get_mempolicy 236 612 + __SYSCALL(__NR_get_mempolicy, sys_get_mempolicy) 613 + #define __NR_set_mempolicy 237 614 + __SYSCALL(__NR_set_mempolicy, sys_set_mempolicy) 615 + #define __NR_migrate_pages 238 616 + __SYSCALL(__NR_migrate_pages, sys_migrate_pages) 617 + #define 
__NR_move_pages 239 618 + __SYSCALL(__NR_move_pages, sys_move_pages) 619 + #endif 620 + 621 + #undef __NR_syscalls 622 + #define __NR_syscalls 240 623 + 624 + /* 625 + * All syscalls below here should go away really, 626 + * these are provided for both review and as a porting 627 + * help for the C library version. 628 + * 629 + * Last chance: are any of these important enough to 630 + * enable by default? 631 + */ 632 + #ifdef __ARCH_WANT_SYSCALL_NO_AT 633 + #define __NR_open 1024 634 + __SYSCALL(__NR_open, sys_open) 635 + #define __NR_link 1025 636 + __SYSCALL(__NR_link, sys_link) 637 + #define __NR_unlink 1026 638 + __SYSCALL(__NR_unlink, sys_unlink) 639 + #define __NR_mknod 1027 640 + __SYSCALL(__NR_mknod, sys_mknod) 641 + #define __NR_chmod 1028 642 + __SYSCALL(__NR_chmod, sys_chmod) 643 + #define __NR_chown 1029 644 + __SYSCALL(__NR_chown, sys_chown) 645 + #define __NR_mkdir 1030 646 + __SYSCALL(__NR_mkdir, sys_mkdir) 647 + #define __NR_rmdir 1031 648 + __SYSCALL(__NR_rmdir, sys_rmdir) 649 + #define __NR_lchown 1032 650 + __SYSCALL(__NR_lchown, sys_lchown) 651 + #define __NR_access 1033 652 + __SYSCALL(__NR_access, sys_access) 653 + #define __NR_rename 1034 654 + __SYSCALL(__NR_rename, sys_rename) 655 + #define __NR_readlink 1035 656 + __SYSCALL(__NR_readlink, sys_readlink) 657 + #define __NR_symlink 1036 658 + __SYSCALL(__NR_symlink, sys_symlink) 659 + #define __NR_utimes 1037 660 + __SYSCALL(__NR_utimes, sys_utimes) 661 + #define __NR3264_stat 1038 662 + __SC_3264(__NR3264_stat, sys_stat64, sys_newstat) 663 + #define __NR3264_lstat 1039 664 + __SC_3264(__NR3264_lstat, sys_lstat64, sys_newlstat) 665 + 666 + #undef __NR_syscalls 667 + #define __NR_syscalls (__NR3264_lstat+1) 668 + #endif /* __ARCH_WANT_SYSCALL_NO_AT */ 669 + 670 + #ifdef __ARCH_WANT_SYSCALL_NO_FLAGS 671 + #define __NR_pipe 1040 672 + __SYSCALL(__NR_pipe, sys_pipe) 673 + #define __NR_dup2 1041 674 + __SYSCALL(__NR_dup2, sys_dup2) 675 + #define __NR_epoll_create 1042 676 + 
__SYSCALL(__NR_epoll_create, sys_epoll_create) 677 + #define __NR_inotify_init 1043 678 + __SYSCALL(__NR_inotify_init, sys_inotify_init) 679 + #define __NR_eventfd 1044 680 + __SYSCALL(__NR_eventfd, sys_eventfd) 681 + #define __NR_signalfd 1045 682 + __SYSCALL(__NR_signalfd, sys_signalfd) 683 + 684 + #undef __NR_syscalls 685 + #define __NR_syscalls (__NR_signalfd+1) 686 + #endif /* __ARCH_WANT_SYSCALL_NO_FLAGS */ 687 + 688 + #if __BITS_PER_LONG == 32 && defined(__ARCH_WANT_SYSCALL_OFF_T) 689 + #define __NR_sendfile 1046 690 + __SYSCALL(__NR_sendfile, sys_sendfile) 691 + #define __NR_ftruncate 1047 692 + __SYSCALL(__NR_ftruncate, sys_ftruncate) 693 + #define __NR_truncate 1048 694 + __SYSCALL(__NR_truncate, sys_truncate) 695 + #define __NR_stat 1049 696 + __SYSCALL(__NR_stat, sys_newstat) 697 + #define __NR_lstat 1050 698 + __SYSCALL(__NR_lstat, sys_newlstat) 699 + #define __NR_fstat 1051 700 + __SYSCALL(__NR_fstat, sys_newfstat) 701 + #define __NR_fcntl 1052 702 + __SYSCALL(__NR_fcntl, sys_fcntl) 703 + #define __NR_fadvise64 1053 704 + #define __ARCH_WANT_SYS_FADVISE64 705 + __SYSCALL(__NR_fadvise64, sys_fadvise64) 706 + #define __NR_newfstatat 1054 707 + #define __ARCH_WANT_SYS_NEWFSTATAT 708 + __SYSCALL(__NR_newfstatat, sys_newfstatat) 709 + #define __NR_fstatfs 1055 710 + __SYSCALL(__NR_fstatfs, sys_fstatfs) 711 + #define __NR_statfs 1056 712 + __SYSCALL(__NR_statfs, sys_statfs) 713 + #define __NR_lseek 1057 714 + __SYSCALL(__NR_lseek, sys_lseek) 715 + #define __NR_mmap 1058 716 + __SYSCALL(__NR_mmap, sys_mmap) 717 + 718 + #undef __NR_syscalls 719 + #define __NR_syscalls (__NR_mmap+1) 720 + #endif /* 32 bit off_t syscalls */ 721 + 722 + #ifdef __ARCH_WANT_SYSCALL_DEPRECATED 723 + #define __NR_alarm 1059 724 + #define __ARCH_WANT_SYS_ALARM 725 + __SYSCALL(__NR_alarm, sys_alarm) 726 + #define __NR_getpgrp 1060 727 + #define __ARCH_WANT_SYS_GETPGRP 728 + __SYSCALL(__NR_getpgrp, sys_getpgrp) 729 + #define __NR_pause 1061 730 + #define __ARCH_WANT_SYS_PAUSE 731 + 
__SYSCALL(__NR_pause, sys_pause) 732 + #define __NR_time 1062 733 + #define __ARCH_WANT_SYS_TIME 734 + __SYSCALL(__NR_time, sys_time) 735 + #define __NR_utime 1063 736 + #define __ARCH_WANT_SYS_UTIME 737 + __SYSCALL(__NR_utime, sys_utime) 738 + 739 + #define __NR_creat 1064 740 + __SYSCALL(__NR_creat, sys_creat) 741 + #define __NR_getdents 1065 742 + #define __ARCH_WANT_SYS_GETDENTS 743 + __SYSCALL(__NR_getdents, sys_getdents) 744 + #define __NR_futimesat 1066 745 + __SYSCALL(__NR_futimesat, sys_futimesat) 746 + #define __NR_select 1067 747 + #define __ARCH_WANT_SYS_SELECT 748 + __SYSCALL(__NR_select, sys_select) 749 + #define __NR_poll 1068 750 + __SYSCALL(__NR_poll, sys_poll) 751 + #define __NR_epoll_wait 1069 752 + __SYSCALL(__NR_epoll_wait, sys_epoll_wait) 753 + #define __NR_ustat 1070 754 + __SYSCALL(__NR_ustat, sys_ustat) 755 + #define __NR_vfork 1071 756 + __SYSCALL(__NR_vfork, sys_vfork) 757 + #define __NR_wait4 1072 758 + __SYSCALL(__NR_wait4, sys_wait4) 759 + #define __NR_recv 1073 760 + __SYSCALL(__NR_recv, sys_recv) 761 + #define __NR_send 1074 762 + __SYSCALL(__NR_send, sys_send) 763 + #define __NR_bdflush 1075 764 + __SYSCALL(__NR_bdflush, sys_bdflush) 765 + #define __NR_umount 1076 766 + __SYSCALL(__NR_umount, sys_oldumount) 767 + #define __ARCH_WANT_SYS_OLDUMOUNT 768 + #define __NR_uselib 1077 769 + __SYSCALL(__NR_uselib, sys_uselib) 770 + #define __NR__sysctl 1078 771 + __SYSCALL(__NR__sysctl, sys_sysctl) 772 + 773 + #define __NR_fork 1079 774 + #ifdef CONFIG_MMU 775 + __SYSCALL(__NR_fork, sys_fork) 776 + #else 777 + __SYSCALL(__NR_fork, sys_ni_syscall) 778 + #endif /* CONFIG_MMU */ 779 + 780 + #undef __NR_syscalls 781 + #define __NR_syscalls (__NR_fork+1) 782 + 783 + #endif /* __ARCH_WANT_SYSCALL_DEPRECATED */ 784 + 785 + /* 786 + * 32 bit systems traditionally used different 787 + * syscalls for off_t and loff_t arguments, while 788 + * 64 bit systems only need the off_t version. 
789 + * For new 32 bit platforms, there is no need to 790 + * implement the old 32 bit off_t syscalls, so 791 + * they take different names. 792 + * Here we map the numbers so that both versions 793 + * use the same syscall table layout. 794 + */ 795 + #if __BITS_PER_LONG == 64 796 + #define __NR_fcntl __NR3264_fcntl 797 + #define __NR_statfs __NR3264_statfs 798 + #define __NR_fstatfs __NR3264_fstatfs 799 + #define __NR_truncate __NR3264_truncate 800 + #define __NR_ftruncate __NR3264_truncate 801 + #define __NR_lseek __NR3264_lseek 802 + #define __NR_sendfile __NR3264_sendfile 803 + #define __NR_newfstatat __NR3264_fstatat 804 + #define __NR_fstat __NR3264_fstat 805 + #define __NR_mmap __NR3264_mmap 806 + #define __NR_fadvise64 __NR3264_fadvise64 807 + #ifdef __NR3264_stat 808 + #define __NR_stat __NR3264_stat 809 + #define __NR_lstat __NR3264_lstat 810 + #endif 811 + #else 812 + #define __NR_fcntl64 __NR3264_fcntl 813 + #define __NR_statfs64 __NR3264_statfs 814 + #define __NR_fstatfs64 __NR3264_fstatfs 815 + #define __NR_truncate64 __NR3264_truncate 816 + #define __NR_ftruncate64 __NR3264_truncate 817 + #define __NR_llseek __NR3264_lseek 818 + #define __NR_sendfile64 __NR3264_sendfile 819 + #define __NR_fstatat64 __NR3264_fstatat 820 + #define __NR_fstat64 __NR3264_fstat 821 + #define __NR_mmap2 __NR3264_mmap 822 + #define __NR_fadvise64_64 __NR3264_fadvise64 823 + #ifdef __NR3264_stat 824 + #define __NR_stat64 __NR3264_stat 825 + #define __NR_lstat64 __NR3264_lstat 826 + #endif 827 + #endif 828 + 829 + #ifdef __KERNEL__ 830 + 831 + /* 832 + * These are required system calls, we should 833 + * invert the logic eventually and let them 834 + * be selected by default. 
835 + */ 836 + #if __BITS_PER_LONG == 32 837 + #define __ARCH_WANT_STAT64 838 + #define __ARCH_WANT_SYS_LLSEEK 839 + #endif 840 + #define __ARCH_WANT_SYS_RT_SIGACTION 841 + #define __ARCH_WANT_SYS_RT_SIGSUSPEND 842 + 843 + /* 844 + * "Conditional" syscalls 845 + * 846 + * What we want is __attribute__((weak,alias("sys_ni_syscall"))), 847 + * but it doesn't work on all toolchains, so we just do it by hand 848 + */ 849 + #ifndef cond_syscall 850 + #define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall") 851 + #endif 852 + 853 + #endif /* __KERNEL__ */ 854 + #endif /* _ASM_GENERIC_UNISTD_H */
+8
include/asm-generic/user.h
···
··· 1 + #ifndef __ASM_GENERIC_USER_H 2 + #define __ASM_GENERIC_USER_H 3 + /* 4 + * This file may define a 'struct user' structure. However, it is only 5 + * used for a.out files, which are not supported on new architectures. 6 + */ 7 + 8 + #endif /* __ASM_GENERIC_USER_H */
+24
include/asm-generic/vga.h
···
··· 1 + /* 2 + * Access to VGA videoram 3 + * 4 + * (c) 1998 Martin Mares <mj@ucw.cz> 5 + */ 6 + #ifndef __ASM_GENERIC_VGA_H 7 + #define __ASM_GENERIC_VGA_H 8 + 9 + /* 10 + * On most architectures that support VGA, we can just 11 + * recalculate addresses and then access the videoram 12 + * directly without any black magic. 13 + * 14 + * Everyone else needs to ioremap the address and use 15 + * proper I/O accesses. 16 + */ 17 + #ifndef VGA_MAP_MEM 18 + #define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x) 19 + #endif 20 + 21 + #define vga_readb(x) (*(x)) 22 + #define vga_writeb(x, y) (*(y) = (x)) 23 + 24 + #endif /* __ASM_GENERIC_VGA_H */
+2
lib/Makefile
··· 93 94 obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o 95 96 hostprogs-y := gen_crc32table 97 clean-files := crc32table.h 98
··· 93 94 obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o 95 96 + obj-$(CONFIG_GENERIC_CSUM) += checksum.o 97 + 98 hostprogs-y := gen_crc32table 99 clean-files := crc32table.h 100
+193
lib/checksum.c
···
··· 1 + /* 2 + * 3 + * INET An implementation of the TCP/IP protocol suite for the LINUX 4 + * operating system. INET is implemented using the BSD Socket 5 + * interface as the means of communication with the user level. 6 + * 7 + * IP/TCP/UDP checksumming routines 8 + * 9 + * Authors: Jorge Cwik, <jorge@laser.satlink.net> 10 + * Arnt Gulbrandsen, <agulbra@nvg.unit.no> 11 + * Tom May, <ftom@netcom.com> 12 + * Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de> 13 + * Lots of code moved from tcp.c and ip.c; see those files 14 + * for more names. 15 + * 16 + * 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek: 17 + * Fixed some nasty bugs, causing some horrible crashes. 18 + * A: At some points, the sum (%0) was used as 19 + * length-counter instead of the length counter 20 + * (%1). Thanks to Roman Hodek for pointing this out. 21 + * B: GCC seems to mess up if one uses too many 22 + * data-registers to hold input values and one tries to 23 + * specify d0 and d1 as scratch registers. Letting gcc 24 + * choose these registers itself solves the problem. 25 + * 26 + * This program is free software; you can redistribute it and/or 27 + * modify it under the terms of the GNU General Public License 28 + * as published by the Free Software Foundation; either version 29 + * 2 of the License, or (at your option) any later version. 30 + */ 31 + 32 + /* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access 33 + kills, so most of the assembly has to go. */ 34 + 35 + #include <linux/module.h> 36 + #include <net/checksum.h> 37 + 38 + #include <asm/byteorder.h> 39 + 40 + static inline unsigned short from32to16(unsigned long x) 41 + { 42 + /* add up 16-bit and 16-bit for 16+c bit */ 43 + x = (x & 0xffff) + (x >> 16); 44 + /* add up carry.. 
*/ 45 + x = (x & 0xffff) + (x >> 16); 46 + return x; 47 + } 48 + 49 + static unsigned int do_csum(const unsigned char *buff, int len) 50 + { 51 + int odd, count; 52 + unsigned long result = 0; 53 + 54 + if (len <= 0) 55 + goto out; 56 + odd = 1 & (unsigned long) buff; 57 + if (odd) { 58 + result = *buff; 59 + len--; 60 + buff++; 61 + } 62 + count = len >> 1; /* nr of 16-bit words.. */ 63 + if (count) { 64 + if (2 & (unsigned long) buff) { 65 + result += *(unsigned short *) buff; 66 + count--; 67 + len -= 2; 68 + buff += 2; 69 + } 70 + count >>= 1; /* nr of 32-bit words.. */ 71 + if (count) { 72 + unsigned long carry = 0; 73 + do { 74 + unsigned long w = *(unsigned long *) buff; 75 + count--; 76 + buff += 4; 77 + result += carry; 78 + result += w; 79 + carry = (w > result); 80 + } while (count); 81 + result += carry; 82 + result = (result & 0xffff) + (result >> 16); 83 + } 84 + if (len & 2) { 85 + result += *(unsigned short *) buff; 86 + buff += 2; 87 + } 88 + } 89 + if (len & 1) 90 + result += (*buff << 8); 91 + result = from32to16(result); 92 + if (odd) 93 + result = ((result >> 8) & 0xff) | ((result & 0xff) << 8); 94 + out: 95 + return result; 96 + } 97 + 98 + /* 99 + * This is a version of ip_compute_csum() optimized for IP headers, 100 + * which always checksum on 4 octet boundaries. 
101 + */ 102 + __sum16 ip_fast_csum(const void *iph, unsigned int ihl) 103 + { 104 + return (__force __sum16)~do_csum(iph, ihl*4); 105 + } 106 + EXPORT_SYMBOL(ip_fast_csum); 107 + 108 + /* 109 + * computes the checksum of a memory block at buff, length len, 110 + * and adds in "sum" (32-bit) 111 + * 112 + * returns a 32-bit number suitable for feeding into itself 113 + * or csum_tcpudp_magic 114 + * 115 + * this function must be called with even lengths, except 116 + * for the last fragment, which may be odd 117 + * 118 + * it's best to have buff aligned on a 32-bit boundary 119 + */ 120 + __wsum csum_partial(const void *buff, int len, __wsum wsum) 121 + { 122 + unsigned int sum = (__force unsigned int)wsum; 123 + unsigned int result = do_csum(buff, len); 124 + 125 + /* add in old sum, and carry.. */ 126 + result += sum; 127 + if (sum > result) 128 + result += 1; 129 + return (__force __wsum)result; 130 + } 131 + EXPORT_SYMBOL(csum_partial); 132 + 133 + /* 134 + * this routine is used for miscellaneous IP-like checksums, mainly 135 + * in icmp.c 136 + */ 137 + __sum16 ip_compute_csum(const void *buff, int len) 138 + { 139 + return (__force __sum16)~do_csum(buff, len); 140 + } 141 + EXPORT_SYMBOL(ip_compute_csum); 142 + 143 + /* 144 + * copy from fs while checksumming, otherwise like csum_partial 145 + */ 146 + __wsum 147 + csum_partial_copy_from_user(const void __user *src, void *dst, int len, 148 + __wsum sum, int *csum_err) 149 + { 150 + int missing; 151 + 152 + missing = __copy_from_user(dst, src, len); 153 + if (missing) { 154 + memset(dst + len - missing, 0, missing); 155 + *csum_err = -EFAULT; 156 + } else 157 + *csum_err = 0; 158 + 159 + return csum_partial(dst, len, sum); 160 + } 161 + EXPORT_SYMBOL(csum_partial_copy_from_user); 162 + 163 + /* 164 + * copy from ds while checksumming, otherwise like csum_partial 165 + */ 166 + __wsum 167 + csum_partial_copy(const void *src, void *dst, int len, __wsum sum) 168 + { 169 + memcpy(dst, src, len); 170 + return 
csum_partial(dst, len, sum); 171 + } 172 + EXPORT_SYMBOL(csum_partial_copy); 173 + 174 + #ifndef csum_tcpudp_nofold 175 + __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, 176 + unsigned short len, 177 + unsigned short proto, 178 + __wsum sum) 179 + { 180 + unsigned long long s = (__force u32)sum; 181 + 182 + s += (__force u32)saddr; 183 + s += (__force u32)daddr; 184 + #ifdef __BIG_ENDIAN 185 + s += proto + len; 186 + #else 187 + s += (proto + len) << 8; 188 + #endif 189 + s += (s >> 32); 190 + return (__force __wsum)s; 191 + } 192 + EXPORT_SYMBOL(csum_tcpudp_nofold); 193 + #endif
+89 -3
scripts/checksyscalls.sh
··· 14 #include <asm/types.h> 15 #include <asm/unistd.h> 16 17 /* System calls for 32-bit kernels only */ 18 #if BITS_PER_LONG == 64 19 #define __IGNORE_sendfile64 ··· 78 #define __IGNORE_fstatat64 79 #define __IGNORE_fstatfs64 80 #define __IGNORE_statfs64 81 #endif 82 83 /* i386-specific or historical system calls */ ··· 111 #define __IGNORE_idle 112 #define __IGNORE_modify_ldt 113 #define __IGNORE_ugetrlimit 114 - #define __IGNORE_mmap2 115 #define __IGNORE_vm86 116 #define __IGNORE_vm86old 117 #define __IGNORE_set_thread_area ··· 121 #define __IGNORE_oldlstat 122 #define __IGNORE_oldolduname 123 #define __IGNORE_olduname 124 - #define __IGNORE_umount2 125 #define __IGNORE_umount 126 #define __IGNORE_waitpid 127 #define __IGNORE_stime ··· 140 #define __IGNORE__llseek 141 #define __IGNORE__newselect 142 #define __IGNORE_create_module 143 - #define __IGNORE_delete_module 144 #define __IGNORE_query_module 145 #define __IGNORE_get_kernel_syms 146 /* ... including the "new" 32-bit uid syscalls */ 147 #define __IGNORE_lchown32 148 #define __IGNORE_getuid32 ··· 166 #define __IGNORE_setgid32 167 #define __IGNORE_setfsuid32 168 #define __IGNORE_setfsgid32 169 170 /* sync_file_range had a stupid ABI. Allow sync_file_range2 instead */ 171 #ifdef __NR_sync_file_range2
··· 14 #include <asm/types.h> 15 #include <asm/unistd.h> 16 17 + /* *at */ 18 + #define __IGNORE_open /* openat */ 19 + #define __IGNORE_link /* linkat */ 20 + #define __IGNORE_unlink /* unlinkat */ 21 + #define __IGNORE_mknod /* mknodat */ 22 + #define __IGNORE_chmod /* fchmodat */ 23 + #define __IGNORE_chown /* fchownat */ 24 + #define __IGNORE_mkdir /* mkdirat */ 25 + #define __IGNORE_rmdir /* unlinkat */ 26 + #define __IGNORE_lchown /* fchownat */ 27 + #define __IGNORE_access /* faccessat */ 28 + #define __IGNORE_rename /* renameat */ 29 + #define __IGNORE_readlink /* readlinkat */ 30 + #define __IGNORE_symlink /* symlinkat */ 31 + #define __IGNORE_utimes /* futimesat */ 32 + #if BITS_PER_LONG == 64 33 + #define __IGNORE_stat /* fstatat */ 34 + #define __IGNORE_lstat /* fstatat */ 35 + #else 36 + #define __IGNORE_stat64 /* fstatat64 */ 37 + #define __IGNORE_lstat64 /* fstatat64 */ 38 + #endif 39 + 40 + /* CLOEXEC flag */ 41 + #define __IGNORE_pipe /* pipe2 */ 42 + #define __IGNORE_dup2 /* dup3 */ 43 + #define __IGNORE_epoll_create /* epoll_create1 */ 44 + #define __IGNORE_inotify_init /* inotify_init1 */ 45 + #define __IGNORE_eventfd /* eventfd2 */ 46 + #define __IGNORE_signalfd /* signalfd4 */ 47 + 48 + /* MMU */ 49 + #ifndef CONFIG_MMU 50 + #define __IGNORE_madvise 51 + #define __IGNORE_mbind 52 + #define __IGNORE_mincore 53 + #define __IGNORE_mlock 54 + #define __IGNORE_mlockall 55 + #define __IGNORE_munlock 56 + #define __IGNORE_munlockall 57 + #define __IGNORE_mprotect 58 + #define __IGNORE_msync 59 + #define __IGNORE_migrate_pages 60 + #define __IGNORE_move_pages 61 + #define __IGNORE_remap_file_pages 62 + #define __IGNORE_get_mempolicy 63 + #define __IGNORE_set_mempolicy 64 + #define __IGNORE_swapoff 65 + #define __IGNORE_swapon 66 + #endif 67 + 68 /* System calls for 32-bit kernels only */ 69 #if BITS_PER_LONG == 64 70 #define __IGNORE_sendfile64 ··· 27 #define __IGNORE_fstatat64 28 #define __IGNORE_fstatfs64 29 #define __IGNORE_statfs64 30 + #define 
__IGNORE_llseek 31 + #define __IGNORE_mmap2 32 + #else 33 + #define __IGNORE_sendfile 34 + #define __IGNORE_ftruncate 35 + #define __IGNORE_truncate 36 + #define __IGNORE_stat 37 + #define __IGNORE_lstat 38 + #define __IGNORE_fstat 39 + #define __IGNORE_fcntl 40 + #define __IGNORE_fadvise64 41 + #define __IGNORE_newfstatat 42 + #define __IGNORE_fstatfs 43 + #define __IGNORE_statfs 44 + #define __IGNORE_lseek 45 + #define __IGNORE_mmap 46 #endif 47 48 /* i386-specific or historical system calls */ ··· 44 #define __IGNORE_idle 45 #define __IGNORE_modify_ldt 46 #define __IGNORE_ugetrlimit 47 #define __IGNORE_vm86 48 #define __IGNORE_vm86old 49 #define __IGNORE_set_thread_area ··· 55 #define __IGNORE_oldlstat 56 #define __IGNORE_oldolduname 57 #define __IGNORE_olduname 58 #define __IGNORE_umount 59 #define __IGNORE_waitpid 60 #define __IGNORE_stime ··· 75 #define __IGNORE__llseek 76 #define __IGNORE__newselect 77 #define __IGNORE_create_module 78 #define __IGNORE_query_module 79 #define __IGNORE_get_kernel_syms 80 + #define __IGNORE_sysfs 81 + #define __IGNORE_uselib 82 + #define __IGNORE__sysctl 83 + 84 /* ... 
including the "new" 32-bit uid syscalls */ 85 #define __IGNORE_lchown32 86 #define __IGNORE_getuid32 ··· 98 #define __IGNORE_setgid32 99 #define __IGNORE_setfsuid32 100 #define __IGNORE_setfsgid32 101 + 102 + /* these can be expressed using other calls */ 103 + #define __IGNORE_alarm /* setitimer */ 104 + #define __IGNORE_creat /* open */ 105 + #define __IGNORE_fork /* clone */ 106 + #define __IGNORE_futimesat /* utimensat */ 107 + #define __IGNORE_getpgrp /* getpgid */ 108 + #define __IGNORE_getdents /* getdents64 */ 109 + #define __IGNORE_pause /* sigsuspend */ 110 + #define __IGNORE_poll /* ppoll */ 111 + #define __IGNORE_select /* pselect6 */ 112 + #define __IGNORE_epoll_wait /* epoll_pwait */ 113 + #define __IGNORE_time /* gettimeofday */ 114 + #define __IGNORE_uname /* newuname */ 115 + #define __IGNORE_ustat /* statfs */ 116 + #define __IGNORE_utime /* utimes */ 117 + #define __IGNORE_vfork /* clone */ 118 + #define __IGNORE_wait4 /* waitid */ 119 120 /* sync_file_range had a stupid ABI. Allow sync_file_range2 instead */ 121 #ifdef __NR_sync_file_range2