Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86: create _types.h counterparts for page*.h

Signed-off-by: Jeremy Fitzhardinge <jeremy@goop.org>

Authored and committed by Jeremy Fitzhardinge
51c78eb3 1484096c

+389 -248
+3 -61
arch/x86/include/asm/page.h
··· 1 1 #ifndef _ASM_X86_PAGE_H 2 2 #define _ASM_X86_PAGE_H 3 3 4 - #include <linux/const.h> 5 - 6 - /* PAGE_SHIFT determines the page size */ 7 - #define PAGE_SHIFT 12 8 - #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) 9 - #define PAGE_MASK (~(PAGE_SIZE-1)) 10 - 11 4 #ifdef __KERNEL__ 12 5 13 - #define __PHYSICAL_MASK ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1) 14 - #define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) 15 - 16 - /* Cast PAGE_MASK to a signed type so that it is sign-extended if 17 - virtual addresses are 32-bits but physical addresses are larger 18 - (ie, 32-bit PAE). */ 19 - #define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK) 20 - 21 - /* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */ 22 - #define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK) 23 - 24 - /* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */ 25 - #define PTE_FLAGS_MASK (~PTE_PFN_MASK) 26 - 27 - #define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) 28 - #define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) 29 - 30 - #define HPAGE_SHIFT PMD_SHIFT 31 - #define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) 32 - #define HPAGE_MASK (~(HPAGE_SIZE - 1)) 33 - #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 34 - 35 - #define HUGE_MAX_HSTATE 2 36 - 37 - #ifndef __ASSEMBLY__ 38 - #include <linux/types.h> 39 - #endif 6 + #include <asm/page_types.h> 40 7 41 8 #ifdef CONFIG_X86_64 42 9 #include <asm/page_64.h> ··· 11 44 #include <asm/page_32.h> 12 45 #endif /* CONFIG_X86_64 */ 13 46 14 - #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) 15 - 16 - #define VM_DATA_DEFAULT_FLAGS \ 17 - (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ 18 - VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 19 - 20 - 21 47 #ifndef __ASSEMBLY__ 22 - 23 - typedef struct { pgdval_t pgd; } pgd_t; 24 - typedef struct { pgprotval_t pgprot; } pgprot_t; 25 - 26 - extern int page_is_ram(unsigned long pagenr); 27 - extern int pagerange_is_ram(unsigned long start, unsigned long end); 28 - extern int devmem_is_allowed(unsigned long pagenr); 29 - extern void map_devmem(unsigned long pfn, unsigned long size, 30 - pgprot_t vma_prot); 31 - extern void unmap_devmem(unsigned long pfn, unsigned long size, 32 - pgprot_t vma_prot); 33 - 34 - extern unsigned long max_low_pfn_mapped; 35 - extern unsigned long max_pfn_mapped; 36 48 37 49 struct page; 38 50 39 51 static inline void clear_user_page(void *page, unsigned long vaddr, 40 - struct page *pg) 52 + struct page *pg) 41 53 { 42 54 clear_page(page); 43 55 } 44 56 45 57 static inline void copy_user_page(void *to, void *from, unsigned long vaddr, 46 - struct page *topage) 58 + struct page *topage) 47 59 { 48 60 copy_page(to, from); 49 61 } ··· 48 102 49 103 #if PAGETABLE_LEVELS >= 3 50 104 #if PAGETABLE_LEVELS == 4 51 - typedef struct { pudval_t pud; } pud_t; 52 - 53 105 static inline pud_t native_make_pud(pmdval_t val) 54 106 { 55 107 return (pud_t) { val }; ··· 70 126 { 71 127 return native_pud_val(pud) & PTE_FLAGS_MASK; 72 128 } 73 - 74 - typedef struct { pmdval_t pmd; } pmd_t; 75 129 76 130 static inline pmd_t native_make_pmd(pmdval_t val) 77 131 {
+2 -87
arch/x86/include/asm/page_32.h
··· 1 1 #ifndef _ASM_X86_PAGE_32_H 2 2 #define _ASM_X86_PAGE_32_H 3 3 4 - /* 5 - * This handles the memory map. 6 - * 7 - * A __PAGE_OFFSET of 0xC0000000 means that the kernel has 8 - * a virtual address space of one gigabyte, which limits the 9 - * amount of physical memory you can use to about 950MB. 10 - * 11 - * If you want more physical memory than this then see the CONFIG_HIGHMEM4G 12 - * and CONFIG_HIGHMEM64G options in the kernel configuration. 13 - */ 14 - #define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) 15 - 16 - #ifdef CONFIG_4KSTACKS 17 - #define THREAD_ORDER 0 18 - #else 19 - #define THREAD_ORDER 1 20 - #endif 21 - #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) 22 - 23 - #define STACKFAULT_STACK 0 24 - #define DOUBLEFAULT_STACK 1 25 - #define NMI_STACK 0 26 - #define DEBUG_STACK 0 27 - #define MCE_STACK 0 28 - #define N_EXCEPTION_STACKS 1 29 - 30 - #ifdef CONFIG_X86_PAE 31 - /* 44=32+12, the limit we can fit into an unsigned long pfn */ 32 - #define __PHYSICAL_MASK_SHIFT 44 33 - #define __VIRTUAL_MASK_SHIFT 32 34 - #define PAGETABLE_LEVELS 3 35 - 36 - #ifndef __ASSEMBLY__ 37 - typedef u64 pteval_t; 38 - typedef u64 pmdval_t; 39 - typedef u64 pudval_t; 40 - typedef u64 pgdval_t; 41 - typedef u64 pgprotval_t; 42 - 43 - typedef union { 44 - struct { 45 - unsigned long pte_low, pte_high; 46 - }; 47 - pteval_t pte; 48 - } pte_t; 49 - #endif /* __ASSEMBLY__ 50 - */ 51 - #else /* !CONFIG_X86_PAE */ 52 - #define __PHYSICAL_MASK_SHIFT 32 53 - #define __VIRTUAL_MASK_SHIFT 32 54 - #define PAGETABLE_LEVELS 2 55 - 56 - #ifndef __ASSEMBLY__ 57 - typedef unsigned long pteval_t; 58 - typedef unsigned long pmdval_t; 59 - typedef unsigned long pudval_t; 60 - typedef unsigned long pgdval_t; 61 - typedef unsigned long pgprotval_t; 62 - 63 - typedef union { 64 - pteval_t pte; 65 - pteval_t pte_low; 66 - } pte_t; 67 - 68 - #endif /* __ASSEMBLY__ */ 69 - #endif /* CONFIG_X86_PAE */ 70 - 71 - #ifndef __ASSEMBLY__ 72 - typedef struct page *pgtable_t; 73 - #endif 4 + #include <asm/page_32_types.h> 74 5 75 6 #ifdef CONFIG_HUGETLB_PAGE 76 7 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA 77 8 #endif 78 9 79 - #ifndef __ASSEMBLY__ 80 10 #define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET) 81 11 #ifdef CONFIG_DEBUG_VIRTUAL 82 12 extern unsigned long __phys_addr(unsigned long); ··· 19 89 #define pfn_valid(pfn) ((pfn) < max_mapnr) 20 90 #endif /* CONFIG_FLATMEM */ 21 91 22 - extern int nx_enabled; 23 - 24 - /* 25 - * This much address space is reserved for vmalloc() and iomap() 26 - * as well as fixmap mappings. 27 - */ 28 - extern unsigned int __VMALLOC_RESERVE; 29 - extern int sysctl_legacy_va_layout; 30 - 31 - extern void find_low_pfn_range(void); 32 - extern unsigned long init_memory_mapping(unsigned long start, 33 - unsigned long end); 34 - extern void initmem_init(unsigned long, unsigned long); 35 - extern void free_initmem(void); 36 - extern void setup_bootmem_allocator(void); 37 - 92 + #ifndef __ASSEMBLY__ 38 93 39 94 #ifdef CONFIG_X86_USE_3DNOW 40 95 #include <asm/mmx.h>
+90
arch/x86/include/asm/page_32_types.h
··· 1 + #ifndef _ASM_X86_PAGE_32_DEFS_H 2 + #define _ASM_X86_PAGE_32_DEFS_H 3 + 4 + #include <linux/const.h> 5 + 6 + /* 7 + * This handles the memory map. 8 + * 9 + * A __PAGE_OFFSET of 0xC0000000 means that the kernel has 10 + * a virtual address space of one gigabyte, which limits the 11 + * amount of physical memory you can use to about 950MB. 12 + * 13 + * If you want more physical memory than this then see the CONFIG_HIGHMEM4G 14 + * and CONFIG_HIGHMEM64G options in the kernel configuration. 15 + */ 16 + #define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) 17 + 18 + #ifdef CONFIG_4KSTACKS 19 + #define THREAD_ORDER 0 20 + #else 21 + #define THREAD_ORDER 1 22 + #endif 23 + #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) 24 + 25 + #define STACKFAULT_STACK 0 26 + #define DOUBLEFAULT_STACK 1 27 + #define NMI_STACK 0 28 + #define DEBUG_STACK 0 29 + #define MCE_STACK 0 30 + #define N_EXCEPTION_STACKS 1 31 + 32 + #ifdef CONFIG_X86_PAE 33 + /* 44=32+12, the limit we can fit into an unsigned long pfn */ 34 + #define __PHYSICAL_MASK_SHIFT 44 35 + #define __VIRTUAL_MASK_SHIFT 32 36 + #define PAGETABLE_LEVELS 3 37 + 38 + #else /* !CONFIG_X86_PAE */ 39 + #define __PHYSICAL_MASK_SHIFT 32 40 + #define __VIRTUAL_MASK_SHIFT 32 41 + #define PAGETABLE_LEVELS 2 42 + #endif /* CONFIG_X86_PAE */ 43 + 44 + #ifndef __ASSEMBLY__ 45 + 46 + #ifdef CONFIG_X86_PAE 47 + typedef u64 pteval_t; 48 + typedef u64 pmdval_t; 49 + typedef u64 pudval_t; 50 + typedef u64 pgdval_t; 51 + typedef u64 pgprotval_t; 52 + 53 + typedef union { 54 + struct { 55 + unsigned long pte_low, pte_high; 56 + }; 57 + pteval_t pte; 58 + } pte_t; 59 + #else /* !CONFIG_X86_PAE */ 60 + typedef unsigned long pteval_t; 61 + typedef unsigned long pmdval_t; 62 + typedef unsigned long pudval_t; 63 + typedef unsigned long pgdval_t; 64 + typedef unsigned long pgprotval_t; 65 + 66 + typedef union { 67 + pteval_t pte; 68 + pteval_t pte_low; 69 + } pte_t; 70 + #endif /* CONFIG_X86_PAE */ 71 + 72 + extern int nx_enabled; 73 + 74 + /* 75 + * This much address space is reserved for vmalloc() and iomap() 76 + * as well as fixmap mappings. 77 + */ 78 + extern unsigned int __VMALLOC_RESERVE; 79 + extern int sysctl_legacy_va_layout; 80 + 81 + extern void find_low_pfn_range(void); 82 + extern unsigned long init_memory_mapping(unsigned long start, 83 + unsigned long end); 84 + extern void initmem_init(unsigned long, unsigned long); 85 + extern void free_initmem(void); 86 + extern void setup_bootmem_allocator(void); 87 + 88 + #endif /* !__ASSEMBLY__ */ 89 + 90 + #endif /* _ASM_X86_PAGE_32_DEFS_H */
+1 -100
arch/x86/include/asm/page_64.h
··· 1 1 #ifndef _ASM_X86_PAGE_64_H 2 2 #define _ASM_X86_PAGE_64_H 3 3 4 - #define PAGETABLE_LEVELS 4 5 - 6 - #define THREAD_ORDER 1 7 - #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) 8 - #define CURRENT_MASK (~(THREAD_SIZE - 1)) 9 - 10 - #define EXCEPTION_STACK_ORDER 0 11 - #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) 12 - 13 - #define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1) 14 - #define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER) 15 - 16 - #define IRQ_STACK_ORDER 2 17 - #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER) 18 - 19 - #define STACKFAULT_STACK 1 20 - #define DOUBLEFAULT_STACK 2 21 - #define NMI_STACK 3 22 - #define DEBUG_STACK 4 23 - #define MCE_STACK 5 24 - #define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ 25 - 26 - #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) 27 - #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) 28 - 29 - /* 30 - * Set __PAGE_OFFSET to the most negative possible address + 31 - * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a 32 - * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's 33 - * what Xen requires. 34 - */ 35 - #define __PAGE_OFFSET _AC(0xffff880000000000, UL) 36 - 37 - #define __PHYSICAL_START CONFIG_PHYSICAL_START 38 - #define __KERNEL_ALIGN 0x200000 39 - 40 - /* 41 - * Make sure kernel is aligned to 2MB address. Catching it at compile 42 - * time is better. Change your config file and compile the kernel 43 - * for a 2MB aligned address (CONFIG_PHYSICAL_START) 44 - */ 45 - #if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0 46 - #error "CONFIG_PHYSICAL_START must be a multiple of 2MB" 47 - #endif 48 - 49 - #define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START) 50 - #define __START_KERNEL_map _AC(0xffffffff80000000, UL) 51 - 52 - /* See Documentation/x86_64/mm.txt for a description of the memory map. */ 53 - #define __PHYSICAL_MASK_SHIFT 46 54 - #define __VIRTUAL_MASK_SHIFT 48 55 - 56 - /* 57 - * Kernel image size is limited to 512 MB (see level2_kernel_pgt in 58 - * arch/x86/kernel/head_64.S), and it is mapped here: 59 - */ 60 - #define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) 61 - #define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL) 62 - 63 - #ifndef __ASSEMBLY__ 64 - void clear_page(void *page); 65 - void copy_page(void *to, void *from); 66 - 67 - /* duplicated to the one in bootmem.h */ 68 - extern unsigned long max_pfn; 69 - extern unsigned long phys_base; 70 - 71 - extern unsigned long __phys_addr(unsigned long); 72 - #define __phys_reloc_hide(x) (x) 73 - 74 - /* 75 - * These are used to make use of C type-checking.. 76 - */ 77 - typedef unsigned long pteval_t; 78 - typedef unsigned long pmdval_t; 79 - typedef unsigned long pudval_t; 80 - typedef unsigned long pgdval_t; 81 - typedef unsigned long pgprotval_t; 82 - 83 - typedef struct page *pgtable_t; 84 - 85 - typedef struct { pteval_t pte; } pte_t; 86 - 87 - #define vmemmap ((struct page *)VMEMMAP_START) 88 - 89 - extern unsigned long init_memory_mapping(unsigned long start, 90 - unsigned long end); 91 - 92 - extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn); 93 - extern void free_initmem(void); 94 - 95 - extern void init_extra_mapping_uc(unsigned long phys, unsigned long size); 96 - extern void init_extra_mapping_wb(unsigned long phys, unsigned long size); 97 - 98 - #endif /* !__ASSEMBLY__ */ 99 - 100 - #ifdef CONFIG_FLATMEM 101 - #define pfn_valid(pfn) ((pfn) < max_pfn) 102 - #endif 103 - 4 + #include <asm/page_64_types.h> 104 5 105 6 #endif /* _ASM_X86_PAGE_64_H */
+114
arch/x86/include/asm/page_64.h.rej
··· 1 + *************** 2 + *** 1,105 **** 3 + #ifndef _ASM_X86_PAGE_64_H 4 + #define _ASM_X86_PAGE_64_H 5 + 6 + - #define PAGETABLE_LEVELS 4 7 + - 8 + - #define THREAD_ORDER 1 9 + - #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) 10 + - #define CURRENT_MASK (~(THREAD_SIZE - 1)) 11 + - 12 + - #define EXCEPTION_STACK_ORDER 0 13 + - #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) 14 + - 15 + - #define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1) 16 + - #define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER) 17 + - 18 + - #define IRQSTACK_ORDER 2 19 + - #define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER) 20 + - 21 + - #define STACKFAULT_STACK 1 22 + - #define DOUBLEFAULT_STACK 2 23 + - #define NMI_STACK 3 24 + - #define DEBUG_STACK 4 25 + - #define MCE_STACK 5 26 + - #define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ 27 + - 28 + - #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) 29 + - #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) 30 + - 31 + - /* 32 + - * Set __PAGE_OFFSET to the most negative possible address + 33 + - * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a 34 + - * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's 35 + - * what Xen requires. 36 + - */ 37 + - #define __PAGE_OFFSET _AC(0xffff880000000000, UL) 38 + - 39 + - #define __PHYSICAL_START CONFIG_PHYSICAL_START 40 + - #define __KERNEL_ALIGN 0x200000 41 + - 42 + - /* 43 + - * Make sure kernel is aligned to 2MB address. Catching it at compile 44 + - * time is better. Change your config file and compile the kernel 45 + - * for a 2MB aligned address (CONFIG_PHYSICAL_START) 46 + - */ 47 + - #if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0 48 + - #error "CONFIG_PHYSICAL_START must be a multiple of 2MB" 49 + - #endif 50 + - 51 + - #define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START) 52 + - #define __START_KERNEL_map _AC(0xffffffff80000000, UL) 53 + - 54 + - /* See Documentation/x86_64/mm.txt for a description of the memory map. */ 55 + - #define __PHYSICAL_MASK_SHIFT 46 56 + - #define __VIRTUAL_MASK_SHIFT 48 57 + - 58 + - /* 59 + - * Kernel image size is limited to 512 MB (see level2_kernel_pgt in 60 + - * arch/x86/kernel/head_64.S), and it is mapped here: 61 + - */ 62 + - #define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) 63 + - #define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL) 64 + - 65 + - #ifndef __ASSEMBLY__ 66 + - void clear_page(void *page); 67 + - void copy_page(void *to, void *from); 68 + - 69 + - /* duplicated to the one in bootmem.h */ 70 + - extern unsigned long max_pfn; 71 + - extern unsigned long phys_base; 72 + - 73 + - extern unsigned long __phys_addr(unsigned long); 74 + - #define __phys_reloc_hide(x) (x) 75 + - 76 + - /* 77 + - * These are used to make use of C type-checking.. 78 + - */ 79 + - typedef unsigned long pteval_t; 80 + - typedef unsigned long pmdval_t; 81 + - typedef unsigned long pudval_t; 82 + - typedef unsigned long pgdval_t; 83 + - typedef unsigned long pgprotval_t; 84 + - 85 + - typedef struct page *pgtable_t; 86 + - 87 + - typedef struct { pteval_t pte; } pte_t; 88 + - 89 + - #define vmemmap ((struct page *)VMEMMAP_START) 90 + - 91 + - extern unsigned long init_memory_mapping(unsigned long start, 92 + - unsigned long end); 93 + - 94 + - extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn); 95 + - extern void free_initmem(void); 96 + - 97 + - extern void init_extra_mapping_uc(unsigned long phys, unsigned long size); 98 + - extern void init_extra_mapping_wb(unsigned long phys, unsigned long size); 99 + - 100 + - #endif /* !__ASSEMBLY__ */ 101 + - 102 + - #ifdef CONFIG_FLATMEM 103 + - #define pfn_valid(pfn) ((pfn) < max_pfn) 104 + - #endif 105 + - 106 + 107 + #endif /* _ASM_X86_PAGE_64_H */ 108 + --- 1,6 ---- 109 + #ifndef _ASM_X86_PAGE_64_H 110 + #define _ASM_X86_PAGE_64_H 111 + 112 + + #include <asm/page_64_types.h> 113 + 114 + #endif /* _ASM_X86_PAGE_64_H */
+102
arch/x86/include/asm/page_64_types.h
··· 1 + #ifndef _ASM_X86_PAGE_64_DEFS_H 2 + #define _ASM_X86_PAGE_64_DEFS_H 3 + 4 + #define PAGETABLE_LEVELS 4 5 + 6 + #define THREAD_ORDER 1 7 + #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) 8 + #define CURRENT_MASK (~(THREAD_SIZE - 1)) 9 + 10 + #define EXCEPTION_STACK_ORDER 0 11 + #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER) 12 + 13 + #define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1) 14 + #define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER) 15 + 16 + #define IRQ_STACK_ORDER 2 17 + #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER) 18 + 19 + #define STACKFAULT_STACK 1 20 + #define DOUBLEFAULT_STACK 2 21 + #define NMI_STACK 3 22 + #define DEBUG_STACK 4 23 + #define MCE_STACK 5 24 + #define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ 25 + 26 + #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) 27 + #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) 28 + 29 + /* 30 + * Set __PAGE_OFFSET to the most negative possible address + 31 + * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a 32 + * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's 33 + * what Xen requires. 34 + */ 35 + #define __PAGE_OFFSET _AC(0xffff880000000000, UL) 36 + 37 + #define __PHYSICAL_START CONFIG_PHYSICAL_START 38 + #define __KERNEL_ALIGN 0x200000 39 + 40 + /* 41 + * Make sure kernel is aligned to 2MB address. Catching it at compile 42 + * time is better. Change your config file and compile the kernel 43 + * for a 2MB aligned address (CONFIG_PHYSICAL_START) 44 + */ 45 + #if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0 46 + #error "CONFIG_PHYSICAL_START must be a multiple of 2MB" 47 + #endif 48 + 49 + #define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START) 50 + #define __START_KERNEL_map _AC(0xffffffff80000000, UL) 51 + 52 + /* See Documentation/x86_64/mm.txt for a description of the memory map. */ 53 + #define __PHYSICAL_MASK_SHIFT 46 54 + #define __VIRTUAL_MASK_SHIFT 48 55 + 56 + /* 57 + * Kernel image size is limited to 512 MB (see level2_kernel_pgt in 58 + * arch/x86/kernel/head_64.S), and it is mapped here: 59 + */ 60 + #define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) 61 + #define KERNEL_IMAGE_START _AC(0xffffffff80000000, UL) 62 + 63 + #ifndef __ASSEMBLY__ 64 + void clear_page(void *page); 65 + void copy_page(void *to, void *from); 66 + 67 + /* duplicated to the one in bootmem.h */ 68 + extern unsigned long max_pfn; 69 + extern unsigned long phys_base; 70 + 71 + extern unsigned long __phys_addr(unsigned long); 72 + #define __phys_reloc_hide(x) (x) 73 + 74 + /* 75 + * These are used to make use of C type-checking.. 76 + */ 77 + typedef unsigned long pteval_t; 78 + typedef unsigned long pmdval_t; 79 + typedef unsigned long pudval_t; 80 + typedef unsigned long pgdval_t; 81 + typedef unsigned long pgprotval_t; 82 + 83 + typedef struct { pteval_t pte; } pte_t; 84 + 85 + #define vmemmap ((struct page *)VMEMMAP_START) 86 + 87 + extern unsigned long init_memory_mapping(unsigned long start, 88 + unsigned long end); 89 + 90 + extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn); 91 + extern void free_initmem(void); 92 + 93 + extern void init_extra_mapping_uc(unsigned long phys, unsigned long size); 94 + extern void init_extra_mapping_wb(unsigned long phys, unsigned long size); 95 + 96 + #endif /* !__ASSEMBLY__ */ 97 + 98 + #ifdef CONFIG_FLATMEM 99 + #define pfn_valid(pfn) ((pfn) < max_pfn) 100 + #endif 101 + 102 + #endif /* _ASM_X86_PAGE_64_DEFS_H */
+77
arch/x86/include/asm/page_types.h
··· 1 + #ifndef _ASM_X86_PAGE_DEFS_H 2 + #define _ASM_X86_PAGE_DEFS_H 3 + 4 + #include <linux/const.h> 5 + 6 + /* PAGE_SHIFT determines the page size */ 7 + #define PAGE_SHIFT 12 8 + #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) 9 + #define PAGE_MASK (~(PAGE_SIZE-1)) 10 + 11 + #define __PHYSICAL_MASK ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1) 12 + #define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) 13 + 14 + /* Cast PAGE_MASK to a signed type so that it is sign-extended if 15 + virtual addresses are 32-bits but physical addresses are larger 16 + (ie, 32-bit PAE). */ 17 + #define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK) 18 + 19 + /* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */ 20 + #define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK) 21 + 22 + /* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */ 23 + #define PTE_FLAGS_MASK (~PTE_PFN_MASK) 24 + 25 + #define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) 26 + #define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1)) 27 + 28 + #define HPAGE_SHIFT PMD_SHIFT 29 + #define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) 30 + #define HPAGE_MASK (~(HPAGE_SIZE - 1)) 31 + #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 32 + 33 + #define HUGE_MAX_HSTATE 2 34 + 35 + #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) 36 + 37 + #define VM_DATA_DEFAULT_FLAGS \ 38 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \ 39 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) 40 + 41 + #ifdef CONFIG_X86_64 42 + #include <asm/page_64_types.h> 43 + #else 44 + #include <asm/page_32_types.h> 45 + #endif /* CONFIG_X86_64 */ 46 + 47 + #ifndef __ASSEMBLY__ 48 + 49 + #include <linux/types.h> 50 + 51 + typedef struct { pgdval_t pgd; } pgd_t; 52 + typedef struct { pgprotval_t pgprot; } pgprot_t; 53 + 54 + #if PAGETABLE_LEVELS > 3 55 + typedef struct { pudval_t pud; } pud_t; 56 + #endif 57 + 58 + #if PAGETABLE_LEVELS > 2 59 + typedef struct { pmdval_t pmd; } pmd_t; 60 + #endif 61 + 62 + typedef struct page *pgtable_t; 63 + 64 + extern int page_is_ram(unsigned long pagenr); 65 + extern int pagerange_is_ram(unsigned long start, unsigned long end); 66 + extern int devmem_is_allowed(unsigned long pagenr); 67 + extern void map_devmem(unsigned long pfn, unsigned long size, 68 + pgprot_t vma_prot); 69 + extern void unmap_devmem(unsigned long pfn, unsigned long size, 70 + pgprot_t vma_prot); 71 + 72 + extern unsigned long max_low_pfn_mapped; 73 + extern unsigned long max_pfn_mapped; 74 + 75 + #endif /* !__ASSEMBLY__ */ 76 + 77 + #endif /* _ASM_X86_PAGE_DEFS_H */