Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

[PATCH] powerpc: Merge page.h

Merge asm-ppc/page.h and asm-ppc64/page.h into asm-powerpc/page.h,
asm-powerpc/page_32.h and asm-powerpc/page_64.h

Built for PPC (common_defconfig) with ARCH=powerpc, and mostly built
with ARCH=ppc (other things break that build). Built and booted on a
P5 LPAR for PPC64 with both ARCH=ppc and ARCH=powerpc
(pseries_defconfig). Mostly built for iSeries powerpc.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>

Authored by Michael Ellerman, committed by Paul Mackerras
5cd16ee9 ee90f62b

3 files changed, 391 insertions(+)
include/asm-powerpc/page.h (new file, +179 lines)

#ifndef _ASM_POWERPC_PAGE_H
#define _ASM_POWERPC_PAGE_H

/*
 * Copyright (C) 2001,2005 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifdef __KERNEL__
#include <linux/config.h>
#include <asm/asm-compat.h>

/*
 * On PPC32 page size is 4K. For PPC64 we support either 4K or 64K software
 * page size. When using 64K pages however, whether we are really supporting
 * 64K pages in HW or not is irrelevant to those definitions.
 */
#ifdef CONFIG_PPC_64K_PAGES
#define PAGE_SHIFT		16
#else
#define PAGE_SHIFT		12
#endif

#define PAGE_SIZE		(ASM_CONST(1) << PAGE_SHIFT)

/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
#define __HAVE_ARCH_GATE_AREA		1

/*
 * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
 * assign PAGE_MASK to a larger type it gets extended the way we want
 * (i.e. with 1s in the high bits)
 */
#define PAGE_MASK	(~((1 << PAGE_SHIFT) - 1))

#define PAGE_OFFSET	ASM_CONST(CONFIG_KERNEL_START)
#define KERNELBASE	PAGE_OFFSET

#ifdef CONFIG_DISCONTIGMEM
#define page_to_pfn(page)	discontigmem_page_to_pfn(page)
#define pfn_to_page(pfn)	discontigmem_pfn_to_page(pfn)
#define pfn_valid(pfn)		discontigmem_pfn_valid(pfn)
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_page(pfn)	(mem_map + (pfn))
#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
#define pfn_valid(pfn)		((pfn) < max_mapnr)
#endif

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)

/*
 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
 * and needs to be executable. This means the whole heap ends
 * up being executable.
 */
#define VM_DATA_DEFAULT_FLAGS32	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_DATA_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#ifdef __powerpc64__
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif

/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr,size)	(((addr)+((size)-1))&(~((size)-1)))
#define _ALIGN_DOWN(addr,size)	((addr)&(~((size)-1)))

/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr,size)	_ALIGN_UP(addr,size)

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	_ALIGN(addr, PAGE_SIZE)

#ifndef __ASSEMBLY__

#undef STRICT_MM_TYPECHECKS

#ifdef STRICT_MM_TYPECHECKS
/* These are used to make use of C type-checking.
 */

/* PTE level */
typedef struct { pte_basic_t pte; } pte_t;
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })

/* 64k pages additionally define a bigger "real PTE" type that gathers
 * the "second half" part of the PTE for pseudo 64k pages
 */
#ifdef CONFIG_PPC_64K_PAGES
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef struct { pte_t pte; } real_pte_t;
#endif

/* PMD level */
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

/* PUD level exists only on 4k pages */
#ifndef CONFIG_PPC_64K_PAGES
typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })
#endif

/* PGD level */
typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x)	((x).pgd)
#define __pgd(x)	((pgd_t) { (x) })

/* Page protection bits */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) })

#else

/*
 * .. while these make it easier on the compiler
 */

typedef pte_basic_t pte_t;
#define pte_val(x)	(x)
#define __pte(x)	(x)

#ifdef CONFIG_PPC_64K_PAGES
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef unsigned long real_pte_t;
#endif

typedef unsigned long pmd_t;
#define pmd_val(x)	(x)
#define __pmd(x)	(x)

#ifndef CONFIG_PPC_64K_PAGES
typedef unsigned long pud_t;
#define pud_val(x)	(x)
#define __pud(x)	(x)
#endif

typedef unsigned long pgd_t;
#define pgd_val(x)	(x)
#define pgprot_val(x)	(x)

typedef unsigned long pgprot_t;
#define __pgd(x)	(x)
#define __pgprot(x)	(x)

#endif

struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
		struct page *p);
extern int page_is_ram(unsigned long pfn);

#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_PAGE_H */
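
As a side note on the PAGE_MASK comment above, here is a minimal user-space
sketch (not part of the patch; PAGE_SHIFT is hard-coded to 12 and a 64-bit
host is assumed) showing why the int-typed mask still clears only the low
bits when applied to an unsigned long, and how _ALIGN_UP/_ALIGN_DOWN round
an address:

/* Stand-alone illustration of the PAGE_MASK sign-extension subtlety and
 * the _ALIGN_UP/_ALIGN_DOWN macros, with PAGE_SHIFT fixed at 12 (4K pages).
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_MASK	(~((1 << PAGE_SHIFT) - 1))

#define _ALIGN_UP(addr,size)	(((addr)+((size)-1))&(~((size)-1)))
#define _ALIGN_DOWN(addr,size)	((addr)&(~((size)-1)))

int main(void)
{
	unsigned long addr = 0xc000000000012345UL;	/* arbitrary sample */

	/* PAGE_MASK is computed as an int (0xfffff000 == -4096); sign
	 * extension fills the upper bits with 1s when it is widened to
	 * unsigned long, so the masked address keeps its top 32 bits. */
	printf("masked: %#lx\n", addr & PAGE_MASK);		/* ...12000 */

	printf("up:     %#lx\n", _ALIGN_UP(addr, 4096UL));	/* ...13000 */
	printf("down:   %#lx\n", _ALIGN_DOWN(addr, 4096UL));	/* ...12000 */
	return 0;
}
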
include/asm-powerpc/page_32.h (new file, +38 lines)

#ifndef _ASM_POWERPC_PAGE_32_H
#define _ASM_POWERPC_PAGE_32_H

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_DEFAULT_FLAGS32

#ifndef __ASSEMBLY__
/*
 * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
 * physical addressing. For now this is just the IBM PPC440.
 */
#ifdef CONFIG_PTE_64BIT
typedef unsigned long long pte_basic_t;
#define PTE_SHIFT	(PAGE_SHIFT - 3)	/* 512 ptes per page */
#define PTE_FMT		"%16Lx"
#else
typedef unsigned long pte_basic_t;
#define PTE_SHIFT	(PAGE_SHIFT - 2)	/* 1024 ptes per page */
#define PTE_FMT		"%.8lx"
#endif

struct page;
extern void clear_pages(void *page, int order);
static inline void clear_page(void *page) { clear_pages(page, 0); }
extern void copy_page(void *to, void *from);

/* Pure 2^n version of get_order */
extern __inline__ int get_order(unsigned long size)
{
	int lz;

	size = (size-1) >> PAGE_SHIFT;
	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (size));
	return 32 - lz;
}

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PAGE_32_H */
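
The get_order() above leans on the 32-bit cntlzw count-leading-zeros
instruction. A host-side sketch of the same computation (the smallest
order n such that size fits in PAGE_SIZE << n), assuming a GCC/Clang-style
__builtin_clz is available; note cntlzw of 0 yields 32 while
__builtin_clz(0) is undefined, hence the explicit zero check:

/* Portable sketch of get_order(): round size up to a power-of-two number
 * of 4K pages and return the exponent. */
#include <stdio.h>

#define PAGE_SHIFT	12

static int get_order(unsigned long size)
{
	unsigned int x = (size - 1) >> PAGE_SHIFT;

	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	printf("%d\n", get_order(4096));	/* 0: one 4K page      */
	printf("%d\n", get_order(4097));	/* 1: two 4K pages     */
	printf("%d\n", get_order(16384));	/* 2: four 4K pages    */
	printf("%d\n", get_order(65536));	/* 4: sixteen 4K pages */
	return 0;
}
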
include/asm-powerpc/page_64.h (new file, +174 lines)

#ifndef _ASM_POWERPC_PAGE_64_H
#define _ASM_POWERPC_PAGE_64_H

/*
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * We always define HW_PAGE_SHIFT to 12 as use of 64K pages remains Linux
 * specific, every notion of page number shared with the firmware, TCEs,
 * iommu, etc... still uses a page size of 4K.
 */
#define HW_PAGE_SHIFT		12
#define HW_PAGE_SIZE		(ASM_CONST(1) << HW_PAGE_SHIFT)
#define HW_PAGE_MASK		(~(HW_PAGE_SIZE-1))

/*
 * PAGE_FACTOR is the number of bits factor between PAGE_SHIFT and
 * HW_PAGE_SHIFT, that is 4K pages.
 */
#define PAGE_FACTOR		(PAGE_SHIFT - HW_PAGE_SHIFT)

#define REGION_SIZE		4UL
#define REGION_SHIFT		60UL
#define REGION_MASK		(((1UL<<REGION_SIZE)-1UL)<<REGION_SHIFT)

#define VMALLOCBASE		ASM_CONST(0xD000000000000000)
#define VMALLOC_REGION_ID	(VMALLOCBASE >> REGION_SHIFT)
#define KERNEL_REGION_ID	(KERNELBASE >> REGION_SHIFT)
#define USER_REGION_ID		(0UL)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

/* Segment size */
#define SID_SHIFT		28
#define SID_MASK		0xfffffffffUL
#define ESID_MASK		0xfffffffff0000000UL
#define GET_ESID(x)		(((x) >> SID_SHIFT) & SID_MASK)

#ifndef __ASSEMBLY__
#include <asm/cache.h>

typedef unsigned long pte_basic_t;

static __inline__ void clear_page(void *addr)
{
	unsigned long lines, line_size;

	line_size = ppc64_caches.dline_size;
	lines = ppc64_caches.dlines_per_page;

	__asm__ __volatile__(
	"mtctr	%1	# clear_page\n\
1:      dcbz	0,%0\n\
	add	%0,%0,%3\n\
	bdnz+	1b"
	: "=r" (addr)
	: "r" (lines), "0" (addr), "r" (line_size)
	: "ctr", "memory");
}

extern void copy_4K_page(void *to, void *from);

#ifdef CONFIG_PPC_64K_PAGES
static inline void copy_page(void *to, void *from)
{
	unsigned int i;
	for (i=0; i < (1 << (PAGE_SHIFT - 12)); i++) {
		copy_4K_page(to, from);
		to += 4096;
		from += 4096;
	}
}
#else /* CONFIG_PPC_64K_PAGES */
static inline void copy_page(void *to, void *from)
{
	copy_4K_page(to, from);
}
#endif /* CONFIG_PPC_64K_PAGES */

/* Log 2 of page table size */
extern u64 ppc64_pft_size;

/* Large page size */
extern unsigned int HPAGE_SHIFT;
#define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_HUGETLB_PAGE

#define HTLB_AREA_SHIFT		40
#define HTLB_AREA_SIZE		(1UL << HTLB_AREA_SHIFT)
#define GET_HTLB_AREA(x)	((x) >> HTLB_AREA_SHIFT)

#define LOW_ESID_MASK(addr, len)	(((1U << (GET_ESID(addr+len-1)+1)) \
					  - (1U << GET_ESID(addr))) & 0xffff)
#define HTLB_AREA_MASK(addr, len)	(((1U << (GET_HTLB_AREA(addr+len-1)+1)) \
					  - (1U << GET_HTLB_AREA(addr))) & 0xffff)

#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
#define ARCH_HAS_SETCLEAR_HUGE_PTE

#define touches_hugepage_low_range(mm, addr, len) \
	(LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas)
#define touches_hugepage_high_range(mm, addr, len) \
	(HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas)

#define __within_hugepage_low_range(addr, len, segmask) \
	((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask))
#define within_hugepage_low_range(addr, len) \
	__within_hugepage_low_range((addr), (len), \
				    current->mm->context.low_htlb_areas)
#define __within_hugepage_high_range(addr, len, zonemask) \
	((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask))
#define within_hugepage_high_range(addr, len) \
	__within_hugepage_high_range((addr), (len), \
				     current->mm->context.high_htlb_areas)

#define is_hugepage_only_range(mm, addr, len) \
	(touches_hugepage_high_range((mm), (addr), (len)) || \
	 touches_hugepage_low_range((mm), (addr), (len)))
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA

#define in_hugepage_area(context, addr) \
	(cpu_has_feature(CPU_FTR_16M_PAGE) && \
	 ( ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) || \
	   ( ((addr) < 0x100000000L) && \
	     ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) ) )

#else /* !CONFIG_HUGETLB_PAGE */

#define in_hugepage_area(mm, addr)	0

#endif /* !CONFIG_HUGETLB_PAGE */

#ifdef MODULE
#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
#else
#define __page_aligned \
	__attribute__((__aligned__(PAGE_SIZE), \
		__section__(".data.page_aligned")))
#endif

#define VM_DATA_DEFAULT_FLAGS \
	(test_thread_flag(TIF_32BIT) ? \
	 VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)

/*
 * This is the default if a program doesn't have a PT_GNU_STACK
 * program header entry. The PPC64 ELF ABI has a non executable stack
 * by default, so in the absence of a PT_GNU_STACK program header
 * we turn execute permission off.
 */
#define VM_STACK_DEFAULT_FLAGS32	(VM_READ | VM_WRITE | VM_EXEC | \
					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_STACK_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_STACK_DEFAULT_FLAGS \
	(test_thread_flag(TIF_32BIT) ? \
	 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)

#include <asm-generic/page.h>

#endif /* _ASM_POWERPC_PAGE_64_H */
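
To make the hugepage range macros above easier to follow, here is a
stand-alone sketch (not part of the patch; the addr/len values are
arbitrary examples) of how GET_ESID and LOW_ESID_MASK turn an address
range below 4GB into a bitmask with one bit per 256MB segment touched:

/* User-space illustration of LOW_ESID_MASK: each bit in the result
 * corresponds to one 256MB ESID segment overlapped by [addr, addr+len). */
#include <stdio.h>

#define SID_SHIFT		28
#define SID_MASK		0xfffffffffUL
#define GET_ESID(x)		(((x) >> SID_SHIFT) & SID_MASK)

#define LOW_ESID_MASK(addr, len)	(((1U << (GET_ESID(addr+len-1)+1)) \
					  - (1U << GET_ESID(addr))) & 0xffff)

int main(void)
{
	unsigned long addr = 0x30000000UL;	/* starts in segment 3       */
	unsigned long len  = 0x20000000UL;	/* 512MB: ends in segment 4  */

	/* Prints 0x18: bits 3 and 4 set, i.e. segments 3 and 4 touched. */
	printf("mask = %#x\n", LOW_ESID_MASK(addr, len));
	return 0;
}

touches_hugepage_low_range() then simply ANDs this mask against the
per-mm low_htlb_areas bitmap, as the definitions above show.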