Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

[POWERPC] Remove arch/powerpc's dependence on asm-ppc/pg{alloc,table}.h

Currently, all 32-bit powerpc platforms use asm-ppc/pgtable.h and
asm-ppc/pgalloc.h, even when otherwise compiled with ARCH=powerpc.
Those asm-ppc files are a fairly nasty tangle of #ifdefs including a
bunch of things which shouldn't be necessary any more in arch/powerpc.

Cleaning up that mess is going to take a while, but this patch is a
first step. It separates asm-powerpc/pg{alloc,table}.h into 64-bit
and 32-bit versions in asm-powerpc, which the basic .h files in
asm-powerpc select based on config. We make a few tiny tweaks to the
innards of the files along the way, making the outermost ifdefs
(double-inclusion protection and __KERNEL__) a little cleaner, and
#including asm-generic/pgtable.h from the top-level
asm-powerpc/pgtable.h (since both the old 32-bit and 64-bit versions
ended with such an #include).

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>

authored by David Gibson and committed by Paul Mackerras
f88df14b 69d48b40
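
For readers skimming the diff below: the top-level asm-powerpc headers become thin wrappers that select the word-size-specific file at compile time, and pgtable.h additionally pulls in asm-generic/pgtable.h, as the commit message notes. A minimal sketch of that pattern, using pgtable.h as the example (the structure mirrors the new pgalloc.h shown in the diff; treat it as an illustration, not the verbatim new file):

```c
/* Sketch of the config-selecting wrapper pattern introduced by this patch.
 * The guard name and CONFIG_PPC64 test mirror the new asm-powerpc/pgalloc.h
 * in the diff below; this is illustrative, not the committed pgtable.h. */
#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H
#ifdef __KERNEL__

#ifdef CONFIG_PPC64
#include <asm/pgtable-ppc64.h>		/* 64-bit definitions */
#else
#include <asm/pgtable-ppc32.h>		/* 32-bit definitions */
#endif

/* both old versions ended by pulling in the generic helpers */
#include <asm-generic/pgtable.h>

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_H */
```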

+1532 -646
+41
include/asm-powerpc/pgalloc-32.h
(new file)

#ifndef _ASM_POWERPC_PGALLOC_32_H
#define _ASM_POWERPC_PGALLOC_32_H

#include <linux/threads.h>

extern void __bad_pte(pmd_t *pmd);

extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(pgd_t *pgd);

/*
 * We don't have any real pmd's, and this code never triggers because
 * the pgd will always be present..
 */
#define pmd_alloc_one(mm,address)	({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)			do { } while (0)
#define __pmd_free_tlb(tlb,x)		do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()

#ifndef CONFIG_BOOKE
#define pmd_populate_kernel(mm, pmd, pte)	\
		(pmd_val(*(pmd)) = __pa(pte) | _PMD_PRESENT)
#define pmd_populate(mm, pmd, pte)	\
		(pmd_val(*(pmd)) = (page_to_pfn(pte) << PAGE_SHIFT) | _PMD_PRESENT)
#else
#define pmd_populate_kernel(mm, pmd, pte)	\
		(pmd_val(*(pmd)) = (unsigned long)pte | _PMD_PRESENT)
#define pmd_populate(mm, pmd, pte)	\
		(pmd_val(*(pmd)) = (unsigned long)lowmem_page_address(pte) | _PMD_PRESENT)
#endif

extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
extern void pte_free_kernel(pte_t *pte);
extern void pte_free(struct page *pte);

#define __pte_free_tlb(tlb, pte)	pte_free((pte))

#define check_pgt_cache()	do { } while (0)

#endif /* _ASM_POWERPC_PGALLOC_32_H */
+152
include/asm-powerpc/pgalloc-64.h
(new file)

#ifndef _ASM_POWERPC_PGALLOC_64_H
#define _ASM_POWERPC_PGALLOC_64_H
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

extern struct kmem_cache *pgtable_cache[];

#ifdef CONFIG_PPC_64K_PAGES
#define PTE_CACHE_NUM	0
#define PMD_CACHE_NUM	1
#define PGD_CACHE_NUM	2
#define HUGEPTE_CACHE_NUM 3
#else
#define PTE_CACHE_NUM	0
#define PMD_CACHE_NUM	1
#define PUD_CACHE_NUM	1
#define PGD_CACHE_NUM	0
#define HUGEPTE_CACHE_NUM 2
#endif

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
}

static inline void pgd_free(pgd_t *pgd)
{
	kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
}

#ifndef CONFIG_PPC_64K_PAGES

#define pgd_populate(MM, PGD, PUD)	pgd_set(PGD, PUD)

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM],
				GFP_KERNEL|__GFP_REPEAT);
}

static inline void pud_free(pud_t *pud)
{
	kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, (unsigned long)pmd);
}

#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))


#else /* CONFIG_PPC_64K_PAGES */

#define pud_populate(mm, pud, pmd)	pud_set(pud, (unsigned long)pmd)

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	pmd_set(pmd, (unsigned long)pte);
}

#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))

#endif /* CONFIG_PPC_64K_PAGES */

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM],
				GFP_KERNEL|__GFP_REPEAT);
}

static inline void pmd_free(pmd_t *pmd)
{
	kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM],
				GFP_KERNEL|__GFP_REPEAT);
}

static inline struct page *pte_alloc_one(struct mm_struct *mm,
					 unsigned long address)
{
	return virt_to_page(pte_alloc_one_kernel(mm, address));
}

static inline void pte_free_kernel(pte_t *pte)
{
	kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte);
}

static inline void pte_free(struct page *ptepage)
{
	pte_free_kernel(page_address(ptepage));
}

#define PGF_CACHENUM_MASK	0x3

typedef struct pgtable_free {
	unsigned long val;
} pgtable_free_t;

static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
						unsigned long mask)
{
	BUG_ON(cachenum > PGF_CACHENUM_MASK);

	return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
}

static inline void pgtable_free(pgtable_free_t pgf)
{
	void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
	int cachenum = pgf.val & PGF_CACHENUM_MASK;

	kmem_cache_free(pgtable_cache[cachenum], p);
}

extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);

#define __pte_free_tlb(tlb, ptepage)	\
	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
		PTE_CACHE_NUM, PTE_TABLE_SIZE-1))
#define __pmd_free_tlb(tlb, pmd)	\
	pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
		PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
#ifndef CONFIG_PPC_64K_PAGES
#define __pud_free_tlb(tlb, pud)	\
	pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
		PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
#endif /* CONFIG_PPC_64K_PAGES */

#define check_pgt_cache()	do { } while (0)

#endif /* _ASM_POWERPC_PGALLOC_64_H */
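
A note on the pgtable_free machinery above: __pte/__pmd/__pud_free_tlb hand the page-table page to pgtable_free_tlb() so the actual free can be deferred until the batched TLB flush, and pgtable_free_cache() remembers which kmem cache the page came from by packing the cache index into the low two bits of the (table-size-aligned) pointer. A small user-space sketch of that encode/decode round-trip, under stated assumptions:

```c
/*
 * Illustrative user-space sketch of the pointer-tagging trick used by
 * pgtable_free_cache()/pgtable_free() above: page-table pointers are
 * aligned to their table size, so the low two bits (PGF_CACHENUM_MASK)
 * are free to carry the kmem cache index.  PTE_TABLE_SIZE and main()
 * are assumptions for the demo, not kernel values.
 */
#include <assert.h>
#include <stdio.h>

#define PGF_CACHENUM_MASK 0x3UL
#define PTE_TABLE_SIZE    4096UL	/* example: one 4K page per PTE table */

typedef struct pgtable_free {
	unsigned long val;
} pgtable_free_t;

static pgtable_free_t pgtable_free_cache(void *p, int cachenum,
					 unsigned long mask)
{
	/* the pointer is mask-aligned, so masking it loses nothing */
	return (pgtable_free_t){ .val = ((unsigned long)p & ~mask) | cachenum };
}

int main(void)
{
	unsigned long table = 0x10000UL;	/* pretend table address, 4K aligned */
	pgtable_free_t pgf = pgtable_free_cache((void *)table, 1,
						PTE_TABLE_SIZE - 1);

	/* decode exactly as pgtable_free() does */
	void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
	int cachenum = pgf.val & PGF_CACHENUM_MASK;

	assert((unsigned long)p == table && cachenum == 1);
	printf("ptr=%p cache=%d\n", p, cachenum);
	return 0;
}
```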
+3 -151
include/asm-powerpc/pgalloc.h
···
 #define _ASM_POWERPC_PGALLOC_H
 #ifdef __KERNEL__
 
-#ifndef CONFIG_PPC64
-#include <asm-ppc/pgalloc.h>
+#ifdef CONFIG_PPC64
+#include <asm/pgalloc-64.h>
 #else
+#include <asm/pgalloc-32.h>
 #endif
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_PGALLOC_H */

(The roughly 150 deleted lines are omitted here: they are the 64-bit pgd/pud/pmd/pte allocation helpers and the pgtable_free machinery that previously lived inline under the CONFIG_PPC64 branch, moved verbatim into pgalloc-64.h above.)
+3
include/asm-powerpc/pgtable-4k.h
···
+#ifndef _ASM_POWERPC_PGTABLE_4K_H
+#define _ASM_POWERPC_PGTABLE_4K_H
 /*
  * Entries per page directory level. The PTE level must use a 64b record
  * for each page table entry. The PMD and PGD level use a 32b record for
···
 
 #define remap_4k_pfn(vma, addr, pfn, prot)	\
 	remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
+#endif /* _ASM_POWERPC_PGTABLE_4K_H */
-5
include/asm-powerpc/pgtable-64k.h
···
 #ifndef _ASM_POWERPC_PGTABLE_64K_H
 #define _ASM_POWERPC_PGTABLE_64K_H
-#ifdef __KERNEL__
 
 #include <asm-generic/pgtable-nopud.h>
 
···
 /* Bits to mask out from a PGD/PUD to get to the PMD page */
 #define PUD_MASKED_BITS		0x1ff
 
-#ifndef __ASSEMBLY__
-
 /* Manipulate "rpte" values */
 #define __real_pte(e,p)	((real_pte_t) { \
 	(e), pte_val(*((p) + PTRS_PER_PTE)) })
···
 	remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE,	\
 			__pgprot(pgprot_val((prot)) | _PAGE_4K_PFN))
 
-#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_PGTABLE_64K_H */
+838
include/asm-powerpc/pgtable-ppc32.h
··· 1 + #ifndef _ASM_POWERPC_PGTABLE_PPC32_H 2 + #define _ASM_POWERPC_PGTABLE_PPC32_H 3 + 4 + #include <asm-generic/4level-fixup.h> 5 + 6 + #ifndef __ASSEMBLY__ 7 + #include <linux/sched.h> 8 + #include <linux/threads.h> 9 + #include <asm/processor.h> /* For TASK_SIZE */ 10 + #include <asm/mmu.h> 11 + #include <asm/page.h> 12 + #include <asm/io.h> /* For sub-arch specific PPC_PIN_SIZE */ 13 + struct mm_struct; 14 + 15 + extern unsigned long va_to_phys(unsigned long address); 16 + extern pte_t *va_to_pte(unsigned long address); 17 + extern unsigned long ioremap_bot, ioremap_base; 18 + #endif /* __ASSEMBLY__ */ 19 + 20 + /* 21 + * The PowerPC MMU uses a hash table containing PTEs, together with 22 + * a set of 16 segment registers (on 32-bit implementations), to define 23 + * the virtual to physical address mapping. 24 + * 25 + * We use the hash table as an extended TLB, i.e. a cache of currently 26 + * active mappings. We maintain a two-level page table tree, much 27 + * like that used by the i386, for the sake of the Linux memory 28 + * management code. Low-level assembler code in hashtable.S 29 + * (procedure hash_page) is responsible for extracting ptes from the 30 + * tree and putting them into the hash table when necessary, and 31 + * updating the accessed and modified bits in the page table tree. 32 + */ 33 + 34 + /* 35 + * The PowerPC MPC8xx uses a TLB with hardware assisted, software tablewalk. 36 + * We also use the two level tables, but we can put the real bits in them 37 + * needed for the TLB and tablewalk. These definitions require Mx_CTR.PPM = 0, 38 + * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1. The level 2 descriptor has 39 + * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit 40 + * based upon user/super access. The TLB does not have accessed nor write 41 + * protect. We assume that if the TLB get loaded with an entry it is 42 + * accessed, and overload the changed bit for write protect. We use 43 + * two bits in the software pte that are supposed to be set to zero in 44 + * the TLB entry (24 and 25) for these indicators. Although the level 1 45 + * descriptor contains the guarded and writethrough/copyback bits, we can 46 + * set these at the page level since they get copied from the Mx_TWC 47 + * register when the TLB entry is loaded. We will use bit 27 for guard, since 48 + * that is where it exists in the MD_TWC, and bit 26 for writethrough. 49 + * These will get masked from the level 2 descriptor at TLB load time, and 50 + * copied to the MD_TWC before it gets loaded. 51 + * Large page sizes added. We currently support two sizes, 4K and 8M. 52 + * This also allows a TLB hander optimization because we can directly 53 + * load the PMD into MD_TWC. The 8M pages are only used for kernel 54 + * mapping of well known areas. The PMD (PGD) entries contain control 55 + * flags in addition to the address, so care must be taken that the 56 + * software no longer assumes these are only pointers. 57 + */ 58 + 59 + /* 60 + * At present, all PowerPC 400-class processors share a similar TLB 61 + * architecture. The instruction and data sides share a unified, 62 + * 64-entry, fully-associative TLB which is maintained totally under 63 + * software control. In addition, the instruction side has a 64 + * hardware-managed, 4-entry, fully-associative TLB which serves as a 65 + * first level to the shared TLB. These two TLBs are known as the UTLB 66 + * and ITLB, respectively (see "mmu.h" for definitions). 
67 + */ 68 + 69 + /* 70 + * The normal case is that PTEs are 32-bits and we have a 1-page 71 + * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus 72 + * 73 + * For any >32-bit physical address platform, we can use the following 74 + * two level page table layout where the pgdir is 8KB and the MS 13 bits 75 + * are an index to the second level table. The combined pgdir/pmd first 76 + * level has 2048 entries and the second level has 512 64-bit PTE entries. 77 + * -Matt 78 + */ 79 + /* PMD_SHIFT determines the size of the area mapped by the PTE pages */ 80 + #define PMD_SHIFT (PAGE_SHIFT + PTE_SHIFT) 81 + #define PMD_SIZE (1UL << PMD_SHIFT) 82 + #define PMD_MASK (~(PMD_SIZE-1)) 83 + 84 + /* PGDIR_SHIFT determines what a top-level page table entry can map */ 85 + #define PGDIR_SHIFT PMD_SHIFT 86 + #define PGDIR_SIZE (1UL << PGDIR_SHIFT) 87 + #define PGDIR_MASK (~(PGDIR_SIZE-1)) 88 + 89 + /* 90 + * entries per page directory level: our page-table tree is two-level, so 91 + * we don't really have any PMD directory. 92 + */ 93 + #define PTRS_PER_PTE (1 << PTE_SHIFT) 94 + #define PTRS_PER_PMD 1 95 + #define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT)) 96 + 97 + #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) 98 + #define FIRST_USER_ADDRESS 0 99 + 100 + #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) 101 + #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) 102 + 103 + #define pte_ERROR(e) \ 104 + printk("%s:%d: bad pte "PTE_FMT".\n", __FILE__, __LINE__, pte_val(e)) 105 + #define pmd_ERROR(e) \ 106 + printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e)) 107 + #define pgd_ERROR(e) \ 108 + printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) 109 + 110 + /* 111 + * Just any arbitrary offset to the start of the vmalloc VM area: the 112 + * current 64MB value just means that there will be a 64MB "hole" after the 113 + * physical memory until the kernel virtual memory starts. That means that 114 + * any out-of-bounds memory accesses will hopefully be caught. 115 + * The vmalloc() routines leaves a hole of 4kB between each vmalloced 116 + * area for the same reason. ;) 117 + * 118 + * We no longer map larger than phys RAM with the BATs so we don't have 119 + * to worry about the VMALLOC_OFFSET causing problems. We do have to worry 120 + * about clashes between our early calls to ioremap() that start growing down 121 + * from ioremap_base being run into the VM area allocations (growing upwards 122 + * from VMALLOC_START). For this reason we have ioremap_bot to check when 123 + * we actually run into our mappings setup in the early boot with the VM 124 + * system. This really does become a problem for machines with good amounts 125 + * of RAM. -- Cort 126 + */ 127 + #define VMALLOC_OFFSET (0x1000000) /* 16M */ 128 + #ifdef PPC_PIN_SIZE 129 + #define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))) 130 + #else 131 + #define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))) 132 + #endif 133 + #define VMALLOC_END ioremap_bot 134 + 135 + /* 136 + * Bits in a linux-style PTE. These match the bits in the 137 + * (hardware-defined) PowerPC PTE as closely as possible. 138 + */ 139 + 140 + #if defined(CONFIG_40x) 141 + 142 + /* There are several potential gotchas here. The 40x hardware TLBLO 143 + field looks like this: 144 + 145 + 0 1 2 3 4 ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31 146 + RPN..................... 0 0 EX WR ZSEL....... 
W I M G 147 + 148 + Where possible we make the Linux PTE bits match up with this 149 + 150 + - bits 20 and 21 must be cleared, because we use 4k pages (40x can 151 + support down to 1k pages), this is done in the TLBMiss exception 152 + handler. 153 + - We use only zones 0 (for kernel pages) and 1 (for user pages) 154 + of the 16 available. Bit 24-26 of the TLB are cleared in the TLB 155 + miss handler. Bit 27 is PAGE_USER, thus selecting the correct 156 + zone. 157 + - PRESENT *must* be in the bottom two bits because swap cache 158 + entries use the top 30 bits. Because 40x doesn't support SMP 159 + anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30 160 + is cleared in the TLB miss handler before the TLB entry is loaded. 161 + - All other bits of the PTE are loaded into TLBLO without 162 + modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for 163 + software PTE bits. We actually use use bits 21, 24, 25, and 164 + 30 respectively for the software bits: ACCESSED, DIRTY, RW, and 165 + PRESENT. 166 + */ 167 + 168 + /* Definitions for 40x embedded chips. */ 169 + #define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */ 170 + #define _PAGE_FILE 0x001 /* when !present: nonlinear file mapping */ 171 + #define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */ 172 + #define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */ 173 + #define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */ 174 + #define _PAGE_USER 0x010 /* matches one of the zone permission bits */ 175 + #define _PAGE_RW 0x040 /* software: Writes permitted */ 176 + #define _PAGE_DIRTY 0x080 /* software: dirty page */ 177 + #define _PAGE_HWWRITE 0x100 /* hardware: Dirty & RW, set in exception */ 178 + #define _PAGE_HWEXEC 0x200 /* hardware: EX permission */ 179 + #define _PAGE_ACCESSED 0x400 /* software: R: page referenced */ 180 + 181 + #define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */ 182 + #define _PMD_BAD 0x802 183 + #define _PMD_SIZE 0x0e0 /* size field, != 0 for large-page PMD entry */ 184 + #define _PMD_SIZE_4M 0x0c0 185 + #define _PMD_SIZE_16M 0x0e0 186 + #define PMD_PAGE_SIZE(pmdval) (1024 << (((pmdval) & _PMD_SIZE) >> 4)) 187 + 188 + #elif defined(CONFIG_44x) 189 + /* 190 + * Definitions for PPC440 191 + * 192 + * Because of the 3 word TLB entries to support 36-bit addressing, 193 + * the attribute are difficult to map in such a fashion that they 194 + * are easily loaded during exception processing. I decided to 195 + * organize the entry so the ERPN is the only portion in the 196 + * upper word of the PTE and the attribute bits below are packed 197 + * in as sensibly as they can be in the area below a 4KB page size 198 + * oriented RPN. This at least makes it easy to load the RPN and 199 + * ERPN fields in the TLB. -Matt 200 + * 201 + * Note that these bits preclude future use of a page size 202 + * less than 4KB. 203 + * 204 + * 205 + * PPC 440 core has following TLB attribute fields; 206 + * 207 + * TLB1: 208 + * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 209 + * RPN................................. - - - - - - ERPN....... 210 + * 211 + * TLB2: 212 + * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 213 + * - - - - - - U0 U1 U2 U3 W I M G E - UX UW UR SX SW SR 214 + * 215 + * There are some constrains and options, to decide mapping software bits 216 + * into TLB entry. 217 + * 218 + * - PRESENT *must* be in the bottom three bits because swap cache 219 + * entries use the top 29 bits for TLB2. 
220 + * 221 + * - FILE *must* be in the bottom three bits because swap cache 222 + * entries use the top 29 bits for TLB2. 223 + * 224 + * - CACHE COHERENT bit (M) has no effect on PPC440 core, because it 225 + * doesn't support SMP. So we can use this as software bit, like 226 + * DIRTY. 227 + * 228 + * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used 229 + * for memory protection related functions (see PTE structure in 230 + * include/asm-ppc/mmu.h). The _PAGE_XXX definitions in this file map to the 231 + * above bits. Note that the bit values are CPU specific, not architecture 232 + * specific. 233 + * 234 + * The kernel PTE entry holds an arch-dependent swp_entry structure under 235 + * certain situations. In other words, in such situations some portion of 236 + * the PTE bits are used as a swp_entry. In the PPC implementation, the 237 + * 3-24th LSB are shared with swp_entry, however the 0-2nd three LSB still 238 + * hold protection values. That means the three protection bits are 239 + * reserved for both PTE and SWAP entry at the most significant three 240 + * LSBs. 241 + * 242 + * There are three protection bits available for SWAP entry: 243 + * _PAGE_PRESENT 244 + * _PAGE_FILE 245 + * _PAGE_HASHPTE (if HW has) 246 + * 247 + * So those three bits have to be inside of 0-2nd LSB of PTE. 248 + * 249 + */ 250 + 251 + #define _PAGE_PRESENT 0x00000001 /* S: PTE valid */ 252 + #define _PAGE_RW 0x00000002 /* S: Write permission */ 253 + #define _PAGE_FILE 0x00000004 /* S: nonlinear file mapping */ 254 + #define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */ 255 + #define _PAGE_HWWRITE 0x00000010 /* H: Dirty & RW */ 256 + #define _PAGE_HWEXEC 0x00000020 /* H: Execute permission */ 257 + #define _PAGE_USER 0x00000040 /* S: User page */ 258 + #define _PAGE_ENDIAN 0x00000080 /* H: E bit */ 259 + #define _PAGE_GUARDED 0x00000100 /* H: G bit */ 260 + #define _PAGE_DIRTY 0x00000200 /* S: Page dirty */ 261 + #define _PAGE_NO_CACHE 0x00000400 /* H: I bit */ 262 + #define _PAGE_WRITETHRU 0x00000800 /* H: W bit */ 263 + 264 + /* TODO: Add large page lowmem mapping support */ 265 + #define _PMD_PRESENT 0 266 + #define _PMD_PRESENT_MASK (PAGE_MASK) 267 + #define _PMD_BAD (~PAGE_MASK) 268 + 269 + /* ERPN in a PTE never gets cleared, ignore it */ 270 + #define _PTE_NONE_MASK 0xffffffff00000000ULL 271 + 272 + #elif defined(CONFIG_FSL_BOOKE) 273 + /* 274 + MMU Assist Register 3: 275 + 276 + 32 33 34 35 36 ... 50 51 52 53 54 55 56 57 58 59 60 61 62 63 277 + RPN...................... 0 0 U0 U1 U2 U3 UX SX UW SW UR SR 278 + 279 + - PRESENT *must* be in the bottom three bits because swap cache 280 + entries use the top 29 bits. 281 + 282 + - FILE *must* be in the bottom three bits because swap cache 283 + entries use the top 29 bits. 
284 + */ 285 + 286 + /* Definitions for FSL Book-E Cores */ 287 + #define _PAGE_PRESENT 0x00001 /* S: PTE contains a translation */ 288 + #define _PAGE_USER 0x00002 /* S: User page (maps to UR) */ 289 + #define _PAGE_FILE 0x00002 /* S: when !present: nonlinear file mapping */ 290 + #define _PAGE_ACCESSED 0x00004 /* S: Page referenced */ 291 + #define _PAGE_HWWRITE 0x00008 /* H: Dirty & RW, set in exception */ 292 + #define _PAGE_RW 0x00010 /* S: Write permission */ 293 + #define _PAGE_HWEXEC 0x00020 /* H: UX permission */ 294 + 295 + #define _PAGE_ENDIAN 0x00040 /* H: E bit */ 296 + #define _PAGE_GUARDED 0x00080 /* H: G bit */ 297 + #define _PAGE_COHERENT 0x00100 /* H: M bit */ 298 + #define _PAGE_NO_CACHE 0x00200 /* H: I bit */ 299 + #define _PAGE_WRITETHRU 0x00400 /* H: W bit */ 300 + 301 + #ifdef CONFIG_PTE_64BIT 302 + #define _PAGE_DIRTY 0x08000 /* S: Page dirty */ 303 + 304 + /* ERPN in a PTE never gets cleared, ignore it */ 305 + #define _PTE_NONE_MASK 0xffffffffffff0000ULL 306 + #else 307 + #define _PAGE_DIRTY 0x00800 /* S: Page dirty */ 308 + #endif 309 + 310 + #define _PMD_PRESENT 0 311 + #define _PMD_PRESENT_MASK (PAGE_MASK) 312 + #define _PMD_BAD (~PAGE_MASK) 313 + 314 + #elif defined(CONFIG_8xx) 315 + /* Definitions for 8xx embedded chips. */ 316 + #define _PAGE_PRESENT 0x0001 /* Page is valid */ 317 + #define _PAGE_FILE 0x0002 /* when !present: nonlinear file mapping */ 318 + #define _PAGE_NO_CACHE 0x0002 /* I: cache inhibit */ 319 + #define _PAGE_SHARED 0x0004 /* No ASID (context) compare */ 320 + 321 + /* These five software bits must be masked out when the entry is loaded 322 + * into the TLB. 323 + */ 324 + #define _PAGE_EXEC 0x0008 /* software: i-cache coherency required */ 325 + #define _PAGE_GUARDED 0x0010 /* software: guarded access */ 326 + #define _PAGE_DIRTY 0x0020 /* software: page changed */ 327 + #define _PAGE_RW 0x0040 /* software: user write access allowed */ 328 + #define _PAGE_ACCESSED 0x0080 /* software: page referenced */ 329 + 330 + /* Setting any bits in the nibble with the follow two controls will 331 + * require a TLB exception handler change. It is assumed unused bits 332 + * are always zero. 333 + */ 334 + #define _PAGE_HWWRITE 0x0100 /* h/w write enable: never set in Linux PTE */ 335 + #define _PAGE_USER 0x0800 /* One of the PP bits, the other is USER&~RW */ 336 + 337 + #define _PMD_PRESENT 0x0001 338 + #define _PMD_BAD 0x0ff0 339 + #define _PMD_PAGE_MASK 0x000c 340 + #define _PMD_PAGE_8M 0x000c 341 + 342 + /* 343 + * The 8xx TLB miss handler allegedly sets _PAGE_ACCESSED in the PTE 344 + * for an address even if _PAGE_PRESENT is not set, as a performance 345 + * optimization. This is a bug if you ever want to use swap unless 346 + * _PAGE_ACCESSED is 2, which it isn't, or unless you have 8xx-specific 347 + * definitions for __swp_entry etc. below, which would be gross. 348 + * -- paulus 349 + */ 350 + #define _PTE_NONE_MASK _PAGE_ACCESSED 351 + 352 + #else /* CONFIG_6xx */ 353 + /* Definitions for 60x, 740/750, etc. 
*/ 354 + #define _PAGE_PRESENT 0x001 /* software: pte contains a translation */ 355 + #define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */ 356 + #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */ 357 + #define _PAGE_USER 0x004 /* usermode access allowed */ 358 + #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */ 359 + #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */ 360 + #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */ 361 + #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */ 362 + #define _PAGE_DIRTY 0x080 /* C: page changed */ 363 + #define _PAGE_ACCESSED 0x100 /* R: page referenced */ 364 + #define _PAGE_EXEC 0x200 /* software: i-cache coherency required */ 365 + #define _PAGE_RW 0x400 /* software: user write access allowed */ 366 + 367 + #define _PTE_NONE_MASK _PAGE_HASHPTE 368 + 369 + #define _PMD_PRESENT 0 370 + #define _PMD_PRESENT_MASK (PAGE_MASK) 371 + #define _PMD_BAD (~PAGE_MASK) 372 + #endif 373 + 374 + /* 375 + * Some bits are only used on some cpu families... 376 + */ 377 + #ifndef _PAGE_HASHPTE 378 + #define _PAGE_HASHPTE 0 379 + #endif 380 + #ifndef _PTE_NONE_MASK 381 + #define _PTE_NONE_MASK 0 382 + #endif 383 + #ifndef _PAGE_SHARED 384 + #define _PAGE_SHARED 0 385 + #endif 386 + #ifndef _PAGE_HWWRITE 387 + #define _PAGE_HWWRITE 0 388 + #endif 389 + #ifndef _PAGE_HWEXEC 390 + #define _PAGE_HWEXEC 0 391 + #endif 392 + #ifndef _PAGE_EXEC 393 + #define _PAGE_EXEC 0 394 + #endif 395 + #ifndef _PMD_PRESENT_MASK 396 + #define _PMD_PRESENT_MASK _PMD_PRESENT 397 + #endif 398 + #ifndef _PMD_SIZE 399 + #define _PMD_SIZE 0 400 + #define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE() 401 + #endif 402 + 403 + #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) 404 + 405 + /* 406 + * Note: the _PAGE_COHERENT bit automatically gets set in the hardware 407 + * PTE if CONFIG_SMP is defined (hash_page does this); there is no need 408 + * to have it in the Linux PTE, and in fact the bit could be reused for 409 + * another purpose. -- paulus. 
410 + */ 411 + 412 + #ifdef CONFIG_44x 413 + #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_GUARDED) 414 + #else 415 + #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED) 416 + #endif 417 + #define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE) 418 + #define _PAGE_KERNEL (_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE) 419 + 420 + #ifdef CONFIG_PPC_STD_MMU 421 + /* On standard PPC MMU, no user access implies kernel read/write access, 422 + * so to write-protect kernel memory we must turn on user access */ 423 + #define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED | _PAGE_USER) 424 + #else 425 + #define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED) 426 + #endif 427 + 428 + #define _PAGE_IO (_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED) 429 + #define _PAGE_RAM (_PAGE_KERNEL | _PAGE_HWEXEC) 430 + 431 + #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) 432 + /* We want the debuggers to be able to set breakpoints anywhere, so 433 + * don't write protect the kernel text */ 434 + #define _PAGE_RAM_TEXT _PAGE_RAM 435 + #else 436 + #define _PAGE_RAM_TEXT (_PAGE_KERNEL_RO | _PAGE_HWEXEC) 437 + #endif 438 + 439 + #define PAGE_NONE __pgprot(_PAGE_BASE) 440 + #define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) 441 + #define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) 442 + #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) 443 + #define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC) 444 + #define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) 445 + #define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) 446 + 447 + #define PAGE_KERNEL __pgprot(_PAGE_RAM) 448 + #define PAGE_KERNEL_NOCACHE __pgprot(_PAGE_IO) 449 + 450 + /* 451 + * The PowerPC can only do execute protection on a segment (256MB) basis, 452 + * not on a page basis. So we consider execute permission the same as read. 453 + * Also, write permissions imply read permissions. 454 + * This is the closest we can get.. 455 + */ 456 + #define __P000 PAGE_NONE 457 + #define __P001 PAGE_READONLY_X 458 + #define __P010 PAGE_COPY 459 + #define __P011 PAGE_COPY_X 460 + #define __P100 PAGE_READONLY 461 + #define __P101 PAGE_READONLY_X 462 + #define __P110 PAGE_COPY 463 + #define __P111 PAGE_COPY_X 464 + 465 + #define __S000 PAGE_NONE 466 + #define __S001 PAGE_READONLY_X 467 + #define __S010 PAGE_SHARED 468 + #define __S011 PAGE_SHARED_X 469 + #define __S100 PAGE_READONLY 470 + #define __S101 PAGE_READONLY_X 471 + #define __S110 PAGE_SHARED 472 + #define __S111 PAGE_SHARED_X 473 + 474 + #ifndef __ASSEMBLY__ 475 + /* Make sure we get a link error if PMD_PAGE_SIZE is ever called on a 476 + * kernel without large page PMD support */ 477 + extern unsigned long bad_call_to_PMD_PAGE_SIZE(void); 478 + 479 + /* 480 + * Conversions between PTE values and page frame numbers. 
481 + */ 482 + 483 + /* in some case we want to additionaly adjust where the pfn is in the pte to 484 + * allow room for more flags */ 485 + #if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT) 486 + #define PFN_SHIFT_OFFSET (PAGE_SHIFT + 8) 487 + #else 488 + #define PFN_SHIFT_OFFSET (PAGE_SHIFT) 489 + #endif 490 + 491 + #define pte_pfn(x) (pte_val(x) >> PFN_SHIFT_OFFSET) 492 + #define pte_page(x) pfn_to_page(pte_pfn(x)) 493 + 494 + #define pfn_pte(pfn, prot) __pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) |\ 495 + pgprot_val(prot)) 496 + #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) 497 + 498 + /* 499 + * ZERO_PAGE is a global shared page that is always zero: used 500 + * for zero-mapped memory areas etc.. 501 + */ 502 + extern unsigned long empty_zero_page[1024]; 503 + #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) 504 + 505 + #endif /* __ASSEMBLY__ */ 506 + 507 + #define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0) 508 + #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) 509 + #define pte_clear(mm,addr,ptep) do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0) 510 + 511 + #define pmd_none(pmd) (!pmd_val(pmd)) 512 + #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD) 513 + #define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK) 514 + #define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0) 515 + 516 + #ifndef __ASSEMBLY__ 517 + /* 518 + * The "pgd_xxx()" functions here are trivial for a folded two-level 519 + * setup: the pgd is never bad, and a pmd always exists (as it's folded 520 + * into the pgd entry) 521 + */ 522 + static inline int pgd_none(pgd_t pgd) { return 0; } 523 + static inline int pgd_bad(pgd_t pgd) { return 0; } 524 + static inline int pgd_present(pgd_t pgd) { return 1; } 525 + #define pgd_clear(xp) do { } while (0) 526 + 527 + #define pgd_page_vaddr(pgd) \ 528 + ((unsigned long) __va(pgd_val(pgd) & PAGE_MASK)) 529 + 530 + /* 531 + * The following only work if pte_present() is true. 532 + * Undefined behaviour if not.. 
533 + */ 534 + static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER; } 535 + static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; } 536 + static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; } 537 + static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } 538 + static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } 539 + static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } 540 + 541 + static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } 542 + static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } 543 + 544 + static inline pte_t pte_rdprotect(pte_t pte) { 545 + pte_val(pte) &= ~_PAGE_USER; return pte; } 546 + static inline pte_t pte_wrprotect(pte_t pte) { 547 + pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; } 548 + static inline pte_t pte_exprotect(pte_t pte) { 549 + pte_val(pte) &= ~_PAGE_EXEC; return pte; } 550 + static inline pte_t pte_mkclean(pte_t pte) { 551 + pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; } 552 + static inline pte_t pte_mkold(pte_t pte) { 553 + pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } 554 + 555 + static inline pte_t pte_mkread(pte_t pte) { 556 + pte_val(pte) |= _PAGE_USER; return pte; } 557 + static inline pte_t pte_mkexec(pte_t pte) { 558 + pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; } 559 + static inline pte_t pte_mkwrite(pte_t pte) { 560 + pte_val(pte) |= _PAGE_RW; return pte; } 561 + static inline pte_t pte_mkdirty(pte_t pte) { 562 + pte_val(pte) |= _PAGE_DIRTY; return pte; } 563 + static inline pte_t pte_mkyoung(pte_t pte) { 564 + pte_val(pte) |= _PAGE_ACCESSED; return pte; } 565 + 566 + static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 567 + { 568 + pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); 569 + return pte; 570 + } 571 + 572 + /* 573 + * When flushing the tlb entry for a page, we also need to flush the hash 574 + * table entry. flush_hash_pages is assembler (for speed) in hashtable.S. 575 + */ 576 + extern int flush_hash_pages(unsigned context, unsigned long va, 577 + unsigned long pmdval, int count); 578 + 579 + /* Add an HPTE to the hash table */ 580 + extern void add_hash_page(unsigned context, unsigned long va, 581 + unsigned long pmdval); 582 + 583 + /* 584 + * Atomic PTE updates. 585 + * 586 + * pte_update clears and sets bit atomically, and returns 587 + * the old pte value. In the 64-bit PTE case we lock around the 588 + * low PTE word since we expect ALL flag bits to be there 589 + */ 590 + #ifndef CONFIG_PTE_64BIT 591 + static inline unsigned long pte_update(pte_t *p, unsigned long clr, 592 + unsigned long set) 593 + { 594 + unsigned long old, tmp; 595 + 596 + __asm__ __volatile__("\ 597 + 1: lwarx %0,0,%3\n\ 598 + andc %1,%0,%4\n\ 599 + or %1,%1,%5\n" 600 + PPC405_ERR77(0,%3) 601 + " stwcx. %1,0,%3\n\ 602 + bne- 1b" 603 + : "=&r" (old), "=&r" (tmp), "=m" (*p) 604 + : "r" (p), "r" (clr), "r" (set), "m" (*p) 605 + : "cc" ); 606 + return old; 607 + } 608 + #else 609 + static inline unsigned long long pte_update(pte_t *p, unsigned long clr, 610 + unsigned long set) 611 + { 612 + unsigned long long old; 613 + unsigned long tmp; 614 + 615 + __asm__ __volatile__("\ 616 + 1: lwarx %L0,0,%4\n\ 617 + lwzx %0,0,%3\n\ 618 + andc %1,%L0,%5\n\ 619 + or %1,%1,%6\n" 620 + PPC405_ERR77(0,%3) 621 + " stwcx. 
%1,0,%4\n\ 622 + bne- 1b" 623 + : "=&r" (old), "=&r" (tmp), "=m" (*p) 624 + : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p) 625 + : "cc" ); 626 + return old; 627 + } 628 + #endif 629 + 630 + /* 631 + * set_pte stores a linux PTE into the linux page table. 632 + * On machines which use an MMU hash table we avoid changing the 633 + * _PAGE_HASHPTE bit. 634 + */ 635 + static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, 636 + pte_t *ptep, pte_t pte) 637 + { 638 + #if _PAGE_HASHPTE != 0 639 + pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE); 640 + #else 641 + *ptep = pte; 642 + #endif 643 + } 644 + 645 + /* 646 + * 2.6 calles this without flushing the TLB entry, this is wrong 647 + * for our hash-based implementation, we fix that up here 648 + */ 649 + #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 650 + static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep) 651 + { 652 + unsigned long old; 653 + old = pte_update(ptep, _PAGE_ACCESSED, 0); 654 + #if _PAGE_HASHPTE != 0 655 + if (old & _PAGE_HASHPTE) { 656 + unsigned long ptephys = __pa(ptep) & PAGE_MASK; 657 + flush_hash_pages(context, addr, ptephys, 1); 658 + } 659 + #endif 660 + return (old & _PAGE_ACCESSED) != 0; 661 + } 662 + #define ptep_test_and_clear_young(__vma, __addr, __ptep) \ 663 + __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep) 664 + 665 + #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY 666 + static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, 667 + unsigned long addr, pte_t *ptep) 668 + { 669 + return (pte_update(ptep, (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0; 670 + } 671 + 672 + #define __HAVE_ARCH_PTEP_GET_AND_CLEAR 673 + static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, 674 + pte_t *ptep) 675 + { 676 + return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0)); 677 + } 678 + 679 + #define __HAVE_ARCH_PTEP_SET_WRPROTECT 680 + static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, 681 + pte_t *ptep) 682 + { 683 + pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0); 684 + } 685 + 686 + #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 687 + static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty) 688 + { 689 + unsigned long bits = pte_val(entry) & 690 + (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW); 691 + pte_update(ptep, 0, bits); 692 + } 693 + 694 + #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ 695 + do { \ 696 + __ptep_set_access_flags(__ptep, __entry, __dirty); \ 697 + flush_tlb_page_nohash(__vma, __address); \ 698 + } while(0) 699 + 700 + /* 701 + * Macro to mark a page protection value as "uncacheable". 702 + */ 703 + #define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED)) 704 + 705 + struct file; 706 + extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 707 + unsigned long size, pgprot_t vma_prot); 708 + #define __HAVE_PHYS_MEM_ACCESS_PROT 709 + 710 + #define __HAVE_ARCH_PTE_SAME 711 + #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0) 712 + 713 + /* 714 + * Note that on Book E processors, the pmd contains the kernel virtual 715 + * (lowmem) address of the pte page. The physical address is less useful 716 + * because everything runs with translation enabled (even the TLB miss 717 + * handler). On everything else the pmd contains the physical address 718 + * of the pte page. 
-- paulus 719 + */ 720 + #ifndef CONFIG_BOOKE 721 + #define pmd_page_vaddr(pmd) \ 722 + ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK)) 723 + #define pmd_page(pmd) \ 724 + (mem_map + (pmd_val(pmd) >> PAGE_SHIFT)) 725 + #else 726 + #define pmd_page_vaddr(pmd) \ 727 + ((unsigned long) (pmd_val(pmd) & PAGE_MASK)) 728 + #define pmd_page(pmd) \ 729 + (mem_map + (__pa(pmd_val(pmd)) >> PAGE_SHIFT)) 730 + #endif 731 + 732 + /* to find an entry in a kernel page-table-directory */ 733 + #define pgd_offset_k(address) pgd_offset(&init_mm, address) 734 + 735 + /* to find an entry in a page-table-directory */ 736 + #define pgd_index(address) ((address) >> PGDIR_SHIFT) 737 + #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) 738 + 739 + /* Find an entry in the second-level page table.. */ 740 + static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address) 741 + { 742 + return (pmd_t *) dir; 743 + } 744 + 745 + /* Find an entry in the third-level page table.. */ 746 + #define pte_index(address) \ 747 + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) 748 + #define pte_offset_kernel(dir, addr) \ 749 + ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr)) 750 + #define pte_offset_map(dir, addr) \ 751 + ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr)) 752 + #define pte_offset_map_nested(dir, addr) \ 753 + ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr)) 754 + 755 + #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) 756 + #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) 757 + 758 + extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; 759 + 760 + extern void paging_init(void); 761 + 762 + /* 763 + * Encode and decode a swap entry. 764 + * Note that the bits we use in a PTE for representing a swap entry 765 + * must not include the _PAGE_PRESENT bit, the _PAGE_FILE bit, or the 766 + *_PAGE_HASHPTE bit (if used). -- paulus 767 + */ 768 + #define __swp_type(entry) ((entry).val & 0x1f) 769 + #define __swp_offset(entry) ((entry).val >> 5) 770 + #define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 5) }) 771 + #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 }) 772 + #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 }) 773 + 774 + /* Encode and decode a nonlinear file mapping entry */ 775 + #define PTE_FILE_MAX_BITS 29 776 + #define pte_to_pgoff(pte) (pte_val(pte) >> 3) 777 + #define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE }) 778 + 779 + /* CONFIG_APUS */ 780 + /* For virtual address to physical address conversion */ 781 + extern void cache_clear(__u32 addr, int length); 782 + extern void cache_push(__u32 addr, int length); 783 + extern int mm_end_of_chunk (unsigned long addr, int len); 784 + extern unsigned long iopa(unsigned long addr); 785 + extern unsigned long mm_ptov(unsigned long addr) __attribute_const__; 786 + 787 + /* Values for nocacheflag and cmode */ 788 + /* These are not used by the APUS kernel_map, but prevents 789 + compilation errors. */ 790 + #define KERNELMAP_FULL_CACHING 0 791 + #define KERNELMAP_NOCACHE_SER 1 792 + #define KERNELMAP_NOCACHE_NONSER 2 793 + #define KERNELMAP_NO_COPYBACK 3 794 + 795 + /* 796 + * Map some physical address range into the kernel address space. 797 + */ 798 + extern unsigned long kernel_map(unsigned long paddr, unsigned long size, 799 + int nocacheflag, unsigned long *memavailp ); 800 + 801 + /* 802 + * Set cache mode of (kernel space) address range. 
803 + */ 804 + extern void kernel_set_cachemode (unsigned long address, unsigned long size, 805 + unsigned int cmode); 806 + 807 + /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */ 808 + #define kern_addr_valid(addr) (1) 809 + 810 + #ifdef CONFIG_PHYS_64BIT 811 + extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, 812 + unsigned long paddr, unsigned long size, pgprot_t prot); 813 + 814 + static inline int io_remap_pfn_range(struct vm_area_struct *vma, 815 + unsigned long vaddr, 816 + unsigned long pfn, 817 + unsigned long size, 818 + pgprot_t prot) 819 + { 820 + phys_addr_t paddr64 = fixup_bigphys_addr(pfn << PAGE_SHIFT, size); 821 + return remap_pfn_range(vma, vaddr, paddr64 >> PAGE_SHIFT, size, prot); 822 + } 823 + #else 824 + #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 825 + remap_pfn_range(vma, vaddr, pfn, size, prot) 826 + #endif 827 + 828 + /* 829 + * No page table caches to initialise 830 + */ 831 + #define pgtable_cache_init() do { } while (0) 832 + 833 + extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, 834 + pmd_t **pmdp); 835 + 836 + #endif /* !__ASSEMBLY__ */ 837 + 838 + #endif /* _ASM_POWERPC_PGTABLE_PPC32_H */
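
The PMD_SHIFT/PGDIR_SHIFT definitions and the "1-page 1024-entry pgdir pointing to 1-page 1024-entry PTE pages" comment in pgtable-ppc32.h above describe the classic folded two-level 32-bit layout. A tiny worked example of how a 32-bit address splits under those macros, assuming 4K pages and 32-bit PTEs (the real PTE_SHIFT comes from asm/page.h, and main() is only a demo driver):

```c
/*
 * Worked example of the two-level 32-bit split described in
 * pgtable-ppc32.h above.  Assumes the common case of 4K pages and
 * 32-bit PTEs, i.e. PTE_SHIFT == 10; these values are assumptions
 * for the sketch, not taken from the kernel headers.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PTE_SHIFT	10			/* assumption for this sketch */
#define PMD_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SHIFT	PMD_SHIFT
#define PTRS_PER_PTE	(1 << PTE_SHIFT)

int main(void)
{
	unsigned long addr = 0xc0123456UL;	/* arbitrary kernel address */

	unsigned long pgd_idx = addr >> PGDIR_SHIFT;			    /* top 10 bits  */
	unsigned long pte_idx = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1); /* next 10 bits */
	unsigned long offset  = addr & ((1UL << PAGE_SHIFT) - 1);	    /* low 12 bits  */

	/* 1024-entry pgd, each entry covering a 4MB (1024 * 4K) PTE page */
	printf("pgd index %lu, pte index %lu, page offset 0x%lx\n",
	       pgd_idx, pte_idx, offset);
	return 0;
}
```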
+492
include/asm-powerpc/pgtable-ppc64.h
··· 1 + #ifndef _ASM_POWERPC_PGTABLE_PPC64_H_ 2 + #define _ASM_POWERPC_PGTABLE_PPC64_H_ 3 + /* 4 + * This file contains the functions and defines necessary to modify and use 5 + * the ppc64 hashed page table. 6 + */ 7 + 8 + #ifndef __ASSEMBLY__ 9 + #include <linux/stddef.h> 10 + #include <asm/processor.h> /* For TASK_SIZE */ 11 + #include <asm/mmu.h> 12 + #include <asm/page.h> 13 + #include <asm/tlbflush.h> 14 + struct mm_struct; 15 + #endif /* __ASSEMBLY__ */ 16 + 17 + #ifdef CONFIG_PPC_64K_PAGES 18 + #include <asm/pgtable-64k.h> 19 + #else 20 + #include <asm/pgtable-4k.h> 21 + #endif 22 + 23 + #define FIRST_USER_ADDRESS 0 24 + 25 + /* 26 + * Size of EA range mapped by our pagetables. 27 + */ 28 + #define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \ 29 + PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT) 30 + #define PGTABLE_RANGE (1UL << PGTABLE_EADDR_SIZE) 31 + 32 + #if TASK_SIZE_USER64 > PGTABLE_RANGE 33 + #error TASK_SIZE_USER64 exceeds pagetable range 34 + #endif 35 + 36 + #if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT)) 37 + #error TASK_SIZE_USER64 exceeds user VSID range 38 + #endif 39 + 40 + /* 41 + * Define the address range of the vmalloc VM area. 42 + */ 43 + #define VMALLOC_START ASM_CONST(0xD000000000000000) 44 + #define VMALLOC_SIZE ASM_CONST(0x80000000000) 45 + #define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE) 46 + 47 + /* 48 + * Define the address range of the imalloc VM area. 49 + */ 50 + #define PHBS_IO_BASE VMALLOC_END 51 + #define IMALLOC_BASE (PHBS_IO_BASE + 0x80000000ul) /* Reserve 2 gigs for PHBs */ 52 + #define IMALLOC_END (VMALLOC_START + PGTABLE_RANGE) 53 + 54 + /* 55 + * Region IDs 56 + */ 57 + #define REGION_SHIFT 60UL 58 + #define REGION_MASK (0xfUL << REGION_SHIFT) 59 + #define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT) 60 + 61 + #define VMALLOC_REGION_ID (REGION_ID(VMALLOC_START)) 62 + #define KERNEL_REGION_ID (REGION_ID(PAGE_OFFSET)) 63 + #define USER_REGION_ID (0UL) 64 + 65 + /* 66 + * Common bits in a linux-style PTE. These match the bits in the 67 + * (hardware-defined) PowerPC PTE as closely as possible. 
Additional 68 + * bits may be defined in pgtable-*.h 69 + */ 70 + #define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */ 71 + #define _PAGE_USER 0x0002 /* matches one of the PP bits */ 72 + #define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */ 73 + #define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */ 74 + #define _PAGE_GUARDED 0x0008 75 + #define _PAGE_COHERENT 0x0010 /* M: enforce memory coherence (SMP systems) */ 76 + #define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */ 77 + #define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */ 78 + #define _PAGE_DIRTY 0x0080 /* C: page changed */ 79 + #define _PAGE_ACCESSED 0x0100 /* R: page referenced */ 80 + #define _PAGE_RW 0x0200 /* software: user write access allowed */ 81 + #define _PAGE_HASHPTE 0x0400 /* software: pte has an associated HPTE */ 82 + #define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */ 83 + 84 + #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT) 85 + 86 + #define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY) 87 + 88 + /* __pgprot defined in asm-powerpc/page.h */ 89 + #define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED) 90 + 91 + #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER) 92 + #define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC) 93 + #define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) 94 + #define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) 95 + #define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) 96 + #define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) 97 + #define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_WRENABLE) 98 + #define PAGE_KERNEL_CI __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ 99 + _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED) 100 + #define PAGE_KERNEL_EXEC __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_EXEC) 101 + 102 + #define PAGE_AGP __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE) 103 + #define HAVE_PAGE_AGP 104 + 105 + /* PTEIDX nibble */ 106 + #define _PTEIDX_SECONDARY 0x8 107 + #define _PTEIDX_GROUP_IX 0x7 108 + 109 + 110 + /* 111 + * POWER4 and newer have per page execute protection, older chips can only 112 + * do this on a segment (256MB) basis. 113 + * 114 + * Also, write permissions imply read permissions. 115 + * This is the closest we can get.. 116 + * 117 + * Note due to the way vm flags are laid out, the bits are XWR 118 + */ 119 + #define __P000 PAGE_NONE 120 + #define __P001 PAGE_READONLY 121 + #define __P010 PAGE_COPY 122 + #define __P011 PAGE_COPY 123 + #define __P100 PAGE_READONLY_X 124 + #define __P101 PAGE_READONLY_X 125 + #define __P110 PAGE_COPY_X 126 + #define __P111 PAGE_COPY_X 127 + 128 + #define __S000 PAGE_NONE 129 + #define __S001 PAGE_READONLY 130 + #define __S010 PAGE_SHARED 131 + #define __S011 PAGE_SHARED 132 + #define __S100 PAGE_READONLY_X 133 + #define __S101 PAGE_READONLY_X 134 + #define __S110 PAGE_SHARED_X 135 + #define __S111 PAGE_SHARED_X 136 + 137 + #ifndef __ASSEMBLY__ 138 + 139 + /* 140 + * ZERO_PAGE is a global shared page that is always zero: used 141 + * for zero-mapped memory areas etc.. 
142 + */ 143 + extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; 144 + #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) 145 + #endif /* __ASSEMBLY__ */ 146 + 147 + #ifdef CONFIG_HUGETLB_PAGE 148 + 149 + #define HAVE_ARCH_UNMAPPED_AREA 150 + #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN 151 + 152 + #endif 153 + 154 + #ifndef __ASSEMBLY__ 155 + 156 + /* 157 + * Conversion functions: convert a page and protection to a page entry, 158 + * and a page entry and page directory to the page they refer to. 159 + * 160 + * mk_pte takes a (struct page *) as input 161 + */ 162 + #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) 163 + 164 + static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) 165 + { 166 + pte_t pte; 167 + 168 + 169 + pte_val(pte) = (pfn << PTE_RPN_SHIFT) | pgprot_val(pgprot); 170 + return pte; 171 + } 172 + 173 + #define pte_modify(_pte, newprot) \ 174 + (__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))) 175 + 176 + #define pte_none(pte) ((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0) 177 + #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) 178 + 179 + /* pte_clear moved to later in this file */ 180 + 181 + #define pte_pfn(x) ((unsigned long)((pte_val(x)>>PTE_RPN_SHIFT))) 182 + #define pte_page(x) pfn_to_page(pte_pfn(x)) 183 + 184 + #define PMD_BAD_BITS (PTE_TABLE_SIZE-1) 185 + #define PUD_BAD_BITS (PMD_TABLE_SIZE-1) 186 + 187 + #define pmd_set(pmdp, pmdval) (pmd_val(*(pmdp)) = (pmdval)) 188 + #define pmd_none(pmd) (!pmd_val(pmd)) 189 + #define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \ 190 + || (pmd_val(pmd) & PMD_BAD_BITS)) 191 + #define pmd_present(pmd) (pmd_val(pmd) != 0) 192 + #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0) 193 + #define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS) 194 + #define pmd_page(pmd) virt_to_page(pmd_page_vaddr(pmd)) 195 + 196 + #define pud_set(pudp, pudval) (pud_val(*(pudp)) = (pudval)) 197 + #define pud_none(pud) (!pud_val(pud)) 198 + #define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \ 199 + || (pud_val(pud) & PUD_BAD_BITS)) 200 + #define pud_present(pud) (pud_val(pud) != 0) 201 + #define pud_clear(pudp) (pud_val(*(pudp)) = 0) 202 + #define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS) 203 + #define pud_page(pud) virt_to_page(pud_page_vaddr(pud)) 204 + 205 + #define pgd_set(pgdp, pudp) ({pgd_val(*(pgdp)) = (unsigned long)(pudp);}) 206 + 207 + /* 208 + * Find an entry in a page-table-directory. We combine the address region 209 + * (the high order N bits) and the pgd portion of the address. 
210 + */ 211 + /* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */ 212 + #define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff) 213 + 214 + #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) 215 + 216 + #define pmd_offset(pudp,addr) \ 217 + (((pmd_t *) pud_page_vaddr(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))) 218 + 219 + #define pte_offset_kernel(dir,addr) \ 220 + (((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))) 221 + 222 + #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) 223 + #define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr)) 224 + #define pte_unmap(pte) do { } while(0) 225 + #define pte_unmap_nested(pte) do { } while(0) 226 + 227 + /* to find an entry in a kernel page-table-directory */ 228 + /* This now only contains the vmalloc pages */ 229 + #define pgd_offset_k(address) pgd_offset(&init_mm, address) 230 + 231 + /* 232 + * The following only work if pte_present() is true. 233 + * Undefined behaviour if not.. 234 + */ 235 + static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER;} 236 + static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;} 237 + static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC;} 238 + static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;} 239 + static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;} 240 + static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;} 241 + 242 + static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } 243 + static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } 244 + 245 + static inline pte_t pte_rdprotect(pte_t pte) { 246 + pte_val(pte) &= ~_PAGE_USER; return pte; } 247 + static inline pte_t pte_exprotect(pte_t pte) { 248 + pte_val(pte) &= ~_PAGE_EXEC; return pte; } 249 + static inline pte_t pte_wrprotect(pte_t pte) { 250 + pte_val(pte) &= ~(_PAGE_RW); return pte; } 251 + static inline pte_t pte_mkclean(pte_t pte) { 252 + pte_val(pte) &= ~(_PAGE_DIRTY); return pte; } 253 + static inline pte_t pte_mkold(pte_t pte) { 254 + pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } 255 + static inline pte_t pte_mkread(pte_t pte) { 256 + pte_val(pte) |= _PAGE_USER; return pte; } 257 + static inline pte_t pte_mkexec(pte_t pte) { 258 + pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; } 259 + static inline pte_t pte_mkwrite(pte_t pte) { 260 + pte_val(pte) |= _PAGE_RW; return pte; } 261 + static inline pte_t pte_mkdirty(pte_t pte) { 262 + pte_val(pte) |= _PAGE_DIRTY; return pte; } 263 + static inline pte_t pte_mkyoung(pte_t pte) { 264 + pte_val(pte) |= _PAGE_ACCESSED; return pte; } 265 + static inline pte_t pte_mkhuge(pte_t pte) { 266 + return pte; } 267 + 268 + /* Atomic PTE updates */ 269 + static inline unsigned long pte_update(struct mm_struct *mm, 270 + unsigned long addr, 271 + pte_t *ptep, unsigned long clr, 272 + int huge) 273 + { 274 + unsigned long old, tmp; 275 + 276 + __asm__ __volatile__( 277 + "1: ldarx %0,0,%3 # pte_update\n\ 278 + andi. %1,%0,%6\n\ 279 + bne- 1b \n\ 280 + andc %1,%0,%4 \n\ 281 + stdcx. 
%1,0,%3 \n\ 282 + bne- 1b" 283 + : "=&r" (old), "=&r" (tmp), "=m" (*ptep) 284 + : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY) 285 + : "cc" ); 286 + 287 + if (old & _PAGE_HASHPTE) 288 + hpte_need_flush(mm, addr, ptep, old, huge); 289 + return old; 290 + } 291 + 292 + static inline int __ptep_test_and_clear_young(struct mm_struct *mm, 293 + unsigned long addr, pte_t *ptep) 294 + { 295 + unsigned long old; 296 + 297 + if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) 298 + return 0; 299 + old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0); 300 + return (old & _PAGE_ACCESSED) != 0; 301 + } 302 + #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 303 + #define ptep_test_and_clear_young(__vma, __addr, __ptep) \ 304 + ({ \ 305 + int __r; \ 306 + __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \ 307 + __r; \ 308 + }) 309 + 310 + /* 311 + * On RW/DIRTY bit transitions we can avoid flushing the hpte. For the 312 + * moment we always flush but we need to fix hpte_update and test if the 313 + * optimisation is worth it. 314 + */ 315 + static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm, 316 + unsigned long addr, pte_t *ptep) 317 + { 318 + unsigned long old; 319 + 320 + if ((pte_val(*ptep) & _PAGE_DIRTY) == 0) 321 + return 0; 322 + old = pte_update(mm, addr, ptep, _PAGE_DIRTY, 0); 323 + return (old & _PAGE_DIRTY) != 0; 324 + } 325 + #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY 326 + #define ptep_test_and_clear_dirty(__vma, __addr, __ptep) \ 327 + ({ \ 328 + int __r; \ 329 + __r = __ptep_test_and_clear_dirty((__vma)->vm_mm, __addr, __ptep); \ 330 + __r; \ 331 + }) 332 + 333 + #define __HAVE_ARCH_PTEP_SET_WRPROTECT 334 + static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, 335 + pte_t *ptep) 336 + { 337 + unsigned long old; 338 + 339 + if ((pte_val(*ptep) & _PAGE_RW) == 0) 340 + return; 341 + old = pte_update(mm, addr, ptep, _PAGE_RW, 0); 342 + } 343 + 344 + /* 345 + * We currently remove entries from the hashtable regardless of whether 346 + * the entry was young or dirty. The generic routines only flush if the 347 + * entry was young or dirty which is not good enough. 348 + * 349 + * We should be more intelligent about this but for the moment we override 350 + * these functions and force a tlb flush unconditionally 351 + */ 352 + #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH 353 + #define ptep_clear_flush_young(__vma, __address, __ptep) \ 354 + ({ \ 355 + int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \ 356 + __ptep); \ 357 + __young; \ 358 + }) 359 + 360 + #define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH 361 + #define ptep_clear_flush_dirty(__vma, __address, __ptep) \ 362 + ({ \ 363 + int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \ 364 + __ptep); \ 365 + __dirty; \ 366 + }) 367 + 368 + #define __HAVE_ARCH_PTEP_GET_AND_CLEAR 369 + static inline pte_t ptep_get_and_clear(struct mm_struct *mm, 370 + unsigned long addr, pte_t *ptep) 371 + { 372 + unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0); 373 + return __pte(old); 374 + } 375 + 376 + static inline void pte_clear(struct mm_struct *mm, unsigned long addr, 377 + pte_t * ptep) 378 + { 379 + pte_update(mm, addr, ptep, ~0UL, 0); 380 + } 381 + 382 + /* 383 + * set_pte stores a linux PTE into the linux page table. 
384 + */ 385 + static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, 386 + pte_t *ptep, pte_t pte) 387 + { 388 + if (pte_present(*ptep)) 389 + pte_clear(mm, addr, ptep); 390 + pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); 391 + *ptep = pte; 392 + } 393 + 394 + /* Set the dirty and/or accessed bits atomically in a linux PTE, this 395 + * function doesn't need to flush the hash entry 396 + */ 397 + #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 398 + static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty) 399 + { 400 + unsigned long bits = pte_val(entry) & 401 + (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); 402 + unsigned long old, tmp; 403 + 404 + __asm__ __volatile__( 405 + "1: ldarx %0,0,%4\n\ 406 + andi. %1,%0,%6\n\ 407 + bne- 1b \n\ 408 + or %0,%3,%0\n\ 409 + stdcx. %0,0,%4\n\ 410 + bne- 1b" 411 + :"=&r" (old), "=&r" (tmp), "=m" (*ptep) 412 + :"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY) 413 + :"cc"); 414 + } 415 + #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ 416 + do { \ 417 + __ptep_set_access_flags(__ptep, __entry, __dirty); \ 418 + flush_tlb_page_nohash(__vma, __address); \ 419 + } while(0) 420 + 421 + /* 422 + * Macro to mark a page protection value as "uncacheable". 423 + */ 424 + #define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED)) 425 + 426 + struct file; 427 + extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 428 + unsigned long size, pgprot_t vma_prot); 429 + #define __HAVE_PHYS_MEM_ACCESS_PROT 430 + 431 + #define __HAVE_ARCH_PTE_SAME 432 + #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0) 433 + 434 + #define pte_ERROR(e) \ 435 + printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) 436 + #define pmd_ERROR(e) \ 437 + printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e)) 438 + #define pgd_ERROR(e) \ 439 + printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) 440 + 441 + extern pgd_t swapper_pg_dir[]; 442 + 443 + extern void paging_init(void); 444 + 445 + /* Encode and de-code a swap entry */ 446 + #define __swp_type(entry) (((entry).val >> 1) & 0x3f) 447 + #define __swp_offset(entry) ((entry).val >> 8) 448 + #define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)}) 449 + #define __pte_to_swp_entry(pte) ((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT}) 450 + #define __swp_entry_to_pte(x) ((pte_t) { (x).val << PTE_RPN_SHIFT }) 451 + #define pte_to_pgoff(pte) (pte_val(pte) >> PTE_RPN_SHIFT) 452 + #define pgoff_to_pte(off) ((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE}) 453 + #define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_RPN_SHIFT) 454 + 455 + /* 456 + * kern_addr_valid is intended to indicate whether an address is a valid 457 + * kernel address. Most 32-bit archs define it as always true (like this) 458 + * but most 64-bit archs actually perform a test. What should we do here? 459 + * The only use is in fs/ncpfs/dir.c 460 + */ 461 + #define kern_addr_valid(addr) (1) 462 + 463 + #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 464 + remap_pfn_range(vma, vaddr, pfn, size, prot) 465 + 466 + void pgtable_cache_init(void); 467 + 468 + /* 469 + * find_linux_pte returns the address of a linux pte for a given 470 + * effective address and directory. If not found, it returns zero. 
471 + */static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea) 472 + { 473 + pgd_t *pg; 474 + pud_t *pu; 475 + pmd_t *pm; 476 + pte_t *pt = NULL; 477 + 478 + pg = pgdir + pgd_index(ea); 479 + if (!pgd_none(*pg)) { 480 + pu = pud_offset(pg, ea); 481 + if (!pud_none(*pu)) { 482 + pm = pmd_offset(pu, ea); 483 + if (pmd_present(*pm)) 484 + pt = pte_offset_kernel(pm, ea); 485 + } 486 + } 487 + return pt; 488 + } 489 + 490 + #endif /* __ASSEMBLY__ */ 491 + 492 + #endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */
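The find_linux_pte() helper above captures the whole walk (pgd -> pud -> pmd -> pte) that the pgd_index()/pmd_offset()/pte_offset_kernel() macros implement. As a quick illustration of how it is meant to be used -- the helper and the surrounding names come from the new header, but this example function is hypothetical and not part of the patch -- a caller on a 64-bit kernel could translate an effective address to a pfn via the kernel's global init_mm like this:

static unsigned long example_ea_to_pfn(unsigned long ea)
{
        pte_t *ptep;

        /* walks pgd -> pud -> pmd and returns NULL if any level is empty */
        ptep = find_linux_pte(init_mm.pgd, ea);
        if (ptep == NULL || !pte_present(*ptep))
                return 0;

        /* pte_pfn() shifts the flag bits out via PTE_RPN_SHIFT */
        return pte_pfn(*ptep);
}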
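pte_update() is the core primitive in this header: it atomically clears bits in a PTE inside a ldarx/stdcx. reservation loop, spinning while _PAGE_BUSY is set (the hash-fault code owns the PTE at that point), and afterwards hands the old value to hpte_need_flush() if a hashed copy existed. The following is only a rough C sketch of that control flow, using a generic compare-and-swap builtin for clarity; it is not the implementation in the patch, which keeps the read-modify-write in inline assembly:

/* Illustrative sketch only -- not the patch's implementation. */
static unsigned long pte_update_sketch(unsigned long *p, unsigned long clr)
{
        unsigned long old;

        for (;;) {
                old = *p;
                if (old & _PAGE_BUSY)   /* hash code owns the PTE: retry */
                        continue;
                /* clear the requested bits ("andc" in the real asm) */
                if (__sync_val_compare_and_swap(p, old, old & ~clr) == old)
                        break;
        }
        return old;     /* caller checks _PAGE_HASHPTE and flushes the HPTE */
}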
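The swap-entry macros pack the swap type into bits 1-6 and the offset from bit 8 upwards, and __swp_entry_to_pte() shifts the whole value above PTE_RPN_SHIFT, so a swap PTE can never have _PAGE_PRESENT set. A small worked example (illustration only, assuming these macros are in scope):

static void swap_encoding_example(void)
{
        swp_entry_t e = __swp_entry(3, 0x1234); /* type 3, offset 0x1234 */
        pte_t swp_pte;

        /* e.val == (3 << 1) | (0x1234 << 8) */
        BUG_ON(__swp_type(e) != 3);
        BUG_ON(__swp_offset(e) != 0x1234);

        /* the pte form is e.val << PTE_RPN_SHIFT: all flag bits stay clear */
        swp_pte = __swp_entry_to_pte(e);
        BUG_ON(pte_present(swp_pte));
}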
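mk_pte()/pfn_pte() and the pte_mk*() accessors compose naturally: the pfn lands above PTE_RPN_SHIFT and the protection and software bits live below it. A minimal illustration, not from the patch, assuming PAGE_KERNEL (defined alongside the _PAGE_* bits this patch moves) is in scope:

static pte_t example_dirty_kernel_pte(struct page *page)
{
        /* pfn_pte(): (pfn << PTE_RPN_SHIFT) | pgprot_val(PAGE_KERNEL) */
        pte_t pte = mk_pte(page, PAGE_KERNEL);

        pte = pte_mkdirty(pte);         /* sets _PAGE_DIRTY */
        pte = pte_mkyoung(pte);         /* sets _PAGE_ACCESSED */
        return pte;
}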
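pgprot_noncached() and io_remap_pfn_range() are what drivers use to map MMIO into userspace with caching inhibited and the mapping guarded, the usual requirement for device memory. A sketch of the common pattern (the device address and the function itself are hypothetical, not from the patch):

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long mmio_phys = 0xf0000000ul; /* hypothetical device address */
        unsigned long size = vma->vm_end - vma->vm_start;

        /* adds _PAGE_NO_CACHE | _PAGE_GUARDED on top of the vma protection */
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return io_remap_pfn_range(vma, vma->vm_start,
                                  mmio_phys >> PAGE_SHIFT,
                                  size, vma->vm_page_prot);
}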
+3 -490
include/asm-powerpc/pgtable.h
··· 2 2 #define _ASM_POWERPC_PGTABLE_H 3 3 #ifdef __KERNEL__ 4 4 5 - #ifndef CONFIG_PPC64 6 - #include <asm-ppc/pgtable.h> 5 + #if defined(CONFIG_PPC64) 6 + # include <asm/pgtable-ppc64.h> 7 7 #else 8 - 9 - /* 10 - * This file contains the functions and defines necessary to modify and use 11 - * the ppc64 hashed page table. 12 - */ 13 - 14 - #ifndef __ASSEMBLY__ 15 - #include <linux/stddef.h> 16 - #include <asm/processor.h> /* For TASK_SIZE */ 17 - #include <asm/mmu.h> 18 - #include <asm/page.h> 19 - #include <asm/tlbflush.h> 20 - struct mm_struct; 21 - #endif /* __ASSEMBLY__ */ 22 - 23 - #ifdef CONFIG_PPC_64K_PAGES 24 - #include <asm/pgtable-64k.h> 25 - #else 26 - #include <asm/pgtable-4k.h> 27 - #endif 28 - 29 - #define FIRST_USER_ADDRESS 0 30 - 31 - /* 32 - * Size of EA range mapped by our pagetables. 33 - */ 34 - #define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \ 35 - PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT) 36 - #define PGTABLE_RANGE (1UL << PGTABLE_EADDR_SIZE) 37 - 38 - #if TASK_SIZE_USER64 > PGTABLE_RANGE 39 - #error TASK_SIZE_USER64 exceeds pagetable range 40 - #endif 41 - 42 - #if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT)) 43 - #error TASK_SIZE_USER64 exceeds user VSID range 44 - #endif 45 - 46 - /* 47 - * Define the address range of the vmalloc VM area. 48 - */ 49 - #define VMALLOC_START ASM_CONST(0xD000000000000000) 50 - #define VMALLOC_SIZE ASM_CONST(0x80000000000) 51 - #define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE) 52 - 53 - /* 54 - * Define the address range of the imalloc VM area. 55 - */ 56 - #define PHBS_IO_BASE VMALLOC_END 57 - #define IMALLOC_BASE (PHBS_IO_BASE + 0x80000000ul) /* Reserve 2 gigs for PHBs */ 58 - #define IMALLOC_END (VMALLOC_START + PGTABLE_RANGE) 59 - 60 - /* 61 - * Region IDs 62 - */ 63 - #define REGION_SHIFT 60UL 64 - #define REGION_MASK (0xfUL << REGION_SHIFT) 65 - #define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT) 66 - 67 - #define VMALLOC_REGION_ID (REGION_ID(VMALLOC_START)) 68 - #define KERNEL_REGION_ID (REGION_ID(PAGE_OFFSET)) 69 - #define USER_REGION_ID (0UL) 70 - 71 - /* 72 - * Common bits in a linux-style PTE. These match the bits in the 73 - * (hardware-defined) PowerPC PTE as closely as possible. 
Additional 74 - * bits may be defined in pgtable-*.h 75 - */ 76 - #define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */ 77 - #define _PAGE_USER 0x0002 /* matches one of the PP bits */ 78 - #define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */ 79 - #define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */ 80 - #define _PAGE_GUARDED 0x0008 81 - #define _PAGE_COHERENT 0x0010 /* M: enforce memory coherence (SMP systems) */ 82 - #define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */ 83 - #define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */ 84 - #define _PAGE_DIRTY 0x0080 /* C: page changed */ 85 - #define _PAGE_ACCESSED 0x0100 /* R: page referenced */ 86 - #define _PAGE_RW 0x0200 /* software: user write access allowed */ 87 - #define _PAGE_HASHPTE 0x0400 /* software: pte has an associated HPTE */ 88 - #define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */ 89 - 90 - #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT) 91 - 92 - #define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY) 93 - 94 - /* __pgprot defined in asm-powerpc/page.h */ 95 - #define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED) 96 - 97 - #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER) 98 - #define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC) 99 - #define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) 100 - #define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) 101 - #define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) 102 - #define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) 103 - #define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_WRENABLE) 104 - #define PAGE_KERNEL_CI __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ 105 - _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED) 106 - #define PAGE_KERNEL_EXEC __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_EXEC) 107 - 108 - #define PAGE_AGP __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE) 109 - #define HAVE_PAGE_AGP 110 - 111 - /* PTEIDX nibble */ 112 - #define _PTEIDX_SECONDARY 0x8 113 - #define _PTEIDX_GROUP_IX 0x7 114 - 115 - 116 - /* 117 - * POWER4 and newer have per page execute protection, older chips can only 118 - * do this on a segment (256MB) basis. 119 - * 120 - * Also, write permissions imply read permissions. 121 - * This is the closest we can get.. 122 - * 123 - * Note due to the way vm flags are laid out, the bits are XWR 124 - */ 125 - #define __P000 PAGE_NONE 126 - #define __P001 PAGE_READONLY 127 - #define __P010 PAGE_COPY 128 - #define __P011 PAGE_COPY 129 - #define __P100 PAGE_READONLY_X 130 - #define __P101 PAGE_READONLY_X 131 - #define __P110 PAGE_COPY_X 132 - #define __P111 PAGE_COPY_X 133 - 134 - #define __S000 PAGE_NONE 135 - #define __S001 PAGE_READONLY 136 - #define __S010 PAGE_SHARED 137 - #define __S011 PAGE_SHARED 138 - #define __S100 PAGE_READONLY_X 139 - #define __S101 PAGE_READONLY_X 140 - #define __S110 PAGE_SHARED_X 141 - #define __S111 PAGE_SHARED_X 142 - 143 - #ifndef __ASSEMBLY__ 144 - 145 - /* 146 - * ZERO_PAGE is a global shared page that is always zero: used 147 - * for zero-mapped memory areas etc.. 
148 - */ 149 - extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; 150 - #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) 151 - #endif /* __ASSEMBLY__ */ 152 - 153 - #ifdef CONFIG_HUGETLB_PAGE 154 - 155 - #define HAVE_ARCH_UNMAPPED_AREA 156 - #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN 157 - 8 + # include <asm/pgtable-ppc32.h> 158 9 #endif 159 10 160 11 #ifndef __ASSEMBLY__ 161 - 162 - /* 163 - * Conversion functions: convert a page and protection to a page entry, 164 - * and a page entry and page directory to the page they refer to. 165 - * 166 - * mk_pte takes a (struct page *) as input 167 - */ 168 - #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) 169 - 170 - static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) 171 - { 172 - pte_t pte; 173 - 174 - 175 - pte_val(pte) = (pfn << PTE_RPN_SHIFT) | pgprot_val(pgprot); 176 - return pte; 177 - } 178 - 179 - #define pte_modify(_pte, newprot) \ 180 - (__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))) 181 - 182 - #define pte_none(pte) ((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0) 183 - #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) 184 - 185 - /* pte_clear moved to later in this file */ 186 - 187 - #define pte_pfn(x) ((unsigned long)((pte_val(x)>>PTE_RPN_SHIFT))) 188 - #define pte_page(x) pfn_to_page(pte_pfn(x)) 189 - 190 - #define PMD_BAD_BITS (PTE_TABLE_SIZE-1) 191 - #define PUD_BAD_BITS (PMD_TABLE_SIZE-1) 192 - 193 - #define pmd_set(pmdp, pmdval) (pmd_val(*(pmdp)) = (pmdval)) 194 - #define pmd_none(pmd) (!pmd_val(pmd)) 195 - #define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \ 196 - || (pmd_val(pmd) & PMD_BAD_BITS)) 197 - #define pmd_present(pmd) (pmd_val(pmd) != 0) 198 - #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0) 199 - #define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS) 200 - #define pmd_page(pmd) virt_to_page(pmd_page_vaddr(pmd)) 201 - 202 - #define pud_set(pudp, pudval) (pud_val(*(pudp)) = (pudval)) 203 - #define pud_none(pud) (!pud_val(pud)) 204 - #define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \ 205 - || (pud_val(pud) & PUD_BAD_BITS)) 206 - #define pud_present(pud) (pud_val(pud) != 0) 207 - #define pud_clear(pudp) (pud_val(*(pudp)) = 0) 208 - #define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS) 209 - #define pud_page(pud) virt_to_page(pud_page_vaddr(pud)) 210 - 211 - #define pgd_set(pgdp, pudp) ({pgd_val(*(pgdp)) = (unsigned long)(pudp);}) 212 - 213 - /* 214 - * Find an entry in a page-table-directory. We combine the address region 215 - * (the high order N bits) and the pgd portion of the address. 
216 - */ 217 - /* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */ 218 - #define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff) 219 - 220 - #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) 221 - 222 - #define pmd_offset(pudp,addr) \ 223 - (((pmd_t *) pud_page_vaddr(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))) 224 - 225 - #define pte_offset_kernel(dir,addr) \ 226 - (((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))) 227 - 228 - #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) 229 - #define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr)) 230 - #define pte_unmap(pte) do { } while(0) 231 - #define pte_unmap_nested(pte) do { } while(0) 232 - 233 - /* to find an entry in a kernel page-table-directory */ 234 - /* This now only contains the vmalloc pages */ 235 - #define pgd_offset_k(address) pgd_offset(&init_mm, address) 236 - 237 - /* 238 - * The following only work if pte_present() is true. 239 - * Undefined behaviour if not.. 240 - */ 241 - static inline int pte_read(pte_t pte) { return pte_val(pte) & _PAGE_USER;} 242 - static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;} 243 - static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC;} 244 - static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;} 245 - static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;} 246 - static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;} 247 - 248 - static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; } 249 - static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; } 250 - 251 - static inline pte_t pte_rdprotect(pte_t pte) { 252 - pte_val(pte) &= ~_PAGE_USER; return pte; } 253 - static inline pte_t pte_exprotect(pte_t pte) { 254 - pte_val(pte) &= ~_PAGE_EXEC; return pte; } 255 - static inline pte_t pte_wrprotect(pte_t pte) { 256 - pte_val(pte) &= ~(_PAGE_RW); return pte; } 257 - static inline pte_t pte_mkclean(pte_t pte) { 258 - pte_val(pte) &= ~(_PAGE_DIRTY); return pte; } 259 - static inline pte_t pte_mkold(pte_t pte) { 260 - pte_val(pte) &= ~_PAGE_ACCESSED; return pte; } 261 - static inline pte_t pte_mkread(pte_t pte) { 262 - pte_val(pte) |= _PAGE_USER; return pte; } 263 - static inline pte_t pte_mkexec(pte_t pte) { 264 - pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; } 265 - static inline pte_t pte_mkwrite(pte_t pte) { 266 - pte_val(pte) |= _PAGE_RW; return pte; } 267 - static inline pte_t pte_mkdirty(pte_t pte) { 268 - pte_val(pte) |= _PAGE_DIRTY; return pte; } 269 - static inline pte_t pte_mkyoung(pte_t pte) { 270 - pte_val(pte) |= _PAGE_ACCESSED; return pte; } 271 - static inline pte_t pte_mkhuge(pte_t pte) { 272 - return pte; } 273 - 274 - /* Atomic PTE updates */ 275 - static inline unsigned long pte_update(struct mm_struct *mm, 276 - unsigned long addr, 277 - pte_t *ptep, unsigned long clr, 278 - int huge) 279 - { 280 - unsigned long old, tmp; 281 - 282 - __asm__ __volatile__( 283 - "1: ldarx %0,0,%3 # pte_update\n\ 284 - andi. %1,%0,%6\n\ 285 - bne- 1b \n\ 286 - andc %1,%0,%4 \n\ 287 - stdcx. 
%1,0,%3 \n\ 288 - bne- 1b" 289 - : "=&r" (old), "=&r" (tmp), "=m" (*ptep) 290 - : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY) 291 - : "cc" ); 292 - 293 - if (old & _PAGE_HASHPTE) 294 - hpte_need_flush(mm, addr, ptep, old, huge); 295 - return old; 296 - } 297 - 298 - static inline int __ptep_test_and_clear_young(struct mm_struct *mm, 299 - unsigned long addr, pte_t *ptep) 300 - { 301 - unsigned long old; 302 - 303 - if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) 304 - return 0; 305 - old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0); 306 - return (old & _PAGE_ACCESSED) != 0; 307 - } 308 - #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 309 - #define ptep_test_and_clear_young(__vma, __addr, __ptep) \ 310 - ({ \ 311 - int __r; \ 312 - __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \ 313 - __r; \ 314 - }) 315 - 316 - /* 317 - * On RW/DIRTY bit transitions we can avoid flushing the hpte. For the 318 - * moment we always flush but we need to fix hpte_update and test if the 319 - * optimisation is worth it. 320 - */ 321 - static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm, 322 - unsigned long addr, pte_t *ptep) 323 - { 324 - unsigned long old; 325 - 326 - if ((pte_val(*ptep) & _PAGE_DIRTY) == 0) 327 - return 0; 328 - old = pte_update(mm, addr, ptep, _PAGE_DIRTY, 0); 329 - return (old & _PAGE_DIRTY) != 0; 330 - } 331 - #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY 332 - #define ptep_test_and_clear_dirty(__vma, __addr, __ptep) \ 333 - ({ \ 334 - int __r; \ 335 - __r = __ptep_test_and_clear_dirty((__vma)->vm_mm, __addr, __ptep); \ 336 - __r; \ 337 - }) 338 - 339 - #define __HAVE_ARCH_PTEP_SET_WRPROTECT 340 - static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, 341 - pte_t *ptep) 342 - { 343 - unsigned long old; 344 - 345 - if ((pte_val(*ptep) & _PAGE_RW) == 0) 346 - return; 347 - old = pte_update(mm, addr, ptep, _PAGE_RW, 0); 348 - } 349 - 350 - /* 351 - * We currently remove entries from the hashtable regardless of whether 352 - * the entry was young or dirty. The generic routines only flush if the 353 - * entry was young or dirty which is not good enough. 354 - * 355 - * We should be more intelligent about this but for the moment we override 356 - * these functions and force a tlb flush unconditionally 357 - */ 358 - #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH 359 - #define ptep_clear_flush_young(__vma, __address, __ptep) \ 360 - ({ \ 361 - int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \ 362 - __ptep); \ 363 - __young; \ 364 - }) 365 - 366 - #define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH 367 - #define ptep_clear_flush_dirty(__vma, __address, __ptep) \ 368 - ({ \ 369 - int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \ 370 - __ptep); \ 371 - __dirty; \ 372 - }) 373 - 374 - #define __HAVE_ARCH_PTEP_GET_AND_CLEAR 375 - static inline pte_t ptep_get_and_clear(struct mm_struct *mm, 376 - unsigned long addr, pte_t *ptep) 377 - { 378 - unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0); 379 - return __pte(old); 380 - } 381 - 382 - static inline void pte_clear(struct mm_struct *mm, unsigned long addr, 383 - pte_t * ptep) 384 - { 385 - pte_update(mm, addr, ptep, ~0UL, 0); 386 - } 387 - 388 - /* 389 - * set_pte stores a linux PTE into the linux page table. 
390 - */ 391 - static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, 392 - pte_t *ptep, pte_t pte) 393 - { 394 - if (pte_present(*ptep)) 395 - pte_clear(mm, addr, ptep); 396 - pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS); 397 - *ptep = pte; 398 - } 399 - 400 - /* Set the dirty and/or accessed bits atomically in a linux PTE, this 401 - * function doesn't need to flush the hash entry 402 - */ 403 - #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 404 - static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty) 405 - { 406 - unsigned long bits = pte_val(entry) & 407 - (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); 408 - unsigned long old, tmp; 409 - 410 - __asm__ __volatile__( 411 - "1: ldarx %0,0,%4\n\ 412 - andi. %1,%0,%6\n\ 413 - bne- 1b \n\ 414 - or %0,%3,%0\n\ 415 - stdcx. %0,0,%4\n\ 416 - bne- 1b" 417 - :"=&r" (old), "=&r" (tmp), "=m" (*ptep) 418 - :"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY) 419 - :"cc"); 420 - } 421 - #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ 422 - do { \ 423 - __ptep_set_access_flags(__ptep, __entry, __dirty); \ 424 - flush_tlb_page_nohash(__vma, __address); \ 425 - } while(0) 426 - 427 - /* 428 - * Macro to mark a page protection value as "uncacheable". 429 - */ 430 - #define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED)) 431 - 432 - struct file; 433 - extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 434 - unsigned long size, pgprot_t vma_prot); 435 - #define __HAVE_PHYS_MEM_ACCESS_PROT 436 - 437 - #define __HAVE_ARCH_PTE_SAME 438 - #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0) 439 - 440 - #define pte_ERROR(e) \ 441 - printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) 442 - #define pmd_ERROR(e) \ 443 - printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e)) 444 - #define pgd_ERROR(e) \ 445 - printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) 446 - 447 - extern pgd_t swapper_pg_dir[]; 448 - 449 - extern void paging_init(void); 450 - 451 - /* Encode and de-code a swap entry */ 452 - #define __swp_type(entry) (((entry).val >> 1) & 0x3f) 453 - #define __swp_offset(entry) ((entry).val >> 8) 454 - #define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)}) 455 - #define __pte_to_swp_entry(pte) ((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT}) 456 - #define __swp_entry_to_pte(x) ((pte_t) { (x).val << PTE_RPN_SHIFT }) 457 - #define pte_to_pgoff(pte) (pte_val(pte) >> PTE_RPN_SHIFT) 458 - #define pgoff_to_pte(off) ((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE}) 459 - #define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_RPN_SHIFT) 460 - 461 - /* 462 - * kern_addr_valid is intended to indicate whether an address is a valid 463 - * kernel address. Most 32-bit archs define it as always true (like this) 464 - * but most 64-bit archs actually perform a test. What should we do here? 465 - * The only use is in fs/ncpfs/dir.c 466 - */ 467 - #define kern_addr_valid(addr) (1) 468 - 469 - #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ 470 - remap_pfn_range(vma, vaddr, pfn, size, prot) 471 - 472 - void pgtable_cache_init(void); 473 - 474 - /* 475 - * find_linux_pte returns the address of a linux pte for a given 476 - * effective address and directory. If not found, it returns zero. 
477 - */static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea) 478 - { 479 - pgd_t *pg; 480 - pud_t *pu; 481 - pmd_t *pm; 482 - pte_t *pt = NULL; 483 - 484 - pg = pgdir + pgd_index(ea); 485 - if (!pgd_none(*pg)) { 486 - pu = pud_offset(pg, ea); 487 - if (!pud_none(*pu)) { 488 - pm = pmd_offset(pu, ea); 489 - if (pmd_present(*pm)) 490 - pt = pte_offset_kernel(pm, ea); 491 - } 492 - } 493 - return pt; 494 - } 495 - 496 - 497 12 #include <asm-generic/pgtable.h> 498 - 499 13 #endif /* __ASSEMBLY__ */ 500 14 501 - #endif /* CONFIG_PPC64 */ 502 15 #endif /* __KERNEL__ */ 503 16 #endif /* _ASM_POWERPC_PGTABLE_H */
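Among the definitions removed above are the region macros: REGION_ID() simply takes the top nibble of an effective address. A worked example (the numeric value of PAGE_OFFSET is an assumption here -- it comes from asm-powerpc/page.h, not from this patch):

/*
 *   REGION_ID(VMALLOC_START) == 0xD000000000000000 >> 60 == 0xd == VMALLOC_REGION_ID
 *   REGION_ID(PAGE_OFFSET)   == 0xC000000000000000 >> 60 == 0xc == KERNEL_REGION_ID
 *   REGION_ID(0x0000000012345678) == 0x0               == USER_REGION_ID
 */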
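With the 64-bit material moved out, include/asm-powerpc/pgtable.h reduces to a thin config-driven wrapper. Reconstructed from the added and context lines of this hunk (the collapsed first line is the usual #ifndef guard), the resulting file is approximately:

#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H
#ifdef __KERNEL__

#if defined(CONFIG_PPC64)
# include <asm/pgtable-ppc64.h>
#else
# include <asm/pgtable-ppc32.h>
#endif

#ifndef __ASSEMBLY__
#include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGTABLE_H */

In particular, 32-bit builds now pull in asm/pgtable-ppc32.h here instead of reaching across to asm-ppc/pgtable.h, which is what the removed "#include <asm-ppc/pgtable.h>" line used to do.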