Linux kernel mirror (for testing) — git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
arch/arm/include/asm/pgtable-3level.h at v3.14-rc4
/*
 * arch/arm/include/asm/pgtable-3level.h
 *
 * Copyright (C) 2011 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#ifndef _ASM_PGTABLE_3LEVEL_H
#define _ASM_PGTABLE_3LEVEL_H

/*
 * With LPAE, there are 3 levels of page tables. Each level has 512 entries of
 * 8 bytes each, occupying a 4K page. The first level table covers a range of
 * 512GB, each entry representing 1GB. Since we are limited to 4GB input
 * address range, only 4 entries in the PGD are used.
 *
 * There are enough spare bits in a page table entry for the kernel specific
 * state.
 */
#define PTRS_PER_PTE		512
#define PTRS_PER_PMD		512
#define PTRS_PER_PGD		4

#define PTE_HWTABLE_PTRS	(0)
#define PTE_HWTABLE_OFF		(0)
#define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u64))

/*
 * PGDIR_SHIFT determines the size a top-level page table entry can map.
 */
#define PGDIR_SHIFT		30

/*
 * PMD_SHIFT determines the size a middle-level page table entry can map.
 */
#define PMD_SHIFT		21

#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~((1 << PMD_SHIFT) - 1))
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~((1 << PGDIR_SHIFT) - 1))

/*
 * section address mask and size definitions.
 */
#define SECTION_SHIFT		21
#define SECTION_SIZE		(1UL << SECTION_SHIFT)
#define SECTION_MASK		(~((1 << SECTION_SHIFT) - 1))

#define USER_PTRS_PER_PGD	(PAGE_OFFSET / PGDIR_SIZE)

/*
 * Hugetlb definitions.
 */
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

/*
 * "Linux" PTE definitions for LPAE.
 *
 * These bits overlap with the hardware bits but the naming is preserved for
 * consistency with the classic page table format.
 */
#define L_PTE_VALID		(_AT(pteval_t, 1) << 0)		/* Valid */
#define L_PTE_PRESENT		(_AT(pteval_t, 3) << 0)		/* Present */
#define L_PTE_FILE		(_AT(pteval_t, 1) << 2)		/* only when !PRESENT */
#define L_PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
#define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7)		/* AP[2] */
#define L_PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
#define L_PTE_YOUNG		(_AT(pteval_t, 1) << 10)	/* AF */
#define L_PTE_XN		(_AT(pteval_t, 1) << 54)	/* XN */
#define L_PTE_DIRTY		(_AT(pteval_t, 1) << 55)	/* unused */
#define L_PTE_SPECIAL		(_AT(pteval_t, 1) << 56)	/* unused */
#define L_PTE_NONE		(_AT(pteval_t, 1) << 57)	/* PROT_NONE */

#define PMD_SECT_VALID		(_AT(pmdval_t, 1) << 0)
#define PMD_SECT_DIRTY		(_AT(pmdval_t, 1) << 55)
#define PMD_SECT_SPLITTING	(_AT(pmdval_t, 1) << 56)
#define PMD_SECT_NONE		(_AT(pmdval_t, 1) << 57)
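
/*
 * Worked example (illustrative note, not part of the original header; assumes
 * 4K pages, i.e. PAGE_SHIFT == 12, as implied by the "4K page" comment at the
 * top of this file): with PGDIR_SHIFT == 30 and PMD_SHIFT == 21, a 32-bit
 * virtual address splits as
 *
 *     31 30 | 29        21 | 20        12 | 11          0
 *      PGD  |  PMD index   |  PTE index   |  page offset
 *
 * so pgd_index = addr >> 30 (one of the 4 PGD entries), pmd_index =
 * (addr >> 21) & 511 and pte_index = (addr >> 12) & 511, matching the
 * PTRS_PER_PGD/PMD/PTE values defined above.
 */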

/*
 * To be used in assembly code with the upper page attributes.
 */
#define L_PTE_XN_HIGH		(1 << (54 - 32))
#define L_PTE_DIRTY_HIGH	(1 << (55 - 32))

/*
 * AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
 */
#define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0) << 2)	/* strongly ordered */
#define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 1) << 2)	/* normal non-cacheable */
#define L_PTE_MT_WRITETHROUGH	(_AT(pteval_t, 2) << 2)	/* normal inner write-through */
#define L_PTE_MT_WRITEBACK	(_AT(pteval_t, 3) << 2)	/* normal inner write-back */
#define L_PTE_MT_WRITEALLOC	(_AT(pteval_t, 7) << 2)	/* normal inner write-alloc */
#define L_PTE_MT_DEV_SHARED	(_AT(pteval_t, 4) << 2)	/* device */
#define L_PTE_MT_DEV_NONSHARED	(_AT(pteval_t, 4) << 2)	/* device */
#define L_PTE_MT_DEV_WC		(_AT(pteval_t, 1) << 2)	/* normal non-cacheable */
#define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 3) << 2)	/* normal inner write-back */
#define L_PTE_MT_MASK		(_AT(pteval_t, 7) << 2)

/*
 * Software PGD flags.
 */
#define L_PGD_SWAPPER		(_AT(pgdval_t, 1) << 55)	/* swapper_pg_dir entry */

/*
 * 2nd stage PTE definitions for LPAE.
 */
#define L_PTE_S2_MT_UNCACHED		(_AT(pteval_t, 0x0) << 2) /* strongly ordered */
#define L_PTE_S2_MT_WRITETHROUGH	(_AT(pteval_t, 0xa) << 2) /* normal inner write-through */
#define L_PTE_S2_MT_WRITEBACK		(_AT(pteval_t, 0xf) << 2) /* normal inner write-back */
#define L_PTE_S2_MT_DEV_SHARED		(_AT(pteval_t, 0x1) << 2) /* device */
#define L_PTE_S2_MT_MASK		(_AT(pteval_t, 0xf) << 2)

#define L_PTE_S2_RDONLY			(_AT(pteval_t, 1) << 6)   /* HAP[1] */
#define L_PTE_S2_RDWR			(_AT(pteval_t, 3) << 6)   /* HAP[2:1] */

#define L_PMD_S2_RDWR			(_AT(pmdval_t, 3) << 6)   /* HAP[2:1] */

/*
 * Hyp-mode PL2 PTE definitions for LPAE.
 */
#define L_PTE_HYP		L_PTE_USER

#ifndef __ASSEMBLY__

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & 2))
#define pud_present(pud)	(pud_val(pud))
#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
						 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
						 PMD_TYPE_SECT)
#define pmd_large(pmd)		pmd_sect(pmd)

#define pud_clear(pudp)			\
	do {				\
		*pudp = __pud(0);	\
		clean_pmd_entry(pudp);	\
	} while (0)

#define set_pud(pudp, pud)		\
	do {				\
		*pudp = pud;		\
		flush_pmd_entry(pudp);	\
	} while (0)

static inline pmd_t *pud_page_vaddr(pud_t pud)
{
	return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}

/* Find an entry in the second-level page table.. */
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
}

#define pmd_bad(pmd)		(!(pmd_val(pmd) & 2))

#define copy_pmd(pmdpd,pmdps)		\
	do {				\
		*pmdpd = *pmdps;	\
		flush_pmd_entry(pmdpd);	\
	} while (0)

#define pmd_clear(pmdp)			\
	do {				\
		*pmdp = __pmd(0);	\
		clean_pmd_entry(pmdp);	\
	} while (0)
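
/*
 * Usage sketch (illustrative note, not part of the original header):
 * pmd_offset() above is the LPAE step of the usual Linux page-table walk of
 * this era, roughly
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);	(pud level is folded on ARM LPAE)
 *	pmd_t *pmd = pmd_offset(pud, addr);	(uses pmd_index() above)
 *
 * pmd_offset() reads the next-level table address out of the pud entry via
 * pud_page_vaddr() and indexes it with bits [29:21] of the address, i.e.
 * pmd_index(addr) = (addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1).
 */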

/*
 * For 3 levels of paging the PTE_EXT_NG bit will be set for user address ptes
 * that are written to a page table but not for ptes created with mk_pte.
 *
 * In hugetlb_no_page, a new huge pte (new_pte) is generated and passed to
 * hugetlb_cow, where it is compared with an entry in a page table.
 * This comparison test fails erroneously leading ultimately to a memory leak.
 *
 * To correct this behaviour, we mask off PTE_EXT_NG for any pte that is
 * present before running the comparison.
 */
#define __HAVE_ARCH_PTE_SAME
#define pte_same(pte_a,pte_b)	((pte_present(pte_a) ? pte_val(pte_a) & ~PTE_EXT_NG	\
					: pte_val(pte_a))				\
				== (pte_present(pte_b) ? pte_val(pte_b) & ~PTE_EXT_NG	\
					: pte_val(pte_b)))

#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext)))

#define pte_huge(pte)		(pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

#define pmd_young(pmd)		(pmd_val(pmd) & PMD_SECT_AF)

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		(!(pmd_val(pmd) & PMD_SECT_RDONLY))

#define pmd_hugewillfault(pmd)	(!pmd_young(pmd) || !pmd_write(pmd))
#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
#endif

#define PMD_BIT_FUNC(fn,op) \
static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }

PMD_BIT_FUNC(wrprotect,	|= PMD_SECT_RDONLY);
PMD_BIT_FUNC(mkold,	&= ~PMD_SECT_AF);
PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING);
PMD_BIT_FUNC(mkwrite,   &= ~PMD_SECT_RDONLY);
PMD_BIT_FUNC(mkdirty,   |= PMD_SECT_DIRTY);
PMD_BIT_FUNC(mkyoung,   |= PMD_SECT_AF);

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
#define pmd_mknotpresent(pmd)	(__pmd(0))

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | PMD_SECT_RDONLY |
				PMD_SECT_VALID | PMD_SECT_NONE;
	pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
	return pmd;
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
				pmd_t *pmdp, pmd_t pmd)
{
	BUG_ON(addr >= TASK_SIZE);

	/* create a faulting entry if PROT_NONE protected */
	if (pmd_val(pmd) & PMD_SECT_NONE)
		pmd_val(pmd) &= ~PMD_SECT_VALID;

	*pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG);
	flush_pmd_entry(pmdp);
}

static inline int has_transparent_hugepage(void)
{
	return 1;
}

#endif /* __ASSEMBLY__ */

#endif	/* _ASM_PGTABLE_3LEVEL_H */
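
The pmd_pfn()/pfn_pmd() arithmetic above can be checked in isolation. The following is a minimal stand-alone sketch that only mirrors the shift constants assumed in this header (PMD_SHIFT = 21 for 2MB sections, PAGE_SHIFT = 12 for 4K pages); the SKETCH_* names are local stand-ins, not kernel identifiers:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the constants assumed above: 2MB sections made of 4K pages. */
#define SKETCH_PMD_SHIFT	21
#define SKETCH_PAGE_SHIFT	12
#define SKETCH_PMD_SIZE		(1ULL << SKETCH_PMD_SHIFT)
#define SKETCH_PMD_MASK		(~(SKETCH_PMD_SIZE - 1))

int main(void)
{
	uint64_t phys = 0x80200000ULL;             /* a 2MB-aligned physical address */
	uint64_t pfn  = phys >> SKETCH_PAGE_SHIFT; /* page frame number, as pfn_pmd() takes */

	/* pfn_pmd(): the frame number becomes the descriptor's output-address field */
	uint64_t pmd  = pfn << SKETCH_PAGE_SHIFT;  /* low bits would hold the pgprot flags */

	/* pmd_pfn(): mask down to the 2MB section boundary, then convert back to a pfn */
	uint64_t back = (pmd & SKETCH_PMD_MASK) >> SKETCH_PAGE_SHIFT;

	printf("phys=%#llx pfn=%#llx round-trip pfn=%#llx\n",
	       (unsigned long long)phys, (unsigned long long)pfn,
	       (unsigned long long)back);
	return 0;
}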