Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARC: mm: disintegrate pgtable.h into levels and flags

- pgtable-bits-arcv2.h (MMU specific page table flags)
- pgtable-levels.h (paging levels)

No functional changes, but paves way for easy addition of new MMU code
with different bits and levels etc

Signed-off-by: Vineet Gupta <vgupta@kernel.org>

+248 -273
+149
arch/arc/include/asm/pgtable-bits-arcv2.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) 4 + */ 5 + 6 + /* 7 + * page table flags for software walked/managed MMUv3 (ARC700) and MMUv4 (HS) 8 + * These correspond to the corresponding bits in the TLB 9 + */ 10 + 11 + #ifndef _ASM_ARC_PGTABLE_BITS_ARCV2_H 12 + #define _ASM_ARC_PGTABLE_BITS_ARCV2_H 13 + 14 + #ifdef CONFIG_ARC_CACHE_PAGES 15 + #define _PAGE_CACHEABLE (1 << 0) /* Cached (H) */ 16 + #else 17 + #define _PAGE_CACHEABLE 0 18 + #endif 19 + 20 + #define _PAGE_EXECUTE (1 << 1) /* User Execute (H) */ 21 + #define _PAGE_WRITE (1 << 2) /* User Write (H) */ 22 + #define _PAGE_READ (1 << 3) /* User Read (H) */ 23 + #define _PAGE_ACCESSED (1 << 4) /* Accessed (s) */ 24 + #define _PAGE_DIRTY (1 << 5) /* Modified (s) */ 25 + #define _PAGE_SPECIAL (1 << 6) 26 + #define _PAGE_GLOBAL (1 << 8) /* ASID agnostic (H) */ 27 + #define _PAGE_PRESENT (1 << 9) /* PTE/TLB Valid (H) */ 28 + 29 + #ifdef CONFIG_ARC_MMU_V4 30 + #define _PAGE_HW_SZ (1 << 10) /* Normal/super (H) */ 31 + #else 32 + #define _PAGE_HW_SZ 0 33 + #endif 34 + 35 + /* Defaults for every user page */ 36 + #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE) 37 + 38 + /* Set of bits not changed in pte_modify */ 39 + #define _PAGE_CHG_MASK (PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \ 40 + _PAGE_SPECIAL) 41 + 42 + /* More Abbreviated helpers */ 43 + #define PAGE_U_NONE __pgprot(___DEF) 44 + #define PAGE_U_R __pgprot(___DEF | _PAGE_READ) 45 + #define PAGE_U_W_R __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE) 46 + #define PAGE_U_X_R __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE) 47 + #define PAGE_U_X_W_R __pgprot(___DEF \ 48 + | _PAGE_READ | _PAGE_WRITE | _PAGE_EXECUTE) 49 + #define PAGE_KERNEL __pgprot(___DEF | _PAGE_GLOBAL \ 50 + | _PAGE_READ | _PAGE_WRITE | _PAGE_EXECUTE) 51 + 52 + #define PAGE_SHARED PAGE_U_W_R 53 + 54 + #define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE)) 55 + 56 + /* 57 + * 
Mapping of vm_flags (Generic VM) to PTE flags (arch specific) 58 + * 59 + * Certain cases have 1:1 mapping 60 + * e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED 61 + * which directly corresponds to PAGE_U_X_R 62 + * 63 + * Other rules which cause the divergence from 1:1 mapping 64 + * 65 + * 1. Although ARC700 can do exclusive execute/write protection (meaning R 66 + * can be tracked independent of X/W unlike some other CPUs), still to 67 + * keep things consistent with other archs: 68 + * -Write implies Read: W => R 69 + * -Execute implies Read: X => R 70 + * 71 + * 2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W 72 + * This is to enable COW mechanism 73 + */ 74 + /* xwr */ 75 + #define __P000 PAGE_U_NONE 76 + #define __P001 PAGE_U_R 77 + #define __P010 PAGE_U_R /* Pvt-W => !W */ 78 + #define __P011 PAGE_U_R /* Pvt-W => !W */ 79 + #define __P100 PAGE_U_X_R /* X => R */ 80 + #define __P101 PAGE_U_X_R 81 + #define __P110 PAGE_U_X_R /* Pvt-W => !W and X => R */ 82 + #define __P111 PAGE_U_X_R /* Pvt-W => !W */ 83 + 84 + #define __S000 PAGE_U_NONE 85 + #define __S001 PAGE_U_R 86 + #define __S010 PAGE_U_W_R /* W => R */ 87 + #define __S011 PAGE_U_W_R 88 + #define __S100 PAGE_U_X_R /* X => R */ 89 + #define __S101 PAGE_U_X_R 90 + #define __S110 PAGE_U_X_W_R /* X => R */ 91 + #define __S111 PAGE_U_X_W_R 92 + 93 + #ifndef __ASSEMBLY__ 94 + 95 + #define pte_write(pte) (pte_val(pte) & _PAGE_WRITE) 96 + #define pte_dirty(pte) (pte_val(pte) & _PAGE_DIRTY) 97 + #define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED) 98 + #define pte_special(pte) (pte_val(pte) & _PAGE_SPECIAL) 99 + 100 + #define PTE_BIT_FUNC(fn, op) \ 101 + static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } 102 + 103 + PTE_BIT_FUNC(mknotpresent, &= ~(_PAGE_PRESENT)); 104 + PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE)); 105 + PTE_BIT_FUNC(mkwrite, |= (_PAGE_WRITE)); 106 + PTE_BIT_FUNC(mkclean, &= ~(_PAGE_DIRTY)); 107 + PTE_BIT_FUNC(mkdirty, |= (_PAGE_DIRTY)); 108 + 
PTE_BIT_FUNC(mkold, &= ~(_PAGE_ACCESSED)); 109 + PTE_BIT_FUNC(mkyoung, |= (_PAGE_ACCESSED)); 110 + PTE_BIT_FUNC(mkspecial, |= (_PAGE_SPECIAL)); 111 + PTE_BIT_FUNC(mkhuge, |= (_PAGE_HW_SZ)); 112 + 113 + static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 114 + { 115 + return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); 116 + } 117 + 118 + static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, 119 + pte_t *ptep, pte_t pteval) 120 + { 121 + set_pte(ptep, pteval); 122 + } 123 + 124 + void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, 125 + pte_t *ptep); 126 + 127 + /* Encode swap {type,off} tuple into PTE 128 + * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that 129 + * PAGE_PRESENT is zero in a PTE holding swap "identifier" 130 + */ 131 + #define __swp_entry(type, off) ((swp_entry_t) \ 132 + { ((type) & 0x1f) | ((off) << 13) }) 133 + 134 + /* Decode a PTE containing swap "identifier" into constituents */ 135 + #define __swp_type(pte_lookalike) (((pte_lookalike).val) & 0x1f) 136 + #define __swp_offset(pte_lookalike) ((pte_lookalike).val >> 13) 137 + 138 + #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 139 + #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 140 + 141 + #define kern_addr_valid(addr) (1) 142 + 143 + #ifdef CONFIG_TRANSPARENT_HUGEPAGE 144 + #include <asm/hugepage.h> 145 + #endif 146 + 147 + #endif /* __ASSEMBLY__ */ 148 + 149 + #endif
+95
arch/arc/include/asm/pgtable-levels.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (C) 2020 Synopsys, Inc. (www.synopsys.com) 4 + */ 5 + 6 + /* 7 + * Helpers for implementing paging levels 8 + */ 9 + 10 + #ifndef _ASM_ARC_PGTABLE_LEVELS_H 11 + #define _ASM_ARC_PGTABLE_LEVELS_H 12 + 13 + /* 14 + * 2 level paging setup for software walked MMUv3 (ARC700) and MMUv4 (HS) 15 + * 16 + * [31] 32 bit virtual address [0] 17 + * ------------------------------------------------------- 18 + * | | <---------- PGDIR_SHIFT ----------> | 19 + * | | | <-- PAGE_SHIFT --> | 20 + * ------------------------------------------------------- 21 + * | | | 22 + * | | --> off in page frame 23 + * | ---> index into Page Table 24 + * ----> index into Page Directory 25 + * 26 + * Given software walk, the vaddr split is arbitrarily set to 11:8:13 27 + * However enabling of super page in a 2 level regime pegs PGDIR_SHIFT to 28 + * super page size. 29 + */ 30 + 31 + #if defined(CONFIG_ARC_HUGEPAGE_16M) 32 + #define PGDIR_SHIFT 24 33 + #elif defined(CONFIG_ARC_HUGEPAGE_2M) 34 + #define PGDIR_SHIFT 21 35 + #else 36 + /* 37 + * No Super page case 38 + * Default value provides 11:8:13 (8K), 11:9:12 (4K) 39 + */ 40 + #define PGDIR_SHIFT 21 41 + 42 + #endif 43 + 44 + #define PGDIR_SIZE BIT(PGDIR_SHIFT) /* vaddr span, not PGD sz */ 45 + #define PGDIR_MASK (~(PGDIR_SIZE - 1)) 46 + 47 + #define PTRS_PER_PGD BIT(32 - PGDIR_SHIFT) 48 + 49 + #define PTRS_PER_PTE BIT(PGDIR_SHIFT - PAGE_SHIFT) 50 + 51 + #ifndef __ASSEMBLY__ 52 + 53 + #include <asm-generic/pgtable-nopmd.h> 54 + 55 + /* 56 + * 1st level paging: pgd 57 + */ 58 + #define pgd_index(addr) ((addr) >> PGDIR_SHIFT) 59 + #define pgd_offset(mm, addr) (((mm)->pgd) + pgd_index(addr)) 60 + #define pgd_offset_k(addr) pgd_offset(&init_mm, addr) 61 + #define pgd_ERROR(e) \ 62 + pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) 63 + 64 + /* 65 + * Due to the strange way generic pgtable level folding works, in a 2 level 66 + * setup, pmd_val() returns pgd, 
so these pmd_* macros actually work on pgd 67 + */ 68 + #define pmd_none(x) (!pmd_val(x)) 69 + #define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK)) 70 + #define pmd_present(x) (pmd_val(x)) 71 + #define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0) 72 + #define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK) 73 + #define pmd_page(pmd) virt_to_page(pmd_page_vaddr(pmd)) 74 + #define set_pmd(pmdp, pmd) (*(pmdp) = pmd) 75 + #define pmd_pgtable(pmd) ((pgtable_t) pmd_page_vaddr(pmd)) 76 + 77 + #define pte_ERROR(e) \ 78 + pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) 79 + 80 + #define pte_none(x) (!pte_val(x)) 81 + #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) 82 + #define pte_clear(mm,addr,ptep) set_pte_at(mm, addr, ptep, __pte(0)) 83 + #define pte_page(pte) pfn_to_page(pte_pfn(pte)) 84 + #define set_pte(ptep, pte) ((*(ptep)) = (pte)) 85 + #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) 86 + #define pfn_pte(pfn, prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot)) 87 + #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) 88 + 89 + #ifdef CONFIG_ISA_ARCV2 90 + #define pmd_leaf(x) (pmd_val(x) & _PAGE_HW_SZ) 91 + #endif 92 + 93 + #endif /* !__ASSEMBLY__ */ 94 + 95 + #endif
+4 -273
arch/arc/include/asm/pgtable.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0-only */ 2 2 /* 3 3 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) 4 - * 5 - * vineetg: May 2011 6 - * -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1. 7 - * They are semantically the same although in different contexts 8 - * VALID marks a TLB entry exists and it will only happen if PRESENT 9 - * - Utilise some unused free bits to confine PTE flags to 12 bits 10 - * This is a must for 4k pg-sz 11 - * 12 - * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods 13 - * -TLB Locking never really existed, except for initial specs 14 - * -SILENT_xxx not needed for our port 15 - * -Per my request, MMU V3 changes the layout of some of the bits 16 - * to avoid a few shifts in TLB Miss handlers. 17 - * 18 - * vineetg: April 2010 19 - * -PGD entry no longer contains any flags. If empty it is 0, otherwise has 20 - * Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set( ) become simpler 21 - * 22 - * vineetg: April 2010 23 - * -Switched form 8:11:13 split for page table lookup to 11:8:13 24 - * -this speeds up page table allocation itself as we now have to memset 1K 25 - * instead of 8k per page table. 26 - * -TODO: Right now page table alloc is 8K and rest 7K is unused 27 - * need to optimise it 28 - * 29 - * Amit Bhor, Sameer Dhavale: Codito Technologies 2004 30 4 */ 31 5 32 6 #ifndef _ASM_ARC_PGTABLE_H 33 7 #define _ASM_ARC_PGTABLE_H 34 8 35 9 #include <linux/bits.h> 36 - #include <asm-generic/pgtable-nopmd.h> 10 + 11 + #include <asm/pgtable-levels.h> 12 + #include <asm/pgtable-bits-arcv2.h> 37 13 #include <asm/page.h> 38 14 #include <asm/mmu.h> 39 - 40 - /************************************************************************** 41 - * Page Table Flags 42 - * 43 - * ARC700 MMU only deals with softare managed TLB entries. 44 - * Page Tables are purely for Linux VM's consumption and the bits below are 45 - * suited to that (uniqueness). 
Hence some are not implemented in the TLB and 46 - * some have different value in TLB. 47 - * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in 48 - * seperate PD0 and PD1, which combined forms a translation entry) 49 - * while for PTE perspective, they are 8 and 9 respectively 50 - * with MMU v3: Most bits (except SHARED) represent the exact hardware pos 51 - * (saves some bit shift ops in TLB Miss hdlrs) 52 - */ 53 - 54 - #define _PAGE_CACHEABLE (1<<0) /* Page is cached (H) */ 55 - #define _PAGE_EXECUTE (1<<1) /* Page has user execute perm (H) */ 56 - #define _PAGE_WRITE (1<<2) /* Page has user write perm (H) */ 57 - #define _PAGE_READ (1<<3) /* Page has user read perm (H) */ 58 - #define _PAGE_ACCESSED (1<<4) /* Page is accessed (S) */ 59 - #define _PAGE_DIRTY (1<<5) /* Page modified (dirty) (S) */ 60 - #define _PAGE_SPECIAL (1<<6) 61 - 62 - #define _PAGE_GLOBAL (1<<8) /* Page is global (H) */ 63 - #define _PAGE_PRESENT (1<<9) /* TLB entry is valid (H) */ 64 - 65 - #ifdef CONFIG_ARC_MMU_V4 66 - #define _PAGE_HW_SZ (1<<10) /* Page Size indicator (H): 0 normal, 1 super */ 67 - #endif 68 - 69 - #define _PAGE_SHARED_CODE (1<<11) /* Shared Code page with cmn vaddr 70 - usable for shared TLB entries (H) */ 71 - /* vmalloc permissions */ 72 - #define _K_PAGE_PERMS (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \ 73 - _PAGE_GLOBAL | _PAGE_PRESENT) 74 - 75 - #ifndef CONFIG_ARC_CACHE_PAGES 76 - #undef _PAGE_CACHEABLE 77 - #define _PAGE_CACHEABLE 0 78 - #endif 79 - 80 - #ifndef _PAGE_HW_SZ 81 - #define _PAGE_HW_SZ 0 82 - #endif 83 - 84 - /* Defaults for every user page */ 85 - #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE) 86 - 87 - /* Set of bits not changed in pte_modify */ 88 - #define _PAGE_CHG_MASK (PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \ 89 - _PAGE_SPECIAL) 90 - /* More Abbrevaited helpers */ 91 - #define PAGE_U_NONE __pgprot(___DEF) 92 - #define PAGE_U_R __pgprot(___DEF | _PAGE_READ) 93 - #define PAGE_U_W_R __pgprot(___DEF | 
_PAGE_READ | _PAGE_WRITE) 94 - #define PAGE_U_X_R __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE) 95 - #define PAGE_U_X_W_R __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \ 96 - _PAGE_EXECUTE) 97 - 98 - #define PAGE_SHARED PAGE_U_W_R 99 - 100 - /* While kernel runs out of unstranslated space, vmalloc/modules use a chunk of 101 - * user vaddr space - visible in all addr spaces, but kernel mode only 102 - * Thus Global, all-kernel-access, no-user-access, cached 103 - */ 104 - #define PAGE_KERNEL __pgprot(_K_PAGE_PERMS | _PAGE_CACHEABLE) 105 - 106 - /************************************************************************** 107 - * Mapping of vm_flags (Generic VM) to PTE flags (arch specific) 108 - * 109 - * Certain cases have 1:1 mapping 110 - * e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED 111 - * which directly corresponds to PAGE_U_X_R 112 - * 113 - * Other rules which cause the divergence from 1:1 mapping 114 - * 115 - * 1. Although ARC700 can do exclusive execute/write protection (meaning R 116 - * can be tracked independet of X/W unlike some other CPUs), still to 117 - * keep things consistent with other archs: 118 - * -Write implies Read: W => R 119 - * -Execute implies Read: X => R 120 - * 121 - * 2. 
Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W 122 - * This is to enable COW mechanism 123 - */ 124 - /* xwr */ 125 - #define __P000 PAGE_U_NONE 126 - #define __P001 PAGE_U_R 127 - #define __P010 PAGE_U_R /* Pvt-W => !W */ 128 - #define __P011 PAGE_U_R /* Pvt-W => !W */ 129 - #define __P100 PAGE_U_X_R /* X => R */ 130 - #define __P101 PAGE_U_X_R 131 - #define __P110 PAGE_U_X_R /* Pvt-W => !W and X => R */ 132 - #define __P111 PAGE_U_X_R /* Pvt-W => !W */ 133 - 134 - #define __S000 PAGE_U_NONE 135 - #define __S001 PAGE_U_R 136 - #define __S010 PAGE_U_W_R /* W => R */ 137 - #define __S011 PAGE_U_W_R 138 - #define __S100 PAGE_U_X_R /* X => R */ 139 - #define __S101 PAGE_U_X_R 140 - #define __S110 PAGE_U_X_W_R /* X => R */ 141 - #define __S111 PAGE_U_X_W_R 142 - 143 - /**************************************************************** 144 - * 2 tier (PGD:PTE) software page walker 145 - * 146 - * [31] 32 bit virtual address [0] 147 - * ------------------------------------------------------- 148 - * | | <------------ PGDIR_SHIFT ----------> | 149 - * | | | 150 - * | BITS_FOR_PGD | BITS_FOR_PTE | <-- PAGE_SHIFT --> | 151 - * ------------------------------------------------------- 152 - * | | | 153 - * | | --> off in page frame 154 - * | ---> index into Page Table 155 - * ----> index into Page Directory 156 - * 157 - * In a single page size configuration, only PAGE_SHIFT is fixed 158 - * So both PGD and PTE sizing can be tweaked 159 - * e.g. 8K page (PAGE_SHIFT 13) can have 160 - * - PGDIR_SHIFT 21 -> 11:8:13 address split 161 - * - PGDIR_SHIFT 24 -> 8:11:13 address split 162 - * 163 - * If Super Page is configured, PGDIR_SHIFT becomes fixed too, 164 - * so the sizing flexibility is gone. 
165 - */ 166 - 167 - #if defined(CONFIG_ARC_HUGEPAGE_16M) 168 - #define PGDIR_SHIFT 24 169 - #elif defined(CONFIG_ARC_HUGEPAGE_2M) 170 - #define PGDIR_SHIFT 21 171 - #else 172 - /* 173 - * Only Normal page support so "hackable" (see comment above) 174 - * Default value provides 11:8:13 (8K), 11:9:12 (4K) 175 - */ 176 - #define PGDIR_SHIFT 21 177 - #endif 178 - 179 - #define BITS_FOR_PTE (PGDIR_SHIFT - PAGE_SHIFT) 180 - #define BITS_FOR_PGD (32 - PGDIR_SHIFT) 181 - 182 - #define PGDIR_SIZE BIT(PGDIR_SHIFT) /* vaddr span, not PDG sz */ 183 - #define PGDIR_MASK (~(PGDIR_SIZE-1)) 184 - 185 - #define PTRS_PER_PTE BIT(BITS_FOR_PTE) 186 - #define PTRS_PER_PGD BIT(BITS_FOR_PGD) 187 15 188 16 /* 189 17 * Number of entries a user land program use. ··· 19 191 */ 20 192 #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) 21 193 22 - 23 - /**************************************************************** 24 - * Bucket load of VM Helpers 25 - */ 26 - 27 194 #ifndef __ASSEMBLY__ 28 195 29 - #define pte_ERROR(e) \ 30 - pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) 31 - #define pgd_ERROR(e) \ 32 - pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) 33 - 34 - /* the zero page used for uninitialized and anonymous pages */ 35 196 extern char empty_zero_page[PAGE_SIZE]; 36 197 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) 37 198 38 - #define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval)) 39 - #define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval) 40 - 41 - /* find the page descriptor of the Page Tbl ref by PMD entry */ 42 - #define pmd_page(pmd) virt_to_page(pmd_val(pmd) & PAGE_MASK) 43 - 44 - /* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */ 45 - #define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK) 46 - 47 - #define pte_none(x) (!pte_val(x)) 48 - #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) 49 - #define pte_clear(mm, addr, ptep) set_pte_at(mm, addr, ptep, __pte(0)) 50 - 51 - #define pmd_none(x) 
(!pmd_val(x)) 52 - #define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK)) 53 - #define pmd_present(x) (pmd_val(x)) 54 - #define pmd_leaf(x) (pmd_val(x) & _PAGE_HW_SZ) 55 - #define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0) 56 - 57 - #define pte_page(pte) pfn_to_page(pte_pfn(pte)) 58 - #define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) 59 - #define pfn_pte(pfn, prot) __pte(__pfn_to_phys(pfn) | pgprot_val(prot)) 60 - 61 - /* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/ 62 - #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) 63 - 64 - /* Zoo of pte_xxx function */ 65 - #define pte_read(pte) (pte_val(pte) & _PAGE_READ) 66 - #define pte_write(pte) (pte_val(pte) & _PAGE_WRITE) 67 - #define pte_dirty(pte) (pte_val(pte) & _PAGE_DIRTY) 68 - #define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED) 69 - #define pte_special(pte) (pte_val(pte) & _PAGE_SPECIAL) 70 - 71 - #define PTE_BIT_FUNC(fn, op) \ 72 - static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } 73 - 74 - PTE_BIT_FUNC(mknotpresent, &= ~(_PAGE_PRESENT)); 75 - PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE)); 76 - PTE_BIT_FUNC(mkwrite, |= (_PAGE_WRITE)); 77 - PTE_BIT_FUNC(mkclean, &= ~(_PAGE_DIRTY)); 78 - PTE_BIT_FUNC(mkdirty, |= (_PAGE_DIRTY)); 79 - PTE_BIT_FUNC(mkold, &= ~(_PAGE_ACCESSED)); 80 - PTE_BIT_FUNC(mkyoung, |= (_PAGE_ACCESSED)); 81 - PTE_BIT_FUNC(exprotect, &= ~(_PAGE_EXECUTE)); 82 - PTE_BIT_FUNC(mkexec, |= (_PAGE_EXECUTE)); 83 - PTE_BIT_FUNC(mkspecial, |= (_PAGE_SPECIAL)); 84 - PTE_BIT_FUNC(mkhuge, |= (_PAGE_HW_SZ)); 85 - 86 - static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 87 - { 88 - return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); 89 - } 199 + extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE); 90 200 91 201 /* Macro to mark a page protection as uncacheable */ 92 202 #define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE)) 93 203 94 - static inline void set_pte_at(struct mm_struct *mm, unsigned long 
addr, 95 - pte_t *ptep, pte_t pteval) 96 - { 97 - set_pte(ptep, pteval); 98 - } 99 - 100 204 extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE); 101 - void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, 102 - pte_t *ptep); 103 - 104 - /* Encode swap {type,off} tuple into PTE 105 - * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that 106 - * PAGE_PRESENT is zero in a PTE holding swap "identifier" 107 - */ 108 - #define __swp_entry(type, off) ((swp_entry_t) { \ 109 - ((type) & 0x1f) | ((off) << 13) }) 110 - 111 - /* Decode a PTE containing swap "identifier "into constituents */ 112 - #define __swp_type(pte_lookalike) (((pte_lookalike).val) & 0x1f) 113 - #define __swp_offset(pte_lookalike) ((pte_lookalike).val >> 13) 114 - 115 - /* NOPs, to keep generic kernel happy */ 116 - #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 117 - #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 118 - 119 - #define kern_addr_valid(addr) (1) 120 - 121 - #define pmd_pgtable(pmd) ((pgtable_t) pmd_page_vaddr(pmd)) 122 - 123 - /* 124 - * remap a physical page `pfn' of size `size' with page protection `prot' 125 - * into virtual address `from' 126 - */ 127 - #ifdef CONFIG_TRANSPARENT_HUGEPAGE 128 - #include <asm/hugepage.h> 129 - #endif 130 205 131 206 /* to cope with aliasing VIPT cache */ 132 207 #define HAVE_ARCH_UNMAPPED_AREA