Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/mm: Tweak PTE bit combination definitions

This patch tweaks the way some PTE bit combinations are defined, in such a
way that the 32 and 64-bit variant become almost identical and that will
make it easier to bring in a new common pte-* file for the new variant
of the Book3-E support.

The combination of bits defining access to kernel pages are now clearly
separated from the combination used by userspace and the core VM. The
resulting generated code should remain identical unless I made a mistake.

Note: While at it, I removed a nonsensical statement related to CONFIG_KGDB
in ppc_mmu_32.c which could cause kernel mappings to be user accessible when
that option is enabled. Probably something that bitrotted.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

+88 -75
+1 -1
arch/powerpc/include/asm/fixmap.h
··· 61 61 * Some hardware wants to get fixmapped without caching. 62 62 */ 63 63 #define set_fixmap_nocache(idx, phys) \ 64 - __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE) 64 + __set_fixmap(idx, phys, PAGE_KERNEL_NCG) 65 65 66 66 #define clear_fixmap(idx) \ 67 67 __set_fixmap(idx, 0, __pgprot(0))
+19 -20
arch/powerpc/include/asm/pgtable-ppc32.h
··· 144 144 #define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE() 145 145 #endif 146 146 147 + #ifndef _PAGE_KERNEL_RO 148 + #define _PAGE_KERNEL_RO 0 149 + #endif 150 + #ifndef _PAGE_KERNEL_RW 151 + #define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE) 152 + #endif 153 + 147 154 #define _PAGE_HPTEFLAGS _PAGE_HASHPTE 148 155 149 156 /* Location of the PFN in the PTE. Most platforms use the same as _PAGE_SHIFT ··· 193 186 #else 194 187 #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED) 195 188 #endif 196 - #define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE) 189 + #define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED) 197 190 198 - #define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE) 199 - #define _PAGE_KERNEL (_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE) 200 - #define _PAGE_KERNEL_NC (_PAGE_BASE_NC | _PAGE_SHARED | _PAGE_WRENABLE) 201 - 202 - #ifdef CONFIG_PPC_STD_MMU 203 - /* On standard PPC MMU, no user access implies kernel read/write access, 204 - * so to write-protect kernel memory we must turn on user access */ 205 - #define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED | _PAGE_USER) 206 - #else 207 - #define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED) 208 - #endif 209 - 210 - #define _PAGE_IO (_PAGE_KERNEL_NC | _PAGE_GUARDED) 211 - #define _PAGE_RAM (_PAGE_KERNEL | _PAGE_HWEXEC) 191 + /* Permission masks used for kernel mappings */ 192 + #define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW) 193 + #define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ 194 + _PAGE_NO_CACHE) 195 + #define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ 196 + _PAGE_NO_CACHE | _PAGE_GUARDED) 197 + #define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW | _PAGE_EXEC) 198 + #define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) 199 + #define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO | _PAGE_EXEC) 212 200 213 201 #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) 
||\ 214 202 defined(CONFIG_KPROBES) 215 203 /* We want the debuggers to be able to set breakpoints anywhere, so 216 204 * don't write protect the kernel text */ 217 - #define _PAGE_RAM_TEXT _PAGE_RAM 205 + #define PAGE_KERNEL_TEXT PAGE_KERNEL_X 218 206 #else 219 - #define _PAGE_RAM_TEXT (_PAGE_KERNEL_RO | _PAGE_HWEXEC) 207 + #define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX 220 208 #endif 221 209 222 210 #define PAGE_NONE __pgprot(_PAGE_BASE) ··· 221 219 #define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC) 222 220 #define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) 223 221 #define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) 224 - 225 - #define PAGE_KERNEL __pgprot(_PAGE_RAM) 226 - #define PAGE_KERNEL_NOCACHE __pgprot(_PAGE_IO) 227 222 228 223 /* 229 224 * The PowerPC can only do execute protection on a segment (256MB) basis,
+29 -17
arch/powerpc/include/asm/pgtable-ppc64.h
··· 81 81 */ 82 82 #include <asm/pte-hash64.h> 83 83 84 - /* To make some generic powerpc code happy */ 85 - #ifndef _PAGE_HWEXEC 86 - #define _PAGE_HWEXEC 0 87 - #endif 88 - 89 84 /* Some other useful definitions */ 90 85 #define PTE_RPN_MAX (1UL << (64 - PTE_RPN_SHIFT)) 91 86 #define PTE_RPN_MASK (~((1UL<<PTE_RPN_SHIFT)-1)) ··· 91 96 #define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \ 92 97 _PAGE_ACCESSED | _PAGE_SPECIAL) 93 98 99 + #define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE) 100 + #define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT) 94 101 95 102 96 - /* __pgprot defined in arch/powerpc/include/asm/page.h */ 97 - #define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED) 98 - 99 - #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER) 100 - #define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC) 103 + /* Permission masks used to generate the __P and __S table, 104 + * 105 + * Note:__pgprot is defined in arch/powerpc/include/asm/page.h 106 + */ 107 + #define PAGE_NONE __pgprot(_PAGE_BASE) 108 + #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) 109 + #define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC) 101 110 #define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) 102 111 #define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) 103 112 #define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) 104 113 #define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) 105 - #define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_WRENABLE) 106 - #define PAGE_KERNEL_CI __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \ 107 - _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED) 108 - #define PAGE_KERNEL_EXEC __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_EXEC) 109 114 110 - #define PAGE_AGP __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE) 111 - #define HAVE_PAGE_AGP 115 + /* Permission masks used for kernel mappings */ 116 + #define PAGE_KERNEL 
__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW) 117 + #define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ 118 + _PAGE_NO_CACHE) 119 + #define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \ 120 + _PAGE_NO_CACHE | _PAGE_GUARDED) 121 + #define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW | _PAGE_EXEC) 122 + #define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) 123 + #define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO | _PAGE_EXEC) 124 + 125 + /* Protection bits for use by pte_pgprot() */ 126 + #define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | \ 127 + _PAGE_NO_CACHE | _PAGE_WRITETHRU | \ 128 + _PAGE_4K_PFN | _PAGE_USER | _PAGE_RW | \ 129 + _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC) 130 + 112 131 113 132 /* We always have _PAGE_SPECIAL on 64 bit */ 114 133 #define __HAVE_ARCH_PTE_SPECIAL 115 134 135 + /* Make modules code happy. We don't set RO yet */ 136 + #define PAGE_KERNEL_EXEC PAGE_KERNEL_X 116 137 117 138 /* 118 139 * POWER4 and newer have per page execute protection, older chips can only ··· 406 395 static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry) 407 396 { 408 397 unsigned long bits = pte_val(entry) & 409 - (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); 398 + (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | 399 + _PAGE_EXEC | _PAGE_HWEXEC); 410 400 unsigned long old, tmp; 411 401 412 402 __asm__ __volatile__(
+4
arch/powerpc/include/asm/pgtable.h
··· 25 25 # include <asm/pgtable-ppc32.h> 26 26 #endif 27 27 28 + /* Special mapping for AGP */ 29 + #define PAGE_AGP (PAGE_KERNEL_NC) 30 + #define HAVE_PAGE_AGP 31 + 28 32 #ifndef __ASSEMBLY__ 29 33 30 34 /* Insert a PTE, top-level function is out of line. It uses an inline
+3
arch/powerpc/include/asm/pte-8xx.h
··· 59 59 /* Until my rework is finished, 8xx still needs atomic PTE updates */ 60 60 #define PTE_ATOMIC_UPDATES 1 61 61 62 + /* We need to add _PAGE_SHARED to kernel pages */ 63 + #define _PAGE_KERNEL_RO (_PAGE_SHARED) 64 + #define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE) 62 65 63 66 #endif /* __KERNEL__ */ 64 67 #endif /* _ASM_POWERPC_PTE_8xx_H */
-1
arch/powerpc/include/asm/pte-hash32.h
··· 44 44 /* Hash table based platforms need atomic updates of the linux PTE */ 45 45 #define PTE_ATOMIC_UPDATES 1 46 46 47 - 48 47 #endif /* __KERNEL__ */ 49 48 #endif /* _ASM_POWERPC_PTE_HASH32_H */
-3
arch/powerpc/include/asm/pte-hash64-4k.h
··· 8 8 #define _PAGE_F_GIX _PAGE_GROUP_IX 9 9 #define _PAGE_SPECIAL 0x10000 /* software: special page */ 10 10 11 - /* There is no 4K PFN hack on 4K pages */ 12 - #define _PAGE_4K_PFN 0 13 - 14 11 /* PTE flags to conserve for HPTE identification */ 15 12 #define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \ 16 13 _PAGE_SECONDARY | _PAGE_GROUP_IX)
+26 -21
arch/powerpc/include/asm/pte-hash64.h
··· 6 6 * Common bits between 4K and 64K pages in a linux-style PTE. 7 7 * These match the bits in the (hardware-defined) PowerPC PTE as closely 8 8 * as possible. Additional bits may be defined in pgtable-hash64-*.h 9 + * 10 + * Note: We only support user read/write permissions. Supervisor always 11 + * have full read/write to pages above PAGE_OFFSET (pages below that 12 + * always use the user access permissions). 13 + * 14 + * We could create separate kernel read-only if we used the 3 PP bits 15 + * combinations that newer processors provide but we currently don't. 9 16 */ 10 - #define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */ 11 - #define _PAGE_USER 0x0002 /* matches one of the PP bits */ 12 - #define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */ 13 - #define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */ 14 - #define _PAGE_GUARDED 0x0008 15 - #define _PAGE_COHERENT 0x0010 /* M: enforce memory coherence (SMP systems) */ 16 - #define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */ 17 - #define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */ 18 - #define _PAGE_DIRTY 0x0080 /* C: page changed */ 19 - #define _PAGE_ACCESSED 0x0100 /* R: page referenced */ 20 - #define _PAGE_RW 0x0200 /* software: user write access allowed */ 21 - #define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */ 17 + #define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */ 18 + #define _PAGE_USER 0x0002 /* matches one of the PP bits */ 19 + #define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */ 20 + #define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */ 21 + #define _PAGE_GUARDED 0x0008 22 + #define _PAGE_COHERENT 0x0010 /* M: enforce memory coherence (SMP systems) */ 23 + #define _PAGE_NO_CACHE 0x0020 /* I: cache inhibit */ 24 + #define _PAGE_WRITETHRU 0x0040 /* W: cache write-through */ 25 + #define _PAGE_DIRTY 0x0080 /* C: page changed */ 26 + #define 
_PAGE_ACCESSED 0x0100 /* R: page referenced */ 27 + #define _PAGE_RW 0x0200 /* software: user write access allowed */ 28 + #define _PAGE_BUSY 0x0800 /* software: PTE & hash are busy */ 29 + 30 + /* No separate kernel read-only */ 31 + #define _PAGE_KERNEL_RW (_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */ 32 + #define _PAGE_KERNEL_RO _PAGE_KERNEL_RW 22 33 23 34 /* Strong Access Ordering */ 24 - #define _PAGE_SAO (_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT) 35 + #define _PAGE_SAO (_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT) 25 36 26 - #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT) 27 - 28 - #define _PAGE_WRENABLE (_PAGE_RW | _PAGE_DIRTY) 37 + /* No page size encoding in the linux PTE */ 38 + #define _PAGE_PSIZE 0 29 39 30 40 /* PTEIDX nibble */ 31 41 #define _PTEIDX_SECONDARY 0x8 32 42 #define _PTEIDX_GROUP_IX 0x7 33 - 34 - #define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | \ 35 - _PAGE_NO_CACHE | _PAGE_WRITETHRU | \ 36 - _PAGE_4K_PFN | _PAGE_RW | _PAGE_USER | \ 37 - _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC) 38 43 39 44 40 45 #ifdef CONFIG_PPC_64K_PAGES
+1 -1
arch/powerpc/mm/fsl_booke_mmu.c
··· 162 162 phys_addr_t phys = memstart_addr; 163 163 164 164 while (cam[tlbcam_index] && tlbcam_index < ARRAY_SIZE(cam)) { 165 - settlbcam(tlbcam_index, virt, phys, cam[tlbcam_index], _PAGE_KERNEL, 0); 165 + settlbcam(tlbcam_index, virt, phys, cam[tlbcam_index], PAGE_KERNEL_X, 0); 166 166 virt += cam[tlbcam_index]; 167 167 phys += cam[tlbcam_index]; 168 168 tlbcam_index++;
+2 -2
arch/powerpc/mm/pgtable_32.c
··· 164 164 165 165 /* Make sure we have the base flags */ 166 166 if ((flags & _PAGE_PRESENT) == 0) 167 - flags |= _PAGE_KERNEL; 167 + flags |= PAGE_KERNEL; 168 168 169 169 /* Non-cacheable page cannot be coherent */ 170 170 if (flags & _PAGE_NO_CACHE) ··· 296 296 p = memstart_addr + s; 297 297 for (; s < total_lowmem; s += PAGE_SIZE) { 298 298 ktext = ((char *) v >= _stext && (char *) v < etext); 299 - f = ktext ?_PAGE_RAM_TEXT : _PAGE_RAM; 299 + f = ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL; 300 300 map_page(v, p, f); 301 301 #ifdef CONFIG_PPC_STD_MMU_32 302 302 if (ktext)
+2 -8
arch/powerpc/mm/ppc_mmu_32.c
··· 74 74 75 75 unsigned long __init mmu_mapin_ram(void) 76 76 { 77 - #ifdef CONFIG_POWER4 78 - return 0; 79 - #else 80 77 unsigned long tot, bl, done; 81 78 unsigned long max_size = (256<<20); 82 79 ··· 92 95 break; 93 96 } 94 97 95 - setbat(2, PAGE_OFFSET, 0, bl, _PAGE_RAM); 98 + setbat(2, PAGE_OFFSET, 0, bl, PAGE_KERNEL_X); 96 99 done = (unsigned long)bat_addrs[2].limit - PAGE_OFFSET + 1; 97 100 if ((done < tot) && !bat_addrs[3].limit) { 98 101 /* use BAT3 to cover a bit more */ ··· 100 103 for (bl = 128<<10; bl < max_size; bl <<= 1) 101 104 if (bl * 2 > tot) 102 105 break; 103 - setbat(3, PAGE_OFFSET+done, done, bl, _PAGE_RAM); 106 + setbat(3, PAGE_OFFSET+done, done, bl, PAGE_KERNEL_X); 104 107 done = (unsigned long)bat_addrs[3].limit - PAGE_OFFSET + 1; 105 108 } 106 109 107 110 return done; 108 - #endif 109 111 } 110 112 111 113 /* ··· 132 136 wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX; 133 137 bat[1].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */ 134 138 bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp; 135 - #ifndef CONFIG_KGDB /* want user access for breakpoints */ 136 139 if (flags & _PAGE_USER) 137 - #endif 138 140 bat[1].batu |= 1; /* Vp = 1 */ 139 141 if (flags & _PAGE_GUARDED) { 140 142 /* G bit must be zero in IBATs */
+1 -1
arch/powerpc/sysdev/cpm_common.c
··· 56 56 { 57 57 if (cpm_udbg_txdesc) { 58 58 #ifdef CONFIG_CPM2 59 - setbat(1, 0xf0000000, 0xf0000000, 1024*1024, _PAGE_IO); 59 + setbat(1, 0xf0000000, 0xf0000000, 1024*1024, PAGE_KERNEL_NCG); 60 60 #endif 61 61 udbg_putc = udbg_putc_cpm; 62 62 }