
arm64: Clean up the default pgprot setting

The primary aim of this patchset is to remove the pgprot_default and
prot_sect_default global variables and rely strictly on predefined
values. The original goal was to be able to run SMP kernels on UP
hardware by not setting the Shareability bit. However, we are unlikely
to see UP ARMv8 hardware, and even if we do, the Shareability bit is no
longer assumed to disable cacheable accesses.
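Concretely, the boot-time fixup previously done in init_mem_pgprot()
becomes a preprocessor choice. A sketch contrasting the removed mmu.c
logic with the new pgtable.h definitions (both taken from the hunks
below):

/* Before: assembled at boot in init_mem_pgprot() */
pteval_t default_pgprot = PTE_ATTRINDX(MT_NORMAL);
#ifdef CONFIG_SMP
default_pgprot |= PTE_SHARED;		/* S bit OR'd in at runtime */
#endif
pgprot_default = __pgprot(PTE_TYPE_PAGE | PTE_AF | default_pgprot);

/* After: the equivalent value (the MT_NORMAL attribute index now
 * comes from _PAGE_DEFAULT) is selected at compile time */
#ifdef CONFIG_SMP
#define PROT_DEFAULT	(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#else
#define PROT_DEFAULT	(PTE_TYPE_PAGE | PTE_AF)
#endif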

A side effect is that the device mappings now have the Shareability
attribute set. The hardware, however, should ignore it since Device
accesses are always Outer Shareable.
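For illustration, expanding the new macro with CONFIG_SMP enabled shows
where the extra bit comes from (a sketch based on the pgtable.h
definitions below, not literal patch content):

/* With CONFIG_SMP, PROT_DEVICE_nGnRE expands to:
 *
 *   PTE_TYPE_PAGE | PTE_AF | PTE_SHARED |	<- PROT_DEFAULT
 *   PTE_PXN | PTE_UXN |
 *   PTE_ATTRINDX(MT_DEVICE_nGnRE)
 *
 * PTE_SHARED is the newly inherited bit; the architecture ignores the
 * Shareability field for Device memory, which is always treated as
 * Outer Shareable.
 */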

Following the removal of the two global variables, there is some PROT_*
macro reshuffling and cleanup, including the __PAGE_* macros (replaced
by PAGE_*).
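
As an example of the rename, PAGE_READONLY goes from a value fixed up
at boot to a compile-time constant (a sketch based on the pgtable.h
hunk below; the expansion assumes CONFIG_SMP):

/* Before: based on pgprot_default, set up in init_mem_pgprot() */
#define PAGE_READONLY	_MOD_PROT(pgprot_default, \
				  PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)

/* After: the former __PAGE_READONLY, now the only definition */
#define PAGE_READONLY	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | \
				 PTE_PXN | PTE_UXN)
/* expands to: PTE_TYPE_PAGE | PTE_AF | PTE_SHARED |
 *             PTE_ATTRINDX(MT_NORMAL) | PTE_USER | PTE_NG |
 *             PTE_PXN | PTE_UXN */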

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>

5 files changed, 48 insertions(+), 93 deletions(-)
arch/arm64/include/asm/io.h (-8)
···
 extern void __iounmap(volatile void __iomem *addr);
 extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);

-#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
-#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
-
 #define ioremap(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_nocache(addr, size)	__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_wc(addr, size)		__ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
 #define iounmap				__iounmap
-
-#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF)
-#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PTE_PXN | PTE_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))

 #define ARCH_HAS_IOREMAP_WC
 #include <asm-generic/iomap.h>
arch/arm64/include/asm/pgtable.h (+45 -49)
···
 #endif
 #define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

-/*
- * The pgprot_* and protection_map entries will be fixed up at runtime to
- * include the cachable and bufferable bits based on memory policy, as well as
- * any architecture dependent bits like global/ASID and SMP shared mapping
- * bits.
- */
-#define _PAGE_DEFAULT		PTE_TYPE_PAGE | PTE_AF
+#ifdef CONFIG_SMP
+#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+#else
+#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF)
+#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF)
+#endif

-extern pgprot_t pgprot_default;
+#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))

-#define __pgprot_modify(prot,mask,bits) \
-	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
+#define PROT_SECT_NORMAL_EXEC	(PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))

-#define _MOD_PROT(p, b)		__pgprot_modify(p, 0, b)
+#define _PAGE_DEFAULT		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))

-#define PAGE_NONE		__pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE | PTE_PXN | PTE_UXN)
-#define PAGE_SHARED		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
-#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
-#define PAGE_COPY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
-#define PAGE_READONLY		_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
-#define PAGE_KERNEL		_MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
-#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY | PTE_WRITE)
+#define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)

-#define PAGE_HYP		_MOD_PROT(pgprot_default, PTE_HYP)
+#define PAGE_HYP		__pgprot(_PAGE_DEFAULT | PTE_HYP)
 #define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)

-#define PAGE_S2			__pgprot_modify(pgprot_default, PTE_S2_MEMATTR_MASK, PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
+#define PAGE_S2			__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
 #define PAGE_S2_DEVICE		__pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)

-#define __PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
-#define __PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
-#define __PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
-#define __PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define __PAGE_COPY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
-#define __PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define __PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
-#define __PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN)
+#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_SHARED		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define PAGE_SHARED_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+#define PAGE_COPY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_COPY_EXEC		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_READONLY		__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_READONLY_EXEC	__pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_EXECONLY		__pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN)

-#endif /* __ASSEMBLY__ */
+#define __P000  PAGE_NONE
+#define __P001  PAGE_READONLY
+#define __P010  PAGE_COPY
+#define __P011  PAGE_COPY
+#define __P100  PAGE_EXECONLY
+#define __P101  PAGE_READONLY_EXEC
+#define __P110  PAGE_COPY_EXEC
+#define __P111  PAGE_COPY_EXEC

-#define __P000  __PAGE_NONE
-#define __P001  __PAGE_READONLY
-#define __P010  __PAGE_COPY
-#define __P011  __PAGE_COPY
-#define __P100  __PAGE_EXECONLY
-#define __P101  __PAGE_READONLY_EXEC
-#define __P110  __PAGE_COPY_EXEC
-#define __P111  __PAGE_COPY_EXEC
+#define __S000  PAGE_NONE
+#define __S001  PAGE_READONLY
+#define __S010  PAGE_SHARED
+#define __S011  PAGE_SHARED
+#define __S100  PAGE_EXECONLY
+#define __S101  PAGE_READONLY_EXEC
+#define __S110  PAGE_SHARED_EXEC
+#define __S111  PAGE_SHARED_EXEC

-#define __S000  __PAGE_NONE
-#define __S001  __PAGE_READONLY
-#define __S010  __PAGE_SHARED
-#define __S011  __PAGE_SHARED
-#define __S100  __PAGE_EXECONLY
-#define __S101  __PAGE_READONLY_EXEC
-#define __S110  __PAGE_SHARED_EXEC
-#define __S111  __PAGE_SHARED_EXEC
-
-#ifndef __ASSEMBLY__
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
···
 {
 	return 1;
 }
+
+#define __pgprot_modify(prot,mask,bits) \
+	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

 /*
  * Mark the prot value as uncacheable and unbufferable.
arch/arm64/kernel/setup.c (-1)
···

 	*cmdline_p = boot_command_line;

-	init_mem_pgprot();
 	early_ioremap_init();

 	parse_early_param();
arch/arm64/mm/dma-mapping.c (+1 -1)
···
 	for (i = 0; i < (size >> PAGE_SHIFT); i++)
 		map[i] = page + i;
 	coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
-			    __get_dma_pgprot(attrs, pgprot_default, false));
+			    __get_dma_pgprot(attrs, __pgprot(PROT_NORMAL_NC), false));
 	kfree(map);
 	if (!coherent_ptr)
 		goto no_map;
arch/arm64/mm/mmu.c (+2 -34)
···
 struct page *empty_zero_page;
 EXPORT_SYMBOL(empty_zero_page);

-pgprot_t pgprot_default;
-EXPORT_SYMBOL(pgprot_default);
-
-static pmdval_t prot_sect_kernel;
-
 struct cachepolicy {
 	const char policy[16];
 	u64 mair;
···
 }
 early_param("cachepolicy", early_cachepolicy);

-/*
- * Adjust the PMD section entries according to the CPU in use.
- */
-void __init init_mem_pgprot(void)
-{
-	pteval_t default_pgprot;
-	int i;
-
-	default_pgprot = PTE_ATTRINDX(MT_NORMAL);
-	prot_sect_kernel = PMD_TYPE_SECT | PMD_SECT_AF | PMD_ATTRINDX(MT_NORMAL);
-
-#ifdef CONFIG_SMP
-	/*
-	 * Mark memory with the "shared" attribute for SMP systems
-	 */
-	default_pgprot |= PTE_SHARED;
-	prot_sect_kernel |= PMD_SECT_S;
-#endif
-
-	for (i = 0; i < 16; i++) {
-		unsigned long v = pgprot_val(protection_map[i]);
-		protection_map[i] = __pgprot(v | default_pgprot);
-	}
-
-	pgprot_default = __pgprot(PTE_TYPE_PAGE | PTE_AF | default_pgprot);
-}
-
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 			      unsigned long size, pgprot_t vma_prot)
 {
···
 	/* try section mapping first */
 	if (((addr | next | phys) & ~SECTION_MASK) == 0) {
 		pmd_t old_pmd = *pmd;
-		set_pmd(pmd, __pmd(phys | prot_sect_kernel));
+		set_pmd(pmd, __pmd(phys | PROT_SECT_NORMAL_EXEC));
 		/*
 		 * Check for previous table entries created during
 		 * boot (__create_page_tables) and flush them.
···
 		if (!p)
 			return -ENOMEM;

-		set_pmd(pmd, __pmd(__pa(p) | prot_sect_kernel));
+		set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
 	} else
 		vmemmap_verify((pte_t *)pmd, node, addr, next);
 } while (addr = next, addr != end);