Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[ARM] mm 8: define mem_types table L1 bit 4 to be for ARMv6

Change the memory types table to define the L1 descriptor bit 4 to
be in terms of the ARMv6 definition - execute never.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Authored by Russell King and committed by Russell King.
9ef79635 0058ca32

+20 -24
arch/arm/mm/mmu.c
··· 181 181 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | 182 182 L_PTE_WRITE, 183 183 .prot_l1 = PMD_TYPE_TABLE, 184 - .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED | 184 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_UNCACHED | 185 185 PMD_SECT_AP_WRITE, 186 186 .domain = DOMAIN_IO, 187 187 }, 188 188 [MT_CACHECLEAN] = { 189 - .prot_sect = PMD_TYPE_SECT | PMD_BIT4, 189 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, 190 190 .domain = DOMAIN_KERNEL, 191 191 }, 192 192 [MT_MINICLEAN] = { 193 - .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE, 193 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE, 194 194 .domain = DOMAIN_KERNEL, 195 195 }, 196 196 [MT_LOW_VECTORS] = { ··· 206 206 .domain = DOMAIN_USER, 207 207 }, 208 208 [MT_MEMORY] = { 209 - .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE, 209 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, 210 210 .domain = DOMAIN_KERNEL, 211 211 }, 212 212 [MT_ROM] = { 213 - .prot_sect = PMD_TYPE_SECT | PMD_BIT4, 213 + .prot_sect = PMD_TYPE_SECT, 214 214 .domain = DOMAIN_KERNEL, 215 215 }, 216 216 [MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */ 217 217 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | 218 218 L_PTE_WRITE, 219 219 .prot_l1 = PMD_TYPE_TABLE, 220 - .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED | 220 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_UNCACHED | 221 221 PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE | 222 222 PMD_SECT_TEX(1), 223 223 .domain = DOMAIN_IO, 224 224 }, 225 225 [MT_NONSHARED_DEVICE] = { 226 226 .prot_l1 = PMD_TYPE_TABLE, 227 - .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_NONSHARED_DEV | 227 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_NONSHARED_DEV | 228 228 PMD_SECT_AP_WRITE, 229 229 .domain = DOMAIN_IO, 230 230 } ··· 260 260 } 261 261 262 262 /* 263 - * Xscale must not have PMD bit 4 set for section mappings. 263 + * ARMv5 and lower, bit 4 must be set for page tables. 
264 + * (was: cache "update-able on write" bit on ARM610) 265 + * However, Xscale cores require this bit to be cleared. 264 266 */ 265 - if (cpu_is_xscale()) 266 - for (i = 0; i < ARRAY_SIZE(mem_types); i++) 267 + if (cpu_is_xscale()) { 268 + for (i = 0; i < ARRAY_SIZE(mem_types); i++) { 267 269 mem_types[i].prot_sect &= ~PMD_BIT4; 268 - 269 - /* 270 - * ARMv5 and lower, excluding Xscale, bit 4 must be set for 271 - * page tables. 272 - */ 273 - if (cpu_arch < CPU_ARCH_ARMv6 && !cpu_is_xscale()) 274 - for (i = 0; i < ARRAY_SIZE(mem_types); i++) 270 + mem_types[i].prot_l1 &= ~PMD_BIT4; 271 + } 272 + } else if (cpu_arch < CPU_ARCH_ARMv6) { 273 + for (i = 0; i < ARRAY_SIZE(mem_types); i++) { 275 274 if (mem_types[i].prot_l1) 276 275 mem_types[i].prot_l1 |= PMD_BIT4; 276 + if (mem_types[i].prot_sect) 277 + mem_types[i].prot_sect |= PMD_BIT4; 278 + } 279 + } 277 280 278 281 cp = &cache_policies[cachepolicy]; 279 282 kern_pgprot = user_pgprot = cp->pte; ··· 296 293 * ARMv6 and above have extended page tables. 297 294 */ 298 295 if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) { 299 - /* 300 - * bit 4 becomes XN which we must clear for the 301 - * kernel memory mapping. 302 - */ 303 - mem_types[MT_MEMORY].prot_sect &= ~PMD_SECT_XN; 304 - mem_types[MT_ROM].prot_sect &= ~PMD_SECT_XN; 305 - 306 296 /* 307 297 * Mark cache clean areas and XIP ROM read only 308 298 * from SVC mode and no access from userspace.