Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: add permission annotations to MT_MEMORY* mapping types

Document the permissions which the various MT_MEMORY* mapping types
will provide.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

+38 -36
+14 -12
arch/arm/include/asm/mach/map.h
··· 22 22 }; 23 23 24 24 /* types 0-3 are defined in asm/io.h */ 25 - #define MT_UNCACHED 4 26 - #define MT_CACHECLEAN 5 27 - #define MT_MINICLEAN 6 28 - #define MT_LOW_VECTORS 7 29 - #define MT_HIGH_VECTORS 8 30 - #define MT_MEMORY 9 31 - #define MT_ROM 10 32 - #define MT_MEMORY_NONCACHED 11 33 - #define MT_MEMORY_DTCM 12 34 - #define MT_MEMORY_ITCM 13 35 - #define MT_MEMORY_SO 14 36 - #define MT_MEMORY_DMA_READY 15 25 + enum { 26 + MT_UNCACHED = 4, 27 + MT_CACHECLEAN, 28 + MT_MINICLEAN, 29 + MT_LOW_VECTORS, 30 + MT_HIGH_VECTORS, 31 + MT_MEMORY_RWX, 32 + MT_ROM, 33 + MT_MEMORY_RWX_NONCACHED, 34 + MT_MEMORY_RW_DTCM, 35 + MT_MEMORY_RWX_ITCM, 36 + MT_MEMORY_RW_SO, 37 + MT_MEMORY_DMA_READY, 38 + }; 37 39 38 40 #ifdef CONFIG_MMU 39 41 extern void iotable_init(struct map_desc *, int);
+2 -2
arch/arm/kernel/tcm.c
··· 52 52 .virtual = DTCM_OFFSET, 53 53 .pfn = __phys_to_pfn(DTCM_OFFSET), 54 54 .length = 0, 55 - .type = MT_MEMORY_DTCM 55 + .type = MT_MEMORY_RW_DTCM 56 56 } 57 57 }; 58 58 ··· 61 61 .virtual = ITCM_OFFSET, 62 62 .pfn = __phys_to_pfn(ITCM_OFFSET), 63 63 .length = 0, 64 - .type = MT_MEMORY_ITCM 64 + .type = MT_MEMORY_RWX_ITCM, 65 65 } 66 66 }; 67 67
+1 -1
arch/arm/mach-at91/setup.c
··· 81 81 82 82 desc->pfn = __phys_to_pfn(base); 83 83 desc->length = length; 84 - desc->type = MT_MEMORY_NONCACHED; 84 + desc->type = MT_MEMORY_RWX_NONCACHED; 85 85 86 86 pr_info("AT91: sram at 0x%lx of 0x%x mapped at 0x%lx\n", 87 87 base, length, desc->virtual);
+2 -2
arch/arm/mach-omap2/io.c
··· 244 244 .virtual = OMAP4_SRAM_VA, 245 245 .pfn = __phys_to_pfn(OMAP4_SRAM_PA), 246 246 .length = PAGE_SIZE, 247 - .type = MT_MEMORY_SO, 247 + .type = MT_MEMORY_RW_SO, 248 248 }, 249 249 #endif 250 250 ··· 282 282 .virtual = OMAP4_SRAM_VA, 283 283 .pfn = __phys_to_pfn(OMAP4_SRAM_PA), 284 284 .length = PAGE_SIZE, 285 - .type = MT_MEMORY_SO, 285 + .type = MT_MEMORY_RW_SO, 286 286 }, 287 287 #endif 288 288 };
+1 -1
arch/arm/mach-omap2/omap4-common.c
··· 88 88 dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA; 89 89 dram_io_desc[0].pfn = __phys_to_pfn(paddr); 90 90 dram_io_desc[0].length = size; 91 - dram_io_desc[0].type = MT_MEMORY_SO; 91 + dram_io_desc[0].type = MT_MEMORY_RW_SO; 92 92 iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc)); 93 93 dram_sync = (void __iomem *) dram_io_desc[0].virtual; 94 94 sram_sync = (void __iomem *) OMAP4_SRAM_VA;
+1 -1
arch/arm/mach-ux500/setup.h
··· 43 43 .virtual = IO_ADDRESS(x), \ 44 44 .pfn = __phys_to_pfn(x), \ 45 45 .length = sz, \ 46 - .type = MT_MEMORY, \ 46 + .type = MT_MEMORY_RWX, \ 47 47 } 48 48 49 49 extern struct smp_operations ux500_smp_ops;
+2 -2
arch/arm/mm/ioremap.c
··· 392 392 unsigned int mtype; 393 393 394 394 if (cached) 395 - mtype = MT_MEMORY; 395 + mtype = MT_MEMORY_RWX; 396 396 else 397 - mtype = MT_MEMORY_NONCACHED; 397 + mtype = MT_MEMORY_RWX_NONCACHED; 398 398 399 399 return __arm_ioremap_caller(phys_addr, size, mtype, 400 400 __builtin_return_address(0));
+15 -15
arch/arm/mm/mmu.c
··· 287 287 .prot_l1 = PMD_TYPE_TABLE, 288 288 .domain = DOMAIN_USER, 289 289 }, 290 - [MT_MEMORY] = { 290 + [MT_MEMORY_RWX] = { 291 291 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY, 292 292 .prot_l1 = PMD_TYPE_TABLE, 293 293 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, ··· 297 297 .prot_sect = PMD_TYPE_SECT, 298 298 .domain = DOMAIN_KERNEL, 299 299 }, 300 - [MT_MEMORY_NONCACHED] = { 300 + [MT_MEMORY_RWX_NONCACHED] = { 301 301 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | 302 302 L_PTE_MT_BUFFERABLE, 303 303 .prot_l1 = PMD_TYPE_TABLE, 304 304 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, 305 305 .domain = DOMAIN_KERNEL, 306 306 }, 307 - [MT_MEMORY_DTCM] = { 307 + [MT_MEMORY_RW_DTCM] = { 308 308 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | 309 309 L_PTE_XN, 310 310 .prot_l1 = PMD_TYPE_TABLE, 311 311 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, 312 312 .domain = DOMAIN_KERNEL, 313 313 }, 314 - [MT_MEMORY_ITCM] = { 314 + [MT_MEMORY_RWX_ITCM] = { 315 315 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY, 316 316 .prot_l1 = PMD_TYPE_TABLE, 317 317 .domain = DOMAIN_KERNEL, 318 318 }, 319 - [MT_MEMORY_SO] = { 319 + [MT_MEMORY_RW_SO] = { 320 320 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | 321 321 L_PTE_MT_UNCACHED | L_PTE_XN, 322 322 .prot_l1 = PMD_TYPE_TABLE, ··· 487 487 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED; 488 488 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; 489 489 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; 490 - mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; 491 - mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; 490 + mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S; 491 + mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED; 492 492 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED; 493 - mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; 494 - mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; 493 + mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S; 494 + mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED; 495 495 } 496 496 } 497 497 ··· 502 502 if (cpu_arch >= CPU_ARCH_ARMv6) { 503 503 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) { 504 504 /* Non-cacheable Normal is XCB = 001 */ 505 - mem_types[MT_MEMORY_NONCACHED].prot_sect |= 505 + mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= 506 506 PMD_SECT_BUFFERED; 507 507 } else { 508 508 /* For both ARMv6 and non-TEX-remapping ARMv7 */ 509 - mem_types[MT_MEMORY_NONCACHED].prot_sect |= 509 + mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= 510 510 PMD_SECT_TEX(1); 511 511 } 512 512 } else { 513 - mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE; 513 + mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE; 514 514 } 515 515 516 516 #ifdef CONFIG_ARM_LPAE ··· 543 543 544 544 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; 545 545 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; 546 - mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd; 547 - mem_types[MT_MEMORY].prot_pte |= kern_pgprot; 546 + mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd; 547 + mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot; 548 548 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot; 549 - mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask; 549 + mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask; 550 550 mem_types[MT_ROM].prot_sect |= cp->pmd; 551 551 552 552 switch (cp->pmd) {