Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: implement basic NX support for kernel lowmem mappings

Add basic NX support for kernel lowmem mappings. We mark any section
which does not overlap kernel text as non-executable, preventing an
attacker from writing code into it and then executing it directly from
there.

This does not change the alignment of the sections, so the kernel
image doesn't grow significantly as a result; we can therefore enable
this unconditionally, without needing a config option.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

+51 -5
+1
arch/arm/include/asm/mach/map.h
··· 29 29 MT_LOW_VECTORS, 30 30 MT_HIGH_VECTORS, 31 31 MT_MEMORY_RWX, 32 + MT_MEMORY_RW, 32 33 MT_ROM, 33 34 MT_MEMORY_RWX_NONCACHED, 34 35 MT_MEMORY_RW_DTCM,
+50 -5
arch/arm/mm/mmu.c
··· 22 22 #include <asm/cputype.h> 23 23 #include <asm/sections.h> 24 24 #include <asm/cachetype.h> 25 + #include <asm/sections.h> 25 26 #include <asm/setup.h> 26 27 #include <asm/smp_plat.h> 27 28 #include <asm/tlb.h> ··· 294 293 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, 295 294 .domain = DOMAIN_KERNEL, 296 295 }, 296 + [MT_MEMORY_RW] = { 297 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | 298 + L_PTE_XN, 299 + .prot_l1 = PMD_TYPE_TABLE, 300 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, 301 + .domain = DOMAIN_KERNEL, 302 + }, 297 303 [MT_ROM] = { 298 304 .prot_sect = PMD_TYPE_SECT, 299 305 .domain = DOMAIN_KERNEL, ··· 418 410 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN; 419 411 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN; 420 412 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN; 413 + 414 + /* Also setup NX memory mapping */ 415 + mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN; 421 416 } 422 417 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) { 423 418 /* ··· 500 489 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; 501 490 mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S; 502 491 mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED; 492 + mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S; 493 + mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED; 503 494 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED; 504 495 mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S; 505 496 mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED; ··· 558 545 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; 559 546 mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd; 560 547 mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot; 548 + mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd; 549 + mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot; 561 550 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot; 562 551 mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask; 563 552 mem_types[MT_ROM].prot_sect |= cp->pmd; ··· 1311 
1296 static void __init map_lowmem(void) 1312 1297 { 1313 1298 struct memblock_region *reg; 1299 + unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE); 1300 + unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); 1314 1301 1315 1302 /* Map all the lowmem memory banks. */ 1316 1303 for_each_memblock(memory, reg) { ··· 1325 1308 if (start >= end) 1326 1309 break; 1327 1310 1328 - map.pfn = __phys_to_pfn(start); 1329 - map.virtual = __phys_to_virt(start); 1330 - map.length = end - start; 1331 - map.type = MT_MEMORY; 1311 + if (end < kernel_x_start || start >= kernel_x_end) { 1312 + map.pfn = __phys_to_pfn(start); 1313 + map.virtual = __phys_to_virt(start); 1314 + map.length = end - start; 1315 + map.type = MT_MEMORY_RWX; 1332 1316 1333 - create_mapping(&map); 1317 + create_mapping(&map); 1318 + } else { 1319 + /* This better cover the entire kernel */ 1320 + if (start < kernel_x_start) { 1321 + map.pfn = __phys_to_pfn(start); 1322 + map.virtual = __phys_to_virt(start); 1323 + map.length = kernel_x_start - start; 1324 + map.type = MT_MEMORY_RW; 1325 + 1326 + create_mapping(&map); 1327 + } 1328 + 1329 + map.pfn = __phys_to_pfn(kernel_x_start); 1330 + map.virtual = __phys_to_virt(kernel_x_start); 1331 + map.length = kernel_x_end - kernel_x_start; 1332 + map.type = MT_MEMORY_RWX; 1333 + 1334 + create_mapping(&map); 1335 + 1336 + if (kernel_x_end < end) { 1337 + map.pfn = __phys_to_pfn(kernel_x_end); 1338 + map.virtual = __phys_to_virt(kernel_x_end); 1339 + map.length = end - kernel_x_end; 1340 + map.type = MT_MEMORY_RW; 1341 + 1342 + create_mapping(&map); 1343 + } 1344 + } 1334 1345 } 1335 1346 } 1336 1347