Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: 8235/1: Support for the PXN CPU feature on ARMv7

Modern ARMv7-A/R cores optionally implement the following new
hardware feature:

- PXN:
Privileged execute-never (PXN) is a security feature. The PXN bit
determines whether the processor can execute software from
the region. This is an effective defense against ret2usr attacks.
On an implementation that does not include the LPAE, PXN is
optionally supported.

This patch sets the PXN bit in user page tables to prevent
user code execution in privileged mode.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Jungseung Lee <js07.lee@gmail.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

authored by

Jungseung Lee and committed by
Russell King
1d4d3715 44cb09c2

+29 -2
+9 -1
arch/arm/include/asm/pgalloc.h
··· 157 157 static inline void 158 158 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) 159 159 { 160 - __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE); 160 + extern pmdval_t user_pmd_table; 161 + pmdval_t prot; 162 + 163 + if (__LINUX_ARM_ARCH__ >= 6 && !IS_ENABLED(CONFIG_ARM_LPAE)) 164 + prot = user_pmd_table; 165 + else 166 + prot = _PAGE_USER_TABLE; 167 + 168 + __pmd_populate(pmdp, page_to_phys(ptep), prot); 161 169 } 162 170 #define pmd_pgtable(pmd) pmd_page(pmd) 163 171
+2
arch/arm/include/asm/pgtable-2level-hwdef.h
··· 20 20 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0) 21 21 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0) 22 22 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0) 23 + #define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */ 23 24 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4) 24 25 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5) 25 26 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */ 26 27 /* 27 28 * - section 28 29 */ 30 + #define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */ 29 31 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2) 30 32 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3) 31 33 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
+1
arch/arm/include/asm/pgtable-3level-hwdef.h
··· 76 76 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ 77 77 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */ 78 78 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */ 79 + #define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */ 79 80 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */ 80 81 81 82 /*
+17 -1
arch/arm/mm/mmu.c
··· 52 52 */ 53 53 pmd_t *top_pmd; 54 54 55 + pmdval_t user_pmd_table = _PAGE_USER_TABLE; 56 + 55 57 #define CPOLICY_UNCACHED 0 56 58 #define CPOLICY_BUFFERED 1 57 59 #define CPOLICY_WRITETHROUGH 2 ··· 530 528 hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte; 531 529 s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2; 532 530 531 + #ifndef CONFIG_ARM_LPAE 533 532 /* 534 533 * We don't use domains on ARMv6 (since this causes problems with 535 534 * v6/v7 kernels), so we must use a separate memory type for user 536 535 * r/o, kernel r/w to map the vectors page. 537 536 */ 538 - #ifndef CONFIG_ARM_LPAE 539 537 if (cpu_arch == CPU_ARCH_ARMv6) 540 538 vecs_pgprot |= L_PTE_MT_VECTORS; 539 + 540 + /* 541 + * Check is it with support for the PXN bit 542 + * in the Short-descriptor translation table format descriptors. 543 + */ 544 + if (cpu_arch == CPU_ARCH_ARMv7 && 545 + (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) == 4) { 546 + user_pmd_table |= PMD_PXNTABLE; 547 + } 541 548 #endif 542 549 543 550 /* ··· 616 605 } 617 606 kern_pgprot |= PTE_EXT_AF; 618 607 vecs_pgprot |= PTE_EXT_AF; 608 + 609 + /* 610 + * Set PXN for user mappings 611 + */ 612 + user_pgprot |= PTE_EXT_PXN; 619 613 #endif 620 614 621 615 for (i = 0; i < 16; i++) {