Documentation/arm/memory.txt | +1 -1
--- a/Documentation/arm/memory.txt
+++ b/Documentation/arm/memory.txt
@@ -41,7 +41,7 @@
 fffe0000	fffe7fff	ITCM mapping area for platforms with
 				ITCM mounted inside the CPU.
 
-ffc00000	ffdfffff	Fixmap mapping region.  Addresses provided
+ffc00000	ffefffff	Fixmap mapping region.  Addresses provided
 				by fix_to_virt() will be located here.
 
 fee00000	feffffff	Mapping of PCI I/O space. This is a static
arch/arm/include/asm/cacheflush.h | +10 -0
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -487,6 +487,16 @@
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
 
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+void set_kernel_text_rw(void);
+void set_kernel_text_ro(void);
+#else
+static inline void set_kernel_text_rw(void) { }
+static inline void set_kernel_text_ro(void) { }
+#endif
+
 void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
 			     void *kaddr, unsigned long len);
+
 #endif
arch/arm/mm/Kconfig | +21 -0
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -1008,3 +1008,24 @@
 	help
 	  This option specifies the architecture can support big endian
 	  operation.
+
+config ARM_KERNMEM_PERMS
+	bool "Restrict kernel memory permissions"
+	help
+	  If this is set, kernel memory other than kernel text (and rodata)
+	  will be made non-executable. The tradeoff is that each region is
+	  padded to section-size (1MiB) boundaries (because their permissions
+	  are different and splitting the 1M pages into 4K ones causes TLB
+	  performance problems), wasting memory.
+
+config DEBUG_RODATA
+	bool "Make kernel text and rodata read-only"
+	depends on ARM_KERNMEM_PERMS
+	default y
+	help
+	  If this is set, kernel text and rodata will be made read-only. This
+	  is to help catch accidental or malicious attempts to change the
+	  kernel's executable code. Additionally splits rodata from kernel
+	  text so it can be made explicitly non-executable. This creates
+	  another section-size padded region, so it can waste more memory
+	  space while gaining the read-only protections.
arch/arm/mm/highmem.c | +8 -7
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -18,19 +18,20 @@
 #include <asm/tlbflush.h>
 #include "mm.h"
 
-pte_t *fixmap_page_table;
-
 static inline void set_fixmap_pte(int idx, pte_t pte)
 {
 	unsigned long vaddr = __fix_to_virt(idx);
-	set_pte_ext(fixmap_page_table + idx, pte, 0);
+	pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+	set_pte_ext(ptep, pte, 0);
 	local_flush_tlb_kernel_page(vaddr);
 }
 
 static inline pte_t get_fixmap_pte(unsigned long vaddr)
 {
-	unsigned long idx = __virt_to_fix(vaddr);
-	return *(fixmap_page_table + idx);
+	pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+	return *ptep;
 }
 
 void *kmap(struct page *page)
@@ -85,7 +84,7 @@
 	 * With debugging enabled, kunmap_atomic forces that entry to 0.
 	 * Make sure it was indeed properly unmapped.
 	 */
-	BUG_ON(!pte_none(*(fixmap_page_table + idx)));
+	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
 #endif
 	/*
 	 * When debugging is off, kunmap_atomic leaves the previous mapping
@@ -135,7 +134,7 @@
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
-	BUG_ON(!pte_none(*(fixmap_page_table + idx)));
+	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
 #endif
 	set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
arch/arm/mm/init.c | +147 -2
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -29,6 +29,7 @@
 #include <asm/prom.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
+#include <asm/system_info.h>
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
@@ -571,7 +570,7 @@
 			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
 			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
 #endif
-			MLK(FIXADDR_START, FIXADDR_TOP),
+			MLK(FIXADDR_START, FIXADDR_END),
 			MLM(VMALLOC_START, VMALLOC_END),
 			MLM(PAGE_OFFSET, (unsigned long)high_memory),
 #ifdef CONFIG_HIGHMEM
@@ -616,14 +615,158 @@
 	}
 }
 
-void free_initmem(void)
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+struct section_perm {
+	unsigned long start;
+	unsigned long end;
+	pmdval_t mask;
+	pmdval_t prot;
+	pmdval_t clear;
+};
+
+static struct section_perm nx_perms[] = {
+	/* Make pages tables, etc before _stext RW (set NX). */
+	{
+		.start	= PAGE_OFFSET,
+		.end	= (unsigned long)_stext,
+		.mask	= ~PMD_SECT_XN,
+		.prot	= PMD_SECT_XN,
+	},
+	/* Make init RW (set NX). */
+	{
+		.start	= (unsigned long)__init_begin,
+		.end	= (unsigned long)_sdata,
+		.mask	= ~PMD_SECT_XN,
+		.prot	= PMD_SECT_XN,
+	},
+#ifdef CONFIG_DEBUG_RODATA
+	/* Make rodata NX (set RO in ro_perms below). */
+	{
+		.start	= (unsigned long)__start_rodata,
+		.end	= (unsigned long)__init_begin,
+		.mask	= ~PMD_SECT_XN,
+		.prot	= PMD_SECT_XN,
+	},
+#endif
+};
+
+#ifdef CONFIG_DEBUG_RODATA
+static struct section_perm ro_perms[] = {
+	/* Make kernel code and rodata RX (set RO). */
+	{
+		.start	= (unsigned long)_stext,
+		.end	= (unsigned long)__init_begin,
+#ifdef CONFIG_ARM_LPAE
+		.mask	= ~PMD_SECT_RDONLY,
+		.prot	= PMD_SECT_RDONLY,
+#else
+		.mask	= ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
+		.prot	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
+		.clear	= PMD_SECT_AP_WRITE,
+#endif
+	},
+};
+#endif
+
+/*
+ * Updates section permissions only for the current mm (sections are
+ * copied into each mm). During startup, this is the init_mm. Is only
+ * safe to be called with preemption disabled, as under stop_machine().
+ */
+static inline void section_update(unsigned long addr, pmdval_t mask,
+				  pmdval_t prot)
+{
+	struct mm_struct *mm;
+	pmd_t *pmd;
+
+	mm = current->active_mm;
+	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
+
+#ifdef CONFIG_ARM_LPAE
+	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
+#else
+	if (addr & SECTION_SIZE)
+		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
+	else
+		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
+#endif
+	flush_pmd_entry(pmd);
+	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
+}
+
+/* Make sure extended page tables are in use. */
+static inline bool arch_has_strict_perms(void)
+{
+	if (cpu_architecture() < CPU_ARCH_ARMv6)
+		return false;
+
+	return !!(get_cr() & CR_XP);
+}
+
+#define set_section_perms(perms, field)	{				\
+	size_t i;							\
+	unsigned long addr;						\
+									\
+	if (!arch_has_strict_perms())					\
+		return;							\
+									\
+	for (i = 0; i < ARRAY_SIZE(perms); i++) {			\
+		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||	\
+		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {		\
+			pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
+				perms[i].start, perms[i].end,		\
+				SECTION_SIZE);				\
+			continue;					\
+		}							\
+									\
+		for (addr = perms[i].start;				\
+		     addr < perms[i].end;				\
+		     addr += SECTION_SIZE)				\
+			section_update(addr, perms[i].mask,		\
+				       perms[i].field);			\
+	}								\
+}
+
+static inline void fix_kernmem_perms(void)
+{
+	set_section_perms(nx_perms, prot);
+}
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void)
+{
+	set_section_perms(ro_perms, prot);
+}
+
+void set_kernel_text_rw(void)
+{
+	set_section_perms(ro_perms, clear);
+}
+
+void set_kernel_text_ro(void)
+{
+	set_section_perms(ro_perms, prot);
+}
+#endif /* CONFIG_DEBUG_RODATA */
+
+#else
+static inline void fix_kernmem_perms(void) { }
+#endif /* CONFIG_ARM_KERNMEM_PERMS */
+
+void free_tcmmem(void)
 {
 #ifdef CONFIG_HAVE_TCM
 	extern char __tcm_start, __tcm_end;
 
 	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
 	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
 #endif
+}
+
+void free_initmem(void)
+{
+	fix_kernmem_perms();
+	free_tcmmem();
 
 	poison_init_mem(__init_begin, __init_end - __init_begin);
 	if (!machine_is_integrator() && !machine_is_cintegrator())
arch/arm/mm/mmu.c | +35 -4
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -22,6 +22,7 @@
 #include <asm/cputype.h>
 #include <asm/sections.h>
 #include <asm/cachetype.h>
+#include <asm/fixmap.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/smp_plat.h>
@@ -392,6 +391,29 @@
 SET_MEMORY_FN(rw, pte_set_rw)
 SET_MEMORY_FN(x, pte_set_x)
 SET_MEMORY_FN(nx, pte_set_nx)
+
+/*
+ * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
+ * As a result, this can only be called with preemption disabled, as under
+ * stop_machine().
+ */
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
+{
+	unsigned long vaddr = __fix_to_virt(idx);
+	pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+	/* Make sure fixmap region does not exceed available allocation. */
+	BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
+		     FIXADDR_END);
+	BUG_ON(idx >= __end_of_fixed_addresses);
+
+	if (pgprot_val(prot))
+		set_pte_at(NULL, vaddr, pte,
+			   pfn_pte(phys >> PAGE_SHIFT, prot));
+	else
+		pte_clear(NULL, vaddr, pte);
+	local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
+}
 
 /*
  * Adjust the PMD section entries according to the CPU in use.
@@ -1350,8 +1326,8 @@
 #ifdef CONFIG_HIGHMEM
 	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
 		PKMAP_BASE, _PAGE_KERNEL_TABLE);
-
-	fixmap_page_table = early_pte_alloc(pmd_off_k(FIXADDR_START),
-		FIXADDR_START, _PAGE_KERNEL_TABLE);
 #endif
+
+	early_pte_alloc(pmd_off_k(FIXADDR_START), FIXADDR_START,
+			_PAGE_KERNEL_TABLE);
 }
@@ -1373,11 +1349,18 @@
 		if (start >= end)
 			break;
 
-		if (end < kernel_x_start || start >= kernel_x_end) {
+		if (end < kernel_x_start) {
 			map.pfn = __phys_to_pfn(start);
 			map.virtual = __phys_to_virt(start);
 			map.length = end - start;
 			map.type = MT_MEMORY_RWX;
+
+			create_mapping(&map);
+		} else if (start >= kernel_x_end) {
+			map.pfn = __phys_to_pfn(start);
+			map.virtual = __phys_to_virt(start);
+			map.length = end - start;
+			map.type = MT_MEMORY_RW;
 
 			create_mapping(&map);
 		} else {