@@ -41,7 +41,7 @@
 fffe0000	fffe7fff	ITCM mapping area for platforms with
 				ITCM mounted inside the CPU.
 
-ffc00000	ffdfffff	Fixmap mapping region.  Addresses provided
+ffc00000	ffefffff	Fixmap mapping region.  Addresses provided
 				by fix_to_virt() will be located here.
 
 fee00000	feffffff	Mapping of PCI I/O space. This is a static
 arch/arm/include/asm/cacheflush.h | 10 ++++++++++
@@ -487,6 +487,16 @@
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
 
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+void set_kernel_text_rw(void);
+void set_kernel_text_ro(void);
+#else
+static inline void set_kernel_text_rw(void) { }
+static inline void set_kernel_text_ro(void) { }
+#endif
+
 void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
 			     void *kaddr, unsigned long len);
+
 #endif
@@ -1009,3 +1009,24 @@
 	help
 	  This option specifies the architecture can support big endian
 	  operation.
+
+config ARM_KERNMEM_PERMS
+	bool "Restrict kernel memory permissions"
+	help
+	  If this is set, kernel memory other than kernel text (and rodata)
+	  will be made non-executable. The tradeoff is that each region is
+	  padded to section-size (1MiB) boundaries (because their permissions
+	  are different and splitting the 1M pages into 4K ones causes TLB
+	  performance problems), wasting memory.
+
+config DEBUG_RODATA
+	bool "Make kernel text and rodata read-only"
+	depends on ARM_KERNMEM_PERMS
+	default y
+	help
+	  If this is set, kernel text and rodata will be made read-only. This
+	  is to help catch accidental or malicious attempts to change the
+	  kernel's executable code. Additionally splits rodata from kernel
+	  text so it can be made explicitly non-executable. This creates
+	  another section-size padded region, so it can waste more memory
+	  space while gaining the read-only protections.
 arch/arm/mm/highmem.c | 15 ++++++++-------
@@ -18,19 +18,20 @@
 #include <asm/tlbflush.h>
 #include "mm.h"
 
-pte_t *fixmap_page_table;
-
 static inline void set_fixmap_pte(int idx, pte_t pte)
 {
 	unsigned long vaddr = __fix_to_virt(idx);
-	set_pte_ext(fixmap_page_table + idx, pte, 0);
+	pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+	set_pte_ext(ptep, pte, 0);
 	local_flush_tlb_kernel_page(vaddr);
 }
 
 static inline pte_t get_fixmap_pte(unsigned long vaddr)
 {
-	unsigned long idx = __virt_to_fix(vaddr);
-	return *(fixmap_page_table + idx);
+	pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+	return *ptep;
 }
 
 void *kmap(struct page *page)
@@ -85,7 +84,7 @@
 	 * With debugging enabled, kunmap_atomic forces that entry to 0.
 	 * Make sure it was indeed properly unmapped.
 	 */
-	BUG_ON(!pte_none(*(fixmap_page_table + idx)));
+	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
 #endif
 	/*
 	 * When debugging is off, kunmap_atomic leaves the previous mapping
@@ -138,7 +137,7 @@
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
-	BUG_ON(!pte_none(*(fixmap_page_table + idx)));
+	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
 #endif
 	set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
 
 arch/arm/mm/init.c | 149 ++++++++++++++++++++++++++++++++++++++++++++++++++--
@@ -29,6 +29,7 @@
 #include <asm/prom.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
+#include <asm/system_info.h>
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
 
@@ -571,7 +570,7 @@
 	MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
 	MLK(ITCM_OFFSET, (unsigned long) itcm_end),
 #endif
-	MLK(FIXADDR_START, FIXADDR_TOP),
+	MLK(FIXADDR_START, FIXADDR_END),
 	MLM(VMALLOC_START, VMALLOC_END),
 	MLM(PAGE_OFFSET, (unsigned long)high_memory),
 #ifdef CONFIG_HIGHMEM
@@ -616,13 +615,157 @@
 	}
 }
 
-void free_initmem(void)
+#ifdef CONFIG_ARM_KERNMEM_PERMS
+struct section_perm {
+	unsigned long start;
+	unsigned long end;
+	pmdval_t mask;
+	pmdval_t prot;
+	pmdval_t clear;
+};
+
+static struct section_perm nx_perms[] = {
+	/* Make pages tables, etc before _stext RW (set NX). */
+	{
+		.start	= PAGE_OFFSET,
+		.end	= (unsigned long)_stext,
+		.mask	= ~PMD_SECT_XN,
+		.prot	= PMD_SECT_XN,
+	},
+	/* Make init RW (set NX). */
+	{
+		.start	= (unsigned long)__init_begin,
+		.end	= (unsigned long)_sdata,
+		.mask	= ~PMD_SECT_XN,
+		.prot	= PMD_SECT_XN,
+	},
+#ifdef CONFIG_DEBUG_RODATA
+	/* Make rodata NX (set RO in ro_perms below). */
+	{
+		.start	= (unsigned long)__start_rodata,
+		.end	= (unsigned long)__init_begin,
+		.mask	= ~PMD_SECT_XN,
+		.prot	= PMD_SECT_XN,
+	},
+#endif
+};
+
+#ifdef CONFIG_DEBUG_RODATA
+static struct section_perm ro_perms[] = {
+	/* Make kernel code and rodata RX (set RO). */
+	{
+		.start	= (unsigned long)_stext,
+		.end	= (unsigned long)__init_begin,
+#ifdef CONFIG_ARM_LPAE
+		.mask	= ~PMD_SECT_RDONLY,
+		.prot	= PMD_SECT_RDONLY,
+#else
+		.mask	= ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
+		.prot	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
+		.clear	= PMD_SECT_AP_WRITE,
+#endif
+	},
+};
+#endif
+
+/*
+ * Updates section permissions only for the current mm (sections are
+ * copied into each mm). During startup, this is the init_mm. Is only
+ * safe to be called with preemption disabled, as under stop_machine().
+ */
+static inline void section_update(unsigned long addr, pmdval_t mask,
+				  pmdval_t prot)
+{
+	struct mm_struct *mm;
+	pmd_t *pmd;
+
+	mm = current->active_mm;
+	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
+
+#ifdef CONFIG_ARM_LPAE
+	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
+#else
+	if (addr & SECTION_SIZE)
+		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
+	else
+		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
+#endif
+	flush_pmd_entry(pmd);
+	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
+}
+
+/* Make sure extended page tables are in use. */
+static inline bool arch_has_strict_perms(void)
+{
+	if (cpu_architecture() < CPU_ARCH_ARMv6)
+		return false;
+
+	return !!(get_cr() & CR_XP);
+}
+
+#define set_section_perms(perms, field)	{				\
+	size_t i;							\
+	unsigned long addr;						\
+									\
+	if (!arch_has_strict_perms())					\
+		return;							\
+									\
+	for (i = 0; i < ARRAY_SIZE(perms); i++) {			\
+		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||	\
+		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {		\
+			pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
+				perms[i].start, perms[i].end,		\
+				SECTION_SIZE);				\
+			continue;					\
+		}							\
+									\
+		for (addr = perms[i].start;				\
+		     addr < perms[i].end;				\
+		     addr += SECTION_SIZE)				\
+			section_update(addr, perms[i].mask,		\
+				       perms[i].field);			\
+	}								\
+}
+
+static inline void fix_kernmem_perms(void)
+{
+	set_section_perms(nx_perms, prot);
+}
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void)
+{
+	set_section_perms(ro_perms, prot);
+}
+
+void set_kernel_text_rw(void)
+{
+	set_section_perms(ro_perms, clear);
+}
+
+void set_kernel_text_ro(void)
+{
+	set_section_perms(ro_perms, prot);
+}
+#endif /* CONFIG_DEBUG_RODATA */
+
+#else
+static inline void fix_kernmem_perms(void) { }
+#endif /* CONFIG_ARM_KERNMEM_PERMS */
+
+void free_tcmmem(void)
 {
 #ifdef CONFIG_HAVE_TCM
 	extern char __tcm_start, __tcm_end;
 	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
 	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
 #endif
+}
+
+void free_initmem(void)
+{
+	fix_kernmem_perms();
+	free_tcmmem();
 
 	poison_init_mem(__init_begin, __init_end - __init_begin);
 	if (!machine_is_integrator() && !machine_is_cintegrator())
 arch/arm/mm/mmu.c | 39 +++++++++++++++++++++++++++++++++++----
@@ -22,6 +22,7 @@
 #include <asm/cputype.h>
 #include <asm/sections.h>
 #include <asm/cachetype.h>
+#include <asm/fixmap.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/smp_plat.h>
@@ -356,6 +357,29 @@
 	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
 }
 EXPORT_SYMBOL(get_mem_type);
+
+/*
+ * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
+ * As a result, this can only be called with preemption disabled, as under
+ * stop_machine().
+ */
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
+{
+	unsigned long vaddr = __fix_to_virt(idx);
+	pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+
+	/* Make sure fixmap region does not exceed available allocation. */
+	BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
+		     FIXADDR_END);
+	BUG_ON(idx >= __end_of_fixed_addresses);
+
+	if (pgprot_val(prot))
+		set_pte_at(NULL, vaddr, pte,
+			   pfn_pte(phys >> PAGE_SHIFT, prot));
+	else
+		pte_clear(NULL, vaddr, pte);
+	local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
+}
 
 /*
  * Adjust the PMD section entries according to the CPU in use.
@@ -1320,10 +1296,10 @@
 #ifdef CONFIG_HIGHMEM
 	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
 		PKMAP_BASE, _PAGE_KERNEL_TABLE);
-
-	fixmap_page_table = early_pte_alloc(pmd_off_k(FIXADDR_START),
-		FIXADDR_START, _PAGE_KERNEL_TABLE);
 #endif
+
+	early_pte_alloc(pmd_off_k(FIXADDR_START), FIXADDR_START,
+			_PAGE_KERNEL_TABLE);
 }
 
 static void __init map_lowmem(void)
@@ -1343,11 +1319,18 @@
 		if (start >= end)
 			break;
 
-		if (end < kernel_x_start || start >= kernel_x_end) {
+		if (end < kernel_x_start) {
 			map.pfn = __phys_to_pfn(start);
 			map.virtual = __phys_to_virt(start);
 			map.length = end - start;
 			map.type = MT_MEMORY_RWX;
+
+			create_mapping(&map);
+		} else if (start >= kernel_x_end) {
+			map.pfn = __phys_to_pfn(start);
+			map.virtual = __phys_to_virt(start);
+			map.length = end - start;
+			map.type = MT_MEMORY_RW;
 
 			create_mapping(&map);
 		} else {