Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'devel-stable' into for-next

+394 -57
+1 -1
Documentation/arm/memory.txt
··· 41 41 fffe0000 fffe7fff ITCM mapping area for platforms with 42 42 ITCM mounted inside the CPU. 43 43 44 - ffc00000 ffdfffff Fixmap mapping region. Addresses provided 44 + ffc00000 ffefffff Fixmap mapping region. Addresses provided 45 45 by fix_to_virt() will be located here. 46 46 47 47 fee00000 feffffff Mapping of PCI I/O space. This is a static
+10
arch/arm/include/asm/cacheflush.h
··· 487 487 int set_memory_x(unsigned long addr, int numpages); 488 488 int set_memory_nx(unsigned long addr, int numpages); 489 489 490 + #ifdef CONFIG_DEBUG_RODATA 491 + void mark_rodata_ro(void); 492 + void set_kernel_text_rw(void); 493 + void set_kernel_text_ro(void); 494 + #else 495 + static inline void set_kernel_text_rw(void) { } 496 + static inline void set_kernel_text_ro(void) { } 497 + #endif 498 + 490 499 void flush_uprobe_xol_access(struct page *page, unsigned long uaddr, 491 500 void *kaddr, unsigned long len); 501 + 492 502 #endif
+14 -17
arch/arm/include/asm/fixmap.h
··· 2 2 #define _ASM_FIXMAP_H 3 3 4 4 #define FIXADDR_START 0xffc00000UL 5 - #define FIXADDR_TOP 0xffe00000UL 6 - #define FIXADDR_SIZE (FIXADDR_TOP - FIXADDR_START) 5 + #define FIXADDR_END 0xfff00000UL 6 + #define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE) 7 7 8 - #define FIX_KMAP_NR_PTES (FIXADDR_SIZE >> PAGE_SHIFT) 8 + #include <asm/kmap_types.h> 9 9 10 - #define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT)) 11 - #define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT) 10 + enum fixed_addresses { 11 + FIX_KMAP_BEGIN, 12 + FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, 12 13 13 - extern void __this_fixmap_does_not_exist(void); 14 + /* Support writing RO kernel text via kprobes, jump labels, etc. */ 15 + FIX_TEXT_POKE0, 16 + FIX_TEXT_POKE1, 14 17 15 - static inline unsigned long fix_to_virt(const unsigned int idx) 16 - { 17 - if (idx >= FIX_KMAP_NR_PTES) 18 - __this_fixmap_does_not_exist(); 19 - return __fix_to_virt(idx); 20 - } 18 + __end_of_fixed_addresses 19 + }; 21 20 22 - static inline unsigned int virt_to_fix(const unsigned long vaddr) 23 - { 24 - BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); 25 - return __virt_to_fix(vaddr); 26 - } 21 + void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot); 22 + 23 + #include <asm-generic/fixmap.h> 27 24 28 25 #endif
+1 -1
arch/arm/kernel/Makefile
··· 68 68 endif 69 69 obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o 70 70 obj-$(CONFIG_ARM_THUMBEE) += thumbee.o 71 - obj-$(CONFIG_KGDB) += kgdb.o 71 + obj-$(CONFIG_KGDB) += kgdb.o patch.o 72 72 obj-$(CONFIG_ARM_UNWIND) += unwind.o 73 73 obj-$(CONFIG_HAVE_TCM) += tcm.o 74 74 obj-$(CONFIG_OF) += devtree.o
+19
arch/arm/kernel/ftrace.c
··· 15 15 #include <linux/ftrace.h> 16 16 #include <linux/uaccess.h> 17 17 #include <linux/module.h> 18 + #include <linux/stop_machine.h> 18 19 19 20 #include <asm/cacheflush.h> 20 21 #include <asm/opcodes.h> ··· 35 34 #define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old) 36 35 37 36 #define OLD_NOP 0xe1a00000 /* mov r0, r0 */ 37 + 38 + static int __ftrace_modify_code(void *data) 39 + { 40 + int *command = data; 41 + 42 + set_kernel_text_rw(); 43 + ftrace_modify_all_code(*command); 44 + set_kernel_text_ro(); 45 + 46 + return 0; 47 + } 48 + 49 + void arch_ftrace_update_code(int command) 50 + { 51 + stop_machine(__ftrace_modify_code, &command, NULL); 52 + } 38 53 39 54 static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec) 40 55 { ··· 90 73 int ftrace_arch_code_modify_post_process(void) 91 74 { 92 75 set_all_modules_text_ro(); 76 + /* Make sure any TLB misses during machine stop are cleared. */ 77 + flush_tlb_all(); 93 78 return 0; 94 79 } 95 80
+1 -1
arch/arm/kernel/jump_label.c
··· 19 19 insn = arm_gen_nop(); 20 20 21 21 if (is_static) 22 - __patch_text(addr, insn); 22 + __patch_text_early(addr, insn); 23 23 else 24 24 patch_text(addr, insn); 25 25 }
+29
arch/arm/kernel/kgdb.c
··· 12 12 #include <linux/irq.h> 13 13 #include <linux/kdebug.h> 14 14 #include <linux/kgdb.h> 15 + #include <linux/uaccess.h> 16 + 15 17 #include <asm/traps.h> 18 + 19 + #include "patch.h" 16 20 17 21 struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = 18 22 { ··· 246 242 unregister_undef_hook(&kgdb_brkpt_hook); 247 243 unregister_undef_hook(&kgdb_compiled_brkpt_hook); 248 244 unregister_die_notifier(&kgdb_notifier); 245 + } 246 + 247 + int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) 248 + { 249 + int err; 250 + 251 + /* patch_text() only supports int-sized breakpoints */ 252 + BUILD_BUG_ON(sizeof(int) != BREAK_INSTR_SIZE); 253 + 254 + err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr, 255 + BREAK_INSTR_SIZE); 256 + if (err) 257 + return err; 258 + 259 + patch_text((void *)bpt->bpt_addr, 260 + *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr); 261 + 262 + return err; 263 + } 264 + 265 + int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) 266 + { 267 + patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr); 268 + 269 + return 0; 249 270 } 250 271 251 272 /*
+5 -4
arch/arm/kernel/machine_kexec.c
··· 29 29 30 30 static atomic_t waiting_for_crash_ipi; 31 31 32 + static unsigned long dt_mem; 32 33 /* 33 34 * Provide a dummy crash_notes definition while crash dump arrives to arm. 34 35 * This prevents breakage of crash_notes attribute in kernel/ksysfs.c. ··· 65 64 return err; 66 65 67 66 if (be32_to_cpu(header) == OF_DT_HEADER) 68 - kexec_boot_atags = current_segment->mem; 67 + dt_mem = current_segment->mem; 69 68 } 70 69 return 0; 71 70 } ··· 164 163 reboot_code_buffer = page_address(image->control_code_page); 165 164 166 165 /* Prepare parameters for reboot_code_buffer*/ 166 + set_kernel_text_rw(); 167 167 kexec_start_address = image->start; 168 168 kexec_indirection_page = page_list; 169 169 kexec_mach_type = machine_arch_type; 170 - if (!kexec_boot_atags) 171 - kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET; 172 - 170 + kexec_boot_atags = dt_mem ?: image->start - KEXEC_ARM_ZIMAGE_OFFSET 171 + + KEXEC_ARM_ATAGS_OFFSET; 173 172 174 173 /* copy our kernel relocation code to the control code page */ 175 174 reboot_entry = fncpy(reboot_code_buffer,
+73 -19
arch/arm/kernel/patch.c
··· 1 1 #include <linux/kernel.h> 2 + #include <linux/spinlock.h> 2 3 #include <linux/kprobes.h> 4 + #include <linux/mm.h> 3 5 #include <linux/stop_machine.h> 4 6 5 7 #include <asm/cacheflush.h> 8 + #include <asm/fixmap.h> 6 9 #include <asm/smp_plat.h> 7 10 #include <asm/opcodes.h> 8 11 ··· 16 13 unsigned int insn; 17 14 }; 18 15 19 - void __kprobes __patch_text(void *addr, unsigned int insn) 16 + static DEFINE_SPINLOCK(patch_lock); 17 + 18 + static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags) 19 + __acquires(&patch_lock) 20 + { 21 + unsigned int uintaddr = (uintptr_t) addr; 22 + bool module = !core_kernel_text(uintaddr); 23 + struct page *page; 24 + 25 + if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX)) 26 + page = vmalloc_to_page(addr); 27 + else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA)) 28 + page = virt_to_page(addr); 29 + else 30 + return addr; 31 + 32 + if (flags) 33 + spin_lock_irqsave(&patch_lock, *flags); 34 + else 35 + __acquire(&patch_lock); 36 + 37 + set_fixmap(fixmap, page_to_phys(page)); 38 + 39 + return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK)); 40 + } 41 + 42 + static void __kprobes patch_unmap(int fixmap, unsigned long *flags) 43 + __releases(&patch_lock) 44 + { 45 + clear_fixmap(fixmap); 46 + 47 + if (flags) 48 + spin_unlock_irqrestore(&patch_lock, *flags); 49 + else 50 + __release(&patch_lock); 51 + } 52 + 53 + void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap) 20 54 { 21 55 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL); 56 + unsigned int uintaddr = (uintptr_t) addr; 57 + bool twopage = false; 58 + unsigned long flags; 59 + void *waddr = addr; 22 60 int size; 23 61 62 + if (remap) 63 + waddr = patch_map(addr, FIX_TEXT_POKE0, &flags); 64 + else 65 + __acquire(&patch_lock); 66 + 24 67 if (thumb2 && __opcode_is_thumb16(insn)) { 25 68 *(u16 *)waddr = __opcode_to_mem_thumb16(insn); 26 69 size = sizeof(u16); 27 - } else if (thumb2 && ((uintptr_t)addr & 2)) { 70 + } else if (thumb2 && (uintaddr & 2)) { 28 71 u16 first = __opcode_thumb32_first(insn); 29 72 u16 second = __opcode_thumb32_second(insn); 30 - u16 *addrh = addr; 73 + u16 *addrh0 = waddr; 74 + u16 *addrh1 = waddr + 2; 31 75 32 - addrh[0] = __opcode_to_mem_thumb16(first); 33 - addrh[1] = __opcode_to_mem_thumb16(second); 76 + twopage = (uintaddr & ~PAGE_MASK) == PAGE_SIZE - 2; 77 + if (twopage && remap) 78 + addrh1 = patch_map(addr + 2, FIX_TEXT_POKE1, NULL); 79 + 80 + *addrh0 = __opcode_to_mem_thumb16(first); 81 + *addrh1 = __opcode_to_mem_thumb16(second); 82 + 83 + if (twopage && addrh1 != addr + 2) { 84 + flush_kernel_vmap_range(addrh1, 2); 85 + patch_unmap(FIX_TEXT_POKE1, NULL); 86 + } 34 87 35 88 size = sizeof(u32); 36 89 } else { ··· 95 36 else 96 37 insn = __opcode_to_mem_arm(insn); 97 38 98 - *(u32 *)addr = insn; 39 + *(u32 *)waddr = insn; 99 40 size = sizeof(u32); 100 41 } 42 + 43 + if (waddr != addr) { 44 + flush_kernel_vmap_range(waddr, twopage ? size / 2 : size); 45 + patch_unmap(FIX_TEXT_POKE0, &flags); 46 + } else 47 + __release(&patch_lock); 101 48 102 49 flush_icache_range((uintptr_t)(addr), 103 50 (uintptr_t)(addr) + size); ··· 125 60 .insn = insn, 126 61 }; 127 62 128 - if (cache_ops_need_broadcast()) { 129 - stop_machine(patch_text_stop_machine, &patch, cpu_online_mask); 130 - } else { 131 - bool straddles_word = IS_ENABLED(CONFIG_THUMB2_KERNEL) 132 - && __opcode_is_thumb32(insn) 133 - && ((uintptr_t)addr & 2); 134 - 135 - if (straddles_word) 136 - stop_machine(patch_text_stop_machine, &patch, NULL); 137 - else 138 - __patch_text(addr, insn); 139 - } 63 + stop_machine(patch_text_stop_machine, &patch, NULL); 140 64 }
+11 -1
arch/arm/kernel/patch.h
··· 2 2 #define _ARM_KERNEL_PATCH_H 3 3 4 4 void patch_text(void *addr, unsigned int insn); 5 - void __patch_text(void *addr, unsigned int insn); 5 + void __patch_text_real(void *addr, unsigned int insn, bool remap); 6 + 7 + static inline void __patch_text(void *addr, unsigned int insn) 8 + { 9 + __patch_text_real(addr, insn, true); 10 + } 11 + 12 + static inline void __patch_text_early(void *addr, unsigned int insn) 13 + { 14 + __patch_text_real(addr, insn, false); 15 + } 6 16 7 17 #endif
+19
arch/arm/kernel/vmlinux.lds.S
··· 8 8 #include <asm/thread_info.h> 9 9 #include <asm/memory.h> 10 10 #include <asm/page.h> 11 + #ifdef CONFIG_ARM_KERNMEM_PERMS 12 + #include <asm/pgtable.h> 13 + #endif 11 14 12 15 #define PROC_INFO \ 13 16 . = ALIGN(4); \ ··· 93 90 _text = .; 94 91 HEAD_TEXT 95 92 } 93 + 94 + #ifdef CONFIG_ARM_KERNMEM_PERMS 95 + . = ALIGN(1<<SECTION_SHIFT); 96 + #endif 97 + 96 98 .text : { /* Real text segment */ 97 99 _stext = .; /* Text and read-only data */ 98 100 __exception_text_start = .; ··· 120 112 ARM_CPU_KEEP(PROC_INFO) 121 113 } 122 114 115 + #ifdef CONFIG_DEBUG_RODATA 116 + . = ALIGN(1<<SECTION_SHIFT); 117 + #endif 123 118 RO_DATA(PAGE_SIZE) 124 119 125 120 . = ALIGN(4); ··· 156 145 _etext = .; /* End of text and rodata section */ 157 146 158 147 #ifndef CONFIG_XIP_KERNEL 148 + # ifdef CONFIG_ARM_KERNMEM_PERMS 149 + . = ALIGN(1<<SECTION_SHIFT); 150 + # else 159 151 . = ALIGN(PAGE_SIZE); 152 + # endif 160 153 __init_begin = .; 161 154 #endif 162 155 /* ··· 234 219 __data_loc = ALIGN(4); /* location in binary */ 235 220 . = PAGE_OFFSET + TEXT_OFFSET; 236 221 #else 222 + #ifdef CONFIG_ARM_KERNMEM_PERMS 223 + . = ALIGN(1<<SECTION_SHIFT); 224 + #else 237 225 . = ALIGN(THREAD_SIZE); 226 + #endif 238 227 __init_end = .; 239 228 __data_loc = .; 240 229 #endif
+21
arch/arm/mm/Kconfig
··· 1009 1009 help 1010 1010 This option specifies the architecture can support big endian 1011 1011 operation. 1012 + 1013 + config ARM_KERNMEM_PERMS 1014 + bool "Restrict kernel memory permissions" 1015 + help 1016 + If this is set, kernel memory other than kernel text (and rodata) 1017 + will be made non-executable. The tradeoff is that each region is 1018 + padded to section-size (1MiB) boundaries (because their permissions 1019 + are different and splitting the 1M pages into 4K ones causes TLB 1020 + performance problems), wasting memory. 1021 + 1022 + config DEBUG_RODATA 1023 + bool "Make kernel text and rodata read-only" 1024 + depends on ARM_KERNMEM_PERMS 1025 + default y 1026 + help 1027 + If this is set, kernel text and rodata will be made read-only. This 1028 + is to help catch accidental or malicious attempts to change the 1029 + kernel's executable code. Additionally splits rodata from kernel 1030 + text so it can be made explicitly non-executable. This creates 1031 + another section-size padded region, so it can waste more memory 1032 + space while gaining the read-only protections.
+8 -7
arch/arm/mm/highmem.c
··· 18 18 #include <asm/tlbflush.h> 19 19 #include "mm.h" 20 20 21 - pte_t *fixmap_page_table; 22 - 23 21 static inline void set_fixmap_pte(int idx, pte_t pte) 24 22 { 25 23 unsigned long vaddr = __fix_to_virt(idx); 26 - set_pte_ext(fixmap_page_table + idx, pte, 0); 24 + pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr); 25 + 26 + set_pte_ext(ptep, pte, 0); 27 27 local_flush_tlb_kernel_page(vaddr); 28 28 } 29 29 30 30 static inline pte_t get_fixmap_pte(unsigned long vaddr) 31 31 { 32 - unsigned long idx = __virt_to_fix(vaddr); 33 - return *(fixmap_page_table + idx); 32 + pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr); 33 + 34 + return *ptep; 34 35 } 35 36 36 37 void *kmap(struct page *page) ··· 85 84 * With debugging enabled, kunmap_atomic forces that entry to 0. 86 85 * Make sure it was indeed properly unmapped. 87 86 */ 88 - BUG_ON(!pte_none(*(fixmap_page_table + idx))); 87 + BUG_ON(!pte_none(get_fixmap_pte(vaddr))); 89 88 #endif 90 89 /* 91 90 * When debugging is off, kunmap_atomic leaves the previous mapping ··· 138 137 idx = type + KM_TYPE_NR * smp_processor_id(); 139 138 vaddr = __fix_to_virt(idx); 140 139 #ifdef CONFIG_DEBUG_HIGHMEM 141 - BUG_ON(!pte_none(*(fixmap_page_table + idx))); 140 + BUG_ON(!pte_none(get_fixmap_pte(vaddr))); 142 141 #endif 143 142 set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot)); 144 143
+147 -2
arch/arm/mm/init.c
··· 29 29 #include <asm/prom.h> 30 30 #include <asm/sections.h> 31 31 #include <asm/setup.h> 32 + #include <asm/system_info.h> 32 33 #include <asm/tlb.h> 33 34 #include <asm/fixmap.h> 34 35 ··· 571 570 MLK(DTCM_OFFSET, (unsigned long) dtcm_end), 572 571 MLK(ITCM_OFFSET, (unsigned long) itcm_end), 573 572 #endif 574 - MLK(FIXADDR_START, FIXADDR_TOP), 573 + MLK(FIXADDR_START, FIXADDR_END), 575 574 MLM(VMALLOC_START, VMALLOC_END), 576 575 MLM(PAGE_OFFSET, (unsigned long)high_memory), 577 576 #ifdef CONFIG_HIGHMEM ··· 616 615 } 617 616 } 618 617 619 - void free_initmem(void) 618 + #ifdef CONFIG_ARM_KERNMEM_PERMS 619 + struct section_perm { 620 + unsigned long start; 621 + unsigned long end; 622 + pmdval_t mask; 623 + pmdval_t prot; 624 + pmdval_t clear; 625 + }; 626 + 627 + static struct section_perm nx_perms[] = { 628 + /* Make pages tables, etc before _stext RW (set NX). */ 629 + { 630 + .start = PAGE_OFFSET, 631 + .end = (unsigned long)_stext, 632 + .mask = ~PMD_SECT_XN, 633 + .prot = PMD_SECT_XN, 634 + }, 635 + /* Make init RW (set NX). */ 636 + { 637 + .start = (unsigned long)__init_begin, 638 + .end = (unsigned long)_sdata, 639 + .mask = ~PMD_SECT_XN, 640 + .prot = PMD_SECT_XN, 641 + }, 642 + #ifdef CONFIG_DEBUG_RODATA 643 + /* Make rodata NX (set RO in ro_perms below). */ 644 + { 645 + .start = (unsigned long)__start_rodata, 646 + .end = (unsigned long)__init_begin, 647 + .mask = ~PMD_SECT_XN, 648 + .prot = PMD_SECT_XN, 649 + }, 650 + #endif 651 + }; 652 + 653 + #ifdef CONFIG_DEBUG_RODATA 654 + static struct section_perm ro_perms[] = { 655 + /* Make kernel code and rodata RX (set RO). */ 656 + { 657 + .start = (unsigned long)_stext, 658 + .end = (unsigned long)__init_begin, 659 + #ifdef CONFIG_ARM_LPAE 660 + .mask = ~PMD_SECT_RDONLY, 661 + .prot = PMD_SECT_RDONLY, 662 + #else 663 + .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE), 664 + .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE, 665 + .clear = PMD_SECT_AP_WRITE, 666 + #endif 667 + }, 668 + }; 669 + #endif 670 + 671 + /* 672 + * Updates section permissions only for the current mm (sections are 673 + * copied into each mm). During startup, this is the init_mm. Is only 674 + * safe to be called with preemption disabled, as under stop_machine(). 675 + */ 676 + static inline void section_update(unsigned long addr, pmdval_t mask, 677 + pmdval_t prot) 678 + { 679 + struct mm_struct *mm; 680 + pmd_t *pmd; 681 + 682 + mm = current->active_mm; 683 + pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr); 684 + 685 + #ifdef CONFIG_ARM_LPAE 686 + pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot); 687 + #else 688 + if (addr & SECTION_SIZE) 689 + pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot); 690 + else 691 + pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot); 692 + #endif 693 + flush_pmd_entry(pmd); 694 + local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE); 695 + } 696 + 697 + /* Make sure extended page tables are in use. */ 698 + static inline bool arch_has_strict_perms(void) 699 + { 700 + if (cpu_architecture() < CPU_ARCH_ARMv6) 701 + return false; 702 + 703 + return !!(get_cr() & CR_XP); 704 + } 705 + 706 + #define set_section_perms(perms, field) { \ 707 + size_t i; \ 708 + unsigned long addr; \ 709 + \ 710 + if (!arch_has_strict_perms()) \ 711 + return; \ 712 + \ 713 + for (i = 0; i < ARRAY_SIZE(perms); i++) { \ 714 + if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) || \ 715 + !IS_ALIGNED(perms[i].end, SECTION_SIZE)) { \ 716 + pr_err("BUG: section %lx-%lx not aligned to %lx\n", \ 717 + perms[i].start, perms[i].end, \ 718 + SECTION_SIZE); \ 719 + continue; \ 720 + } \ 721 + \ 722 + for (addr = perms[i].start; \ 723 + addr < perms[i].end; \ 724 + addr += SECTION_SIZE) \ 725 + section_update(addr, perms[i].mask, \ 726 + perms[i].field); \ 727 + } \ 728 + } 729 + 730 + static inline void fix_kernmem_perms(void) 731 + { 732 + set_section_perms(nx_perms, prot); 733 + } 734 + 735 + #ifdef CONFIG_DEBUG_RODATA 736 + void mark_rodata_ro(void) 737 + { 738 + set_section_perms(ro_perms, prot); 739 + } 740 + 741 + void set_kernel_text_rw(void) 742 + { 743 + set_section_perms(ro_perms, clear); 744 + } 745 + 746 + void set_kernel_text_ro(void) 747 + { 748 + set_section_perms(ro_perms, prot); 749 + } 750 + #endif /* CONFIG_DEBUG_RODATA */ 751 + 752 + #else 753 + static inline void fix_kernmem_perms(void) { } 754 + #endif /* CONFIG_ARM_KERNMEM_PERMS */ 755 + 756 + void free_tcmmem(void) 620 757 { 621 758 #ifdef CONFIG_HAVE_TCM 622 759 extern char __tcm_start, __tcm_end; ··· 762 623 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); 763 624 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link"); 764 625 #endif 626 + } 627 + 628 + void free_initmem(void) 629 + { 630 + fix_kernmem_perms(); 631 + free_tcmmem(); 765 632 766 633 poison_init_mem(__init_begin, __init_end - __init_begin); 767 634 if (!machine_is_integrator() && !machine_is_cintegrator())
+35 -4
arch/arm/mm/mmu.c
··· 22 22 #include <asm/cputype.h> 23 23 #include <asm/sections.h> 24 24 #include <asm/cachetype.h> 25 + #include <asm/fixmap.h> 25 26 #include <asm/sections.h> 26 27 #include <asm/setup.h> 27 28 #include <asm/smp_plat.h> ··· 356 355 return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL; 357 356 } 358 357 EXPORT_SYMBOL(get_mem_type); 358 + 359 + /* 360 + * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range(). 361 + * As a result, this can only be called with preemption disabled, as under 362 + * stop_machine(). 363 + */ 364 + void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) 365 + { 366 + unsigned long vaddr = __fix_to_virt(idx); 367 + pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr); 368 + 369 + /* Make sure fixmap region does not exceed available allocation. */ 370 + BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) > 371 + FIXADDR_END); 372 + BUG_ON(idx >= __end_of_fixed_addresses); 373 + 374 + if (pgprot_val(prot)) 375 + set_pte_at(NULL, vaddr, pte, 376 + pfn_pte(phys >> PAGE_SHIFT, prot)); 377 + else 378 + pte_clear(NULL, vaddr, pte); 379 + local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE); 380 + } 359 381 360 382 /* 361 383 * Adjust the PMD section entries according to the CPU in use. 
··· 1320 1296 #ifdef CONFIG_HIGHMEM 1321 1297 pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE), 1322 1298 PKMAP_BASE, _PAGE_KERNEL_TABLE); 1323 - 1324 - fixmap_page_table = early_pte_alloc(pmd_off_k(FIXADDR_START), 1325 - FIXADDR_START, _PAGE_KERNEL_TABLE); 1326 1299 #endif 1300 + 1301 + early_pte_alloc(pmd_off_k(FIXADDR_START), FIXADDR_START, 1302 + _PAGE_KERNEL_TABLE); 1327 1303 } 1328 1304 1329 1305 static void __init map_lowmem(void) ··· 1343 1319 if (start >= end) 1344 1320 break; 1345 1321 1346 - if (end < kernel_x_start || start >= kernel_x_end) { 1322 + if (end < kernel_x_start) { 1347 1323 map.pfn = __phys_to_pfn(start); 1348 1324 map.virtual = __phys_to_virt(start); 1349 1325 map.length = end - start; 1350 1326 map.type = MT_MEMORY_RWX; 1327 + 1328 + create_mapping(&map); 1329 + } else if (start >= kernel_x_end) { 1330 + map.pfn = __phys_to_pfn(start); 1331 + map.virtual = __phys_to_virt(start); 1332 + map.length = end - start; 1333 + map.type = MT_MEMORY_RW; 1351 1334 1352 1335 create_mapping(&map); 1353 1336 } else {