Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM development updates from Russell King:

- Make it clear __swp_entry_to_pte() uses PTE_TYPE_FAULT

- Updates for setting vmalloc size via command line to resolve an issue
with the 8MiB hole not properly being accounted for, and clean up the
code.

- ftrace support for module PLTs

- Spelling fixes

- kbuild updates for removing generated files and pattern rules for
generating files

- Clang/llvm updates

- Change the way the kernel is mapped, placing it in vmalloc space
instead.

- Remove arm_pm_restart from arm and aarch64.

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm: (29 commits)
ARM: 9098/1: ftrace: MODULE_PLT: Fix build problem without DYNAMIC_FTRACE
ARM: 9097/1: mmu: Declare section start/end correctly
ARM: 9096/1: Remove arm_pm_restart()
ARM: 9095/1: ARM64: Remove arm_pm_restart()
ARM: 9094/1: Register with kernel restart handler
ARM: 9093/1: drivers: firmware: psci: Register with kernel restart handler
ARM: 9092/1: xen: Register with kernel restart handler
ARM: 9091/1: Revert "mm: qsd8x50: Fix incorrect permission faults"
ARM: 9090/1: Map the lowmem and kernel separately
ARM: 9089/1: Define kernel physical section start and end
ARM: 9088/1: Split KERNEL_OFFSET from PAGE_OFFSET
ARM: 9087/1: kprobes: test-thumb: fix for LLVM_IAS=1
ARM: 9086/1: syscalls: use pattern rules to generate syscall headers
ARM: 9085/1: remove unneeded abi parameter to syscallnr.sh
ARM: 9084/1: simplify the build rule of mach-types.h
ARM: 9083/1: uncompress: atags_to_fdt: Spelling s/REturn/Return/
ARM: 9082/1: [v2] mark prepare_page_table as __init
ARM: 9079/1: ftrace: Add MODULE_PLTS support
ARM: 9078/1: Add warn suppress parameter to arm_gen_branch_link()
ARM: 9077/1: PLT: Move struct plt_entries definition to header
...

+323 -159
+2
arch/arm/Kconfig.debug
··· 66 66 config UNWINDER_ARM 67 67 bool "ARM EABI stack unwinder" 68 68 depends on AEABI && !FUNCTION_GRAPH_TRACER 69 + # https://github.com/ClangBuiltLinux/linux/issues/732 70 + depends on !LD_IS_LLD || LLD_VERSION >= 110000 69 71 select ARM_UNWIND 70 72 help 71 73 This option enables stack unwinding support in the kernel
+1 -1
arch/arm/boot/compressed/Makefile
··· 100 100 lib1funcs.o ashldi3.o bswapsdi2.o \ 101 101 head.o $(OBJS) 102 102 103 - clean-files += piggy_data lib1funcs.S ashldi3.S bswapsdi2.S hyp-stub.S 103 + clean-files += lib1funcs.S ashldi3.S bswapsdi2.S hyp-stub.S 104 104 105 105 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING 106 106
+1 -1
arch/arm/boot/compressed/atags_to_fdt.c
··· 121 121 /* 122 122 * Convert and fold provided ATAGs into the provided FDT. 123 123 * 124 - * REturn values: 124 + * Return values: 125 125 * = 0 -> pretend success 126 126 * = 1 -> bad ATAG (may retry with another possible ATAG pointer) 127 127 * < 0 -> error from libfdt
+3
arch/arm/include/asm/ftrace.h
··· 15 15 16 16 #ifdef CONFIG_DYNAMIC_FTRACE 17 17 struct dyn_arch_ftrace { 18 + #ifdef CONFIG_ARM_MODULE_PLTS 19 + struct module *mod; 20 + #endif 18 21 }; 19 22 20 23 static inline unsigned long ftrace_call_adjust(unsigned long addr)
+4 -4
arch/arm/include/asm/insn.h
··· 13 13 } 14 14 15 15 unsigned long 16 - __arm_gen_branch(unsigned long pc, unsigned long addr, bool link); 16 + __arm_gen_branch(unsigned long pc, unsigned long addr, bool link, bool warn); 17 17 18 18 static inline unsigned long 19 19 arm_gen_branch(unsigned long pc, unsigned long addr) 20 20 { 21 - return __arm_gen_branch(pc, addr, false); 21 + return __arm_gen_branch(pc, addr, false, true); 22 22 } 23 23 24 24 static inline unsigned long 25 - arm_gen_branch_link(unsigned long pc, unsigned long addr) 25 + arm_gen_branch_link(unsigned long pc, unsigned long addr, bool warn) 26 26 { 27 - return __arm_gen_branch(pc, addr, true); 27 + return __arm_gen_branch(pc, addr, true, warn); 28 28 } 29 29 30 30 #endif
+14 -1
arch/arm/include/asm/memory.h
··· 20 20 #endif 21 21 #include <asm/kasan_def.h> 22 22 23 - /* PAGE_OFFSET - the virtual address of the start of the kernel image */ 23 + /* 24 + * PAGE_OFFSET: the virtual address of the start of lowmem, memory above 25 + * the virtual address range for userspace. 26 + * KERNEL_OFFSET: the virtual address of the start of the kernel image. 27 + * we may further offset this with TEXT_OFFSET in practice. 28 + */ 24 29 #define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) 30 + #define KERNEL_OFFSET (PAGE_OFFSET) 25 31 26 32 #ifdef CONFIG_MMU 27 33 ··· 157 151 #define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET) 158 152 159 153 #ifndef __ASSEMBLY__ 154 + 155 + /* 156 + * Physical start and end address of the kernel sections. These addresses are 157 + * 2MB-aligned to match the section mappings placed over the kernel. 158 + */ 159 + extern u32 kernel_sec_start; 160 + extern u32 kernel_sec_end; 160 161 161 162 /* 162 163 * Physical vs virtual RAM address space conversion. These are
+10
arch/arm/include/asm/module.h
··· 19 19 }; 20 20 #endif 21 21 22 + #define PLT_ENT_STRIDE L1_CACHE_BYTES 23 + #define PLT_ENT_COUNT (PLT_ENT_STRIDE / sizeof(u32)) 24 + #define PLT_ENT_SIZE (sizeof(struct plt_entries) / PLT_ENT_COUNT) 25 + 26 + struct plt_entries { 27 + u32 ldr[PLT_ENT_COUNT]; 28 + u32 lit[PLT_ENT_COUNT]; 29 + }; 30 + 22 31 struct mod_plt_sec { 23 32 struct elf32_shdr *plt; 33 + struct plt_entries *plt_ent; 24 34 int plt_count; 25 35 }; 26 36
+1 -1
arch/arm/include/asm/pgtable.h
··· 306 306 #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) }) 307 307 308 308 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 309 - #define __swp_entry_to_pte(swp) ((pte_t) { (swp).val }) 309 + #define __swp_entry_to_pte(swp) __pte((swp).val | PTE_TYPE_FAULT) 310 310 311 311 /* 312 312 * It is an error for the kernel to have more swap files than we can
-1
arch/arm/include/asm/system_misc.h
··· 13 13 extern void cpu_init(void); 14 14 15 15 void soft_restart(unsigned long); 16 - extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); 17 16 extern void (*arm_pm_idle)(void); 18 17 19 18 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+38 -8
arch/arm/kernel/ftrace.c
··· 68 68 return 0; 69 69 } 70 70 71 - static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr) 71 + static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr, 72 + bool warn) 72 73 { 73 - return arm_gen_branch_link(pc, addr); 74 + return arm_gen_branch_link(pc, addr, warn); 74 75 } 75 76 76 77 static int ftrace_modify_code(unsigned long pc, unsigned long old, ··· 105 104 int ret; 106 105 107 106 pc = (unsigned long)&ftrace_call; 108 - new = ftrace_call_replace(pc, (unsigned long)func); 107 + new = ftrace_call_replace(pc, (unsigned long)func, true); 109 108 110 109 ret = ftrace_modify_code(pc, 0, new, false); 111 110 112 111 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS 113 112 if (!ret) { 114 113 pc = (unsigned long)&ftrace_regs_call; 115 - new = ftrace_call_replace(pc, (unsigned long)func); 114 + new = ftrace_call_replace(pc, (unsigned long)func, true); 116 115 117 116 ret = ftrace_modify_code(pc, 0, new, false); 118 117 } ··· 125 124 { 126 125 unsigned long new, old; 127 126 unsigned long ip = rec->ip; 127 + unsigned long aaddr = adjust_address(rec, addr); 128 + struct module *mod = NULL; 129 + 130 + #ifdef CONFIG_ARM_MODULE_PLTS 131 + mod = rec->arch.mod; 132 + #endif 128 133 129 134 old = ftrace_nop_replace(rec); 130 135 131 - new = ftrace_call_replace(ip, adjust_address(rec, addr)); 136 + new = ftrace_call_replace(ip, aaddr, !mod); 137 + #ifdef CONFIG_ARM_MODULE_PLTS 138 + if (!new && mod) { 139 + aaddr = get_module_plt(mod, ip, aaddr); 140 + new = ftrace_call_replace(ip, aaddr, true); 141 + } 142 + #endif 132 143 133 144 return ftrace_modify_code(rec->ip, old, new, true); 134 145 } ··· 153 140 unsigned long new, old; 154 141 unsigned long ip = rec->ip; 155 142 156 - old = ftrace_call_replace(ip, adjust_address(rec, old_addr)); 143 + old = ftrace_call_replace(ip, adjust_address(rec, old_addr), true); 157 144 158 - new = ftrace_call_replace(ip, adjust_address(rec, addr)); 145 + new = ftrace_call_replace(ip, 
adjust_address(rec, addr), true); 159 146 160 147 return ftrace_modify_code(rec->ip, old, new, true); 161 148 } ··· 165 152 int ftrace_make_nop(struct module *mod, 166 153 struct dyn_ftrace *rec, unsigned long addr) 167 154 { 155 + unsigned long aaddr = adjust_address(rec, addr); 168 156 unsigned long ip = rec->ip; 169 157 unsigned long old; 170 158 unsigned long new; 171 159 int ret; 172 160 173 - old = ftrace_call_replace(ip, adjust_address(rec, addr)); 161 + #ifdef CONFIG_ARM_MODULE_PLTS 162 + /* mod is only supplied during module loading */ 163 + if (!mod) 164 + mod = rec->arch.mod; 165 + else 166 + rec->arch.mod = mod; 167 + #endif 168 + 169 + old = ftrace_call_replace(ip, aaddr, 170 + !IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || !mod); 171 + #ifdef CONFIG_ARM_MODULE_PLTS 172 + if (!old && mod) { 173 + aaddr = get_module_plt(mod, ip, aaddr); 174 + old = ftrace_call_replace(ip, aaddr, true); 175 + } 176 + #endif 177 + 174 178 new = ftrace_nop_replace(rec); 175 179 ret = ftrace_modify_code(ip, old, new, true); 176 180
+25 -5
arch/arm/kernel/head.S
··· 23 23 #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING) 24 24 #include CONFIG_DEBUG_LL_INCLUDE 25 25 #endif 26 - 27 26 /* 28 27 * swapper_pg_dir is the virtual address of the initial page table. 29 28 * We place the page tables 16K below KERNEL_RAM_VADDR. Therefore, we must ··· 30 31 * the least significant 16 bits to be 0x8000, but we could probably 31 32 * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000. 32 33 */ 33 - #define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET) 34 + #define KERNEL_RAM_VADDR (KERNEL_OFFSET + TEXT_OFFSET) 34 35 #if (KERNEL_RAM_VADDR & 0xffff) != 0x8000 35 36 #error KERNEL_RAM_VADDR must start at 0xXXXX8000 36 37 #endif ··· 46 47 47 48 .globl swapper_pg_dir 48 49 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE 50 + 51 + /* 52 + * This needs to be assigned at runtime when the linker symbols are 53 + * resolved. 54 + */ 55 + .pushsection .data 56 + .align 2 57 + .globl kernel_sec_start 58 + .globl kernel_sec_end 59 + kernel_sec_start: 60 + .long 0 61 + kernel_sec_end: 62 + .long 0 63 + .popsection 49 64 50 65 .macro pgtbl, rd, phys 51 66 add \rd, \phys, #TEXT_OFFSET ··· 243 230 blo 1b 244 231 245 232 /* 246 - * Map our RAM from the start to the end of the kernel .bss section. 233 + * The main matter: map in the kernel using section mappings, and 234 + * set two variables to indicate the physical start and end of the 235 + * kernel. 
247 236 */ 248 - add r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER) 237 + add r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER) 249 238 ldr r6, =(_end - 1) 250 - orr r3, r8, r7 239 + adr_l r5, kernel_sec_start @ _pa(kernel_sec_start) 240 + str r8, [r5] @ Save physical start of kernel 241 + orr r3, r8, r7 @ Add the MMU flags 251 242 add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER) 252 243 1: str r3, [r0], #1 << PMD_ORDER 253 244 add r3, r3, #1 << SECTION_SHIFT 254 245 cmp r0, r6 255 246 bls 1b 247 + eor r3, r3, r7 @ Remove the MMU flags 248 + adr_l r5, kernel_sec_end @ _pa(kernel_sec_end) 249 + str r3, [r5] @ Save physical end of kernel 256 250 257 251 #ifdef CONFIG_XIP_KERNEL 258 252 /*
+10 -9
arch/arm/kernel/insn.c
··· 3 3 #include <linux/kernel.h> 4 4 #include <asm/opcodes.h> 5 5 6 - static unsigned long 7 - __arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link) 6 + static unsigned long __arm_gen_branch_thumb2(unsigned long pc, 7 + unsigned long addr, bool link, 8 + bool warn) 8 9 { 9 10 unsigned long s, j1, j2, i1, i2, imm10, imm11; 10 11 unsigned long first, second; ··· 13 12 14 13 offset = (long)addr - (long)(pc + 4); 15 14 if (offset < -16777216 || offset > 16777214) { 16 - WARN_ON_ONCE(1); 15 + WARN_ON_ONCE(warn); 17 16 return 0; 18 17 } 19 18 ··· 34 33 return __opcode_thumb32_compose(first, second); 35 34 } 36 35 37 - static unsigned long 38 - __arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link) 36 + static unsigned long __arm_gen_branch_arm(unsigned long pc, unsigned long addr, 37 + bool link, bool warn) 39 38 { 40 39 unsigned long opcode = 0xea000000; 41 40 long offset; ··· 45 44 46 45 offset = (long)addr - (long)(pc + 8); 47 46 if (unlikely(offset < -33554432 || offset > 33554428)) { 48 - WARN_ON_ONCE(1); 47 + WARN_ON_ONCE(warn); 49 48 return 0; 50 49 } 51 50 ··· 55 54 } 56 55 57 56 unsigned long 58 - __arm_gen_branch(unsigned long pc, unsigned long addr, bool link) 57 + __arm_gen_branch(unsigned long pc, unsigned long addr, bool link, bool warn) 59 58 { 60 59 if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) 61 - return __arm_gen_branch_thumb2(pc, addr, link); 60 + return __arm_gen_branch_thumb2(pc, addr, link, warn); 62 61 else 63 - return __arm_gen_branch_arm(pc, addr, link); 62 + return __arm_gen_branch_arm(pc, addr, link, warn); 64 63 }
+38 -11
arch/arm/kernel/module-plts.c
··· 4 4 */ 5 5 6 6 #include <linux/elf.h> 7 + #include <linux/ftrace.h> 7 8 #include <linux/kernel.h> 8 9 #include <linux/module.h> 9 10 #include <linux/sort.h> ··· 12 11 13 12 #include <asm/cache.h> 14 13 #include <asm/opcodes.h> 15 - 16 - #define PLT_ENT_STRIDE L1_CACHE_BYTES 17 - #define PLT_ENT_COUNT (PLT_ENT_STRIDE / sizeof(u32)) 18 - #define PLT_ENT_SIZE (sizeof(struct plt_entries) / PLT_ENT_COUNT) 19 14 20 15 #ifdef CONFIG_THUMB2_KERNEL 21 16 #define PLT_ENT_LDR __opcode_to_mem_thumb32(0xf8dff000 | \ ··· 21 24 (PLT_ENT_STRIDE - 8)) 22 25 #endif 23 26 24 - struct plt_entries { 25 - u32 ldr[PLT_ENT_COUNT]; 26 - u32 lit[PLT_ENT_COUNT]; 27 + static const u32 fixed_plts[] = { 28 + #ifdef CONFIG_DYNAMIC_FTRACE 29 + FTRACE_ADDR, 30 + MCOUNT_ADDR, 31 + #endif 27 32 }; 28 33 29 34 static bool in_init(const struct module *mod, unsigned long loc) ··· 33 34 return loc - (u32)mod->init_layout.base < mod->init_layout.size; 34 35 } 35 36 37 + static void prealloc_fixed(struct mod_plt_sec *pltsec, struct plt_entries *plt) 38 + { 39 + int i; 40 + 41 + if (!ARRAY_SIZE(fixed_plts) || pltsec->plt_count) 42 + return; 43 + pltsec->plt_count = ARRAY_SIZE(fixed_plts); 44 + 45 + for (i = 0; i < ARRAY_SIZE(plt->ldr); ++i) 46 + plt->ldr[i] = PLT_ENT_LDR; 47 + 48 + BUILD_BUG_ON(sizeof(fixed_plts) > sizeof(plt->lit)); 49 + memcpy(plt->lit, fixed_plts, sizeof(fixed_plts)); 50 + } 51 + 36 52 u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val) 37 53 { 38 54 struct mod_plt_sec *pltsec = !in_init(mod, loc) ? 
&mod->arch.core : 39 55 &mod->arch.init; 56 + struct plt_entries *plt; 57 + int idx; 40 58 41 - struct plt_entries *plt = (struct plt_entries *)pltsec->plt->sh_addr; 42 - int idx = 0; 59 + /* cache the address, ELF header is available only during module load */ 60 + if (!pltsec->plt_ent) 61 + pltsec->plt_ent = (struct plt_entries *)pltsec->plt->sh_addr; 62 + plt = pltsec->plt_ent; 43 63 64 + prealloc_fixed(pltsec, plt); 65 + 66 + for (idx = 0; idx < ARRAY_SIZE(fixed_plts); ++idx) 67 + if (plt->lit[idx] == val) 68 + return (u32)&plt->ldr[idx]; 69 + 70 + idx = 0; 44 71 /* 45 72 * Look for an existing entry pointing to 'val'. Given that the 46 73 * relocations are sorted, this will be the last entry we allocated. ··· 214 189 int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, 215 190 char *secstrings, struct module *mod) 216 191 { 217 - unsigned long core_plts = 0; 218 - unsigned long init_plts = 0; 192 + unsigned long core_plts = ARRAY_SIZE(fixed_plts); 193 + unsigned long init_plts = ARRAY_SIZE(fixed_plts); 219 194 Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum; 220 195 Elf32_Sym *syms = NULL; 221 196 ··· 270 245 mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE, 271 246 sizeof(struct plt_entries)); 272 247 mod->arch.core.plt_count = 0; 248 + mod->arch.core.plt_ent = NULL; 273 249 274 250 mod->arch.init.plt->sh_type = SHT_NOBITS; 275 251 mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC; ··· 278 252 mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE, 279 253 sizeof(struct plt_entries)); 280 254 mod->arch.init.plt_count = 0; 255 + mod->arch.init.plt_ent = NULL; 281 256 282 257 pr_debug("%s: plt=%x, init.plt=%x\n", __func__, 283 258 mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size);
+1 -5
arch/arm/kernel/reboot.c
··· 18 18 /* 19 19 * Function pointers to optional machine specific functions 20 20 */ 21 - void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); 22 21 void (*pm_power_off)(void); 23 22 EXPORT_SYMBOL(pm_power_off); 24 23 ··· 137 138 local_irq_disable(); 138 139 smp_send_stop(); 139 140 140 - if (arm_pm_restart) 141 - arm_pm_restart(reboot_mode, cmd); 142 - else 143 - do_kernel_restart(cmd); 141 + do_kernel_restart(cmd); 144 142 145 143 /* Give a grace period for failure to restart of 1s */ 146 144 mdelay(1000);
+18 -2
arch/arm/kernel/setup.c
··· 1083 1083 #endif 1084 1084 } 1085 1085 1086 + static void (*__arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); 1087 + 1088 + static int arm_restart(struct notifier_block *nb, unsigned long action, 1089 + void *data) 1090 + { 1091 + __arm_pm_restart(action, data); 1092 + return NOTIFY_DONE; 1093 + } 1094 + 1095 + static struct notifier_block arm_restart_nb = { 1096 + .notifier_call = arm_restart, 1097 + .priority = 128, 1098 + }; 1099 + 1086 1100 void __init setup_arch(char **cmdline_p) 1087 1101 { 1088 1102 const struct machine_desc *mdesc = NULL; ··· 1165 1151 kasan_init(); 1166 1152 request_standard_resources(mdesc); 1167 1153 1168 - if (mdesc->restart) 1169 - arm_pm_restart = mdesc->restart; 1154 + if (mdesc->restart) { 1155 + __arm_pm_restart = mdesc->restart; 1156 + register_restart_handler(&arm_restart_nb); 1157 + } 1170 1158 1171 1159 unflatten_device_tree(); 1172 1160
+1 -1
arch/arm/kernel/vmlinux.lds.S
··· 47 47 #endif 48 48 } 49 49 50 - . = PAGE_OFFSET + TEXT_OFFSET; 50 + . = KERNEL_OFFSET + TEXT_OFFSET; 51 51 .head.text : { 52 52 _text = .; 53 53 HEAD_TEXT
-2
arch/arm/mm/Kconfig
··· 601 601 config CPU_TLB_V7 602 602 bool 603 603 604 - config VERIFY_PERMISSION_FAULT 605 - bool 606 604 endif 607 605 608 606 config CPU_HAS_ASID
-26
arch/arm/mm/abort-ev7.S
··· 17 17 mrc p15, 0, r1, c5, c0, 0 @ get FSR 18 18 mrc p15, 0, r0, c6, c0, 0 @ get FAR 19 19 uaccess_disable ip @ disable userspace access 20 - 21 - /* 22 - * V6 code adjusts the returned DFSR. 23 - * New designs should not need to patch up faults. 24 - */ 25 - 26 - #if defined(CONFIG_VERIFY_PERMISSION_FAULT) 27 - /* 28 - * Detect erroneous permission failures and fix 29 - */ 30 - ldr r3, =0x40d @ On permission fault 31 - and r3, r1, r3 32 - cmp r3, #0x0d 33 - bne do_DataAbort 34 - 35 - mcr p15, 0, r0, c7, c8, 0 @ Retranslate FAR 36 - isb 37 - mrc p15, 0, ip, c7, c4, 0 @ Read the PAR 38 - and r3, ip, #0x7b @ On translation fault 39 - cmp r3, #0x0b 40 - bne do_DataAbort 41 - bic r1, r1, #0xf @ Fix up FSR FS[5:0] 42 - and ip, ip, #0x7e 43 - orr r1, r1, ip, LSR #1 44 - #endif 45 - 46 20 b do_DataAbort 47 21 ENDPROC(v7_early_abort)
+123 -43
arch/arm/mm/mmu.c
··· 1121 1121 } 1122 1122 #endif 1123 1123 1124 - static void * __initdata vmalloc_min = 1125 - (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET); 1124 + static unsigned long __initdata vmalloc_size = 240 * SZ_1M; 1126 1125 1127 1126 /* 1128 1127 * vmalloc=size forces the vmalloc area to be exactly 'size' 1129 1128 * bytes. This can be used to increase (or decrease) the vmalloc 1130 - * area - the default is 240m. 1129 + * area - the default is 240MiB. 1131 1130 */ 1132 1131 static int __init early_vmalloc(char *arg) 1133 1132 { 1134 1133 unsigned long vmalloc_reserve = memparse(arg, NULL); 1134 + unsigned long vmalloc_max; 1135 1135 1136 1136 if (vmalloc_reserve < SZ_16M) { 1137 1137 vmalloc_reserve = SZ_16M; 1138 - pr_warn("vmalloc area too small, limiting to %luMB\n", 1138 + pr_warn("vmalloc area is too small, limiting to %luMiB\n", 1139 1139 vmalloc_reserve >> 20); 1140 1140 } 1141 1141 1142 - if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) { 1143 - vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M); 1144 - pr_warn("vmalloc area is too big, limiting to %luMB\n", 1142 + vmalloc_max = VMALLOC_END - (PAGE_OFFSET + SZ_32M + VMALLOC_OFFSET); 1143 + if (vmalloc_reserve > vmalloc_max) { 1144 + vmalloc_reserve = vmalloc_max; 1145 + pr_warn("vmalloc area is too big, limiting to %luMiB\n", 1145 1146 vmalloc_reserve >> 20); 1146 1147 } 1147 1148 1148 - vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve); 1149 + vmalloc_size = vmalloc_reserve; 1149 1150 return 0; 1150 1151 } 1151 1152 early_param("vmalloc", early_vmalloc); ··· 1166 1165 * and may itself be outside the valid range for which phys_addr_t 1167 1166 * and therefore __pa() is defined. 1168 1167 */ 1169 - vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET; 1168 + vmalloc_limit = (u64)VMALLOC_END - vmalloc_size - VMALLOC_OFFSET - 1169 + PAGE_OFFSET + PHYS_OFFSET; 1170 1170 1171 1171 /* 1172 1172 * The first usable region must be PMD aligned. 
Mark its start ··· 1248 1246 memblock_set_current_limit(memblock_limit); 1249 1247 } 1250 1248 1251 - static inline void prepare_page_table(void) 1249 + static __init void prepare_page_table(void) 1252 1250 { 1253 1251 unsigned long addr; 1254 1252 phys_addr_t end; ··· 1459 1457 1460 1458 static void __init map_lowmem(void) 1461 1459 { 1462 - phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE); 1463 - phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); 1464 1460 phys_addr_t start, end; 1465 1461 u64 i; 1466 1462 ··· 1466 1466 for_each_mem_range(i, &start, &end) { 1467 1467 struct map_desc map; 1468 1468 1469 + pr_debug("map lowmem start: 0x%08llx, end: 0x%08llx\n", 1470 + (long long)start, (long long)end); 1469 1471 if (end > arm_lowmem_limit) 1470 1472 end = arm_lowmem_limit; 1471 1473 if (start >= end) 1472 1474 break; 1473 1475 1474 - if (end < kernel_x_start) { 1475 - map.pfn = __phys_to_pfn(start); 1476 - map.virtual = __phys_to_virt(start); 1477 - map.length = end - start; 1478 - map.type = MT_MEMORY_RWX; 1476 + /* 1477 + * If our kernel image is in the VMALLOC area we need to remove 1478 + * the kernel physical memory from lowmem since the kernel will 1479 + * be mapped separately. 1480 + * 1481 + * The kernel will typically be at the very start of lowmem, 1482 + * but any placement relative to memory ranges is possible. 1483 + * 1484 + * If the memblock contains the kernel, we have to chisel out 1485 + * the kernel memory from it and map each part separately. 
We 1486 + * get 6 different theoretical cases: 1487 + * 1488 + * +--------+ +--------+ 1489 + * +-- start --+ +--------+ | Kernel | | Kernel | 1490 + * | | | Kernel | | case 2 | | case 5 | 1491 + * | | | case 1 | +--------+ | | +--------+ 1492 + * | Memory | +--------+ | | | Kernel | 1493 + * | range | +--------+ | | | case 6 | 1494 + * | | | Kernel | +--------+ | | +--------+ 1495 + * | | | case 3 | | Kernel | | | 1496 + * +-- end ----+ +--------+ | case 4 | | | 1497 + * +--------+ +--------+ 1498 + */ 1479 1499 1480 - create_mapping(&map); 1481 - } else if (start >= kernel_x_end) { 1482 - map.pfn = __phys_to_pfn(start); 1483 - map.virtual = __phys_to_virt(start); 1484 - map.length = end - start; 1485 - map.type = MT_MEMORY_RW; 1500 + /* Case 5: kernel covers range, don't map anything, should be rare */ 1501 + if ((start > kernel_sec_start) && (end < kernel_sec_end)) 1502 + break; 1486 1503 1487 - create_mapping(&map); 1488 - } else { 1489 - /* This better cover the entire kernel */ 1490 - if (start < kernel_x_start) { 1504 + /* Cases where the kernel is starting inside the range */ 1505 + if ((kernel_sec_start >= start) && (kernel_sec_start <= end)) { 1506 + /* Case 6: kernel is embedded in the range, we need two mappings */ 1507 + if ((start < kernel_sec_start) && (end > kernel_sec_end)) { 1508 + /* Map memory below the kernel */ 1491 1509 map.pfn = __phys_to_pfn(start); 1492 1510 map.virtual = __phys_to_virt(start); 1493 - map.length = kernel_x_start - start; 1511 + map.length = kernel_sec_start - start; 1494 1512 map.type = MT_MEMORY_RW; 1495 - 1496 1513 create_mapping(&map); 1497 - } 1498 - 1499 - map.pfn = __phys_to_pfn(kernel_x_start); 1500 - map.virtual = __phys_to_virt(kernel_x_start); 1501 - map.length = kernel_x_end - kernel_x_start; 1502 - map.type = MT_MEMORY_RWX; 1503 - 1504 - create_mapping(&map); 1505 - 1506 - if (kernel_x_end < end) { 1507 - map.pfn = __phys_to_pfn(kernel_x_end); 1508 - map.virtual = __phys_to_virt(kernel_x_end); 1509 - map.length 
= end - kernel_x_end; 1514 + /* Map memory above the kernel */ 1515 + map.pfn = __phys_to_pfn(kernel_sec_end); 1516 + map.virtual = __phys_to_virt(kernel_sec_end); 1517 + map.length = end - kernel_sec_end; 1510 1518 map.type = MT_MEMORY_RW; 1511 - 1512 1519 create_mapping(&map); 1520 + break; 1513 1521 } 1522 + /* Case 1: kernel and range start at the same address, should be common */ 1523 + if (kernel_sec_start == start) 1524 + start = kernel_sec_end; 1525 + /* Case 3: kernel and range end at the same address, should be rare */ 1526 + if (kernel_sec_end == end) 1527 + end = kernel_sec_start; 1528 + } else if ((kernel_sec_start < start) && (kernel_sec_end > start) && (kernel_sec_end < end)) { 1529 + /* Case 2: kernel ends inside range, starts below it */ 1530 + start = kernel_sec_end; 1531 + } else if ((kernel_sec_start > start) && (kernel_sec_start < end) && (kernel_sec_end > end)) { 1532 + /* Case 4: kernel starts inside range, ends above it */ 1533 + end = kernel_sec_start; 1514 1534 } 1535 + map.pfn = __phys_to_pfn(start); 1536 + map.virtual = __phys_to_virt(start); 1537 + map.length = end - start; 1538 + map.type = MT_MEMORY_RW; 1539 + create_mapping(&map); 1515 1540 } 1541 + } 1542 + 1543 + static void __init map_kernel(void) 1544 + { 1545 + /* 1546 + * We use the well known kernel section start and end and split the area in the 1547 + * middle like this: 1548 + * . . 1549 + * | RW memory | 1550 + * +----------------+ kernel_x_start 1551 + * | Executable | 1552 + * | kernel memory | 1553 + * +----------------+ kernel_x_end / kernel_nx_start 1554 + * | Non-executable | 1555 + * | kernel memory | 1556 + * +----------------+ kernel_nx_end 1557 + * | RW memory | 1558 + * . . 1559 + * 1560 + * Notice that we are dealing with section sized mappings here so all of this 1561 + * will be bumped to the closest section boundary. This means that some of the 1562 + * non-executable part of the kernel memory is actually mapped as executable. 
1563 + * This will only persist until we turn on proper memory management later on 1564 + * and we remap the whole kernel with page granularity. 1565 + */ 1566 + phys_addr_t kernel_x_start = kernel_sec_start; 1567 + phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); 1568 + phys_addr_t kernel_nx_start = kernel_x_end; 1569 + phys_addr_t kernel_nx_end = kernel_sec_end; 1570 + struct map_desc map; 1571 + 1572 + map.pfn = __phys_to_pfn(kernel_x_start); 1573 + map.virtual = __phys_to_virt(kernel_x_start); 1574 + map.length = kernel_x_end - kernel_x_start; 1575 + map.type = MT_MEMORY_RWX; 1576 + create_mapping(&map); 1577 + 1578 + /* If the nx part is small it may end up covered by the tail of the RWX section */ 1579 + if (kernel_x_end == kernel_nx_end) 1580 + return; 1581 + 1582 + map.pfn = __phys_to_pfn(kernel_nx_start); 1583 + map.virtual = __phys_to_virt(kernel_nx_start); 1584 + map.length = kernel_nx_end - kernel_nx_start; 1585 + map.type = MT_MEMORY_RW; 1586 + create_mapping(&map); 1516 1587 } 1517 1588 1518 1589 #ifdef CONFIG_ARM_PV_FIXUP ··· 1716 1645 { 1717 1646 void *zero_page; 1718 1647 1648 + pr_debug("physical kernel sections: 0x%08x-0x%08x\n", 1649 + kernel_sec_start, kernel_sec_end); 1650 + 1719 1651 prepare_page_table(); 1720 1652 map_lowmem(); 1721 1653 memblock_set_current_limit(arm_lowmem_limit); 1654 + pr_debug("lowmem limit is %08llx\n", (long long)arm_lowmem_limit); 1655 + /* 1656 + * After this point early_alloc(), i.e. the memblock allocator, can 1657 + * be used 1658 + */ 1659 + map_kernel(); 1722 1660 dma_contiguous_remap(); 1723 1661 early_fixmap_shutdown(); 1724 1662 devicemaps_init(mdesc);
+5 -5
arch/arm/probes/kprobes/test-thumb.c
··· 441 441 "3: mvn r0, r0 \n\t" 442 442 "2: nop \n\t") 443 443 444 - TEST_RX("tbh [pc, r",7, (9f-(1f+4))>>1,"]", 444 + TEST_RX("tbh [pc, r",7, (9f-(1f+4))>>1,", lsl #1]", 445 445 "9: \n\t" 446 446 ".short (2f-1b-4)>>1 \n\t" 447 447 ".short (3f-1b-4)>>1 \n\t" 448 448 "3: mvn r0, r0 \n\t" 449 449 "2: nop \n\t") 450 450 451 - TEST_RX("tbh [pc, r",12, ((9f-(1f+4))>>1)+1,"]", 451 + TEST_RX("tbh [pc, r",12, ((9f-(1f+4))>>1)+1,", lsl #1]", 452 452 "9: \n\t" 453 453 ".short (2f-1b-4)>>1 \n\t" 454 454 ".short (3f-1b-4)>>1 \n\t" 455 455 "3: mvn r0, r0 \n\t" 456 456 "2: nop \n\t") 457 457 458 - TEST_RRX("tbh [r",1,9f, ", r",14,1,"]", 458 + TEST_RRX("tbh [r",1,9f, ", r",14,1,", lsl #1]", 459 459 "9: \n\t" 460 460 ".short (2f-1b-4)>>1 \n\t" 461 461 ".short (3f-1b-4)>>1 \n\t" ··· 468 468 469 469 TEST_UNSUPPORTED("strexb r0, r1, [r2]") 470 470 TEST_UNSUPPORTED("strexh r0, r1, [r2]") 471 - TEST_UNSUPPORTED("strexd r0, r1, [r2]") 471 + TEST_UNSUPPORTED("strexd r0, r1, r2, [r2]") 472 472 TEST_UNSUPPORTED("ldrexb r0, [r1]") 473 473 TEST_UNSUPPORTED("ldrexh r0, [r1]") 474 - TEST_UNSUPPORTED("ldrexd r0, [r1]") 474 + TEST_UNSUPPORTED("ldrexd r0, r1, [r1]") 475 475 476 476 TEST_GROUP("Data-processing (shifted register) and (modified immediate)") 477 477
+6 -19
arch/arm/tools/Makefile
··· 33 33 $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') 34 34 35 35 quiet_cmd_gen_mach = GEN $@ 36 - cmd_gen_mach = mkdir -p $(dir $@) && \ 37 - $(AWK) -f $(filter-out $(PHONY),$^) > $@ 36 + cmd_gen_mach = $(AWK) -f $(real-prereqs) > $@ 38 37 39 38 $(kapi)/mach-types.h: $(src)/gen-mach-types $(src)/mach-types FORCE 40 39 $(call if_changed,gen_mach) 41 40 42 41 quiet_cmd_syshdr = SYSHDR $@ 43 - cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --abis $(abis) \ 42 + cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --abis common,$* \ 44 43 --offset __NR_SYSCALL_BASE $< $@ 45 44 46 45 quiet_cmd_systbl = SYSTBL $@ 47 - cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis $(abis) $< $@ 46 + cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis common,$* $< $@ 48 47 49 48 quiet_cmd_sysnr = SYSNR $@ 50 - cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \ 51 - '$(syshdr_abi_$(basetarget))' 49 + cmd_sysnr = $(CONFIG_SHELL) $(sysnr) $< $@ 52 50 53 - $(uapi)/unistd-oabi.h: abis := common,oabi 54 - $(uapi)/unistd-oabi.h: $(syscall) $(syshdr) FORCE 51 + $(uapi)/unistd-%.h: $(syscall) $(syshdr) FORCE 55 52 $(call if_changed,syshdr) 56 53 57 - $(uapi)/unistd-eabi.h: abis := common,eabi 58 - $(uapi)/unistd-eabi.h: $(syscall) $(syshdr) FORCE 59 - $(call if_changed,syshdr) 60 - 61 - sysnr_abi_unistd-nr := common,oabi,eabi,compat 62 54 $(kapi)/unistd-nr.h: $(syscall) $(sysnr) FORCE 63 55 $(call if_changed,sysnr) 64 56 65 - $(gen)/calls-oabi.S: abis := common,oabi 66 - $(gen)/calls-oabi.S: $(syscall) $(systbl) FORCE 67 - $(call if_changed,systbl) 68 - 69 - $(gen)/calls-eabi.S: abis := common,eabi 70 - $(gen)/calls-eabi.S: $(syscall) $(systbl) FORCE 57 + $(gen)/calls-%.S: $(syscall) $(systbl) FORCE 71 58 $(call if_changed,systbl)
+1 -2
arch/arm/tools/syscallnr.sh
··· 2 2 # SPDX-License-Identifier: GPL-2.0 3 3 in="$1" 4 4 out="$2" 5 - my_abis=`echo "($3)" | tr ',' '|'` 6 5 align=1 7 6 8 7 fileguard=_ASM_ARM_`basename "$out" | sed \ 9 8 -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ 10 9 -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` 11 10 12 - grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | tail -n1 | ( 11 + grep -E "^[0-9A-Fa-fXx]+[[:space:]]+" "$in" | sort -n | tail -n1 | ( 13 12 echo "#ifndef ${fileguard} 14 13 #define ${fileguard} 1 15 14
+10 -2
arch/arm/xen/enlighten.c
··· 29 29 #include <linux/cpu.h> 30 30 #include <linux/console.h> 31 31 #include <linux/pvclock_gtod.h> 32 + #include <linux/reboot.h> 32 33 #include <linux/time64.h> 33 34 #include <linux/timekeeping.h> 34 35 #include <linux/timekeeper_internal.h> ··· 182 181 BUG_ON(rc); 183 182 } 184 183 185 - static void xen_restart(enum reboot_mode reboot_mode, const char *cmd) 184 + static int xen_restart(struct notifier_block *nb, unsigned long action, 185 + void *data) 186 186 { 187 187 xen_reboot(SHUTDOWN_reboot); 188 + 189 + return NOTIFY_DONE; 188 190 } 189 191 192 + static struct notifier_block xen_restart_nb = { 193 + .notifier_call = xen_restart, 194 + .priority = 192, 195 + }; 190 196 191 197 static void xen_power_off(void) 192 198 { ··· 412 404 return -ENODEV; 413 405 414 406 pm_power_off = xen_power_off; 415 - arm_pm_restart = xen_restart; 407 + register_restart_handler(&xen_restart_nb); 416 408 if (!xen_initial_domain()) { 417 409 struct timespec64 ts; 418 410 xen_read_wallclock(&ts);
-2
arch/arm64/include/asm/system_misc.h
··· 32 32 struct mm_struct; 33 33 extern void __show_regs(struct pt_regs *); 34 34 35 - extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); 36 - 37 35 #endif /* __ASSEMBLY__ */ 38 36 39 37 #endif /* __ASM_SYSTEM_MISC_H */
+1 -6
arch/arm64/kernel/process.c
··· 70 70 void (*pm_power_off)(void); 71 71 EXPORT_SYMBOL_GPL(pm_power_off); 72 72 73 - void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd); 74 - 75 73 #ifdef CONFIG_HOTPLUG_CPU 76 74 void arch_cpu_idle_dead(void) 77 75 { ··· 140 142 efi_reboot(reboot_mode, NULL); 141 143 142 144 /* Now call the architecture specific reboot code. */ 143 - if (arm_pm_restart) 144 - arm_pm_restart(reboot_mode, cmd); 145 - else 146 - do_kernel_restart(cmd); 145 + do_kernel_restart(cmd); 147 146 148 147 /* 149 148 * Whoops - the architecture was unable to reboot.
+10 -2
drivers/firmware/psci/psci.c
··· 296 296 return 0; 297 297 } 298 298 299 - static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd) 299 + static int psci_sys_reset(struct notifier_block *nb, unsigned long action, 300 + void *data) 300 301 { 301 302 if ((reboot_mode == REBOOT_WARM || reboot_mode == REBOOT_SOFT) && 302 303 psci_system_reset2_supported) { ··· 310 309 } else { 311 310 invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0); 312 311 } 312 + 313 + return NOTIFY_DONE; 313 314 } 315 + 316 + static struct notifier_block psci_sys_reset_nb = { 317 + .notifier_call = psci_sys_reset, 318 + .priority = 129, 319 + }; 314 320 315 321 static void psci_sys_poweroff(void) 316 322 { ··· 485 477 .migrate_info_type = psci_migrate_info_type, 486 478 }; 487 479 488 - arm_pm_restart = psci_sys_reset; 480 + register_restart_handler(&psci_sys_reset_nb); 489 481 490 482 pm_power_off = psci_sys_poweroff; 491 483 }