Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:
"Lots of little things this time:

- allow modules to be autoloaded according to the HWCAP feature bits
(used primarily for crypto modules)

- split module core and init PLT sections, since the core code and
init code could be placed far apart, and the PLT sections need to
be local to the code block.

- three patches from Chris Brandt to allow Cortex-A9 L2 cache
optimisations to be disabled where a SoC didn't wire up the
out-of-band signals.

- NoMMU compliance fixes, avoiding corruption of the vector table
(which is not being used at this point) and avoiding possible
register state corruption when switching mode.

- fixmap memory attribute compliance update.

- remove unnecessary locking from update_sections_early()

- ftrace fix for DEBUG_RODATA with !FRAME_POINTER"

* 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
ARM: 8672/1: mm: remove tasklist locking from update_sections_early()
ARM: 8671/1: V7M: Preserve registers across switch from Thread to Handler mode
ARM: 8670/1: V7M: Do not corrupt vector table around v7m_invalidate_l1 call
ARM: 8668/1: ftrace: Fix dynamic ftrace with DEBUG_RODATA and !FRAME_POINTER
ARM: 8667/3: Fix memory attribute inconsistencies when using fixmap
ARM: 8663/1: wire up HWCAP/HWCAP2 feature bits to the CPU modalias
ARM: 8666/1: mm: dump: Add domain to output
ARM: 8662/1: module: split core and init PLT sections
ARM: 8661/1: dts: r7s72100: add l2 cache
ARM: 8660/1: shmobile: r7s72100: Enable L2 cache
ARM: 8659/1: l2c: allow CA9 optimizations to be disabled

+211 -58
+3
Documentation/devicetree/bindings/arm/l2c2x0.txt
··· 90 90 - arm,standby-mode: L2 standby mode enable. Value <0> (forcibly disable), 91 91 <1> (forcibly enable), property absent (OS specific behavior, 92 92 preferably retain firmware settings) 93 + - arm,early-bresp-disable : Disable the CA9 optimization Early BRESP (PL310) 94 + - arm,full-line-zero-disable : Disable the CA9 optimization Full line of zero 95 + write (PL310) 93 96 94 97 Example: 95 98
+1
arch/arm/Kconfig
··· 27 27 select GENERIC_ALLOCATOR 28 28 select GENERIC_ATOMIC64 if (CPU_V7M || CPU_V6 || !CPU_32v6K || !AEABI) 29 29 select GENERIC_CLOCKEVENTS_BROADCAST if SMP 30 + select GENERIC_CPU_AUTOPROBE 30 31 select GENERIC_EARLY_IOREMAP 31 32 select GENERIC_IDLE_POLL_SETUP 32 33 select GENERIC_IRQ_PROBE
+11
arch/arm/boot/dts/r7s72100.dtsi
··· 177 177 compatible = "arm,cortex-a9"; 178 178 reg = <0>; 179 179 clock-frequency = <400000000>; 180 + next-level-cache = <&L2>; 180 181 }; 181 182 }; 182 183 ··· 367 366 interrupt-controller; 368 367 reg = <0xe8201000 0x1000>, 369 368 <0xe8202000 0x1000>; 369 + }; 370 + 371 + L2: cache-controller@3ffff000 { 372 + compatible = "arm,pl310-cache"; 373 + reg = <0x3ffff000 0x1000>; 374 + interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>; 375 + arm,early-bresp-disable; 376 + arm,full-line-zero-disable; 377 + cache-unified; 378 + cache-level = <2>; 370 379 }; 371 380 372 381 i2c0: i2c@fcfee000 {
+38
arch/arm/include/asm/cpufeature.h
··· 1 + /* 2 + * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org> 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + */ 8 + 9 + #ifndef __ASM_CPUFEATURE_H 10 + #define __ASM_CPUFEATURE_H 11 + 12 + #include <linux/log2.h> 13 + #include <asm/hwcap.h> 14 + 15 + /* 16 + * Due to the fact that ELF_HWCAP is a 32-bit type on ARM, and given the number 17 + * of optional CPU features it defines, ARM's CPU hardware capability bits have 18 + * been distributed over separate elf_hwcap and elf_hwcap2 variables, each of 19 + * which covers a subset of the available CPU features. 20 + * 21 + * Currently, only a few of those are suitable for automatic module loading 22 + * (which is the primary use case of this facility) and those happen to be all 23 + * covered by HWCAP2. So let's only cover those via the cpu_feature() 24 + * convenience macro for now (which is used by module_cpu_feature_match()). 25 + * However, all capabilities are exposed via the modalias, and can be matched 26 + * using an explicit MODULE_DEVICE_TABLE() that uses __hwcap_feature() directly. 27 + */ 28 + #define MAX_CPU_FEATURES 64 29 + #define __hwcap_feature(x) ilog2(HWCAP_ ## x) 30 + #define __hwcap2_feature(x) (32 + ilog2(HWCAP2_ ## x)) 31 + #define cpu_feature(x) __hwcap2_feature(x) 32 + 33 + static inline bool cpu_have_feature(unsigned int num) 34 + { 35 + return num < 32 ? elf_hwcap & BIT(num) : elf_hwcap2 & BIT(num - 32); 36 + } 37 + 38 + #endif
+1 -1
arch/arm/include/asm/fixmap.h
··· 41 41 42 42 #define FIXMAP_PAGE_COMMON (L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY) 43 43 44 - #define FIXMAP_PAGE_NORMAL (FIXMAP_PAGE_COMMON | L_PTE_MT_WRITEBACK) 44 + #define FIXMAP_PAGE_NORMAL (pgprot_kernel | L_PTE_XN) 45 45 #define FIXMAP_PAGE_RO (FIXMAP_PAGE_NORMAL | L_PTE_RDONLY) 46 46 47 47 /* Used by set_fixmap_(io|nocache), both meant for mapping a device */
+7 -2
arch/arm/include/asm/module.h
··· 18 18 }; 19 19 #endif 20 20 21 + struct mod_plt_sec { 22 + struct elf32_shdr *plt; 23 + int plt_count; 24 + }; 25 + 21 26 struct mod_arch_specific { 22 27 #ifdef CONFIG_ARM_UNWIND 23 28 struct unwind_table *unwind[ARM_SEC_MAX]; 24 29 #endif 25 30 #ifdef CONFIG_ARM_MODULE_PLTS 26 - struct elf32_shdr *plt; 27 - int plt_count; 31 + struct mod_plt_sec core; 32 + struct mod_plt_sec init; 28 33 #endif 29 34 }; 30 35
+6 -5
arch/arm/kernel/ftrace.c
··· 29 29 #endif 30 30 31 31 #ifdef CONFIG_DYNAMIC_FTRACE 32 - #ifdef CONFIG_OLD_MCOUNT 33 - #define OLD_MCOUNT_ADDR ((unsigned long) mcount) 34 - #define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old) 35 - 36 - #define OLD_NOP 0xe1a00000 /* mov r0, r0 */ 37 32 38 33 static int __ftrace_modify_code(void *data) 39 34 { ··· 45 50 { 46 51 stop_machine(__ftrace_modify_code, &command, NULL); 47 52 } 53 + 54 + #ifdef CONFIG_OLD_MCOUNT 55 + #define OLD_MCOUNT_ADDR ((unsigned long) mcount) 56 + #define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old) 57 + 58 + #define OLD_NOP 0xe1a00000 /* mov r0, r0 */ 48 59 49 60 static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec) 50 61 {
+59 -26
arch/arm/kernel/module-plts.c
··· 1 1 /* 2 - * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org> 2 + * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org> 3 3 * 4 4 * This program is free software; you can redistribute it and/or modify 5 5 * it under the terms of the GNU General Public License version 2 as ··· 31 31 u32 lit[PLT_ENT_COUNT]; 32 32 }; 33 33 34 + static bool in_init(const struct module *mod, unsigned long loc) 35 + { 36 + return loc - (u32)mod->init_layout.base < mod->init_layout.size; 37 + } 38 + 34 39 u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val) 35 40 { 36 - struct plt_entries *plt = (struct plt_entries *)mod->arch.plt->sh_addr; 41 + struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core : 42 + &mod->arch.init; 43 + 44 + struct plt_entries *plt = (struct plt_entries *)pltsec->plt->sh_addr; 37 45 int idx = 0; 38 46 39 47 /* ··· 49 41 * relocations are sorted, this will be the last entry we allocated. 50 42 * (if one exists). 51 43 */ 52 - if (mod->arch.plt_count > 0) { 53 - plt += (mod->arch.plt_count - 1) / PLT_ENT_COUNT; 54 - idx = (mod->arch.plt_count - 1) % PLT_ENT_COUNT; 44 + if (pltsec->plt_count > 0) { 45 + plt += (pltsec->plt_count - 1) / PLT_ENT_COUNT; 46 + idx = (pltsec->plt_count - 1) % PLT_ENT_COUNT; 55 47 56 48 if (plt->lit[idx] == val) 57 49 return (u32)&plt->ldr[idx]; ··· 61 53 plt++; 62 54 } 63 55 64 - mod->arch.plt_count++; 65 - BUG_ON(mod->arch.plt_count * PLT_ENT_SIZE > mod->arch.plt->sh_size); 56 + pltsec->plt_count++; 57 + BUG_ON(pltsec->plt_count * PLT_ENT_SIZE > pltsec->plt->sh_size); 66 58 67 59 if (!idx) 68 60 /* Populate a new set of entries */ ··· 137 129 138 130 /* Count how many PLT entries we may need */ 139 131 static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base, 140 - const Elf32_Rel *rel, int num) 132 + const Elf32_Rel *rel, int num, Elf32_Word dstidx) 141 133 { 142 134 unsigned int ret = 0; 143 135 const Elf32_Sym *s; ··· 152 144 case R_ARM_THM_JUMP24: 153 145 /* 154 
146 * We only have to consider branch targets that resolve 155 - * to undefined symbols. This is not simply a heuristic, 156 - * it is a fundamental limitation, since the PLT itself 157 - * is part of the module, and needs to be within range 158 - * as well, so modules can never grow beyond that limit. 147 + * to symbols that are defined in a different section. 148 + * This is not simply a heuristic, it is a fundamental 149 + * limitation, since there is no guaranteed way to emit 150 + * PLT entries sufficiently close to the branch if the 151 + * section size exceeds the range of a branch 152 + * instruction. So ignore relocations against defined 153 + * symbols if they live in the same section as the 154 + * relocation target. 159 155 */ 160 156 s = syms + ELF32_R_SYM(rel[i].r_info); 161 - if (s->st_shndx != SHN_UNDEF) 157 + if (s->st_shndx == dstidx) 162 158 break; 163 159 164 160 /* ··· 173 161 * So we need to support them, but there is no need to 174 162 * take them into consideration when trying to optimize 175 163 * this code. So let's only check for duplicates when 176 - * the addend is zero. 164 + * the addend is zero. (Note that calls into the core 165 + * module via init PLT entries could involve section 166 + * relative symbol references with non-zero addends, for 167 + * which we may end up emitting duplicates, but the init 168 + * PLT is released along with the rest of the .init 169 + * region as soon as module loading completes.) 
177 170 */ 178 171 if (!is_zero_addend_relocation(base, rel + i) || 179 172 !duplicate_rel(base, rel, i)) ··· 191 174 int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, 192 175 char *secstrings, struct module *mod) 193 176 { 194 - unsigned long plts = 0; 177 + unsigned long core_plts = 0; 178 + unsigned long init_plts = 0; 195 179 Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum; 196 180 Elf32_Sym *syms = NULL; 197 181 ··· 202 184 */ 203 185 for (s = sechdrs; s < sechdrs_end; ++s) { 204 186 if (strcmp(".plt", secstrings + s->sh_name) == 0) 205 - mod->arch.plt = s; 187 + mod->arch.core.plt = s; 188 + else if (strcmp(".init.plt", secstrings + s->sh_name) == 0) 189 + mod->arch.init.plt = s; 206 190 else if (s->sh_type == SHT_SYMTAB) 207 191 syms = (Elf32_Sym *)s->sh_addr; 208 192 } 209 193 210 - if (!mod->arch.plt) { 211 - pr_err("%s: module PLT section missing\n", mod->name); 194 + if (!mod->arch.core.plt || !mod->arch.init.plt) { 195 + pr_err("%s: module PLT section(s) missing\n", mod->name); 212 196 return -ENOEXEC; 213 197 } 214 198 if (!syms) { ··· 233 213 /* sort by type and symbol index */ 234 214 sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL); 235 215 236 - plts += count_plts(syms, dstsec->sh_addr, rels, numrels); 216 + if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0) 217 + core_plts += count_plts(syms, dstsec->sh_addr, rels, 218 + numrels, s->sh_info); 219 + else 220 + init_plts += count_plts(syms, dstsec->sh_addr, rels, 221 + numrels, s->sh_info); 237 222 } 238 223 239 - mod->arch.plt->sh_type = SHT_NOBITS; 240 - mod->arch.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC; 241 - mod->arch.plt->sh_addralign = L1_CACHE_BYTES; 242 - mod->arch.plt->sh_size = round_up(plts * PLT_ENT_SIZE, 243 - sizeof(struct plt_entries)); 244 - mod->arch.plt_count = 0; 224 + mod->arch.core.plt->sh_type = SHT_NOBITS; 225 + mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC; 226 + mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES; 227 + 
mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE, 228 + sizeof(struct plt_entries)); 229 + mod->arch.core.plt_count = 0; 245 230 246 - pr_debug("%s: plt=%x\n", __func__, mod->arch.plt->sh_size); 231 + mod->arch.init.plt->sh_type = SHT_NOBITS; 232 + mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC; 233 + mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES; 234 + mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE, 235 + sizeof(struct plt_entries)); 236 + mod->arch.init.plt_count = 0; 237 + 238 + pr_debug("%s: plt=%x, init.plt=%x\n", __func__, 239 + mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size); 247 240 return 0; 248 241 }
+1
arch/arm/kernel/module.lds
··· 1 1 SECTIONS { 2 2 .plt : { BYTE(0) } 3 + .init.plt : { BYTE(0) } 3 4 }
+2 -2
arch/arm/kernel/setup.c
··· 80 80 81 81 extern void init_default_cache_policy(unsigned long); 82 82 extern void paging_init(const struct machine_desc *desc); 83 - extern void early_paging_init(const struct machine_desc *); 83 + extern void early_mm_init(const struct machine_desc *); 84 84 extern void adjust_lowmem_bounds(void); 85 85 extern enum reboot_mode reboot_mode; 86 86 extern void setup_dma_zone(const struct machine_desc *desc); ··· 1088 1088 parse_early_param(); 1089 1089 1090 1090 #ifdef CONFIG_MMU 1091 - early_paging_init(mdesc); 1091 + early_mm_init(mdesc); 1092 1092 #endif 1093 1093 setup_dma_zone(mdesc); 1094 1094 xen_early_init();
+2
arch/arm/mach-shmobile/setup-r7s72100.c
··· 26 26 }; 27 27 28 28 DT_MACHINE_START(R7S72100_DT, "Generic R7S72100 (Flattened Device Tree)") 29 + .l2c_aux_val = 0, 30 + .l2c_aux_mask = ~0, 29 31 .init_early = shmobile_init_delay, 30 32 .init_late = shmobile_init_late, 31 33 .dt_compat = r7s72100_boards_compat_dt,
+11 -2
arch/arm/mm/cache-l2x0.c
··· 57 57 58 58 struct l2x0_regs l2x0_saved_regs; 59 59 60 + static bool l2x0_bresp_disable; 61 + static bool l2x0_flz_disable; 62 + 60 63 /* 61 64 * Common code for all cache controllers. 62 65 */ ··· 623 620 u32 aux = l2x0_saved_regs.aux_ctrl; 624 621 625 622 if (rev >= L310_CACHE_ID_RTL_R2P0) { 626 - if (cortex_a9) { 623 + if (cortex_a9 && !l2x0_bresp_disable) { 627 624 aux |= L310_AUX_CTRL_EARLY_BRESP; 628 625 pr_info("L2C-310 enabling early BRESP for Cortex-A9\n"); 629 626 } else if (aux & L310_AUX_CTRL_EARLY_BRESP) { ··· 632 629 } 633 630 } 634 631 635 - if (cortex_a9) { 632 + if (cortex_a9 && !l2x0_flz_disable) { 636 633 u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL); 637 634 u32 acr = get_auxcr(); 638 635 ··· 1202 1199 *aux_val &= ~L2C_AUX_CTRL_PARITY_ENABLE; 1203 1200 *aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE; 1204 1201 } 1202 + 1203 + if (of_property_read_bool(np, "arm,early-bresp-disable")) 1204 + l2x0_bresp_disable = true; 1205 + 1206 + if (of_property_read_bool(np, "arm,full-line-zero-disable")) 1207 + l2x0_flz_disable = true; 1205 1208 1206 1209 prefetch = l2x0_saved_regs.prefetch_ctrl; 1207 1210
+44 -10
arch/arm/mm/dump.c
··· 17 17 #include <linux/mm.h> 18 18 #include <linux/seq_file.h> 19 19 20 + #include <asm/domain.h> 20 21 #include <asm/fixmap.h> 21 22 #include <asm/memory.h> 22 23 #include <asm/pgtable.h> ··· 44 43 unsigned long start_address; 45 44 unsigned level; 46 45 u64 current_prot; 46 + const char *current_domain; 47 47 }; 48 48 49 49 struct prot_bits { ··· 218 216 } 219 217 } 220 218 221 - static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u64 val) 219 + static void note_page(struct pg_state *st, unsigned long addr, 220 + unsigned int level, u64 val, const char *domain) 222 221 { 223 222 static const char units[] = "KMGTPE"; 224 223 u64 prot = val & pg_level[level].mask; ··· 227 224 if (!st->level) { 228 225 st->level = level; 229 226 st->current_prot = prot; 227 + st->current_domain = domain; 230 228 seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); 231 229 } else if (prot != st->current_prot || level != st->level || 230 + domain != st->current_domain || 232 231 addr >= st->marker[1].start_address) { 233 232 const char *unit = units; 234 233 unsigned long delta; ··· 245 240 unit++; 246 241 } 247 242 seq_printf(st->seq, "%9lu%c", delta, *unit); 243 + if (st->current_domain) 244 + seq_printf(st->seq, " %s", st->current_domain); 248 245 if (pg_level[st->level].bits) 249 246 dump_prot(st, pg_level[st->level].bits, pg_level[st->level].num); 250 247 seq_printf(st->seq, "\n"); ··· 258 251 } 259 252 st->start_address = addr; 260 253 st->current_prot = prot; 254 + st->current_domain = domain; 261 255 st->level = level; 262 256 } 263 257 } 264 258 265 - static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start) 259 + static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start, 260 + const char *domain) 266 261 { 267 262 pte_t *pte = pte_offset_kernel(pmd, 0); 268 263 unsigned long addr; ··· 272 263 273 264 for (i = 0; i < PTRS_PER_PTE; i++, pte++) { 274 265 addr = start + i * PAGE_SIZE; 275 - note_page(st, addr, 4, 
pte_val(*pte)); 266 + note_page(st, addr, 4, pte_val(*pte), domain); 276 267 } 268 + } 269 + 270 + static const char *get_domain_name(pmd_t *pmd) 271 + { 272 + #ifndef CONFIG_ARM_LPAE 273 + switch (pmd_val(*pmd) & PMD_DOMAIN_MASK) { 274 + case PMD_DOMAIN(DOMAIN_KERNEL): 275 + return "KERNEL "; 276 + case PMD_DOMAIN(DOMAIN_USER): 277 + return "USER "; 278 + case PMD_DOMAIN(DOMAIN_IO): 279 + return "IO "; 280 + case PMD_DOMAIN(DOMAIN_VECTORS): 281 + return "VECTORS"; 282 + default: 283 + return "unknown"; 284 + } 285 + #endif 286 + return NULL; 277 287 } 278 288 279 289 static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) ··· 300 272 pmd_t *pmd = pmd_offset(pud, 0); 301 273 unsigned long addr; 302 274 unsigned i; 275 + const char *domain; 303 276 304 277 for (i = 0; i < PTRS_PER_PMD; i++, pmd++) { 305 278 addr = start + i * PMD_SIZE; 279 + domain = get_domain_name(pmd); 306 280 if (pmd_none(*pmd) || pmd_large(*pmd) || !pmd_present(*pmd)) 307 - note_page(st, addr, 3, pmd_val(*pmd)); 281 + note_page(st, addr, 3, pmd_val(*pmd), domain); 308 282 else 309 - walk_pte(st, pmd, addr); 283 + walk_pte(st, pmd, addr, domain); 310 284 311 - if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1])) 312 - note_page(st, addr + SECTION_SIZE, 3, pmd_val(pmd[1])); 285 + if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1])) { 286 + addr += SECTION_SIZE; 287 + pmd++; 288 + domain = get_domain_name(pmd); 289 + note_page(st, addr, 3, pmd_val(*pmd), domain); 290 + } 313 291 } 314 292 } 315 293 ··· 330 296 if (!pud_none(*pud)) { 331 297 walk_pmd(st, pud, addr); 332 298 } else { 333 - note_page(st, addr, 2, pud_val(*pud)); 299 + note_page(st, addr, 2, pud_val(*pud), NULL); 334 300 } 335 301 } 336 302 } ··· 351 317 if (!pgd_none(*pgd)) { 352 318 walk_pud(&st, pgd, addr); 353 319 } else { 354 - note_page(&st, addr, 1, pgd_val(*pgd)); 320 + note_page(&st, addr, 1, pgd_val(*pgd), NULL); 355 321 } 356 322 } 357 323 358 - note_page(&st, 0, 0, 0); 324 + note_page(&st, 0, 0, 0, NULL); 359 
325 } 360 326 361 327 static int ptdump_show(struct seq_file *m, void *v)
+8 -5
arch/arm/mm/init.c
··· 709 709 710 710 } 711 711 712 + /** 713 + * update_sections_early intended to be called only through stop_machine 714 + * framework and executed by only one CPU while all other CPUs will spin and 715 + * wait, so no locking is required in this function. 716 + */ 712 717 static void update_sections_early(struct section_perm perms[], int n) 713 718 { 714 719 struct task_struct *t, *s; 715 720 716 - read_lock(&tasklist_lock); 717 721 for_each_process(t) { 718 722 if (t->flags & PF_KTHREAD) 719 723 continue; 720 724 for_each_thread(t, s) 721 725 set_section_perms(perms, n, true, s->mm); 722 726 } 723 - read_unlock(&tasklist_lock); 724 727 set_section_perms(perms, n, true, current->active_mm); 725 728 set_section_perms(perms, n, true, &init_mm); 726 729 } 727 730 728 - int __fix_kernmem_perms(void *unused) 731 + static int __fix_kernmem_perms(void *unused) 729 732 { 730 733 update_sections_early(nx_perms, ARRAY_SIZE(nx_perms)); 731 734 return 0; 732 735 } 733 736 734 - void fix_kernmem_perms(void) 737 + static void fix_kernmem_perms(void) 735 738 { 736 739 stop_machine(__fix_kernmem_perms, NULL, NULL); 737 740 } 738 741 739 - int __mark_rodata_ro(void *unused) 742 + static int __mark_rodata_ro(void *unused) 740 743 { 741 744 update_sections_early(ro_perms, ARRAY_SIZE(ro_perms)); 742 745 return 0;
+13 -3
arch/arm/mm/mmu.c
··· 414 414 FIXADDR_END); 415 415 BUG_ON(idx >= __end_of_fixed_addresses); 416 416 417 + /* we only support device mappings until pgprot_kernel has been set */ 418 + if (WARN_ON(pgprot_val(prot) != pgprot_val(FIXMAP_PAGE_IO) && 419 + pgprot_val(pgprot_kernel) == 0)) 420 + return; 421 + 417 422 if (pgprot_val(prot)) 418 423 set_pte_at(NULL, vaddr, pte, 419 424 pfn_pte(phys >> PAGE_SHIFT, prot)); ··· 1497 1492 * early_paging_init() recreates boot time page table setup, allowing machines 1498 1493 * to switch over to a high (>4G) address space on LPAE systems 1499 1494 */ 1500 - void __init early_paging_init(const struct machine_desc *mdesc) 1495 + static void __init early_paging_init(const struct machine_desc *mdesc) 1501 1496 { 1502 1497 pgtables_remap *lpae_pgtables_remap; 1503 1498 unsigned long pa_pgd; ··· 1565 1560 1566 1561 #else 1567 1562 1568 - void __init early_paging_init(const struct machine_desc *mdesc) 1563 + static void __init early_paging_init(const struct machine_desc *mdesc) 1569 1564 { 1570 1565 long long offset; 1571 1566 ··· 1621 1616 { 1622 1617 void *zero_page; 1623 1618 1624 - build_mem_type_table(); 1625 1619 prepare_page_table(); 1626 1620 map_lowmem(); 1627 1621 memblock_set_current_limit(arm_lowmem_limit); ··· 1639 1635 1640 1636 empty_zero_page = virt_to_page(zero_page); 1641 1637 __flush_dcache_page(NULL, empty_zero_page); 1638 + } 1639 + 1640 + void __init early_mm_init(const struct machine_desc *mdesc) 1641 + { 1642 + build_mem_type_table(); 1643 + early_paging_init(mdesc); 1642 1644 }
+4 -2
arch/arm/mm/proc-v7m.S
··· 135 135 dsb 136 136 mov r6, lr @ save LR 137 137 ldr sp, =init_thread_union + THREAD_START_SP 138 + stmia sp, {r0-r3, r12} 138 139 cpsie i 139 140 svc #0 140 141 1: cpsid i 142 + ldmia sp, {r0-r3, r12} 141 143 str r5, [r12, #11 * 4] @ restore the original SVC vector entry 142 144 mov lr, r6 @ restore LR 143 145 ··· 149 147 150 148 @ Configure caches (if implemented) 151 149 teq r8, #0 152 - stmneia r12, {r0-r6, lr} @ v7m_invalidate_l1 touches r0-r6 150 + stmneia sp, {r0-r6, lr} @ v7m_invalidate_l1 touches r0-r6 153 151 blne v7m_invalidate_l1 154 152 teq r8, #0 @ re-evalutae condition 155 - ldmneia r12, {r0-r6, lr} 153 + ldmneia sp, {r0-r6, lr} 156 154 157 155 @ Configure the System Control Register to ensure 8-byte stack alignment 158 156 @ Note the STKALIGN bit is either RW or RAO.