Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:

- nommu updates from Afzal Mohammed cleaning up the vectors support

- allow DMA memory "mapping" for nommu from Benjamin Gaignard

- fixing a correctness issue with R_ARM_PREL31 relocations in the
module linker

- add strlen() prototype for the decompressor

- support for DEBUG_VIRTUAL from Florian Fainelli

- adjusting memory bounds after memory reservations have been
registered

- uniphier cache handling updates from Masahiro Yamada

- initrd and Thumb Kconfig cleanups

* 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm: (23 commits)
ARM: mm: round the initrd reservation to page boundaries
ARM: mm: clean up initrd initialisation
ARM: mm: move initrd init code out of arm_memblock_init()
ARM: 8655/1: improve NOMMU definition of pgprot_*()
ARM: 8654/1: decompressor: add strlen prototype
ARM: 8652/1: cache-uniphier: clean up active way setup code
ARM: 8651/1: cache-uniphier: include <linux/errno.h> instead of <linux/types.h>
ARM: 8650/1: module: handle negative R_ARM_PREL31 addends correctly
ARM: 8649/2: nommu: remove Hivecs configuration is asm
ARM: 8648/2: nommu: display vectors base
ARM: 8647/2: nommu: dynamic exception base address setting
ARM: 8646/1: mmu: decouple VECTORS_BASE from Kconfig
ARM: 8644/1: Reduce "CPU: shutdown" message to debug level
ARM: 8641/1: treewide: Replace uses of virt_to_phys with __pa_symbol
ARM: 8640/1: Add support for CONFIG_DEBUG_VIRTUAL
ARM: 8639/1: Define KERNEL_START and KERNEL_END
ARM: 8638/1: mtd: lart: Rename partition defines to be prefixed with PART_
ARM: 8637/1: Adjust memory boundaries after reservations
ARM: 8636/1: Cleanup sanity_check_meminfo
ARM: add CPU_THUMB_CAPABLE to indicate possible Thumb support
...

+386 -218
+1
arch/arm/Kconfig
··· 2 2 bool 3 3 default y 4 4 select ARCH_CLOCKSOURCE_DATA 5 + select ARCH_HAS_DEBUG_VIRTUAL 5 6 select ARCH_HAS_DEVMEM_IS_ALLOWED 6 7 select ARCH_HAS_ELF_RANDOMIZE 7 8 select ARCH_HAS_SET_MEMORY
+1 -2
arch/arm/Kconfig-nommu
··· 34 34 used instead of the auto-probing which utilizes the register. 35 35 36 36 config REMAP_VECTORS_TO_RAM 37 - bool 'Install vectors to the beginning of RAM' if DRAM_BASE 38 - depends on DRAM_BASE 37 + bool 'Install vectors to the beginning of RAM' 39 38 help 40 39 The kernel needs to change the hardware exception vectors. 41 40 In nommu mode, the hardware exception vectors are normally
+1
arch/arm/boot/compressed/decompress.c
··· 32 32 33 33 /* Not needed, but used in some headers pulled in by decompressors */ 34 34 extern char * strstr(const char * s1, const char *s2); 35 + extern size_t strlen(const char *s); 35 36 36 37 #ifdef CONFIG_KERNEL_GZIP 37 38 #include "../../../../lib/decompress_inflate.c"
+6 -6
arch/arm/common/mcpm_entry.c
··· 144 144 145 145 void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr) 146 146 { 147 - unsigned long val = ptr ? virt_to_phys(ptr) : 0; 147 + unsigned long val = ptr ? __pa_symbol(ptr) : 0; 148 148 mcpm_entry_vectors[cluster][cpu] = val; 149 149 sync_cache_w(&mcpm_entry_vectors[cluster][cpu]); 150 150 } ··· 299 299 * the kernel as if the power_up method just had deasserted reset 300 300 * on the CPU. 301 301 */ 302 - phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset); 303 - phys_reset(virt_to_phys(mcpm_entry_point)); 302 + phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset); 303 + phys_reset(__pa_symbol(mcpm_entry_point)); 304 304 305 305 /* should never get here */ 306 306 BUG(); ··· 388 388 __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN); 389 389 __mcpm_cpu_down(cpu, cluster); 390 390 391 - phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset); 392 - phys_reset(virt_to_phys(mcpm_entry_point)); 391 + phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset); 392 + phys_reset(__pa_symbol(mcpm_entry_point)); 393 393 BUG(); 394 394 } 395 395 ··· 449 449 sync_cache_w(&mcpm_sync); 450 450 451 451 if (power_up_setup) { 452 - mcpm_power_up_setup_phys = virt_to_phys(power_up_setup); 452 + mcpm_power_up_setup_phys = __pa_symbol(power_up_setup); 453 453 sync_cache_w(&mcpm_power_up_setup_phys); 454 454 } 455 455
+1 -1
arch/arm/include/asm/hardware/cache-uniphier.h
··· 16 16 #ifndef __CACHE_UNIPHIER_H 17 17 #define __CACHE_UNIPHIER_H 18 18 19 - #include <linux/types.h> 19 + #include <linux/errno.h> 20 20 21 21 #ifdef CONFIG_CACHE_UNIPHIER 22 22 int uniphier_cache_init(void);
+27 -2
arch/arm/include/asm/memory.h
··· 83 83 #define IOREMAP_MAX_ORDER 24 84 84 #endif 85 85 86 + #define VECTORS_BASE UL(0xffff0000) 87 + 86 88 #else /* CONFIG_MMU */ 89 + 90 + #ifndef __ASSEMBLY__ 91 + extern unsigned long vectors_base; 92 + #define VECTORS_BASE vectors_base 93 + #endif 87 94 88 95 /* 89 96 * The limitation of user task size can grow up to the end of free ram region. ··· 117 110 #define XIP_VIRT_ADDR(physaddr) (physaddr) 118 111 119 112 #endif /* !CONFIG_MMU */ 113 + 114 + #ifdef CONFIG_XIP_KERNEL 115 + #define KERNEL_START _sdata 116 + #else 117 + #define KERNEL_START _stext 118 + #endif 119 + #define KERNEL_END _end 120 120 121 121 /* 122 122 * We fix the TCM memories max 32 KiB ITCM resp DTCM at these ··· 220 206 : "r" (x), "I" (__PV_BITS_31_24) \ 221 207 : "cc") 222 208 223 - static inline phys_addr_t __virt_to_phys(unsigned long x) 209 + static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x) 224 210 { 225 211 phys_addr_t t; 226 212 ··· 252 238 #define PHYS_OFFSET PLAT_PHYS_OFFSET 253 239 #define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT)) 254 240 255 - static inline phys_addr_t __virt_to_phys(unsigned long x) 241 + static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x) 256 242 { 257 243 return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET; 258 244 } ··· 267 253 #define virt_to_pfn(kaddr) \ 268 254 ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \ 269 255 PHYS_PFN_OFFSET) 256 + 257 + #define __pa_symbol_nodebug(x) __virt_to_phys_nodebug((x)) 258 + 259 + #ifdef CONFIG_DEBUG_VIRTUAL 260 + extern phys_addr_t __virt_to_phys(unsigned long x); 261 + extern phys_addr_t __phys_addr_symbol(unsigned long x); 262 + #else 263 + #define __virt_to_phys(x) __virt_to_phys_nodebug(x) 264 + #define __phys_addr_symbol(x) __pa_symbol_nodebug(x) 265 + #endif 270 266 271 267 /* 272 268 * These are *only* valid on the kernel direct mapped RAM memory. ··· 300 276 * Drivers should NOT use these either. 
301 277 */ 302 278 #define __pa(x) __virt_to_phys((unsigned long)(x)) 279 + #define __pa_symbol(x) __phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0)) 303 280 #define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) 304 281 #define pfn_to_kaddr(pfn) __va((phys_addr_t)(pfn) << PAGE_SHIFT) 305 282
+3 -3
arch/arm/include/asm/pgtable-nommu.h
··· 63 63 /* 64 64 * Mark the prot value as uncacheable and unbufferable. 65 65 */ 66 - #define pgprot_noncached(prot) __pgprot(0) 67 - #define pgprot_writecombine(prot) __pgprot(0) 68 - #define pgprot_dmacoherent(prot) __pgprot(0) 66 + #define pgprot_noncached(prot) (prot) 67 + #define pgprot_writecombine(prot) (prot) 68 + #define pgprot_dmacoherent(prot) (prot) 69 69 70 70 71 71 /*
-5
arch/arm/kernel/head-nommu.S
··· 152 152 #ifdef CONFIG_CPU_ICACHE_DISABLE 153 153 bic r0, r0, #CR_I 154 154 #endif 155 - #ifdef CONFIG_CPU_HIGH_VECTOR 156 - orr r0, r0, #CR_V 157 - #else 158 - bic r0, r0, #CR_V 159 - #endif 160 155 mcr p15, 0, r0, c1, c0, 0 @ write control reg 161 156 #elif defined (CONFIG_CPU_V7M) 162 157 /* For V7M systems we want to modify the CCR similarly to the SCTLR */
+11 -2
arch/arm/kernel/module.c
··· 155 155 break; 156 156 157 157 case R_ARM_PREL31: 158 - offset = *(u32 *)loc + sym->st_value - loc; 159 - *(u32 *)loc = offset & 0x7fffffff; 158 + offset = (*(s32 *)loc << 1) >> 1; /* sign extend */ 159 + offset += sym->st_value - loc; 160 + if (offset >= 0x40000000 || offset < -0x40000000) { 161 + pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n", 162 + module->name, relindex, i, symname, 163 + ELF32_R_TYPE(rel->r_info), loc, 164 + sym->st_value); 165 + return -ENOEXEC; 166 + } 167 + *(u32 *)loc &= 0x80000000; 168 + *(u32 *)loc |= offset & 0x7fffffff; 160 169 break; 161 170 162 171 case R_ARM_MOVW_ABS_NC:
+8 -2
arch/arm/kernel/setup.c
··· 81 81 extern void init_default_cache_policy(unsigned long); 82 82 extern void paging_init(const struct machine_desc *desc); 83 83 extern void early_paging_init(const struct machine_desc *); 84 - extern void sanity_check_meminfo(void); 84 + extern void adjust_lowmem_bounds(void); 85 85 extern enum reboot_mode reboot_mode; 86 86 extern void setup_dma_zone(const struct machine_desc *desc); 87 87 ··· 1093 1093 setup_dma_zone(mdesc); 1094 1094 xen_early_init(); 1095 1095 efi_init(); 1096 - sanity_check_meminfo(); 1096 + /* 1097 + * Make sure the calculation for lowmem/highmem is set appropriately 1098 + * before reserving/allocating any mmeory 1099 + */ 1100 + adjust_lowmem_bounds(); 1097 1101 arm_memblock_init(mdesc); 1102 + /* Memory may have been removed so recalculate the bounds. */ 1103 + adjust_lowmem_bounds(); 1098 1104 1099 1105 early_ioremap_reset(); 1100 1106
+1 -1
arch/arm/kernel/smp.c
··· 251 251 pr_err("CPU%u: cpu didn't die\n", cpu); 252 252 return; 253 253 } 254 - pr_notice("CPU%u: shutdown\n", cpu); 254 + pr_debug("CPU%u: shutdown\n", cpu); 255 255 256 256 /* 257 257 * platform_cpu_kill() is generally expected to do the powering off
+1 -1
arch/arm/mach-alpine/platsmp.c
··· 27 27 { 28 28 phys_addr_t addr; 29 29 30 - addr = virt_to_phys(secondary_startup); 30 + addr = __pa_symbol(secondary_startup); 31 31 32 32 if (addr > (phys_addr_t)(uint32_t)(-1)) { 33 33 pr_err("FAIL: resume address over 32bit (%pa)", &addr);
+1 -1
arch/arm/mach-axxia/platsmp.c
··· 25 25 static void write_release_addr(u32 release_phys) 26 26 { 27 27 u32 *virt = (u32 *) phys_to_virt(release_phys); 28 - writel_relaxed(virt_to_phys(secondary_startup), virt); 28 + writel_relaxed(__pa_symbol(secondary_startup), virt); 29 29 /* Make sure this store is visible to other CPUs */ 30 30 smp_wmb(); 31 31 __cpuc_flush_dcache_area(virt, sizeof(u32));
+1 -1
arch/arm/mach-bcm/bcm63xx_smp.c
··· 135 135 } 136 136 137 137 /* Write the secondary init routine to the BootLUT reset vector */ 138 - val = virt_to_phys(secondary_startup); 138 + val = __pa_symbol(secondary_startup); 139 139 writel_relaxed(val, bootlut_base + BOOTLUT_RESET_VECT); 140 140 141 141 /* Power up the core, will jump straight to its reset vector when we
+1 -1
arch/arm/mach-bcm/platsmp-brcmstb.c
··· 151 151 * Set the reset vector to point to the secondary_startup 152 152 * routine 153 153 */ 154 - cpu_set_boot_addr(cpu, virt_to_phys(secondary_startup)); 154 + cpu_set_boot_addr(cpu, __pa_symbol(secondary_startup)); 155 155 156 156 /* Unhalt the cpu */ 157 157 cpu_rst_cfg_set(cpu, 0);
+2 -2
arch/arm/mach-bcm/platsmp.c
··· 116 116 return -ENOMEM; 117 117 } 118 118 119 - secondary_startup_phy = virt_to_phys(secondary_startup); 119 + secondary_startup_phy = __pa_symbol(secondary_startup); 120 120 BUG_ON(secondary_startup_phy > (phys_addr_t)U32_MAX); 121 121 122 122 writel_relaxed(secondary_startup_phy, sku_rom_lut); ··· 189 189 * Secondary cores will start in secondary_startup(), 190 190 * defined in "arch/arm/kernel/head.S" 191 191 */ 192 - boot_func = virt_to_phys(secondary_startup); 192 + boot_func = __pa_symbol(secondary_startup); 193 193 BUG_ON(boot_func & BOOT_ADDR_CPUID_MASK); 194 194 BUG_ON(boot_func > (phys_addr_t)U32_MAX); 195 195
+3 -2
arch/arm/mach-berlin/platsmp.c
··· 15 15 16 16 #include <asm/cacheflush.h> 17 17 #include <asm/cp15.h> 18 + #include <asm/memory.h> 18 19 #include <asm/smp_plat.h> 19 20 #include <asm/smp_scu.h> 20 21 ··· 76 75 if (!cpu_ctrl) 77 76 goto unmap_scu; 78 77 79 - vectors_base = ioremap(CONFIG_VECTORS_BASE, SZ_32K); 78 + vectors_base = ioremap(VECTORS_BASE, SZ_32K); 80 79 if (!vectors_base) 81 80 goto unmap_scu; 82 81 ··· 93 92 * Write the secondary startup address into the SW reset address 94 93 * vector. This is used by boot_inst. 95 94 */ 96 - writel(virt_to_phys(secondary_startup), vectors_base + SW_RESET_ADDR); 95 + writel(__pa_symbol(secondary_startup), vectors_base + SW_RESET_ADDR); 97 96 98 97 iounmap(vectors_base); 99 98 unmap_scu:
+2 -2
arch/arm/mach-exynos/firmware.c
··· 41 41 case FW_DO_IDLE_AFTR: 42 42 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) 43 43 exynos_save_cp15(); 44 - writel_relaxed(virt_to_phys(exynos_cpu_resume_ns), 44 + writel_relaxed(__pa_symbol(exynos_cpu_resume_ns), 45 45 sysram_ns_base_addr + 0x24); 46 46 writel_relaxed(EXYNOS_AFTR_MAGIC, sysram_ns_base_addr + 0x20); 47 47 if (soc_is_exynos3250()) { ··· 135 135 exynos_save_cp15(); 136 136 137 137 writel(EXYNOS_SLEEP_MAGIC, sysram_ns_base_addr + EXYNOS_BOOT_FLAG); 138 - writel(virt_to_phys(exynos_cpu_resume_ns), 138 + writel(__pa_symbol(exynos_cpu_resume_ns), 139 139 sysram_ns_base_addr + EXYNOS_BOOT_ADDR); 140 140 141 141 return cpu_suspend(0, exynos_cpu_suspend);
+1 -1
arch/arm/mach-exynos/mcpm-exynos.c
··· 221 221 */ 222 222 __raw_writel(0xe59f0000, ns_sram_base_addr); /* ldr r0, [pc, #0] */ 223 223 __raw_writel(0xe12fff10, ns_sram_base_addr + 4); /* bx r0 */ 224 - __raw_writel(virt_to_phys(mcpm_entry_point), ns_sram_base_addr + 8); 224 + __raw_writel(__pa_symbol(mcpm_entry_point), ns_sram_base_addr + 8); 225 225 } 226 226 227 227 static struct syscore_ops exynos_mcpm_syscore_ops = {
+2 -2
arch/arm/mach-exynos/platsmp.c
··· 353 353 354 354 smp_rmb(); 355 355 356 - boot_addr = virt_to_phys(exynos4_secondary_startup); 356 + boot_addr = __pa_symbol(exynos4_secondary_startup); 357 357 358 358 ret = exynos_set_boot_addr(core_id, boot_addr); 359 359 if (ret) ··· 413 413 414 414 mpidr = cpu_logical_map(i); 415 415 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); 416 - boot_addr = virt_to_phys(exynos4_secondary_startup); 416 + boot_addr = __pa_symbol(exynos4_secondary_startup); 417 417 418 418 ret = exynos_set_boot_addr(core_id, boot_addr); 419 419 if (ret)
+3 -3
arch/arm/mach-exynos/pm.c
··· 132 132 133 133 static void exynos_cpu_set_boot_vector(long flags) 134 134 { 135 - writel_relaxed(virt_to_phys(exynos_cpu_resume), 135 + writel_relaxed(__pa_symbol(exynos_cpu_resume), 136 136 exynos_boot_vector_addr()); 137 137 writel_relaxed(flags, exynos_boot_vector_flag()); 138 138 } ··· 238 238 239 239 abort: 240 240 if (cpu_online(1)) { 241 - unsigned long boot_addr = virt_to_phys(exynos_cpu_resume); 241 + unsigned long boot_addr = __pa_symbol(exynos_cpu_resume); 242 242 243 243 /* 244 244 * Set the boot vector to something non-zero ··· 330 330 331 331 static void exynos_pre_enter_aftr(void) 332 332 { 333 - unsigned long boot_addr = virt_to_phys(exynos_cpu_resume); 333 + unsigned long boot_addr = __pa_symbol(exynos_cpu_resume); 334 334 335 335 (void)exynos_set_boot_addr(1, boot_addr); 336 336 }
+3 -3
arch/arm/mach-exynos/suspend.c
··· 301 301 exynos_pm_enter_sleep_mode(); 302 302 303 303 /* ensure at least INFORM0 has the resume address */ 304 - pmu_raw_writel(virt_to_phys(exynos_cpu_resume), S5P_INFORM0); 304 + pmu_raw_writel(__pa_symbol(exynos_cpu_resume), S5P_INFORM0); 305 305 } 306 306 307 307 static void exynos3250_pm_prepare(void) ··· 318 318 exynos_pm_enter_sleep_mode(); 319 319 320 320 /* ensure at least INFORM0 has the resume address */ 321 - pmu_raw_writel(virt_to_phys(exynos_cpu_resume), S5P_INFORM0); 321 + pmu_raw_writel(__pa_symbol(exynos_cpu_resume), S5P_INFORM0); 322 322 } 323 323 324 324 static void exynos5420_pm_prepare(void) ··· 343 343 344 344 /* ensure at least INFORM0 has the resume address */ 345 345 if (IS_ENABLED(CONFIG_EXYNOS5420_MCPM)) 346 - pmu_raw_writel(virt_to_phys(mcpm_entry_point), S5P_INFORM0); 346 + pmu_raw_writel(__pa_symbol(mcpm_entry_point), S5P_INFORM0); 347 347 348 348 tmp = pmu_raw_readl(EXYNOS_L2_OPTION(0)); 349 349 tmp &= ~EXYNOS_L2_USE_RETENTION;
+1 -1
arch/arm/mach-hisi/platmcpm.c
··· 327 327 */ 328 328 writel_relaxed(hip04_boot_method[0], relocation); 329 329 writel_relaxed(0xa5a5a5a5, relocation + 4); /* magic number */ 330 - writel_relaxed(virt_to_phys(secondary_startup), relocation + 8); 330 + writel_relaxed(__pa_symbol(secondary_startup), relocation + 8); 331 331 writel_relaxed(0, relocation + 12); 332 332 iounmap(relocation); 333 333
+3 -3
arch/arm/mach-hisi/platsmp.c
··· 28 28 cpu = cpu_logical_map(cpu); 29 29 if (!cpu || !ctrl_base) 30 30 return; 31 - writel_relaxed(virt_to_phys(jump_addr), ctrl_base + ((cpu - 1) << 2)); 31 + writel_relaxed(__pa_symbol(jump_addr), ctrl_base + ((cpu - 1) << 2)); 32 32 } 33 33 34 34 int hi3xxx_get_cpu_jump(int cpu) ··· 118 118 { 119 119 phys_addr_t jumpaddr; 120 120 121 - jumpaddr = virt_to_phys(secondary_startup); 121 + jumpaddr = __pa_symbol(secondary_startup); 122 122 hix5hd2_set_scu_boot_addr(HIX5HD2_BOOT_ADDRESS, jumpaddr); 123 123 hix5hd2_set_cpu(cpu, true); 124 124 arch_send_wakeup_ipi_mask(cpumask_of(cpu)); ··· 156 156 struct device_node *node; 157 157 158 158 159 - jumpaddr = virt_to_phys(secondary_startup); 159 + jumpaddr = __pa_symbol(secondary_startup); 160 160 hip01_set_boot_addr(HIP01_BOOT_ADDRESS, jumpaddr); 161 161 162 162 node = of_find_compatible_node(NULL, NULL, "hisilicon,hip01-sysctrl");
+1 -1
arch/arm/mach-imx/platsmp.c
··· 117 117 dcfg_base = of_iomap(np, 0); 118 118 BUG_ON(!dcfg_base); 119 119 120 - paddr = virt_to_phys(secondary_startup); 120 + paddr = __pa_symbol(secondary_startup); 121 121 writel_relaxed(cpu_to_be32(paddr), dcfg_base + DCFG_CCSR_SCRATCHRW1); 122 122 123 123 iounmap(dcfg_base);
+1 -1
arch/arm/mach-imx/pm-imx6.c
··· 499 499 memset(suspend_ocram_base, 0, sizeof(*pm_info)); 500 500 pm_info = suspend_ocram_base; 501 501 pm_info->pbase = ocram_pbase; 502 - pm_info->resume_addr = virt_to_phys(v7_cpu_resume); 502 + pm_info->resume_addr = __pa_symbol(v7_cpu_resume); 503 503 pm_info->pm_info_size = sizeof(*pm_info); 504 504 505 505 /*
+1 -1
arch/arm/mach-imx/src.c
··· 99 99 void imx_set_cpu_jump(int cpu, void *jump_addr) 100 100 { 101 101 cpu = cpu_logical_map(cpu); 102 - writel_relaxed(virt_to_phys(jump_addr), 102 + writel_relaxed(__pa_symbol(jump_addr), 103 103 src_base + SRC_GPR1 + cpu * 8); 104 104 } 105 105
+1 -1
arch/arm/mach-mediatek/platsmp.c
··· 122 122 * write the address of slave startup address into the system-wide 123 123 * jump register 124 124 */ 125 - writel_relaxed(virt_to_phys(secondary_startup_arm), 125 + writel_relaxed(__pa_symbol(secondary_startup_arm), 126 126 mtk_smp_base + mtk_smp_info->jump_reg); 127 127 } 128 128
+1 -1
arch/arm/mach-mvebu/pm.c
··· 110 110 { 111 111 phys_addr_t resume_pc; 112 112 113 - resume_pc = virt_to_phys(armada_370_xp_cpu_resume); 113 + resume_pc = __pa_symbol(armada_370_xp_cpu_resume); 114 114 115 115 /* 116 116 * The bootloader expects the first two words to be a magic
+1 -1
arch/arm/mach-mvebu/pmsu.c
··· 112 112 113 113 void mvebu_pmsu_set_cpu_boot_addr(int hw_cpu, void *boot_addr) 114 114 { 115 - writel(virt_to_phys(boot_addr), pmsu_mp_base + 115 + writel(__pa_symbol(boot_addr), pmsu_mp_base + 116 116 PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu)); 117 117 } 118 118
+1 -1
arch/arm/mach-mvebu/system-controller.c
··· 153 153 if (of_machine_is_compatible("marvell,armada375")) 154 154 mvebu_armada375_smp_wa_init(); 155 155 156 - writel(virt_to_phys(boot_addr), system_controller_base + 156 + writel(__pa_symbol(boot_addr), system_controller_base + 157 157 mvebu_sc->resume_boot_addr); 158 158 } 159 159 #endif
+4 -4
arch/arm/mach-omap2/control.c
··· 315 315 scratchpad_contents.boot_config_ptr = 0x0; 316 316 if (cpu_is_omap3630()) 317 317 scratchpad_contents.public_restore_ptr = 318 - virt_to_phys(omap3_restore_3630); 318 + __pa_symbol(omap3_restore_3630); 319 319 else if (omap_rev() != OMAP3430_REV_ES3_0 && 320 320 omap_rev() != OMAP3430_REV_ES3_1 && 321 321 omap_rev() != OMAP3430_REV_ES3_1_2) 322 322 scratchpad_contents.public_restore_ptr = 323 - virt_to_phys(omap3_restore); 323 + __pa_symbol(omap3_restore); 324 324 else 325 325 scratchpad_contents.public_restore_ptr = 326 - virt_to_phys(omap3_restore_es3); 326 + __pa_symbol(omap3_restore_es3); 327 327 328 328 if (omap_type() == OMAP2_DEVICE_TYPE_GP) 329 329 scratchpad_contents.secure_ram_restore_ptr = 0x0; ··· 395 395 sdrc_block_contents.flags = 0x0; 396 396 sdrc_block_contents.block_size = 0x0; 397 397 398 - arm_context_addr = virt_to_phys(omap3_arm_context); 398 + arm_context_addr = __pa_symbol(omap3_arm_context); 399 399 400 400 /* Copy all the contents to the scratchpad location */ 401 401 scratchpad_address = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD);
+6 -6
arch/arm/mach-omap2/omap-mpuss-lowpower.c
··· 273 273 cpu_clear_prev_logic_pwrst(cpu); 274 274 pwrdm_set_next_pwrst(pm_info->pwrdm, power_state); 275 275 pwrdm_set_logic_retst(pm_info->pwrdm, cpu_logic_state); 276 - set_cpu_wakeup_addr(cpu, virt_to_phys(omap_pm_ops.resume)); 276 + set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.resume)); 277 277 omap_pm_ops.scu_prepare(cpu, power_state); 278 278 l2x0_pwrst_prepare(cpu, save_state); 279 279 ··· 325 325 326 326 pwrdm_clear_all_prev_pwrst(pm_info->pwrdm); 327 327 pwrdm_set_next_pwrst(pm_info->pwrdm, power_state); 328 - set_cpu_wakeup_addr(cpu, virt_to_phys(omap_pm_ops.hotplug_restart)); 328 + set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.hotplug_restart)); 329 329 omap_pm_ops.scu_prepare(cpu, power_state); 330 330 331 331 /* ··· 467 467 sar_base = omap4_get_sar_ram_base(); 468 468 469 469 if (cpu_is_omap443x()) 470 - startup_pa = virt_to_phys(omap4_secondary_startup); 470 + startup_pa = __pa_symbol(omap4_secondary_startup); 471 471 else if (cpu_is_omap446x()) 472 - startup_pa = virt_to_phys(omap4460_secondary_startup); 472 + startup_pa = __pa_symbol(omap4460_secondary_startup); 473 473 else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE) 474 - startup_pa = virt_to_phys(omap5_secondary_hyp_startup); 474 + startup_pa = __pa_symbol(omap5_secondary_hyp_startup); 475 475 else 476 - startup_pa = virt_to_phys(omap5_secondary_startup); 476 + startup_pa = __pa_symbol(omap5_secondary_startup); 477 477 478 478 if (cpu_is_omap44xx()) 479 479 writel_relaxed(startup_pa, sar_base +
+2 -2
arch/arm/mach-omap2/omap-smp.c
··· 316 316 * A barrier is added to ensure that write buffer is drained 317 317 */ 318 318 if (omap_secure_apis_support()) 319 - omap_auxcoreboot_addr(virt_to_phys(cfg.startup_addr)); 319 + omap_auxcoreboot_addr(__pa_symbol(cfg.startup_addr)); 320 320 else 321 - writel_relaxed(virt_to_phys(cfg.startup_addr), 321 + writel_relaxed(__pa_symbol(cfg.startup_addr), 322 322 base + OMAP_AUX_CORE_BOOT_1); 323 323 } 324 324
+1 -1
arch/arm/mach-prima2/platsmp.c
··· 65 65 * waiting for. This would wake up the secondary core from WFE 66 66 */ 67 67 #define SIRFSOC_CPU1_JUMPADDR_OFFSET 0x2bc 68 - __raw_writel(virt_to_phys(sirfsoc_secondary_startup), 68 + __raw_writel(__pa_symbol(sirfsoc_secondary_startup), 69 69 clk_base + SIRFSOC_CPU1_JUMPADDR_OFFSET); 70 70 71 71 #define SIRFSOC_CPU1_WAKEMAGIC_OFFSET 0x2b8
+1 -1
arch/arm/mach-prima2/pm.c
··· 54 54 55 55 static int sirfsoc_pre_suspend_power_off(void) 56 56 { 57 - u32 wakeup_entry = virt_to_phys(cpu_resume); 57 + u32 wakeup_entry = __pa_symbol(cpu_resume); 58 58 59 59 sirfsoc_rtc_iobrg_writel(wakeup_entry, sirfsoc_pwrc_base + 60 60 SIRFSOC_PWRC_SCRATCH_PAD1);
+1 -1
arch/arm/mach-pxa/palmz72.c
··· 249 249 store_ptr = *PALMZ72_SAVE_DWORD; 250 250 251 251 /* Setting PSPR to a proper value */ 252 - PSPR = virt_to_phys(&palmz72_resume_info); 252 + PSPR = __pa_symbol(&palmz72_resume_info); 253 253 254 254 return 0; 255 255 }
+1 -1
arch/arm/mach-pxa/pxa25x.c
··· 85 85 static int pxa25x_cpu_pm_prepare(void) 86 86 { 87 87 /* set resume return address */ 88 - PSPR = virt_to_phys(cpu_resume); 88 + PSPR = __pa_symbol(cpu_resume); 89 89 return 0; 90 90 } 91 91
+1 -1
arch/arm/mach-pxa/pxa27x.c
··· 168 168 static int pxa27x_cpu_pm_prepare(void) 169 169 { 170 170 /* set resume return address */ 171 - PSPR = virt_to_phys(cpu_resume); 171 + PSPR = __pa_symbol(cpu_resume); 172 172 return 0; 173 173 } 174 174
+1 -1
arch/arm/mach-pxa/pxa3xx.c
··· 123 123 PSPR = 0x5c014000; 124 124 125 125 /* overwrite with the resume address */ 126 - *p = virt_to_phys(cpu_resume); 126 + *p = __pa_symbol(cpu_resume); 127 127 128 128 cpu_suspend(0, pxa3xx_finish_suspend); 129 129
+1 -1
arch/arm/mach-realview/platsmp-dt.c
··· 76 76 } 77 77 /* Put the boot address in this magic register */ 78 78 regmap_write(map, REALVIEW_SYS_FLAGSSET_OFFSET, 79 - virt_to_phys(versatile_secondary_startup)); 79 + __pa_symbol(versatile_secondary_startup)); 80 80 } 81 81 82 82 static const struct smp_operations realview_dt_smp_ops __initconst = {
+2 -2
arch/arm/mach-rockchip/platsmp.c
··· 156 156 */ 157 157 mdelay(1); /* ensure the cpus other than cpu0 to startup */ 158 158 159 - writel(virt_to_phys(secondary_startup), sram_base_addr + 8); 159 + writel(__pa_symbol(secondary_startup), sram_base_addr + 8); 160 160 writel(0xDEADBEAF, sram_base_addr + 4); 161 161 dsb_sev(); 162 162 } ··· 195 195 } 196 196 197 197 /* set the boot function for the sram code */ 198 - rockchip_boot_fn = virt_to_phys(secondary_startup); 198 + rockchip_boot_fn = __pa_symbol(secondary_startup); 199 199 200 200 /* copy the trampoline to sram, that runs during startup of the core */ 201 201 memcpy(sram_base_addr, &rockchip_secondary_trampoline, trampoline_sz);
+1 -1
arch/arm/mach-rockchip/pm.c
··· 62 62 static void rk3288_config_bootdata(void) 63 63 { 64 64 rkpm_bootdata_cpusp = rk3288_bootram_phy + (SZ_4K - 8); 65 - rkpm_bootdata_cpu_code = virt_to_phys(cpu_resume); 65 + rkpm_bootdata_cpu_code = __pa_symbol(cpu_resume); 66 66 67 67 rkpm_bootdata_l2ctlr_f = 1; 68 68 rkpm_bootdata_l2ctlr = rk3288_l2_config();
+1 -1
arch/arm/mach-s3c24xx/mach-jive.c
··· 484 484 * correct address to resume from. */ 485 485 486 486 __raw_writel(0x2BED, S3C2412_INFORM0); 487 - __raw_writel(virt_to_phys(s3c_cpu_resume), S3C2412_INFORM1); 487 + __raw_writel(__pa_symbol(s3c_cpu_resume), S3C2412_INFORM1); 488 488 489 489 return 0; 490 490 }
+1 -1
arch/arm/mach-s3c24xx/pm-s3c2410.c
··· 45 45 { 46 46 /* ensure at least GSTATUS3 has the resume address */ 47 47 48 - __raw_writel(virt_to_phys(s3c_cpu_resume), S3C2410_GSTATUS3); 48 + __raw_writel(__pa_symbol(s3c_cpu_resume), S3C2410_GSTATUS3); 49 49 50 50 S3C_PMDBG("GSTATUS3 0x%08x\n", __raw_readl(S3C2410_GSTATUS3)); 51 51 S3C_PMDBG("GSTATUS4 0x%08x\n", __raw_readl(S3C2410_GSTATUS4));
+1 -1
arch/arm/mach-s3c24xx/pm-s3c2416.c
··· 48 48 * correct address to resume from. 49 49 */ 50 50 __raw_writel(0x2BED, S3C2412_INFORM0); 51 - __raw_writel(virt_to_phys(s3c_cpu_resume), S3C2412_INFORM1); 51 + __raw_writel(__pa_symbol(s3c_cpu_resume), S3C2412_INFORM1); 52 52 } 53 53 54 54 static int s3c2416_pm_add(struct device *dev, struct subsys_interface *sif)
+1 -1
arch/arm/mach-s3c64xx/pm.c
··· 304 304 wake_irqs, ARRAY_SIZE(wake_irqs)); 305 305 306 306 /* store address of resume. */ 307 - __raw_writel(virt_to_phys(s3c_cpu_resume), S3C64XX_INFORM0); 307 + __raw_writel(__pa_symbol(s3c_cpu_resume), S3C64XX_INFORM0); 308 308 309 309 /* ensure previous wakeup state is cleared before sleeping */ 310 310 __raw_writel(__raw_readl(S3C64XX_WAKEUP_STAT), S3C64XX_WAKEUP_STAT);
+1 -1
arch/arm/mach-s5pv210/pm.c
··· 69 69 __raw_writel(s5pv210_irqwake_intmask, S5P_WAKEUP_MASK); 70 70 71 71 /* ensure at least INFORM0 has the resume address */ 72 - __raw_writel(virt_to_phys(s5pv210_cpu_resume), S5P_INFORM0); 72 + __raw_writel(__pa_symbol(s5pv210_cpu_resume), S5P_INFORM0); 73 73 74 74 tmp = __raw_readl(S5P_SLEEP_CFG); 75 75 tmp &= ~(S5P_SLEEP_CFG_OSC_EN | S5P_SLEEP_CFG_USBOSC_EN);
+1 -1
arch/arm/mach-sa1100/pm.c
··· 73 73 RCSR = RCSR_HWR | RCSR_SWR | RCSR_WDR | RCSR_SMR; 74 74 75 75 /* set resume return address */ 76 - PSPR = virt_to_phys(cpu_resume); 76 + PSPR = __pa_symbol(cpu_resume); 77 77 78 78 /* go zzz */ 79 79 cpu_suspend(0, sa1100_finish_suspend);
+3 -3
arch/arm/mach-shmobile/platsmp-apmu.c
··· 190 190 static void __init shmobile_smp_apmu_setup_boot(void) 191 191 { 192 192 /* install boot code shared by all CPUs */ 193 - shmobile_boot_fn = virt_to_phys(shmobile_smp_boot); 193 + shmobile_boot_fn = __pa_symbol(shmobile_smp_boot); 194 194 } 195 195 196 196 void __init shmobile_smp_apmu_prepare_cpus(unsigned int max_cpus, ··· 204 204 int shmobile_smp_apmu_boot_secondary(unsigned int cpu, struct task_struct *idle) 205 205 { 206 206 /* For this particular CPU register boot vector */ 207 - shmobile_smp_hook(cpu, virt_to_phys(secondary_startup), 0); 207 + shmobile_smp_hook(cpu, __pa_symbol(secondary_startup), 0); 208 208 209 209 return apmu_wrap(cpu, apmu_power_on); 210 210 } ··· 308 308 #if defined(CONFIG_SUSPEND) 309 309 static int shmobile_smp_apmu_do_suspend(unsigned long cpu) 310 310 { 311 - shmobile_smp_hook(cpu, virt_to_phys(cpu_resume), 0); 311 + shmobile_smp_hook(cpu, __pa_symbol(cpu_resume), 0); 312 312 shmobile_smp_apmu_cpu_shutdown(cpu); 313 313 cpu_do_idle(); /* WFI selects Core Standby */ 314 314 return 1;
+2 -2
arch/arm/mach-shmobile/platsmp-scu.c
··· 24 24 static int shmobile_scu_cpu_prepare(unsigned int cpu) 25 25 { 26 26 /* For this particular CPU register SCU SMP boot vector */ 27 - shmobile_smp_hook(cpu, virt_to_phys(shmobile_boot_scu), 27 + shmobile_smp_hook(cpu, __pa_symbol(shmobile_boot_scu), 28 28 shmobile_scu_base_phys); 29 29 return 0; 30 30 } ··· 33 33 unsigned int max_cpus) 34 34 { 35 35 /* install boot code shared by all CPUs */ 36 - shmobile_boot_fn = virt_to_phys(shmobile_smp_boot); 36 + shmobile_boot_fn = __pa_symbol(shmobile_smp_boot); 37 37 38 38 /* enable SCU and cache coherency on booting CPU */ 39 39 shmobile_scu_base_phys = scu_base_phys;
+2 -2
arch/arm/mach-socfpga/platsmp.c
··· 40 40 41 41 memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size); 42 42 43 - writel(virt_to_phys(secondary_startup), 43 + writel(__pa_symbol(secondary_startup), 44 44 sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff)); 45 45 46 46 flush_cache_all(); ··· 63 63 SOCFPGA_A10_RSTMGR_MODMPURST); 64 64 memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size); 65 65 66 - writel(virt_to_phys(secondary_startup), 66 + writel(__pa_symbol(secondary_startup), 67 67 sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff)); 68 68 69 69 flush_cache_all();
+1 -1
arch/arm/mach-spear/platsmp.c
··· 117 117 * (presently it is in SRAM). The BootMonitor waits until it receives a 118 118 * soft interrupt, and then the secondary CPU branches to this address. 119 119 */ 120 - __raw_writel(virt_to_phys(spear13xx_secondary_startup), SYS_LOCATION); 120 + __raw_writel(__pa_symbol(spear13xx_secondary_startup), SYS_LOCATION); 121 121 } 122 122 123 123 const struct smp_operations spear13xx_smp_ops __initconst = {
+1 -1
arch/arm/mach-sti/platsmp.c
··· 103 103 u32 __iomem *cpu_strt_ptr; 104 104 u32 release_phys; 105 105 int cpu; 106 - unsigned long entry_pa = virt_to_phys(sti_secondary_startup); 106 + unsigned long entry_pa = __pa_symbol(sti_secondary_startup); 107 107 108 108 np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu"); 109 109
+2 -2
arch/arm/mach-sunxi/platsmp.c
··· 80 80 spin_lock(&cpu_lock); 81 81 82 82 /* Set CPU boot address */ 83 - writel(virt_to_phys(secondary_startup), 83 + writel(__pa_symbol(secondary_startup), 84 84 cpucfg_membase + CPUCFG_PRIVATE0_REG); 85 85 86 86 /* Assert the CPU core in reset */ ··· 162 162 spin_lock(&cpu_lock); 163 163 164 164 /* Set CPU boot address */ 165 - writel(virt_to_phys(secondary_startup), 165 + writel(__pa_symbol(secondary_startup), 166 166 cpucfg_membase + CPUCFG_PRIVATE0_REG); 167 167 168 168 /* Assert the CPU core in reset */
+1 -1
arch/arm/mach-tango/platsmp.c
··· 5 5 6 6 static int tango_boot_secondary(unsigned int cpu, struct task_struct *idle) 7 7 { 8 - tango_set_aux_boot_addr(virt_to_phys(secondary_startup)); 8 + tango_set_aux_boot_addr(__pa_symbol(secondary_startup)); 9 9 tango_start_aux_core(cpu); 10 10 return 0; 11 11 }
+1 -1
arch/arm/mach-tango/pm.c
··· 5 5 6 6 static int tango_pm_powerdown(unsigned long arg) 7 7 { 8 - tango_suspend(virt_to_phys(cpu_resume)); 8 + tango_suspend(__pa_symbol(cpu_resume)); 9 9 10 10 return -EIO; /* tango_suspend has failed */ 11 11 }
+2 -2
arch/arm/mach-tegra/reset.c
··· 94 94 __tegra_cpu_reset_handler_data[TEGRA_RESET_MASK_PRESENT] = 95 95 *((u32 *)cpu_possible_mask); 96 96 __tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_SECONDARY] = 97 - virt_to_phys((void *)secondary_startup); 97 + __pa_symbol((void *)secondary_startup); 98 98 #endif 99 99 100 100 #ifdef CONFIG_PM_SLEEP 101 101 __tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_LP1] = 102 102 TEGRA_IRAM_LPx_RESUME_AREA; 103 103 __tegra_cpu_reset_handler_data[TEGRA_RESET_STARTUP_LP2] = 104 - virt_to_phys((void *)tegra_resume); 104 + __pa_symbol((void *)tegra_resume); 105 105 #endif 106 106 107 107 tegra_cpu_reset_handler_enable();
+1 -1
arch/arm/mach-ux500/platsmp.c
··· 79 79 * backup ram register at offset 0x1FF0, which is what boot rom code 80 80 * is waiting for. This will wake up the secondary core from WFE. 81 81 */ 82 - writel(virt_to_phys(secondary_startup), 82 + writel(__pa_symbol(secondary_startup), 83 83 backupram + UX500_CPU1_JUMPADDR_OFFSET); 84 84 writel(0xA1FEED01, 85 85 backupram + UX500_CPU1_WAKEMAGIC_OFFSET);
+1 -1
arch/arm/mach-vexpress/dcscb.c
··· 166 166 * Future entries into the kernel can now go 167 167 * through the cluster entry vectors. 168 168 */ 169 - vexpress_flags_set(virt_to_phys(mcpm_entry_point)); 169 + vexpress_flags_set(__pa_symbol(mcpm_entry_point)); 170 170 171 171 return 0; 172 172 }
+1 -1
arch/arm/mach-vexpress/platsmp.c
··· 79 79 * until it receives a soft interrupt, and then the 80 80 * secondary CPU branches to this address. 81 81 */ 82 - vexpress_flags_set(virt_to_phys(versatile_secondary_startup)); 82 + vexpress_flags_set(__pa_symbol(versatile_secondary_startup)); 83 83 } 84 84 85 85 const struct smp_operations vexpress_smp_dt_ops __initconst = {
+2 -2
arch/arm/mach-vexpress/tc2_pm.c
··· 54 54 if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) 55 55 return -EINVAL; 56 56 ve_spc_set_resume_addr(cluster, cpu, 57 - virt_to_phys(mcpm_entry_point)); 57 + __pa_symbol(mcpm_entry_point)); 58 58 ve_spc_cpu_wakeup_irq(cluster, cpu, true); 59 59 return 0; 60 60 } ··· 159 159 160 160 static void tc2_pm_cpu_suspend_prepare(unsigned int cpu, unsigned int cluster) 161 161 { 162 - ve_spc_set_resume_addr(cluster, cpu, virt_to_phys(mcpm_entry_point)); 162 + ve_spc_set_resume_addr(cluster, cpu, __pa_symbol(mcpm_entry_point)); 163 163 } 164 164 165 165 static void tc2_pm_cpu_is_up(unsigned int cpu, unsigned int cluster)
+2 -2
arch/arm/mach-zx/platsmp.c
··· 76 76 * until it receives a soft interrupt, and then the 77 77 * secondary CPU branches to this address. 78 78 */ 79 - __raw_writel(virt_to_phys(zx_secondary_startup), 79 + __raw_writel(__pa_symbol(zx_secondary_startup), 80 80 aonsysctrl_base + AON_SYS_CTRL_RESERVED1); 81 81 82 82 iounmap(aonsysctrl_base); ··· 94 94 95 95 /* Map the first 4 KB IRAM for suspend usage */ 96 96 sys_iram = __arm_ioremap_exec(ZX_IRAM_BASE, PAGE_SIZE, false); 97 - zx_secondary_startup_pa = virt_to_phys(zx_secondary_startup); 97 + zx_secondary_startup_pa = __pa_symbol(zx_secondary_startup); 98 98 fncpy(sys_iram, &zx_resume_jump, zx_suspend_iram_sz); 99 99 } 100 100
+1 -1
arch/arm/mach-zynq/platsmp.c
··· 89 89 90 90 static int zynq_boot_secondary(unsigned int cpu, struct task_struct *idle) 91 91 { 92 - return zynq_cpun_start(virt_to_phys(secondary_startup), cpu); 92 + return zynq_cpun_start(__pa_symbol(secondary_startup), cpu); 93 93 } 94 94 95 95 /*
+26 -5
arch/arm/mm/Kconfig
··· 29 29 select CPU_COPY_V4WT if MMU 30 30 select CPU_CP15_MMU 31 31 select CPU_PABRT_LEGACY 32 + select CPU_THUMB_CAPABLE 32 33 select CPU_TLB_V4WT if MMU 33 34 help 34 35 A 32-bit RISC processor with 8kByte Cache, Write Buffer and ··· 47 46 select CPU_CACHE_V4 48 47 select CPU_CP15_MPU 49 48 select CPU_PABRT_LEGACY 49 + select CPU_THUMB_CAPABLE 50 50 help 51 51 A 32-bit RISC processor with 8KB cache or 4KB variants, 52 52 write buffer and MPU(Protection Unit) built around ··· 81 79 select CPU_COPY_V4WB if MMU 82 80 select CPU_CP15_MMU 83 81 select CPU_PABRT_LEGACY 82 + select CPU_THUMB_CAPABLE 84 83 select CPU_TLB_V4WBI if MMU 85 84 help 86 85 The ARM920T is licensed to be produced by numerous vendors, ··· 100 97 select CPU_COPY_V4WB if MMU 101 98 select CPU_CP15_MMU 102 99 select CPU_PABRT_LEGACY 100 + select CPU_THUMB_CAPABLE 103 101 select CPU_TLB_V4WBI if MMU 104 102 help 105 103 The ARM922T is a version of the ARM920T, but with smaller ··· 120 116 select CPU_COPY_V4WB if MMU 121 117 select CPU_CP15_MMU 122 118 select CPU_PABRT_LEGACY 119 + select CPU_THUMB_CAPABLE 123 120 select CPU_TLB_V4WBI if MMU 124 121 help 125 122 The ARM925T is a mix between the ARM920T and ARM926T, but with ··· 139 134 select CPU_COPY_V4WB if MMU 140 135 select CPU_CP15_MMU 141 136 select CPU_PABRT_LEGACY 137 + select CPU_THUMB_CAPABLE 142 138 select CPU_TLB_V4WBI if MMU 143 139 help 144 140 This is a variant of the ARM920. It has slightly different ··· 176 170 select CPU_CACHE_VIVT 177 171 select CPU_CP15_MPU 178 172 select CPU_PABRT_LEGACY 173 + select CPU_THUMB_CAPABLE 179 174 help 180 175 ARM940T is a member of the ARM9TDMI family of general- 181 176 purpose microprocessors with MPU and separate 4KB ··· 195 188 select CPU_CACHE_VIVT 196 189 select CPU_CP15_MPU 197 190 select CPU_PABRT_LEGACY 191 + select CPU_THUMB_CAPABLE 198 192 help 199 193 ARM946E-S is a member of the ARM9E-S family of high- 200 194 performance, 32-bit system-on-chip processor solutions. 
··· 214 206 select CPU_COPY_V4WB if MMU 215 207 select CPU_CP15_MMU 216 208 select CPU_PABRT_LEGACY 209 + select CPU_THUMB_CAPABLE 217 210 select CPU_TLB_V4WBI if MMU 218 211 help 219 212 The ARM1020 is the 32K cached version of the ARM10 processor, ··· 234 225 select CPU_COPY_V4WB if MMU 235 226 select CPU_CP15_MMU 236 227 select CPU_PABRT_LEGACY 228 + select CPU_THUMB_CAPABLE 237 229 select CPU_TLB_V4WBI if MMU 238 230 239 231 # ARM1022E ··· 246 236 select CPU_COPY_V4WB if MMU # can probably do better 247 237 select CPU_CP15_MMU 248 238 select CPU_PABRT_LEGACY 239 + select CPU_THUMB_CAPABLE 249 240 select CPU_TLB_V4WBI if MMU 250 241 help 251 242 The ARM1022E is an implementation of the ARMv5TE architecture ··· 265 254 select CPU_COPY_V4WB if MMU # can probably do better 266 255 select CPU_CP15_MMU 267 256 select CPU_PABRT_LEGACY 257 + select CPU_THUMB_CAPABLE 268 258 select CPU_TLB_V4WBI if MMU 269 259 help 270 260 The ARM1026EJ-S is an implementation of the ARMv5TEJ architecture ··· 314 302 select CPU_CACHE_VIVT 315 303 select CPU_CP15_MMU 316 304 select CPU_PABRT_LEGACY 305 + select CPU_THUMB_CAPABLE 317 306 select CPU_TLB_V4WBI if MMU 318 307 319 308 # XScale Core Version 3 ··· 325 312 select CPU_CACHE_VIVT 326 313 select CPU_CP15_MMU 327 314 select CPU_PABRT_LEGACY 315 + select CPU_THUMB_CAPABLE 328 316 select CPU_TLB_V4WBI if MMU 329 317 select IO_36 330 318 ··· 338 324 select CPU_COPY_V4WB if MMU 339 325 select CPU_CP15_MMU 340 326 select CPU_PABRT_LEGACY 327 + select CPU_THUMB_CAPABLE 341 328 select CPU_TLB_V4WBI if MMU 342 329 343 330 # Feroceon ··· 350 335 select CPU_COPY_FEROCEON if MMU 351 336 select CPU_CP15_MMU 352 337 select CPU_PABRT_LEGACY 338 + select CPU_THUMB_CAPABLE 353 339 select CPU_TLB_FEROCEON if MMU 354 340 355 341 config CPU_FEROCEON_OLD_ID ··· 383 367 select CPU_CP15_MMU 384 368 select CPU_HAS_ASID if MMU 385 369 select CPU_PABRT_V6 370 + select CPU_THUMB_CAPABLE 386 371 select CPU_TLB_V6 if MMU 387 372 388 373 # ARMv6k ··· 398 381 
select CPU_CP15_MMU 399 382 select CPU_HAS_ASID if MMU 400 383 select CPU_PABRT_V6 384 + select CPU_THUMB_CAPABLE 401 385 select CPU_TLB_V6 if MMU 402 386 403 387 # ARMv7 ··· 414 396 select CPU_CP15_MPU if !MMU 415 397 select CPU_HAS_ASID if MMU 416 398 select CPU_PABRT_V7 399 + select CPU_THUMB_CAPABLE 417 400 select CPU_TLB_V7 if MMU 418 401 419 402 # ARMv7M ··· 429 410 430 411 config CPU_THUMBONLY 431 412 bool 413 + select CPU_THUMB_CAPABLE 432 414 # There are no CPUs available with MMU that don't implement an ARM ISA: 433 415 depends on !MMU 434 416 help 435 417 Select this if your CPU doesn't support the 32 bit ARM instructions. 418 + 419 + config CPU_THUMB_CAPABLE 420 + bool 421 + help 422 + Select this if your CPU can support Thumb mode. 436 423 437 424 # Figure out what processor architecture version we should be using. 438 425 # This defines the compiler instruction set which depends on the machine type. ··· 680 655 681 656 config ARM_THUMB 682 657 bool "Support Thumb user binaries" if !CPU_THUMBONLY 683 - depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || \ 684 - CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || \ 685 - CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || \ 686 - CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || \ 687 - CPU_V7 || CPU_FEROCEON || CPU_V7M 658 + depends on CPU_THUMB_CAPABLE 688 659 default y 689 660 help 690 661 Say Y if you want to include kernel support for running user space
+1
arch/arm/mm/Makefile
··· 14 14 15 15 obj-$(CONFIG_ARM_PTDUMP) += dump.o 16 16 obj-$(CONFIG_MODULES) += proc-syms.o 17 + obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o 17 18 18 19 obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o 19 20 obj-$(CONFIG_HIGHMEM) += highmem.o
+9 -14
arch/arm/mm/cache-uniphier.c
··· 15 15 16 16 #define pr_fmt(fmt) "uniphier: " fmt 17 17 18 + #include <linux/bitops.h> 18 19 #include <linux/init.h> 19 20 #include <linux/io.h> 20 21 #include <linux/log2.h> ··· 72 71 * @ctrl_base: virtual base address of control registers 73 72 * @rev_base: virtual base address of revision registers 74 73 * @op_base: virtual base address of operation registers 75 - * @way_present_mask: each bit specifies if the way is present 76 - * @way_locked_mask: each bit specifies if the way is locked 74 + * @way_mask: each bit specifies if the way is present 77 75 * @nsets: number of associativity sets 78 76 * @line_size: line size in bytes 79 77 * @range_op_max_size: max size that can be handled by a single range operation ··· 83 83 void __iomem *rev_base; 84 84 void __iomem *op_base; 85 85 void __iomem *way_ctrl_base; 86 - u32 way_present_mask; 87 - u32 way_locked_mask; 86 + u32 way_mask; 88 87 u32 nsets; 89 88 u32 line_size; 90 89 u32 range_op_max_size; ··· 233 234 writel_relaxed(val, data->ctrl_base + UNIPHIER_SSCC); 234 235 } 235 236 236 - static void __init __uniphier_cache_set_locked_ways( 237 - struct uniphier_cache_data *data, 238 - u32 way_mask) 237 + static void __init __uniphier_cache_set_active_ways( 238 + struct uniphier_cache_data *data) 239 239 { 240 240 unsigned int cpu; 241 241 242 - data->way_locked_mask = way_mask & data->way_present_mask; 243 - 244 242 for_each_possible_cpu(cpu) 245 - writel_relaxed(~data->way_locked_mask & data->way_present_mask, 246 - data->way_ctrl_base + 4 * cpu); 243 + writel_relaxed(data->way_mask, data->way_ctrl_base + 4 * cpu); 247 244 } 248 245 249 246 static void uniphier_cache_maint_range(unsigned long start, unsigned long end, ··· 302 307 303 308 list_for_each_entry(data, &uniphier_cache_list, list) { 304 309 __uniphier_cache_enable(data, true); 305 - __uniphier_cache_set_locked_ways(data, 0); 310 + __uniphier_cache_set_active_ways(data); 306 311 } 307 312 } 308 313 ··· 377 382 goto err; 378 383 } 379 384 380 - 
data->way_present_mask = 381 - ((u32)1 << cache_size / data->nsets / data->line_size) - 1; 385 + data->way_mask = GENMASK(cache_size / data->nsets / data->line_size - 1, 386 + 0); 382 387 383 388 data->ctrl_base = of_iomap(np, 0); 384 389 if (!data->ctrl_base) {
+3
arch/arm/mm/dma-mapping.c
··· 870 870 vma->vm_end - vma->vm_start, 871 871 vma->vm_page_prot); 872 872 } 873 + #else 874 + ret = vm_iomap_memory(vma, vma->vm_start, 875 + (vma->vm_end - vma->vm_start)); 873 876 #endif /* CONFIG_MMU */ 874 877 875 878 return ret;
+3 -2
arch/arm/mm/dump.c
··· 18 18 #include <linux/seq_file.h> 19 19 20 20 #include <asm/fixmap.h> 21 + #include <asm/memory.h> 21 22 #include <asm/pgtable.h> 22 23 23 24 struct addr_marker { ··· 32 31 { 0, "vmalloc() Area" }, 33 32 { VMALLOC_END, "vmalloc() End" }, 34 33 { FIXADDR_START, "Fixmap Area" }, 35 - { CONFIG_VECTORS_BASE, "Vectors" }, 36 - { CONFIG_VECTORS_BASE + PAGE_SIZE * 2, "Vectors End" }, 34 + { VECTORS_BASE, "Vectors" }, 35 + { VECTORS_BASE + PAGE_SIZE * 2, "Vectors End" }, 37 36 { -1, NULL }, 38 37 }; 39 38
+6
arch/arm/mm/flush.c
··· 327 327 if (page == ZERO_PAGE(0)) 328 328 return; 329 329 330 + if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) { 331 + if (test_bit(PG_dcache_clean, &page->flags)) 332 + clear_bit(PG_dcache_clean, &page->flags); 333 + return; 334 + } 335 + 330 336 mapping = page_mapping(page); 331 337 332 338 if (!cache_ops_need_broadcast() &&
+45 -27
arch/arm/mm/init.c
··· 27 27 #include <asm/cp15.h> 28 28 #include <asm/mach-types.h> 29 29 #include <asm/memblock.h> 30 + #include <asm/memory.h> 30 31 #include <asm/prom.h> 31 32 #include <asm/sections.h> 32 33 #include <asm/setup.h> ··· 228 227 return phys; 229 228 } 230 229 231 - void __init arm_memblock_init(const struct machine_desc *mdesc) 230 + static void __init arm_initrd_init(void) 232 231 { 233 - /* Register the kernel text, kernel data and initrd with memblock. */ 234 - #ifdef CONFIG_XIP_KERNEL 235 - memblock_reserve(__pa(_sdata), _end - _sdata); 236 - #else 237 - memblock_reserve(__pa(_stext), _end - _stext); 238 - #endif 239 232 #ifdef CONFIG_BLK_DEV_INITRD 233 + phys_addr_t start; 234 + unsigned long size; 235 + 240 236 /* FDT scan will populate initrd_start */ 241 237 if (initrd_start && !phys_initrd_size) { 242 238 phys_initrd_start = __virt_to_phys(initrd_start); 243 239 phys_initrd_size = initrd_end - initrd_start; 244 240 } 245 - initrd_start = initrd_end = 0; 246 - if (phys_initrd_size && 247 - !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) { 248 - pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n", 249 - (u64)phys_initrd_start, phys_initrd_size); 250 - phys_initrd_start = phys_initrd_size = 0; 251 - } 252 - if (phys_initrd_size && 253 - memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) { 254 - pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n", 255 - (u64)phys_initrd_start, phys_initrd_size); 256 - phys_initrd_start = phys_initrd_size = 0; 257 - } 258 - if (phys_initrd_size) { 259 - memblock_reserve(phys_initrd_start, phys_initrd_size); 260 241 261 - /* Now convert initrd to virtual addresses */ 262 - initrd_start = __phys_to_virt(phys_initrd_start); 263 - initrd_end = initrd_start + phys_initrd_size; 242 + initrd_start = initrd_end = 0; 243 + 244 + if (!phys_initrd_size) 245 + return; 246 + 247 + /* 248 + * Round the memory region to page boundaries as per 
free_initrd_mem() 249 + * This allows us to detect whether the pages overlapping the initrd 250 + * are in use, but more importantly, reserves the entire set of pages 251 + * as we don't want these pages allocated for other purposes. 252 + */ 253 + start = round_down(phys_initrd_start, PAGE_SIZE); 254 + size = phys_initrd_size + (phys_initrd_start - start); 255 + size = round_up(size, PAGE_SIZE); 256 + 257 + if (!memblock_is_region_memory(start, size)) { 258 + pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n", 259 + (u64)start, size); 260 + return; 264 261 } 262 + 263 + if (memblock_is_region_reserved(start, size)) { 264 + pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n", 265 + (u64)start, size); 266 + return; 267 + } 268 + 269 + memblock_reserve(start, size); 270 + 271 + /* Now convert initrd to virtual addresses */ 272 + initrd_start = __phys_to_virt(phys_initrd_start); 273 + initrd_end = initrd_start + phys_initrd_size; 265 274 #endif 275 + } 276 + 277 + void __init arm_memblock_init(const struct machine_desc *mdesc) 278 + { 279 + /* Register the kernel text, kernel data and initrd with memblock. */ 280 + memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START); 281 + 282 + arm_initrd_init(); 266 283 267 284 arm_mm_memblock_reserve(); 268 285 ··· 540 521 " .data : 0x%p" " - 0x%p" " (%4td kB)\n" 541 522 " .bss : 0x%p" " - 0x%p" " (%4td kB)\n", 542 523 543 - MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + 544 - (PAGE_SIZE)), 524 + MLK(VECTORS_BASE, VECTORS_BASE + PAGE_SIZE), 545 525 #ifdef CONFIG_HAVE_TCM 546 526 MLK(DTCM_OFFSET, (unsigned long) dtcm_end), 547 527 MLK(ITCM_OFFSET, (unsigned long) itcm_end),
+28 -47
arch/arm/mm/mmu.c
··· 1152 1152 1153 1153 phys_addr_t arm_lowmem_limit __initdata = 0; 1154 1154 1155 - void __init sanity_check_meminfo(void) 1155 + void __init adjust_lowmem_bounds(void) 1156 1156 { 1157 1157 phys_addr_t memblock_limit = 0; 1158 - int highmem = 0; 1159 1158 u64 vmalloc_limit; 1160 1159 struct memblock_region *reg; 1161 - bool should_use_highmem = false; 1160 + phys_addr_t lowmem_limit = 0; 1162 1161 1163 1162 /* 1164 1163 * Let's use our own (unoptimized) equivalent of __pa() that is ··· 1171 1172 for_each_memblock(memory, reg) { 1172 1173 phys_addr_t block_start = reg->base; 1173 1174 phys_addr_t block_end = reg->base + reg->size; 1174 - phys_addr_t size_limit = reg->size; 1175 1175 1176 - if (reg->base >= vmalloc_limit) 1177 - highmem = 1; 1178 - else 1179 - size_limit = vmalloc_limit - reg->base; 1180 - 1181 - 1182 - if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) { 1183 - 1184 - if (highmem) { 1185 - pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n", 1186 - &block_start, &block_end); 1187 - memblock_remove(reg->base, reg->size); 1188 - should_use_highmem = true; 1189 - continue; 1190 - } 1191 - 1192 - if (reg->size > size_limit) { 1193 - phys_addr_t overlap_size = reg->size - size_limit; 1194 - 1195 - pr_notice("Truncating RAM at %pa-%pa", 1196 - &block_start, &block_end); 1197 - block_end = vmalloc_limit; 1198 - pr_cont(" to -%pa", &block_end); 1199 - memblock_remove(vmalloc_limit, overlap_size); 1200 - should_use_highmem = true; 1201 - } 1202 - } 1203 - 1204 - if (!highmem) { 1205 - if (block_end > arm_lowmem_limit) { 1206 - if (reg->size > size_limit) 1207 - arm_lowmem_limit = vmalloc_limit; 1208 - else 1209 - arm_lowmem_limit = block_end; 1210 - } 1176 + if (reg->base < vmalloc_limit) { 1177 + if (block_end > lowmem_limit) 1178 + /* 1179 + * Compare as u64 to ensure vmalloc_limit does 1180 + * not get truncated. block_end should always 1181 + * fit in phys_addr_t so there should be no 1182 + * issue with assignment. 
1183 + */ 1184 + lowmem_limit = min_t(u64, 1185 + vmalloc_limit, 1186 + block_end); 1211 1187 1212 1188 /* 1213 1189 * Find the first non-pmd-aligned page, and point ··· 1201 1227 if (!IS_ALIGNED(block_start, PMD_SIZE)) 1202 1228 memblock_limit = block_start; 1203 1229 else if (!IS_ALIGNED(block_end, PMD_SIZE)) 1204 - memblock_limit = arm_lowmem_limit; 1230 + memblock_limit = lowmem_limit; 1205 1231 } 1206 1232 1207 1233 } 1208 1234 } 1209 1235 1210 - if (should_use_highmem) 1211 - pr_notice("Consider using a HIGHMEM enabled kernel.\n"); 1236 + arm_lowmem_limit = lowmem_limit; 1212 1237 1213 1238 high_memory = __va(arm_lowmem_limit - 1) + 1; 1214 1239 ··· 1220 1247 memblock_limit = round_down(memblock_limit, PMD_SIZE); 1221 1248 if (!memblock_limit) 1222 1249 memblock_limit = arm_lowmem_limit; 1250 + 1251 + if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) { 1252 + if (memblock_end_of_DRAM() > arm_lowmem_limit) { 1253 + phys_addr_t end = memblock_end_of_DRAM(); 1254 + 1255 + pr_notice("Ignoring RAM at %pa-%pa\n", 1256 + &memblock_limit, &end); 1257 + pr_notice("Consider using a HIGHMEM enabled kernel.\n"); 1258 + 1259 + memblock_remove(memblock_limit, end - memblock_limit); 1260 + } 1261 + } 1223 1262 1224 1263 memblock_set_current_limit(memblock_limit); 1225 1264 } ··· 1422 1437 static void __init map_lowmem(void) 1423 1438 { 1424 1439 struct memblock_region *reg; 1425 - #ifdef CONFIG_XIP_KERNEL 1426 - phys_addr_t kernel_x_start = round_down(__pa(_sdata), SECTION_SIZE); 1427 - #else 1428 - phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE); 1429 - #endif 1440 + phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE); 1430 1441 phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); 1431 1442 1432 1443 /* Map all the lowmem memory banks. */
+54 -6
arch/arm/mm/nommu.c
··· 11 11 #include <linux/kernel.h> 12 12 13 13 #include <asm/cacheflush.h> 14 + #include <asm/cp15.h> 14 15 #include <asm/sections.h> 15 16 #include <asm/page.h> 16 17 #include <asm/setup.h> ··· 22 21 #include <asm/procinfo.h> 23 22 24 23 #include "mm.h" 24 + 25 + unsigned long vectors_base; 25 26 26 27 #ifdef CONFIG_ARM_MPU 27 28 struct mpu_rgn_info mpu_rgn_info; ··· 88 85 } 89 86 90 87 /* MPU initialisation functions */ 91 - void __init sanity_check_meminfo_mpu(void) 88 + void __init adjust_lowmem_bounds_mpu(void) 92 89 { 93 90 phys_addr_t phys_offset = PHYS_OFFSET; 94 91 phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size; ··· 277 274 } 278 275 } 279 276 #else 280 - static void sanity_check_meminfo_mpu(void) {} 277 + static void adjust_lowmem_bounds_mpu(void) {} 281 278 static void __init mpu_setup(void) {} 282 279 #endif /* CONFIG_ARM_MPU */ 280 + 281 + #ifdef CONFIG_CPU_CP15 282 + #ifdef CONFIG_CPU_HIGH_VECTOR 283 + static unsigned long __init setup_vectors_base(void) 284 + { 285 + unsigned long reg = get_cr(); 286 + 287 + set_cr(reg | CR_V); 288 + return 0xffff0000; 289 + } 290 + #else /* CONFIG_CPU_HIGH_VECTOR */ 291 + /* Write exception base address to VBAR */ 292 + static inline void set_vbar(unsigned long val) 293 + { 294 + asm("mcr p15, 0, %0, c12, c0, 0" : : "r" (val) : "cc"); 295 + } 296 + 297 + /* 298 + * Security extensions, bits[7:4], permitted values, 299 + * 0b0000 - not implemented, 0b0001/0b0010 - implemented 300 + */ 301 + static inline bool security_extensions_enabled(void) 302 + { 303 + return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4); 304 + } 305 + 306 + static unsigned long __init setup_vectors_base(void) 307 + { 308 + unsigned long base = 0, reg = get_cr(); 309 + 310 + set_cr(reg & ~CR_V); 311 + if (security_extensions_enabled()) { 312 + if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM)) 313 + base = CONFIG_DRAM_BASE; 314 + set_vbar(base); 315 + } else if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM)) { 316 + if (CONFIG_DRAM_BASE 
!= 0) 317 + pr_err("Security extensions not enabled, vectors cannot be remapped to RAM, vectors base will be 0x00000000\n"); 318 + } 319 + 320 + return base; 321 + } 322 + #endif /* CONFIG_CPU_HIGH_VECTOR */ 323 + #endif /* CONFIG_CPU_CP15 */ 283 324 284 325 void __init arm_mm_memblock_reserve(void) 285 326 { 286 327 #ifndef CONFIG_CPU_V7M 328 + vectors_base = IS_ENABLED(CONFIG_CPU_CP15) ? setup_vectors_base() : 0; 287 329 /* 288 330 * Register the exception vector page. 289 331 * some architectures which the DRAM is the exception vector to trap, 290 332 * alloc_page breaks with error, although it is not NULL, but "0." 291 333 */ 292 - memblock_reserve(CONFIG_VECTORS_BASE, 2 * PAGE_SIZE); 334 + memblock_reserve(vectors_base, 2 * PAGE_SIZE); 293 335 #else /* ifndef CONFIG_CPU_V7M */ 294 336 /* 295 337 * There is no dedicated vector page on V7-M. So nothing needs to be ··· 343 295 #endif 344 296 } 345 297 346 - void __init sanity_check_meminfo(void) 298 + void __init adjust_lowmem_bounds(void) 347 299 { 348 300 phys_addr_t end; 349 - sanity_check_meminfo_mpu(); 301 + adjust_lowmem_bounds_mpu(); 350 302 end = memblock_end_of_DRAM(); 351 303 high_memory = __va(end - 1) + 1; 352 304 memblock_set_current_limit(end); ··· 358 310 */ 359 311 void __init paging_init(const struct machine_desc *mdesc) 360 312 { 361 - early_trap_init((void *)CONFIG_VECTORS_BASE); 313 + early_trap_init((void *)vectors_base); 362 314 mpu_setup(); 363 315 bootmem_init(); 364 316 }
+57
arch/arm/mm/physaddr.c
··· 1 + #include <linux/bug.h> 2 + #include <linux/export.h> 3 + #include <linux/types.h> 4 + #include <linux/mmdebug.h> 5 + #include <linux/mm.h> 6 + 7 + #include <asm/sections.h> 8 + #include <asm/memory.h> 9 + #include <asm/fixmap.h> 10 + #include <asm/dma.h> 11 + 12 + #include "mm.h" 13 + 14 + static inline bool __virt_addr_valid(unsigned long x) 15 + { 16 + /* 17 + * high_memory does not get immediately defined, and there 18 + * are early callers of __pa() against PAGE_OFFSET 19 + */ 20 + if (!high_memory && x >= PAGE_OFFSET) 21 + return true; 22 + 23 + if (high_memory && x >= PAGE_OFFSET && x < (unsigned long)high_memory) 24 + return true; 25 + 26 + /* 27 + * MAX_DMA_ADDRESS is a virtual address that may not correspond to an 28 + * actual physical address. Enough code relies on __pa(MAX_DMA_ADDRESS) 29 + * that we just need to work around it and always return true. 30 + */ 31 + if (x == MAX_DMA_ADDRESS) 32 + return true; 33 + 34 + return false; 35 + } 36 + 37 + phys_addr_t __virt_to_phys(unsigned long x) 38 + { 39 + WARN(!__virt_addr_valid(x), 40 + "virt_to_phys used for non-linear address: %pK (%pS)\n", 41 + (void *)x, (void *)x); 42 + 43 + return __virt_to_phys_nodebug(x); 44 + } 45 + EXPORT_SYMBOL(__virt_to_phys); 46 + 47 + phys_addr_t __phys_addr_symbol(unsigned long x) 48 + { 49 + /* This is bounds checking against the kernel image only. 50 + * __pa_symbol should only be used on kernel symbol addresses. 51 + */ 52 + VIRTUAL_BUG_ON(x < (unsigned long)KERNEL_START || 53 + x > (unsigned long)KERNEL_END); 54 + 55 + return __pa_symbol_nodebug(x); 56 + } 57 + EXPORT_SYMBOL(__phys_addr_symbol);
+12 -12
drivers/mtd/devices/lart.c
··· 75 75 76 76 /* blob */ 77 77 #define NUM_BLOB_BLOCKS FLASH_NUMBLOCKS_16m_PARAM 78 - #define BLOB_START 0x00000000 79 - #define BLOB_LEN (NUM_BLOB_BLOCKS * FLASH_BLOCKSIZE_PARAM) 78 + #define PART_BLOB_START 0x00000000 79 + #define PART_BLOB_LEN (NUM_BLOB_BLOCKS * FLASH_BLOCKSIZE_PARAM) 80 80 81 81 /* kernel */ 82 82 #define NUM_KERNEL_BLOCKS 7 83 - #define KERNEL_START (BLOB_START + BLOB_LEN) 84 - #define KERNEL_LEN (NUM_KERNEL_BLOCKS * FLASH_BLOCKSIZE_MAIN) 83 + #define PART_KERNEL_START (PART_BLOB_START + PART_BLOB_LEN) 84 + #define PART_KERNEL_LEN (NUM_KERNEL_BLOCKS * FLASH_BLOCKSIZE_MAIN) 85 85 86 86 /* initial ramdisk */ 87 87 #define NUM_INITRD_BLOCKS 24 88 - #define INITRD_START (KERNEL_START + KERNEL_LEN) 89 - #define INITRD_LEN (NUM_INITRD_BLOCKS * FLASH_BLOCKSIZE_MAIN) 88 + #define PART_INITRD_START (PART_KERNEL_START + PART_KERNEL_LEN) 89 + #define PART_INITRD_LEN (NUM_INITRD_BLOCKS * FLASH_BLOCKSIZE_MAIN) 90 90 91 91 /* 92 92 * See section 4.0 in "3 Volt Fast Boot Block Flash Memory" Intel Datasheet ··· 587 587 /* blob */ 588 588 { 589 589 .name = "blob", 590 - .offset = BLOB_START, 591 - .size = BLOB_LEN, 590 + .offset = PART_BLOB_START, 591 + .size = PART_BLOB_LEN, 592 592 }, 593 593 /* kernel */ 594 594 { 595 595 .name = "kernel", 596 - .offset = KERNEL_START, /* MTDPART_OFS_APPEND */ 597 - .size = KERNEL_LEN, 596 + .offset = PART_KERNEL_START, /* MTDPART_OFS_APPEND */ 597 + .size = PART_KERNEL_LEN, 598 598 }, 599 599 /* initial ramdisk / file system */ 600 600 { 601 601 .name = "file system", 602 - .offset = INITRD_START, /* MTDPART_OFS_APPEND */ 603 - .size = INITRD_LEN, /* MTDPART_SIZ_FULL */ 602 + .offset = PART_INITRD_START, /* MTDPART_OFS_APPEND */ 603 + .size = PART_INITRD_LEN, /* MTDPART_SIZ_FULL */ 604 604 } 605 605 }; 606 606 #define NUM_PARTITIONS ARRAY_SIZE(lart_partitions)