Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:

- add support for ELF fdpic binaries on both MMU and noMMU platforms

- linker script cleanups

- support for compressed .data section for XIP images

- discard memblock arrays when possible

- various cleanups

- atomic DMA pool updates

- better diagnostics of missing/corrupt device tree

- export information to allow userspace kexec tool to place images more
intelligently, so that the device tree isn't overwritten by the
booting kernel

- make early_printk more efficient on semihosted systems

- noMMU cleanups

- SA1111 PCMCIA update in preparation for further cleanups

* 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm: (38 commits)
ARM: 8719/1: NOMMU: work around maybe-uninitialized warning
ARM: 8717/2: debug printch/printascii: translate '\n' to "\r\n" not "\n\r"
ARM: 8713/1: NOMMU: Support MPU in XIP configuration
ARM: 8712/1: NOMMU: Use more MPU regions to cover memory
ARM: 8711/1: V7M: Add support for MPU to M-class
ARM: 8710/1: Kconfig: Kill CONFIG_VECTORS_BASE
ARM: 8709/1: NOMMU: Disallow MPU for XIP
ARM: 8708/1: NOMMU: Rework MPU to be mostly done in C
ARM: 8707/1: NOMMU: Update MPU accessors to use cp15 helpers
ARM: 8706/1: NOMMU: Move out MPU setup in separate module
ARM: 8702/1: head-common.S: Clear lr before jumping to start_kernel()
ARM: 8705/1: early_printk: use printascii() rather than printch()
ARM: 8703/1: debug.S: move hexbuf to a writable section
ARM: add additional table to compressed kernel
ARM: decompressor: fix BSS size calculation
pcmcia: sa1111: remove special sa1111 mmio accessors
pcmcia: sa1111: use sa1111_get_irq() to obtain IRQ resources
ARM: better diagnostics with missing/corrupt dtb
ARM: 8699/1: dma-mapping: Remove init_dma_coherent_pool_size()
ARM: 8698/1: dma-mapping: Mark atomic_pool as __ro_after_init
...

+1226 -540
+12 -9
arch/arm/Kconfig
··· 3 3 bool 4 4 default y 5 5 select ARCH_CLOCKSOURCE_DATA 6 + select ARCH_DISCARD_MEMBLOCK if !HAVE_ARCH_PFN_VALID 6 7 select ARCH_HAS_DEBUG_VIRTUAL 7 8 select ARCH_HAS_DEVMEM_IS_ALLOWED 8 9 select ARCH_HAS_ELF_RANDOMIZE ··· 240 239 241 240 config ARCH_MTD_XIP 242 241 bool 243 - 244 - config VECTORS_BASE 245 - hex 246 - default 0xffff0000 if MMU || CPU_HIGH_VECTOR 247 - default DRAM_BASE if REMAP_VECTORS_TO_RAM 248 - default 0x00000000 249 - help 250 - The base address of exception vectors. This must be two pages 251 - in size. 252 242 253 243 config ARM_PATCH_PHYS_VIRT 254 244 bool "Patch physical to virtual translations at runtime" if EMBEDDED ··· 1997 2005 This is the physical address in your flash memory the kernel will 1998 2006 be linked for and stored to. This address is dependent on your 1999 2007 own flash usage. 2008 + 2009 + config XIP_DEFLATED_DATA 2010 + bool "Store kernel .data section compressed in ROM" 2011 + depends on XIP_KERNEL 2012 + select ZLIB_INFLATE 2013 + help 2014 + Before the kernel is actually executed, its .data section has to be 2015 + copied to RAM from ROM. This option allows for storing that data 2016 + in compressed form and decompressed to RAM rather than merely being 2017 + copied, saving some precious ROM space. A possible drawback is a 2018 + slightly longer boot delay. 2000 2019 2001 2020 config KEXEC 2002 2021 bool "Kexec system call (EXPERIMENTAL)"
+2 -2
arch/arm/Kconfig-nommu
··· 53 53 54 54 config ARM_MPU 55 55 bool 'Use the ARM v7 PMSA Compliant MPU' 56 - depends on CPU_V7 57 - default y 56 + depends on CPU_V7 || CPU_V7M 57 + default y if CPU_V7 58 58 help 59 59 Some ARM systems without an MMU have instead a Memory Protection 60 60 Unit (MPU) that defines the type and permissions for regions of
+12 -1
arch/arm/boot/Makefile
··· 31 31 32 32 ifeq ($(CONFIG_XIP_KERNEL),y) 33 33 34 + cmd_deflate_xip_data = $(CONFIG_SHELL) -c \ 35 + '$(srctree)/$(src)/deflate_xip_data.sh $< $@ || { rm -f $@; false; }' 36 + 37 + ifeq ($(CONFIG_XIP_DEFLATED_DATA),y) 38 + quiet_cmd_mkxip = XIPZ $@ 39 + cmd_mkxip = $(cmd_objcopy) && $(cmd_deflate_xip_data) 40 + else 41 + quiet_cmd_mkxip = $(quiet_cmd_objcopy) 42 + cmd_mkxip = $(cmd_objcopy) 43 + endif 44 + 34 45 $(obj)/xipImage: vmlinux FORCE 35 - $(call if_changed,objcopy) 46 + $(call if_changed,mkxip) 36 47 @$(kecho) ' Physical Address of xipImage: $(CONFIG_XIP_PHYS_ADDR)' 37 48 38 49 $(obj)/Image $(obj)/zImage: FORCE
+5 -2
arch/arm/boot/compressed/Makefile
··· 117 117 asflags-y := -DZIMAGE 118 118 119 119 # Supply kernel BSS size to the decompressor via a linker symbol. 120 - KBSS_SZ = $(shell $(CROSS_COMPILE)size $(obj)/../../../../vmlinux | \ 121 - awk 'END{print $$3}') 120 + KBSS_SZ = $(shell $(CROSS_COMPILE)nm $(obj)/../../../../vmlinux | \ 121 + perl -e 'while (<>) { \ 122 + $$bss_start=hex($$1) if /^([[:xdigit:]]+) B __bss_start$$/; \ 123 + $$bss_end=hex($$1) if /^([[:xdigit:]]+) B __bss_stop$$/; \ 124 + }; printf "%d\n", $$bss_end - $$bss_start;') 122 125 LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ) 123 126 # Supply ZRELADDR to the decompressor via a linker symbol. 124 127 ifneq ($(CONFIG_AUTO_ZRELADDR),y)
+2
arch/arm/boot/compressed/head.S
··· 143 143 .word _magic_start @ absolute load/run zImage address 144 144 .word _magic_end @ zImage end address 145 145 .word 0x04030201 @ endianness flag 146 + .word 0x45454545 @ another magic number to indicate 147 + .word _magic_table @ additional data table 146 148 147 149 __EFI_HEADER 148 150 1:
+11
arch/arm/boot/compressed/vmlinux.lds.S
··· 44 44 *(.glue_7t) 45 45 *(.glue_7) 46 46 } 47 + .table : ALIGN(4) { 48 + _table_start = .; 49 + LONG(ZIMAGE_MAGIC(2)) 50 + LONG(ZIMAGE_MAGIC(0x5a534c4b)) 51 + LONG(ZIMAGE_MAGIC(__piggy_size_addr - _start)) 52 + LONG(ZIMAGE_MAGIC(_kernel_bss_size)) 53 + LONG(0) 54 + _table_end = .; 55 + } 47 56 .rodata : { 48 57 *(.rodata) 49 58 *(.rodata.*) 50 59 } 51 60 .piggydata : { 52 61 *(.piggydata) 62 + __piggy_size_addr = . - 4; 53 63 } 54 64 55 65 . = ALIGN(4); ··· 107 97 _magic_sig = ZIMAGE_MAGIC(0x016f2818); 108 98 _magic_start = ZIMAGE_MAGIC(_start); 109 99 _magic_end = ZIMAGE_MAGIC(_edata); 100 + _magic_table = ZIMAGE_MAGIC(_table_start - _start); 110 101 111 102 . = BSS_START; 112 103 __bss_start = .;
+64
arch/arm/boot/deflate_xip_data.sh
··· 1 + #!/bin/sh 2 + 3 + # XIP kernel .data segment compressor 4 + # 5 + # Created by: Nicolas Pitre, August 2017 6 + # Copyright: (C) 2017 Linaro Limited 7 + # 8 + # This program is free software; you can redistribute it and/or modify 9 + # it under the terms of the GNU General Public License version 2 as 10 + # published by the Free Software Foundation. 11 + 12 + # This script locates the start of the .data section in xipImage and 13 + # substitutes it with a compressed version. The needed offsets are obtained 14 + # from symbol addresses in vmlinux. It is expected that .data extends to 15 + # the end of xipImage. 16 + 17 + set -e 18 + 19 + VMLINUX="$1" 20 + XIPIMAGE="$2" 21 + 22 + DD="dd status=none" 23 + 24 + # Use "make V=1" to debug this script. 25 + case "$KBUILD_VERBOSE" in 26 + *1*) 27 + set -x 28 + ;; 29 + esac 30 + 31 + sym_val() { 32 + # extract hex value for symbol in $1 33 + local val=$($NM "$VMLINUX" | sed -n "/ $1$/{s/ .*$//p;q}") 34 + [ "$val" ] || { echo "can't find $1 in $VMLINUX" 1>&2; exit 1; } 35 + # convert from hex to decimal 36 + echo $((0x$val)) 37 + } 38 + 39 + __data_loc=$(sym_val __data_loc) 40 + _edata_loc=$(sym_val _edata_loc) 41 + base_offset=$(sym_val _xiprom) 42 + 43 + # convert to file based offsets 44 + data_start=$(($__data_loc - $base_offset)) 45 + data_end=$(($_edata_loc - $base_offset)) 46 + 47 + # Make sure data occupies the last part of the file. 
48 + file_end=$(stat -c "%s" "$XIPIMAGE") 49 + if [ "$file_end" != "$data_end" ]; then 50 + printf "end of xipImage doesn't match with _edata_loc (%#x vs %#x)\n" \ 51 + $(($file_end + $base_offset)) $_edata_loc 2>&1 52 + exit 1; 53 + fi 54 + 55 + # be ready to clean up 56 + trap 'rm -f "$XIPIMAGE.tmp"' 0 1 2 3 57 + 58 + # substitute the data section by a compressed version 59 + $DD if="$XIPIMAGE" count=$data_start iflag=count_bytes of="$XIPIMAGE.tmp" 60 + $DD if="$XIPIMAGE" skip=$data_start iflag=skip_bytes | 61 + gzip -9 >> "$XIPIMAGE.tmp" 62 + 63 + # replace kernel binary 64 + mv -f "$XIPIMAGE.tmp" "$XIPIMAGE"
+10
arch/arm/include/asm/cputype.h
··· 174 174 return read_cpuid(CPUID_CACHETYPE); 175 175 } 176 176 177 + static inline unsigned int __attribute_const__ read_cpuid_mputype(void) 178 + { 179 + return read_cpuid(CPUID_MPUIR); 180 + } 181 + 177 182 #elif defined(CONFIG_CPU_V7M) 178 183 179 184 static inline unsigned int __attribute_const__ read_cpuid_id(void) ··· 189 184 static inline unsigned int __attribute_const__ read_cpuid_cachetype(void) 190 185 { 191 186 return readl(BASEADDR_V7M_SCB + V7M_SCB_CTR); 187 + } 188 + 189 + static inline unsigned int __attribute_const__ read_cpuid_mputype(void) 190 + { 191 + return readl(BASEADDR_V7M_SCB + MPU_TYPE); 192 192 } 193 193 194 194 #else /* ifdef CONFIG_CPU_CP15 / elif defined(CONFIG_CPU_V7M) */
-7
arch/arm/include/asm/dma-mapping.h
··· 191 191 unsigned long attrs); 192 192 193 193 /* 194 - * This can be called during early boot to increase the size of the atomic 195 - * coherent DMA pool above the default value of 256KiB. It must be called 196 - * before postcore_initcall. 197 - */ 198 - extern void __init init_dma_coherent_pool_size(unsigned long size); 199 - 200 - /* 201 194 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic" 202 195 * and utilize bounce buffers as needed to work around limited DMA windows. 203 196 *
+14 -2
arch/arm/include/asm/elf.h
··· 101 101 extern int elf_check_arch(const struct elf32_hdr *); 102 102 #define elf_check_arch elf_check_arch 103 103 104 + #define ELFOSABI_ARM_FDPIC 65 /* ARM FDPIC platform */ 105 + #define elf_check_fdpic(x) ((x)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC) 106 + #define elf_check_const_displacement(x) ((x)->e_flags & EF_ARM_PIC) 107 + #define ELF_FDPIC_CORE_EFLAGS 0 108 + 104 109 #define vmcore_elf64_check_arch(x) (0) 105 110 106 - extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int); 107 - #define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk) 111 + extern int arm_elf_read_implies_exec(int); 112 + #define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(stk) 108 113 109 114 struct task_struct; 110 115 int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); ··· 125 120 registered with atexit, as per the SVR4 ABI. A value of 0 means we 126 121 have no such handler. */ 127 122 #define ELF_PLAT_INIT(_r, load_addr) (_r)->ARM_r0 = 0 123 + 124 + #define ELF_FDPIC_PLAT_INIT(_r, _exec_map_addr, _interp_map_addr, dynamic_addr) \ 125 + do { \ 126 + (_r)->ARM_r7 = _exec_map_addr; \ 127 + (_r)->ARM_r8 = _interp_map_addr; \ 128 + (_r)->ARM_r9 = dynamic_addr; \ 129 + } while(0) 128 130 129 131 extern void elf_set_personality(const struct elf32_hdr *); 130 132 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
-1
arch/arm/include/asm/highmem.h
··· 19 19 } while (0) 20 20 21 21 extern pte_t *pkmap_page_table; 22 - extern pte_t *fixmap_page_table; 23 22 24 23 extern void *kmap_high(struct page *page); 25 24 extern void kunmap_high(struct page *page);
+8
arch/arm/include/asm/mmu.h
··· 15 15 #ifdef CONFIG_VDSO 16 16 unsigned long vdso; 17 17 #endif 18 + #ifdef CONFIG_BINFMT_ELF_FDPIC 19 + unsigned long exec_fdpic_loadmap; 20 + unsigned long interp_fdpic_loadmap; 21 + #endif 18 22 } mm_context_t; 19 23 20 24 #ifdef CONFIG_CPU_HAS_ASID ··· 38 34 */ 39 35 typedef struct { 40 36 unsigned long end_brk; 37 + #ifdef CONFIG_BINFMT_ELF_FDPIC 38 + unsigned long exec_fdpic_loadmap; 39 + unsigned long interp_fdpic_loadmap; 40 + #endif 41 41 } mm_context_t; 42 42 43 43 #endif
+20 -6
arch/arm/include/asm/mpu.h
··· 2 2 #ifndef __ARM_MPU_H 3 3 #define __ARM_MPU_H 4 4 5 - #ifdef CONFIG_ARM_MPU 6 - 7 5 /* MPUIR layout */ 8 6 #define MPUIR_nU 1 9 7 #define MPUIR_DREGION 8 ··· 16 18 /* MPU D/I Size Register fields */ 17 19 #define MPU_RSR_SZ 1 18 20 #define MPU_RSR_EN 0 21 + #define MPU_RSR_SD 8 22 + 23 + /* Number of subregions (SD) */ 24 + #define MPU_NR_SUBREGS 8 25 + #define MPU_MIN_SUBREG_SIZE 256 19 26 20 27 /* The D/I RSR value for an enabled region spanning the whole of memory */ 21 28 #define MPU_RSR_ALL_MEM 63 ··· 42 39 #endif 43 40 44 41 /* Access permission bits of ACR (only define those that we use)*/ 42 + #define MPU_AP_PL1RO_PL0NA (0x5 << 8) 45 43 #define MPU_AP_PL1RW_PL0RW (0x3 << 8) 46 44 #define MPU_AP_PL1RW_PL0R0 (0x2 << 8) 47 45 #define MPU_AP_PL1RW_PL0NA (0x1 << 8) ··· 51 47 #define MPU_PROBE_REGION 0 52 48 #define MPU_BG_REGION 1 53 49 #define MPU_RAM_REGION 2 54 - #define MPU_VECTORS_REGION 3 50 + #define MPU_ROM_REGION 3 55 51 56 52 /* Maximum number of regions Linux is interested in */ 57 53 #define MPU_MAX_REGIONS 16 ··· 69 65 }; 70 66 71 67 struct mpu_rgn_info { 72 - u32 mpuir; 68 + unsigned int used; 73 69 struct mpu_rgn rgns[MPU_MAX_REGIONS]; 74 70 }; 75 71 extern struct mpu_rgn_info mpu_rgn_info; 76 72 77 - #endif /* __ASSEMBLY__ */ 73 + #ifdef CONFIG_ARM_MPU 78 74 79 - #endif /* CONFIG_ARM_MPU */ 75 + extern void __init adjust_lowmem_bounds_mpu(void); 76 + extern void __init mpu_setup(void); 77 + 78 + #else 79 + 80 + static inline void adjust_lowmem_bounds_mpu(void) {} 81 + static inline void mpu_setup(void) {} 82 + 83 + #endif /* !CONFIG_ARM_MPU */ 84 + 85 + #endif /* __ASSEMBLY__ */ 80 86 81 87 #endif
+15 -7
arch/arm/include/asm/processor.h
··· 47 47 48 48 #define INIT_THREAD { } 49 49 50 - #ifdef CONFIG_MMU 51 - #define nommu_start_thread(regs) do { } while (0) 52 - #else 53 - #define nommu_start_thread(regs) regs->ARM_r10 = current->mm->start_data 54 - #endif 55 - 56 50 #define start_thread(regs,pc,sp) \ 57 51 ({ \ 52 + unsigned long r7, r8, r9; \ 53 + \ 54 + if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC)) { \ 55 + r7 = regs->ARM_r7; \ 56 + r8 = regs->ARM_r8; \ 57 + r9 = regs->ARM_r9; \ 58 + } \ 58 59 memset(regs->uregs, 0, sizeof(regs->uregs)); \ 60 + if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) && \ 61 + current->personality & FDPIC_FUNCPTRS) { \ 62 + regs->ARM_r7 = r7; \ 63 + regs->ARM_r8 = r8; \ 64 + regs->ARM_r9 = r9; \ 65 + regs->ARM_r10 = current->mm->start_data; \ 66 + } else if (!IS_ENABLED(CONFIG_MMU)) \ 67 + regs->ARM_r10 = current->mm->start_data; \ 59 68 if (current->personality & ADDR_LIMIT_32BIT) \ 60 69 regs->ARM_cpsr = USR_MODE; \ 61 70 else \ ··· 74 65 regs->ARM_cpsr |= PSR_ENDSTATE; \ 75 66 regs->ARM_pc = pc & ~1; /* pc */ \ 76 67 regs->ARM_sp = sp; /* sp */ \ 77 - nommu_start_thread(regs); \ 78 68 }) 79 69 80 70 /* Forward declaration, a strange C thing */
+1 -1
arch/arm/include/asm/smp.h
··· 60 60 */ 61 61 struct secondary_data { 62 62 union { 63 - unsigned long mpu_rgn_szr; 63 + struct mpu_rgn_info *mpu_rgn_info; 64 64 u64 pgdir; 65 65 }; 66 66 unsigned long swapper_pg_dir;
+1
arch/arm/include/asm/ucontext.h
··· 3 3 #define _ASMARM_UCONTEXT_H 4 4 5 5 #include <asm/fpstate.h> 6 + #include <asm/user.h> 6 7 7 8 /* 8 9 * struct sigcontext only has room for the basic registers, but struct
+10
arch/arm/include/asm/v7m.h
··· 58 58 #define V7M_SCB_CCSIDR 0x80 /* Cache size ID register */ 59 59 #define V7M_SCB_CSSELR 0x84 /* Cache size selection register */ 60 60 61 + /* Memory-mapped MPU registers for M-class */ 62 + #define MPU_TYPE 0x90 63 + #define MPU_CTRL 0x94 64 + #define MPU_CTRL_ENABLE 1 65 + #define MPU_CTRL_PRIVDEFENA (1 << 2) 66 + 67 + #define MPU_RNR 0x98 68 + #define MPU_RBAR 0x9c 69 + #define MPU_RASR 0xa0 70 + 61 71 /* Cache opeartions */ 62 72 #define V7M_SCB_ICIALLU 0x250 /* I-cache invalidate all to PoU */ 63 73 #define V7M_SCB_ICIMVAU 0x258 /* I-cache invalidate by MVA to PoU */
+4
arch/arm/include/uapi/asm/ptrace.h
··· 32 32 #define PTRACE_SETVFPREGS 28 33 33 #define PTRACE_GETHBPREGS 29 34 34 #define PTRACE_SETHBPREGS 30 35 + #define PTRACE_GETFDPIC 31 36 + 37 + #define PTRACE_GETFDPIC_EXEC 0 38 + #define PTRACE_GETFDPIC_INTERP 1 35 39 36 40 /* 37 41 * PSR bits
+1
arch/arm/include/uapi/asm/unistd.h
··· 36 36 #define __ARM_NR_usr26 (__ARM_NR_BASE+3) 37 37 #define __ARM_NR_usr32 (__ARM_NR_BASE+4) 38 38 #define __ARM_NR_set_tls (__ARM_NR_BASE+5) 39 + #define __ARM_NR_get_tls (__ARM_NR_BASE+6) 39 40 40 41 #endif /* _UAPI__ASM_ARM_UNISTD_H */
+5
arch/arm/kernel/Makefile
··· 88 88 obj-$(CONFIG_DEBUG_LL) += debug.o 89 89 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 90 90 91 + # This is executed very early using a temporary stack when no memory allocator 92 + # nor global data is available. Everything has to be allocated on the stack. 93 + CFLAGS_head-inflate-data.o := $(call cc-option,-Wframe-larger-than=10240) 94 + obj-$(CONFIG_XIP_DEFLATED_DATA) += head-inflate-data.o 95 + 91 96 obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o 92 97 AFLAGS_hyp-stub.o :=-Wa,-march=armv7-a 93 98 ifeq ($(CONFIG_ARM_PSCI),y)
+15
arch/arm/kernel/asm-offsets.c
··· 23 23 #include <asm/mach/arch.h> 24 24 #include <asm/thread_info.h> 25 25 #include <asm/memory.h> 26 + #include <asm/mpu.h> 26 27 #include <asm/procinfo.h> 27 28 #include <asm/suspend.h> 28 29 #include <asm/vdso_datapage.h> 29 30 #include <asm/hardware/cache-l2x0.h> 30 31 #include <linux/kbuild.h> 32 + #include "signal.h" 31 33 32 34 /* 33 35 * Make sure that the compiler and target are compatible. ··· 114 112 DEFINE(SVC_ADDR_LIMIT, offsetof(struct svc_pt_regs, addr_limit)); 115 113 DEFINE(SVC_REGS_SIZE, sizeof(struct svc_pt_regs)); 116 114 BLANK(); 115 + DEFINE(SIGFRAME_RC3_OFFSET, offsetof(struct sigframe, retcode[3])); 116 + DEFINE(RT_SIGFRAME_RC3_OFFSET, offsetof(struct rt_sigframe, sig.retcode[3])); 117 + BLANK(); 117 118 #ifdef CONFIG_CACHE_L2X0 118 119 DEFINE(L2X0_R_PHY_BASE, offsetof(struct l2x0_regs, phy_base)); 119 120 DEFINE(L2X0_R_AUX_CTRL, offsetof(struct l2x0_regs, aux_ctrl)); ··· 187 182 BLANK(); 188 183 #ifdef CONFIG_VDSO 189 184 DEFINE(VDSO_DATA_SIZE, sizeof(union vdso_data_store)); 185 + #endif 186 + BLANK(); 187 + #ifdef CONFIG_ARM_MPU 188 + DEFINE(MPU_RNG_INFO_RNGS, offsetof(struct mpu_rgn_info, rgns)); 189 + DEFINE(MPU_RNG_INFO_USED, offsetof(struct mpu_rgn_info, used)); 190 + 191 + DEFINE(MPU_RNG_SIZE, sizeof(struct mpu_rgn)); 192 + DEFINE(MPU_RGN_DRBAR, offsetof(struct mpu_rgn, drbar)); 193 + DEFINE(MPU_RGN_DRSR, offsetof(struct mpu_rgn, drsr)); 194 + DEFINE(MPU_RGN_DRACR, offsetof(struct mpu_rgn, dracr)); 190 195 #endif 191 196 return 0; 192 197 }
+2 -5
arch/arm/kernel/atags_parse.c
··· 196 196 break; 197 197 } 198 198 199 - if (!mdesc) { 200 - early_print("\nError: unrecognized/unsupported machine ID" 201 - " (r1 = 0x%08x).\n\n", machine_nr); 202 - dump_machine_table(); /* does not return */ 203 - } 199 + if (!mdesc) 200 + return NULL; 204 201 205 202 if (__atags_pointer) 206 203 tags = phys_to_virt(__atags_pointer);
+25 -14
arch/arm/kernel/debug.S
··· 55 55 56 56 ENTRY(printhex2) 57 57 mov r1, #2 58 - printhex: adr r2, hexbuf 58 + printhex: adr r2, hexbuf_rel 59 + ldr r3, [r2] 60 + add r2, r2, r3 59 61 add r3, r2, r1 60 62 mov r1, #0 61 63 strb r1, [r3] ··· 73 71 b printascii 74 72 ENDPROC(printhex2) 75 73 76 - hexbuf: .space 16 74 + .pushsection .bss 75 + hexbuf_addr: .space 16 76 + .popsection 77 + .align 78 + hexbuf_rel: .long hexbuf_addr - . 77 79 78 80 .ltorg 79 81 ··· 85 79 86 80 ENTRY(printascii) 87 81 addruart_current r3, r1, r2 88 - b 2f 89 - 1: waituart r2, r3 90 - senduart r1, r3 91 - busyuart r2, r3 92 - teq r1, #'\n' 93 - moveq r1, #'\r' 94 - beq 1b 95 - 2: teq r0, #0 82 + 1: teq r0, #0 96 83 ldrneb r1, [r0], #1 97 84 teqne r1, #0 98 - bne 1b 99 - ret lr 85 + reteq lr 86 + 2: teq r1, #'\n' 87 + bne 3f 88 + mov r1, #'\r' 89 + waituart r2, r3 90 + senduart r1, r3 91 + busyuart r2, r3 92 + mov r1, #'\n' 93 + 3: waituart r2, r3 94 + senduart r1, r3 95 + busyuart r2, r3 96 + b 1b 100 97 ENDPROC(printascii) 101 98 102 99 ENTRY(printch) 103 100 addruart_current r3, r1, r2 104 101 mov r1, r0 105 102 mov r0, #0 106 - b 1b 103 + b 2b 107 104 ENDPROC(printch) 108 105 109 106 #ifdef CONFIG_MMU ··· 133 124 ENDPROC(printascii) 134 125 135 126 ENTRY(printch) 136 - adr r1, hexbuf 127 + adr r1, hexbuf_rel 128 + ldr r2, [r1] 129 + add r1, r1, r2 137 130 strb r0, [r1] 138 131 mov r0, #0x03 @ SYS_WRITEC 139 132 ARM( svc #0x123456 )
+10 -6
arch/arm/kernel/early_printk.c
··· 11 11 #include <linux/kernel.h> 12 12 #include <linux/console.h> 13 13 #include <linux/init.h> 14 + #include <linux/string.h> 14 15 15 - extern void printch(int); 16 + extern void printascii(const char *); 16 17 17 18 static void early_write(const char *s, unsigned n) 18 19 { 19 - while (n-- > 0) { 20 - if (*s == '\n') 21 - printch('\r'); 22 - printch(*s); 23 - s++; 20 + char buf[128]; 21 + while (n) { 22 + unsigned l = min(n, sizeof(buf)-1); 23 + memcpy(buf, s, l); 24 + buf[l] = 0; 25 + s += l; 26 + n -= l; 27 + printascii(buf); 24 28 } 25 29 } 26 30
+23 -1
arch/arm/kernel/elf.c
··· 4 4 #include <linux/personality.h> 5 5 #include <linux/binfmts.h> 6 6 #include <linux/elf.h> 7 + #include <linux/elf-fdpic.h> 7 8 #include <asm/system_info.h> 8 9 9 10 int elf_check_arch(const struct elf32_hdr *x) ··· 82 81 * - the binary requires an executable stack 83 82 * - we're running on a CPU which doesn't support NX. 84 83 */ 85 - int arm_elf_read_implies_exec(const struct elf32_hdr *x, int executable_stack) 84 + int arm_elf_read_implies_exec(int executable_stack) 86 85 { 87 86 if (executable_stack != EXSTACK_DISABLE_X) 88 87 return 1; ··· 91 90 return 0; 92 91 } 93 92 EXPORT_SYMBOL(arm_elf_read_implies_exec); 93 + 94 + #if defined(CONFIG_MMU) && defined(CONFIG_BINFMT_ELF_FDPIC) 95 + 96 + void elf_fdpic_arch_lay_out_mm(struct elf_fdpic_params *exec_params, 97 + struct elf_fdpic_params *interp_params, 98 + unsigned long *start_stack, 99 + unsigned long *start_brk) 100 + { 101 + elf_set_personality(&exec_params->hdr); 102 + 103 + exec_params->load_addr = 0x8000; 104 + interp_params->load_addr = ELF_ET_DYN_BASE; 105 + *start_stack = TASK_SIZE - SZ_16M; 106 + 107 + if ((exec_params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) == ELF_FDPIC_FLAG_INDEPENDENT) { 108 + exec_params->flags &= ~ELF_FDPIC_FLAG_ARRANGEMENT; 109 + exec_params->flags |= ELF_FDPIC_FLAG_CONSTDISP; 110 + } 111 + } 112 + 113 + #endif
-9
arch/arm/kernel/entry-common.S
··· 400 400 * offset, we return EINVAL. 401 401 */ 402 402 sys_mmap2: 403 - #if PAGE_SHIFT > 12 404 - tst r5, #PGOFF_MASK 405 - moveq r5, r5, lsr #PAGE_SHIFT - 12 406 - streq r5, [sp, #4] 407 - beq sys_mmap_pgoff 408 - mov r0, #-EINVAL 409 - ret lr 410 - #else 411 403 str r5, [sp, #4] 412 404 b sys_mmap_pgoff 413 - #endif 414 405 ENDPROC(sys_mmap2) 415 406 416 407 #ifdef CONFIG_OABI_COMPAT
+52 -30
arch/arm/kernel/head-common.S
··· 79 79 */ 80 80 __INIT 81 81 __mmap_switched: 82 - adr r3, __mmap_switched_data 83 82 84 - ldmia r3!, {r4, r5, r6, r7} 85 - cmp r4, r5 @ Copy data segment if needed 86 - 1: cmpne r5, r6 87 - ldrne fp, [r4], #4 88 - strne fp, [r5], #4 89 - bne 1b 83 + mov r7, r1 84 + mov r8, r2 85 + mov r10, r0 90 86 91 - mov fp, #0 @ Clear BSS (and zero fp) 92 - 1: cmp r6, r7 93 - strcc fp, [r6],#4 94 - bcc 1b 87 + adr r4, __mmap_switched_data 88 + mov fp, #0 95 89 96 - ARM( ldmia r3, {r4, r5, r6, r7, sp}) 97 - THUMB( ldmia r3, {r4, r5, r6, r7} ) 98 - THUMB( ldr sp, [r3, #16] ) 99 - str r9, [r4] @ Save processor ID 100 - str r1, [r5] @ Save machine type 101 - str r2, [r6] @ Save atags pointer 102 - cmp r7, #0 103 - strne r0, [r7] @ Save control register values 90 + #if defined(CONFIG_XIP_DEFLATED_DATA) 91 + ARM( ldr sp, [r4], #4 ) 92 + THUMB( ldr sp, [r4] ) 93 + THUMB( add r4, #4 ) 94 + bl __inflate_kernel_data @ decompress .data to RAM 95 + teq r0, #0 96 + bne __error 97 + #elif defined(CONFIG_XIP_KERNEL) 98 + ARM( ldmia r4!, {r0, r1, r2, sp} ) 99 + THUMB( ldmia r4!, {r0, r1, r2, r3} ) 100 + THUMB( mov sp, r3 ) 101 + sub r2, r2, r1 102 + bl memcpy @ copy .data to RAM 103 + #endif 104 + 105 + ARM( ldmia r4!, {r0, r1, sp} ) 106 + THUMB( ldmia r4!, {r0, r1, r3} ) 107 + THUMB( mov sp, r3 ) 108 + sub r1, r1, r0 109 + bl __memzero @ clear .bss 110 + 111 + ldmia r4, {r0, r1, r2, r3} 112 + str r9, [r0] @ Save processor ID 113 + str r7, [r1] @ Save machine type 114 + str r8, [r2] @ Save atags pointer 115 + cmp r3, #0 116 + strne r10, [r3] @ Save control register values 117 + mov lr, #0 104 118 b start_kernel 105 119 ENDPROC(__mmap_switched) 106 120 107 121 .align 2 108 122 .type __mmap_switched_data, %object 109 123 __mmap_switched_data: 110 - .long __data_loc @ r4 111 - .long _sdata @ r5 112 - .long __bss_start @ r6 113 - .long _end @ r7 114 - .long processor_id @ r4 115 - .long __machine_arch_type @ r5 116 - .long __atags_pointer @ r6 117 - #ifdef CONFIG_CPU_CP15 118 - .long 
cr_alignment @ r7 119 - #else 120 - .long 0 @ r7 124 + #ifdef CONFIG_XIP_KERNEL 125 + #ifndef CONFIG_XIP_DEFLATED_DATA 126 + .long _sdata @ r0 127 + .long __data_loc @ r1 128 + .long _edata_loc @ r2 121 129 #endif 130 + .long __bss_stop @ sp (temporary stack in .bss) 131 + #endif 132 + 133 + .long __bss_start @ r0 134 + .long __bss_stop @ r1 122 135 .long init_thread_union + THREAD_START_SP @ sp 136 + 137 + .long processor_id @ r0 138 + .long __machine_arch_type @ r1 139 + .long __atags_pointer @ r2 140 + #ifdef CONFIG_CPU_CP15 141 + .long cr_alignment @ r3 142 + #else 143 + .long 0 @ r3 144 + #endif 123 145 .size __mmap_switched_data, . - __mmap_switched_data 124 146 125 147 /*
+62
arch/arm/kernel/head-inflate-data.c
··· 1 + /* 2 + * XIP kernel .data segment decompressor 3 + * 4 + * Created by: Nicolas Pitre, August 2017 5 + * Copyright: (C) 2017 Linaro Limited 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + */ 11 + 12 + #include <linux/init.h> 13 + #include <linux/zutil.h> 14 + 15 + /* for struct inflate_state */ 16 + #include "../../../lib/zlib_inflate/inftrees.h" 17 + #include "../../../lib/zlib_inflate/inflate.h" 18 + #include "../../../lib/zlib_inflate/infutil.h" 19 + 20 + extern char __data_loc[]; 21 + extern char _edata_loc[]; 22 + extern char _sdata[]; 23 + 24 + /* 25 + * This code is called very early during the boot process to decompress 26 + * the .data segment stored compressed in ROM. Therefore none of the global 27 + * variables are valid yet, hence no kernel services such as memory 28 + * allocation is available. Everything must be allocated on the stack and 29 + * we must avoid any global data access. We use a temporary stack located 30 + * in the .bss area. The linker script makes sure the .bss is big enough 31 + * to hold our stack frame plus some room for called functions. 32 + * 33 + * We mimic the code in lib/decompress_inflate.c to use the smallest work 34 + * area possible. And because everything is statically allocated on the 35 + * stack then there is no need to clean up before returning. 
36 + */ 37 + 38 + int __init __inflate_kernel_data(void) 39 + { 40 + struct z_stream_s stream, *strm = &stream; 41 + struct inflate_state state; 42 + char *in = __data_loc; 43 + int rc; 44 + 45 + /* Check and skip gzip header (assume no filename) */ 46 + if (in[0] != 0x1f || in[1] != 0x8b || in[2] != 0x08 || in[3] & ~3) 47 + return -1; 48 + in += 10; 49 + 50 + strm->workspace = &state; 51 + strm->next_in = in; 52 + strm->avail_in = _edata_loc - __data_loc; /* upper bound */ 53 + strm->next_out = _sdata; 54 + strm->avail_out = _edata_loc - __data_loc; 55 + zlib_inflateInit2(strm, -MAX_WBITS); 56 + WS(strm)->inflate_state.wsize = 0; 57 + WS(strm)->inflate_state.window = NULL; 58 + rc = zlib_inflate(strm, Z_FINISH); 59 + if (rc == Z_OK || rc == Z_STREAM_END) 60 + rc = strm->avail_out; /* should be 0 */ 61 + return rc; 62 + }
+117 -29
arch/arm/kernel/head-nommu.S
··· 13 13 */ 14 14 #include <linux/linkage.h> 15 15 #include <linux/init.h> 16 + #include <linux/errno.h> 16 17 17 18 #include <asm/assembler.h> 18 19 #include <asm/ptrace.h> ··· 111 110 112 111 #ifdef CONFIG_ARM_MPU 113 112 /* Use MPU region info supplied by __cpu_up */ 114 - ldr r6, [r7] @ get secondary_data.mpu_szr 115 - bl __setup_mpu @ Initialize the MPU 113 + ldr r6, [r7] @ get secondary_data.mpu_rgn_info 114 + bl __secondary_setup_mpu @ Initialize the MPU 116 115 #endif 117 116 118 117 badr lr, 1f @ return (PIC) address ··· 176 175 #ifdef CONFIG_ARM_MPU 177 176 178 177 178 + #ifndef CONFIG_CPU_V7M 179 179 /* Set which MPU region should be programmed */ 180 - .macro set_region_nr tmp, rgnr 180 + .macro set_region_nr tmp, rgnr, unused 181 181 mov \tmp, \rgnr @ Use static region numbers 182 182 mcr p15, 0, \tmp, c6, c2, 0 @ Write RGNR 183 183 .endm 184 184 185 185 /* Setup a single MPU region, either D or I side (D-side for unified) */ 186 - .macro setup_region bar, acr, sr, side = MPU_DATA_SIDE 186 + .macro setup_region bar, acr, sr, side = MPU_DATA_SIDE, unused 187 187 mcr p15, 0, \bar, c6, c1, (0 + \side) @ I/DRBAR 188 188 mcr p15, 0, \acr, c6, c1, (4 + \side) @ I/DRACR 189 189 mcr p15, 0, \sr, c6, c1, (2 + \side) @ I/DRSR 190 190 .endm 191 + #else 192 + .macro set_region_nr tmp, rgnr, base 193 + mov \tmp, \rgnr 194 + str \tmp, [\base, #MPU_RNR] 195 + .endm 191 196 197 + .macro setup_region bar, acr, sr, unused, base 198 + lsl \acr, \acr, #16 199 + orr \acr, \acr, \sr 200 + str \bar, [\base, #MPU_RBAR] 201 + str \acr, [\base, #MPU_RASR] 202 + .endm 203 + 204 + #endif 192 205 /* 193 206 * Setup the MPU and initial MPU Regions. We create the following regions: 194 207 * Region 0: Use this for probing the MPU details, so leave disabled. 
··· 216 201 ENTRY(__setup_mpu) 217 202 218 203 /* Probe for v7 PMSA compliance */ 219 - mrc p15, 0, r0, c0, c1, 4 @ Read ID_MMFR0 204 + M_CLASS(movw r12, #:lower16:BASEADDR_V7M_SCB) 205 + M_CLASS(movt r12, #:upper16:BASEADDR_V7M_SCB) 206 + 207 + AR_CLASS(mrc p15, 0, r0, c0, c1, 4) @ Read ID_MMFR0 208 + M_CLASS(ldr r0, [r12, 0x50]) 220 209 and r0, r0, #(MMFR0_PMSA) @ PMSA field 221 210 teq r0, #(MMFR0_PMSAv7) @ PMSA v7 222 - bne __error_p @ Fail: ARM_MPU on NOT v7 PMSA 211 + bxne lr 223 212 224 213 /* Determine whether the D/I-side memory map is unified. We set the 225 214 * flags here and continue to use them for the rest of this function */ 226 - mrc p15, 0, r0, c0, c0, 4 @ MPUIR 215 + AR_CLASS(mrc p15, 0, r0, c0, c0, 4) @ MPUIR 216 + M_CLASS(ldr r0, [r12, #MPU_TYPE]) 227 217 ands r5, r0, #MPUIR_DREGION_SZMASK @ 0 size d region => No MPU 228 - beq __error_p @ Fail: ARM_MPU and no MPU 218 + bxeq lr 229 219 tst r0, #MPUIR_nU @ MPUIR_nU = 0 for unified 230 220 231 221 /* Setup second region first to free up r6 */ 232 - set_region_nr r0, #MPU_RAM_REGION 222 + set_region_nr r0, #MPU_RAM_REGION, r12 233 223 isb 234 224 /* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */ 235 225 ldr r0, =PLAT_PHYS_OFFSET @ RAM starts at PHYS_OFFSET 236 226 ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL) 237 227 238 - setup_region r0, r5, r6, MPU_DATA_SIDE @ PHYS_OFFSET, shared, enabled 239 - beq 1f @ Memory-map not unified 240 - setup_region r0, r5, r6, MPU_INSTR_SIDE @ PHYS_OFFSET, shared, enabled 228 + setup_region r0, r5, r6, MPU_DATA_SIDE, r12 @ PHYS_OFFSET, shared, enabled 229 + beq 1f @ Memory-map not unified 230 + setup_region r0, r5, r6, MPU_INSTR_SIDE, r12 @ PHYS_OFFSET, shared, enabled 241 231 1: isb 242 232 243 233 /* First/background region */ 244 - set_region_nr r0, #MPU_BG_REGION 234 + set_region_nr r0, #MPU_BG_REGION, r12 245 235 isb 246 236 /* Execute Never, strongly ordered, inaccessible to PL0, rw PL1 */ 247 237 mov r0, #0 @ BG region starts at 0x0 248 238 
ldr r5,=(MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA) 249 239 mov r6, #MPU_RSR_ALL_MEM @ 4GB region, enabled 250 240 251 - setup_region r0, r5, r6, MPU_DATA_SIDE @ 0x0, BG region, enabled 252 - beq 2f @ Memory-map not unified 253 - setup_region r0, r5, r6, MPU_INSTR_SIDE @ 0x0, BG region, enabled 241 + setup_region r0, r5, r6, MPU_DATA_SIDE, r12 @ 0x0, BG region, enabled 242 + beq 2f @ Memory-map not unified 243 + setup_region r0, r5, r6, MPU_INSTR_SIDE r12 @ 0x0, BG region, enabled 254 244 2: isb 255 245 256 - /* Vectors region */ 257 - set_region_nr r0, #MPU_VECTORS_REGION 246 + #ifdef CONFIG_XIP_KERNEL 247 + set_region_nr r0, #MPU_ROM_REGION, r12 258 248 isb 259 - /* Shared, inaccessible to PL0, rw PL1 */ 260 - mov r0, #CONFIG_VECTORS_BASE @ Cover from VECTORS_BASE 261 - ldr r5,=(MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL) 262 - /* Writing N to bits 5:1 (RSR_SZ) --> region size 2^N+1 */ 263 - mov r6, #(((2 * PAGE_SHIFT - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN) 264 249 265 - setup_region r0, r5, r6, MPU_DATA_SIDE @ VECTORS_BASE, PL0 NA, enabled 266 - beq 3f @ Memory-map not unified 267 - setup_region r0, r5, r6, MPU_INSTR_SIDE @ VECTORS_BASE, PL0 NA, enabled 250 + ldr r5,=(MPU_AP_PL1RO_PL0NA | MPU_RGN_NORMAL) 251 + 252 + ldr r0, =CONFIG_XIP_PHYS_ADDR @ ROM start 253 + ldr r6, =(_exiprom) @ ROM end 254 + sub r6, r6, r0 @ Minimum size of region to map 255 + clz r6, r6 @ Region size must be 2^N... 
256 + rsb r6, r6, #31 @ ...so round up region size 257 + lsl r6, r6, #MPU_RSR_SZ @ Put size in right field 258 + orr r6, r6, #(1 << MPU_RSR_EN) @ Set region enabled bit 259 + 260 + setup_region r0, r5, r6, MPU_DATA_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled 261 + beq 3f @ Memory-map not unified 262 + setup_region r0, r5, r6, MPU_INSTR_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled 268 263 3: isb 264 + #endif 265 + 266 + /* Enable the MPU */ 267 + AR_CLASS(mrc p15, 0, r0, c1, c0, 0) @ Read SCTLR 268 + AR_CLASS(bic r0, r0, #CR_BR) @ Disable the 'default mem-map' 269 + AR_CLASS(orr r0, r0, #CR_M) @ Set SCTRL.M (MPU on) 270 + AR_CLASS(mcr p15, 0, r0, c1, c0, 0) @ Enable MPU 271 + 272 + M_CLASS(ldr r0, [r12, #MPU_CTRL]) 273 + M_CLASS(bic r0, #MPU_CTRL_PRIVDEFENA) 274 + M_CLASS(orr r0, #MPU_CTRL_ENABLE) 275 + M_CLASS(str r0, [r12, #MPU_CTRL]) 276 + isb 277 + 278 + ret lr 279 + ENDPROC(__setup_mpu) 280 + 281 + #ifdef CONFIG_SMP 282 + /* 283 + * r6: pointer at mpu_rgn_info 284 + */ 285 + 286 + ENTRY(__secondary_setup_mpu) 287 + /* Probe for v7 PMSA compliance */ 288 + mrc p15, 0, r0, c0, c1, 4 @ Read ID_MMFR0 289 + and r0, r0, #(MMFR0_PMSA) @ PMSA field 290 + teq r0, #(MMFR0_PMSAv7) @ PMSA v7 291 + bne __error_p 292 + 293 + /* Determine whether the D/I-side memory map is unified. 
We set the 294 + * flags here and continue to use them for the rest of this function */ 295 + mrc p15, 0, r0, c0, c0, 4 @ MPUIR 296 + ands r5, r0, #MPUIR_DREGION_SZMASK @ 0 size d region => No MPU 297 + beq __error_p 298 + 299 + ldr r4, [r6, #MPU_RNG_INFO_USED] 300 + mov r5, #MPU_RNG_SIZE 301 + add r3, r6, #MPU_RNG_INFO_RNGS 302 + mla r3, r4, r5, r3 303 + 304 + 1: 305 + tst r0, #MPUIR_nU @ MPUIR_nU = 0 for unified 306 + sub r3, r3, #MPU_RNG_SIZE 307 + sub r4, r4, #1 308 + 309 + set_region_nr r0, r4 310 + isb 311 + 312 + ldr r0, [r3, #MPU_RGN_DRBAR] 313 + ldr r6, [r3, #MPU_RGN_DRSR] 314 + ldr r5, [r3, #MPU_RGN_DRACR] 315 + 316 + setup_region r0, r5, r6, MPU_DATA_SIDE 317 + beq 2f 318 + setup_region r0, r5, r6, MPU_INSTR_SIDE 319 + 2: isb 320 + 321 + mrc p15, 0, r0, c0, c0, 4 @ Reevaluate the MPUIR 322 + cmp r4, #0 323 + bgt 1b 269 324 270 325 /* Enable the MPU */ 271 326 mrc p15, 0, r0, c1, c0, 0 @ Read SCTLR 272 - bic r0, r0, #CR_BR @ Disable the 'default mem-map' 327 + bic r0, r0, #CR_BR @ Disable the 'default mem-map' 273 328 orr r0, r0, #CR_M @ Set SCTRL.M (MPU on) 274 329 mcr p15, 0, r0, c1, c0, 0 @ Enable MPU 275 330 isb 331 + 276 332 ret lr 277 - ENDPROC(__setup_mpu) 278 - #endif 333 + ENDPROC(__secondary_setup_mpu) 334 + 335 + #endif /* CONFIG_SMP */ 336 + #endif /* CONFIG_ARM_MPU */ 279 337 #include "head-common.S"
+10
arch/arm/kernel/setup.c
··· 1069 1069 mdesc = setup_machine_fdt(__atags_pointer); 1070 1070 if (!mdesc) 1071 1071 mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type); 1072 + if (!mdesc) { 1073 + early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n"); 1074 + early_print(" r1=0x%08x, r2=0x%08x\n", __machine_arch_type, 1075 + __atags_pointer); 1076 + if (__atags_pointer) 1077 + early_print(" r2[]=%*ph\n", 16, 1078 + phys_to_virt(__atags_pointer)); 1079 + dump_machine_table(); 1080 + } 1081 + 1072 1082 machine_desc = mdesc; 1073 1083 machine_name = mdesc->name; 1074 1084 dump_stack_set_arch_desc("%s", mdesc->name);
+38 -15
arch/arm/kernel/signal.c
··· 19 19 #include <asm/elf.h> 20 20 #include <asm/cacheflush.h> 21 21 #include <asm/traps.h> 22 - #include <asm/ucontext.h> 23 22 #include <asm/unistd.h> 24 23 #include <asm/vfp.h> 25 24 26 - extern const unsigned long sigreturn_codes[7]; 25 + #include "signal.h" 26 + 27 + extern const unsigned long sigreturn_codes[17]; 27 28 28 29 static unsigned long signal_return_offset; 29 30 ··· 173 172 /* 174 173 * Do a signal return; undo the signal stack. These are aligned to 64-bit. 175 174 */ 176 - struct sigframe { 177 - struct ucontext uc; 178 - unsigned long retcode[2]; 179 - }; 180 - 181 - struct rt_sigframe { 182 - struct siginfo info; 183 - struct sigframe sig; 184 - }; 185 175 186 176 static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf) 187 177 { ··· 358 366 unsigned long __user *rc, void __user *frame) 359 367 { 360 368 unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler; 369 + unsigned long handler_fdpic_GOT = 0; 361 370 unsigned long retcode; 362 - int thumb = 0; 371 + unsigned int idx, thumb = 0; 363 372 unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT); 373 + bool fdpic = IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) && 374 + (current->personality & FDPIC_FUNCPTRS); 375 + 376 + if (fdpic) { 377 + unsigned long __user *fdpic_func_desc = 378 + (unsigned long __user *)handler; 379 + if (__get_user(handler, &fdpic_func_desc[0]) || 380 + __get_user(handler_fdpic_GOT, &fdpic_func_desc[1])) 381 + return 1; 382 + } 364 383 365 384 cpsr |= PSR_ENDSTATE; 366 385 ··· 411 408 412 409 if (ksig->ka.sa.sa_flags & SA_RESTORER) { 413 410 retcode = (unsigned long)ksig->ka.sa.sa_restorer; 411 + if (fdpic) { 412 + /* 413 + * We need code to load the function descriptor. 414 + * That code follows the standard sigreturn code 415 + * (6 words), and is made of 3 + 2 words for each 416 + * variant. The 4th copied word is the actual FD 417 + * address that the assembly code expects. 
418 + */ 419 + idx = 6 + thumb * 3; 420 + if (ksig->ka.sa.sa_flags & SA_SIGINFO) 421 + idx += 5; 422 + if (__put_user(sigreturn_codes[idx], rc ) || 423 + __put_user(sigreturn_codes[idx+1], rc+1) || 424 + __put_user(sigreturn_codes[idx+2], rc+2) || 425 + __put_user(retcode, rc+3)) 426 + return 1; 427 + goto rc_finish; 428 + } 414 429 } else { 415 - unsigned int idx = thumb << 1; 416 - 430 + idx = thumb << 1; 417 431 if (ksig->ka.sa.sa_flags & SA_SIGINFO) 418 432 idx += 3; 419 433 ··· 442 422 __put_user(sigreturn_codes[idx+1], rc+1)) 443 423 return 1; 444 424 425 + rc_finish: 445 426 #ifdef CONFIG_MMU 446 427 if (cpsr & MODE32_BIT) { 447 428 struct mm_struct *mm = current->mm; ··· 462 441 * the return code written onto the stack. 463 442 */ 464 443 flush_icache_range((unsigned long)rc, 465 - (unsigned long)(rc + 2)); 444 + (unsigned long)(rc + 3)); 466 445 467 446 retcode = ((unsigned long)rc) + thumb; 468 447 } ··· 472 451 regs->ARM_sp = (unsigned long)frame; 473 452 regs->ARM_lr = retcode; 474 453 regs->ARM_pc = handler; 454 + if (fdpic) 455 + regs->ARM_r9 = handler_fdpic_GOT; 475 456 regs->ARM_cpsr = cpsr; 476 457 477 458 return 0;
+11
arch/arm/kernel/signal.h
··· 1 + #include <asm/ucontext.h> 2 + 3 + struct sigframe { 4 + struct ucontext uc; 5 + unsigned long retcode[4]; 6 + }; 7 + 8 + struct rt_sigframe { 9 + struct siginfo info; 10 + struct sigframe sig; 11 + };
+51 -5
arch/arm/kernel/sigreturn_codes.S
··· 14 14 * GNU General Public License for more details. 15 15 */ 16 16 17 + #include <asm/assembler.h> 18 + #include <asm/asm-offsets.h> 17 19 #include <asm/unistd.h> 18 20 19 21 /* ··· 52 50 .org sigreturn_codes + 12 * (\n) + 8 53 51 .thumb 54 52 .endm 53 + 54 + .macro arm_fdpic_slot n 55 + .org sigreturn_codes + 24 + 20 * (\n) 56 + ARM_OK( .arm ) 57 + .endm 58 + 59 + .macro thumb_fdpic_slot n 60 + .org sigreturn_codes + 24 + 20 * (\n) + 12 61 + .thumb 62 + .endm 63 + 55 64 56 65 #if __LINUX_ARM_ARCH__ <= 4 57 66 /* ··· 103 90 movs r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE) 104 91 swi #0 105 92 93 + /* ARM sigreturn restorer FDPIC bounce code snippet */ 94 + arm_fdpic_slot 0 95 + ARM_OK( ldr r3, [sp, #SIGFRAME_RC3_OFFSET] ) 96 + ARM_OK( ldmia r3, {r3, r9} ) 97 + #ifdef CONFIG_ARM_THUMB 98 + ARM_OK( bx r3 ) 99 + #else 100 + ARM_OK( ret r3 ) 101 + #endif 102 + 103 + /* Thumb sigreturn restorer FDPIC bounce code snippet */ 104 + thumb_fdpic_slot 0 105 + ldr r3, [sp, #SIGFRAME_RC3_OFFSET] 106 + ldmia r3, {r2, r3} 107 + mov r9, r3 108 + bx r2 109 + 110 + /* ARM sigreturn_rt restorer FDPIC bounce code snippet */ 111 + arm_fdpic_slot 1 112 + ARM_OK( ldr r3, [sp, #RT_SIGFRAME_RC3_OFFSET] ) 113 + ARM_OK( ldmia r3, {r3, r9} ) 114 + #ifdef CONFIG_ARM_THUMB 115 + ARM_OK( bx r3 ) 116 + #else 117 + ARM_OK( ret r3 ) 118 + #endif 119 + 120 + /* Thumb sigreturn_rt restorer FDPIC bounce code snippet */ 121 + thumb_fdpic_slot 1 122 + ldr r3, [sp, #RT_SIGFRAME_RC3_OFFSET] 123 + ldmia r3, {r2, r3} 124 + mov r9, r3 125 + bx r2 126 + 106 127 /* 107 - * Note on addtional space: setup_return in signal.c 108 - * algorithm uses two words copy regardless whether 109 - * it is thumb case or not, so we need additional 110 - * word after real last entry. 128 + * Note on additional space: setup_return in signal.c 129 + * always copies the same number of words regardless whether 130 + * it is thumb case or not, so we need one additional padding 131 + * word after the last entry. 
111 132 */ 112 - arm_slot 2 113 133 .space 4 114 134 115 135 .size sigreturn_codes, . - sigreturn_codes
+1 -1
arch/arm/kernel/smp.c
··· 114 114 */ 115 115 secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; 116 116 #ifdef CONFIG_ARM_MPU 117 - secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr; 117 + secondary_data.mpu_rgn_info = &mpu_rgn_info; 118 118 #endif 119 119 120 120 #ifdef CONFIG_MMU
+3
arch/arm/kernel/traps.c
··· 655 655 set_tls(regs->ARM_r0); 656 656 return 0; 657 657 658 + case NR(get_tls): 659 + return current_thread_info()->tp_value[0]; 660 + 658 661 default: 659 662 /* Calls 9f00xx..9f07ff are defined to return -ENOSYS 660 663 if not implemented, rather than raising SIGILL. This
+68 -53
arch/arm/kernel/vmlinux-xip.lds.S
··· 7 7 /* No __ro_after_init data in the .rodata section - which will always be ro */ 8 8 #define RO_AFTER_INIT_DATA 9 9 10 + #include <linux/sizes.h> 11 + 10 12 #include <asm-generic/vmlinux.lds.h> 11 13 #include <asm/cache.h> 12 14 #include <asm/thread_info.h> ··· 80 78 *(.text.fixup) 81 79 *(__ex_table) 82 80 #endif 83 - #ifndef CONFIG_SMP_ON_UP 84 81 *(.alt.smp.init) 85 - #endif 86 82 *(.discard) 87 83 *(.discard.*) 88 84 } ··· 182 182 *(.taglist.init) 183 183 __tagtable_end = .; 184 184 } 185 - #ifdef CONFIG_SMP_ON_UP 186 - .init.smpalt : { 187 - __smpalt_begin = .; 188 - *(.alt.smp.init) 189 - __smpalt_end = .; 190 - } 191 - #endif 192 - .init.pv_table : { 193 - __pv_table_begin = .; 194 - *(.pv_table) 195 - __pv_table_end = .; 196 - } 197 - .init.data : { 185 + .init.rodata : { 198 186 INIT_SETUP(16) 199 187 INIT_CALLS 200 188 CON_INITCALL ··· 190 202 INIT_RAM_FS 191 203 } 192 204 205 + #ifdef CONFIG_ARM_MPU 206 + . = ALIGN(SZ_128K); 207 + #endif 208 + _exiprom = .; /* End of XIP ROM area */ 209 + 210 + /* 211 + * From this point, stuff is considered writable and will be copied to RAM 212 + */ 213 + __data_loc = ALIGN(4); /* location in file */ 214 + . = PAGE_OFFSET + TEXT_OFFSET; /* location in memory */ 215 + #undef LOAD_OFFSET 216 + #define LOAD_OFFSET (PAGE_OFFSET + TEXT_OFFSET - __data_loc) 217 + 218 + . = ALIGN(THREAD_SIZE); 219 + _sdata = .; 220 + RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) 221 + .data.ro_after_init : AT(ADDR(.data.ro_after_init) - LOAD_OFFSET) { 222 + *(.data..ro_after_init) 223 + } 224 + _edata = .; 225 + 226 + . = ALIGN(PAGE_SIZE); 227 + __init_begin = .; 228 + .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { 229 + INIT_DATA 230 + } 231 + .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { 232 + ARM_EXIT_KEEP(EXIT_DATA) 233 + } 193 234 #ifdef CONFIG_SMP 194 235 PERCPU_SECTION(L1_CACHE_BYTES) 195 236 #endif 196 237 197 - _exiprom = .; /* End of XIP ROM area */ 198 - __data_loc = ALIGN(4); /* location in binary */ 199 - . 
= PAGE_OFFSET + TEXT_OFFSET; 238 + /* 239 + * End of copied data. We need a dummy section to get its LMA. 240 + * Also located before final ALIGN() as trailing padding is not stored 241 + * in the resulting binary file and useless to copy. 242 + */ 243 + .data.endmark : AT(ADDR(.data.endmark) - LOAD_OFFSET) { } 244 + _edata_loc = LOADADDR(.data.endmark); 200 245 201 - .data : AT(__data_loc) { 202 - _data = .; /* address in memory */ 203 - _sdata = .; 204 - 205 - /* 206 - * first, the init task union, aligned 207 - * to an 8192 byte boundary. 208 - */ 209 - INIT_TASK_DATA(THREAD_SIZE) 210 - 211 - . = ALIGN(PAGE_SIZE); 212 - __init_begin = .; 213 - INIT_DATA 214 - ARM_EXIT_KEEP(EXIT_DATA) 215 - . = ALIGN(PAGE_SIZE); 216 - __init_end = .; 217 - 218 - *(.data..ro_after_init) 219 - 220 - NOSAVE_DATA 221 - CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES) 222 - READ_MOSTLY_DATA(L1_CACHE_BYTES) 223 - 224 - /* 225 - * and the usual data section 226 - */ 227 - DATA_DATA 228 - CONSTRUCTORS 229 - 230 - _edata = .; 231 - } 232 - _edata_loc = __data_loc + SIZEOF(.data); 233 - 234 - BUG_TABLE 246 + . = ALIGN(PAGE_SIZE); 247 + __init_end = .; 235 248 236 249 #ifdef CONFIG_HAVE_TCM 237 250 /* ··· 291 302 } 292 303 #endif 293 304 294 - BSS_SECTION(0, 0, 0) 305 + BSS_SECTION(0, 0, 8) 295 306 _end = .; 296 307 297 308 STABS_DEBUG ··· 312 323 */ 313 324 ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE, 314 325 "HYP init code too big or misaligned") 326 + 327 + #ifdef CONFIG_XIP_DEFLATED_DATA 328 + /* 329 + * The .bss is used as a stack area for __inflate_kernel_data() whose stack 330 + * frame is 9568 bytes. Make sure it has extra room left. 331 + */ 332 + ASSERT((_end - __bss_start) >= 12288, ".bss too small for CONFIG_XIP_DEFLATED_DATA") 333 + #endif 334 + 335 + #ifdef CONFIG_ARM_MPU 336 + /* 337 + * Due to PMSAv7 restriction on base address and size we have to 338 + * enforce minimal alignment restrictions. 
It was seen that weaker 339 + * alignment restriction on _xiprom will likely force XIP address 340 + * space spawns multiple MPU regions thus it is likely we run in 341 + * situation when we are reprogramming MPU region we run on with 342 + * something which doesn't cover reprogramming code itself, so as soon 343 + * as we update MPU settings we'd immediately try to execute straight 344 + * from background region which is XN. 345 + * It seem that alignment in 1M should suit most users. 346 + * _exiprom is aligned as 1/8 of 1M so can be covered by subregion 347 + * disable 348 + */ 349 + ASSERT(!(_xiprom & (SZ_1M - 1)), "XIP start address may cause MPU programming issues") 350 + ASSERT(!(_exiprom & (SZ_128K - 1)), "XIP end address may cause MPU programming issues") 351 + #endif
+6 -34
arch/arm/kernel/vmlinux.lds.S
··· 215 215 *(.pv_table) 216 216 __pv_table_end = .; 217 217 } 218 - .init.data : { 219 - INIT_DATA 220 - INIT_SETUP(16) 221 - INIT_CALLS 222 - CON_INITCALL 223 - SECURITY_INITCALL 224 - INIT_RAM_FS 225 - } 218 + 219 + INIT_DATA_SECTION(16) 220 + 226 221 .exit.data : { 227 222 ARM_EXIT_KEEP(EXIT_DATA) 228 223 } ··· 232 237 . = ALIGN(THREAD_SIZE); 233 238 #endif 234 239 __init_end = .; 235 - __data_loc = .; 236 240 237 - .data : AT(__data_loc) { 238 - _data = .; /* address in memory */ 239 - _sdata = .; 240 - 241 - /* 242 - * first, the init task union, aligned 243 - * to an 8192 byte boundary. 244 - */ 245 - INIT_TASK_DATA(THREAD_SIZE) 246 - 247 - NOSAVE_DATA 248 - CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES) 249 - READ_MOSTLY_DATA(L1_CACHE_BYTES) 250 - 251 - /* 252 - * and the usual data section 253 - */ 254 - DATA_DATA 255 - CONSTRUCTORS 256 - 257 - _edata = .; 258 - } 259 - _edata_loc = __data_loc + SIZEOF(.data); 260 - 261 - BUG_TABLE 241 + _sdata = .; 242 + RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) 243 + _edata = .; 262 244 263 245 #ifdef CONFIG_HAVE_TCM 264 246 /*
+1
arch/arm/mm/Makefile
··· 10 10 11 11 ifneq ($(CONFIG_MMU),y) 12 12 obj-y += nommu.o 13 + obj-$(CONFIG_ARM_MPU) += pmsa-v7.o 13 14 endif 14 15 15 16 obj-$(CONFIG_ARM_PTDUMP) += dump.o
+3 -18
arch/arm/mm/dma-mapping.c
··· 382 382 } 383 383 384 384 #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K 385 - static struct gen_pool *atomic_pool; 385 + static struct gen_pool *atomic_pool __ro_after_init; 386 386 387 - static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE; 387 + static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE; 388 388 389 389 static int __init early_coherent_pool(char *p) 390 390 { ··· 392 392 return 0; 393 393 } 394 394 early_param("coherent_pool", early_coherent_pool); 395 - 396 - void __init init_dma_coherent_pool_size(unsigned long size) 397 - { 398 - /* 399 - * Catch any attempt to set the pool size too late. 400 - */ 401 - BUG_ON(atomic_pool); 402 - 403 - /* 404 - * Set architecture specific coherent pool size only if 405 - * it has not been changed by kernel command line parameter. 406 - */ 407 - if (atomic_pool_size == DEFAULT_DMA_COHERENT_POOL_SIZE) 408 - atomic_pool_size = size; 409 - } 410 395 411 396 /* 412 397 * Initialise the coherent pool for atomic allocations. ··· 428 443 429 444 gen_pool_set_algo(atomic_pool, 430 445 gen_pool_first_fit_order_align, 431 - (void *)PAGE_SHIFT); 446 + NULL); 432 447 pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n", 433 448 atomic_pool_size / 1024); 434 449 return 0;
-10
arch/arm/mm/init.c
··· 580 580 BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); 581 581 BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); 582 582 #endif 583 - 584 - if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { 585 - extern int sysctl_overcommit_memory; 586 - /* 587 - * On a machine this small we won't get 588 - * anywhere without overcommit, so turn 589 - * it on by default. 590 - */ 591 - sysctl_overcommit_memory = OVERCOMMIT_ALWAYS; 592 - } 593 583 } 594 584 595 585 #ifdef CONFIG_STRICT_KERNEL_RWX
+1 -253
arch/arm/mm/nommu.c
··· 27 27 28 28 #ifdef CONFIG_ARM_MPU 29 29 struct mpu_rgn_info mpu_rgn_info; 30 - 31 - /* Region number */ 32 - static void rgnr_write(u32 v) 33 - { 34 - asm("mcr p15, 0, %0, c6, c2, 0" : : "r" (v)); 35 - } 36 - 37 - /* Data-side / unified region attributes */ 38 - 39 - /* Region access control register */ 40 - static void dracr_write(u32 v) 41 - { 42 - asm("mcr p15, 0, %0, c6, c1, 4" : : "r" (v)); 43 - } 44 - 45 - /* Region size register */ 46 - static void drsr_write(u32 v) 47 - { 48 - asm("mcr p15, 0, %0, c6, c1, 2" : : "r" (v)); 49 - } 50 - 51 - /* Region base address register */ 52 - static void drbar_write(u32 v) 53 - { 54 - asm("mcr p15, 0, %0, c6, c1, 0" : : "r" (v)); 55 - } 56 - 57 - static u32 drbar_read(void) 58 - { 59 - u32 v; 60 - asm("mrc p15, 0, %0, c6, c1, 0" : "=r" (v)); 61 - return v; 62 - } 63 - /* Optional instruction-side region attributes */ 64 - 65 - /* I-side Region access control register */ 66 - static void iracr_write(u32 v) 67 - { 68 - asm("mcr p15, 0, %0, c6, c1, 5" : : "r" (v)); 69 - } 70 - 71 - /* I-side Region size register */ 72 - static void irsr_write(u32 v) 73 - { 74 - asm("mcr p15, 0, %0, c6, c1, 3" : : "r" (v)); 75 - } 76 - 77 - /* I-side Region base address register */ 78 - static void irbar_write(u32 v) 79 - { 80 - asm("mcr p15, 0, %0, c6, c1, 1" : : "r" (v)); 81 - } 82 - 83 - static unsigned long irbar_read(void) 84 - { 85 - unsigned long v; 86 - asm("mrc p15, 0, %0, c6, c1, 1" : "=r" (v)); 87 - return v; 88 - } 89 - 90 - /* MPU initialisation functions */ 91 - void __init adjust_lowmem_bounds_mpu(void) 92 - { 93 - phys_addr_t phys_offset = PHYS_OFFSET; 94 - phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size; 95 - struct memblock_region *reg; 96 - bool first = true; 97 - phys_addr_t mem_start; 98 - phys_addr_t mem_end; 99 - 100 - for_each_memblock(memory, reg) { 101 - if (first) { 102 - /* 103 - * Initially only use memory continuous from 104 - * PHYS_OFFSET */ 105 - if (reg->base != phys_offset) 106 - 
panic("First memory bank must be contiguous from PHYS_OFFSET"); 107 - 108 - mem_start = reg->base; 109 - mem_end = reg->base + reg->size; 110 - specified_mem_size = reg->size; 111 - first = false; 112 - } else { 113 - /* 114 - * memblock auto merges contiguous blocks, remove 115 - * all blocks afterwards in one go (we can't remove 116 - * blocks separately while iterating) 117 - */ 118 - pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n", 119 - &mem_end, &reg->base); 120 - memblock_remove(reg->base, 0 - reg->base); 121 - break; 122 - } 123 - } 124 - 125 - /* 126 - * MPU has curious alignment requirements: Size must be power of 2, and 127 - * region start must be aligned to the region size 128 - */ 129 - if (phys_offset != 0) 130 - pr_info("PHYS_OFFSET != 0 => MPU Region size constrained by alignment requirements\n"); 131 - 132 - /* 133 - * Maximum aligned region might overflow phys_addr_t if phys_offset is 134 - * 0. Hence we keep everything below 4G until we take the smaller of 135 - * the aligned_region_size and rounded_mem_size, one of which is 136 - * guaranteed to be smaller than the maximum physical address. 137 - */ 138 - aligned_region_size = (phys_offset - 1) ^ (phys_offset); 139 - /* Find the max power-of-two sized region that fits inside our bank */ 140 - rounded_mem_size = (1 << __fls(specified_mem_size)) - 1; 141 - 142 - /* The actual region size is the smaller of the two */ 143 - aligned_region_size = aligned_region_size < rounded_mem_size 144 - ? 
aligned_region_size + 1 145 - : rounded_mem_size + 1; 146 - 147 - if (aligned_region_size != specified_mem_size) { 148 - pr_warn("Truncating memory from %pa to %pa (MPU region constraints)", 149 - &specified_mem_size, &aligned_region_size); 150 - memblock_remove(mem_start + aligned_region_size, 151 - specified_mem_size - aligned_region_size); 152 - 153 - mem_end = mem_start + aligned_region_size; 154 - } 155 - 156 - pr_debug("MPU Region from %pa size %pa (end %pa))\n", 157 - &phys_offset, &aligned_region_size, &mem_end); 158 - 159 - } 160 - 161 - static int mpu_present(void) 162 - { 163 - return ((read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA) == MMFR0_PMSAv7); 164 - } 165 - 166 - static int mpu_max_regions(void) 167 - { 168 - /* 169 - * We don't support a different number of I/D side regions so if we 170 - * have separate instruction and data memory maps then return 171 - * whichever side has a smaller number of supported regions. 172 - */ 173 - u32 dregions, iregions, mpuir; 174 - mpuir = read_cpuid(CPUID_MPUIR); 175 - 176 - dregions = iregions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION; 177 - 178 - /* Check for separate d-side and i-side memory maps */ 179 - if (mpuir & MPUIR_nU) 180 - iregions = (mpuir & MPUIR_IREGION_SZMASK) >> MPUIR_IREGION; 181 - 182 - /* Use the smallest of the two maxima */ 183 - return min(dregions, iregions); 184 - } 185 - 186 - static int mpu_iside_independent(void) 187 - { 188 - /* MPUIR.nU specifies whether there is *not* a unified memory map */ 189 - return read_cpuid(CPUID_MPUIR) & MPUIR_nU; 190 - } 191 - 192 - static int mpu_min_region_order(void) 193 - { 194 - u32 drbar_result, irbar_result; 195 - /* We've kept a region free for this probing */ 196 - rgnr_write(MPU_PROBE_REGION); 197 - isb(); 198 - /* 199 - * As per ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum 200 - * region order 201 - */ 202 - drbar_write(0xFFFFFFFC); 203 - drbar_result = irbar_result = drbar_read(); 204 - drbar_write(0x0); 205 - /* If the MPU is 
non-unified, we use the larger of the two minima*/ 206 - if (mpu_iside_independent()) { 207 - irbar_write(0xFFFFFFFC); 208 - irbar_result = irbar_read(); 209 - irbar_write(0x0); 210 - } 211 - isb(); /* Ensure that MPU region operations have completed */ 212 - /* Return whichever result is larger */ 213 - return __ffs(max(drbar_result, irbar_result)); 214 - } 215 - 216 - static int mpu_setup_region(unsigned int number, phys_addr_t start, 217 - unsigned int size_order, unsigned int properties) 218 - { 219 - u32 size_data; 220 - 221 - /* We kept a region free for probing resolution of MPU regions*/ 222 - if (number > mpu_max_regions() || number == MPU_PROBE_REGION) 223 - return -ENOENT; 224 - 225 - if (size_order > 32) 226 - return -ENOMEM; 227 - 228 - if (size_order < mpu_min_region_order()) 229 - return -ENOMEM; 230 - 231 - /* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^N+1 */ 232 - size_data = ((size_order - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN; 233 - 234 - dsb(); /* Ensure all previous data accesses occur with old mappings */ 235 - rgnr_write(number); 236 - isb(); 237 - drbar_write(start); 238 - dracr_write(properties); 239 - isb(); /* Propagate properties before enabling region */ 240 - drsr_write(size_data); 241 - 242 - /* Check for independent I-side registers */ 243 - if (mpu_iside_independent()) { 244 - irbar_write(start); 245 - iracr_write(properties); 246 - isb(); 247 - irsr_write(size_data); 248 - } 249 - isb(); 250 - 251 - /* Store region info (we treat i/d side the same, so only store d) */ 252 - mpu_rgn_info.rgns[number].dracr = properties; 253 - mpu_rgn_info.rgns[number].drbar = start; 254 - mpu_rgn_info.rgns[number].drsr = size_data; 255 - return 0; 256 - } 257 - 258 - /* 259 - * Set up default MPU regions, doing nothing if there is no MPU 260 - */ 261 - void __init mpu_setup(void) 262 - { 263 - int region_err; 264 - if (!mpu_present()) 265 - return; 266 - 267 - region_err = mpu_setup_region(MPU_RAM_REGION, PHYS_OFFSET, 268 - 
ilog2(memblock.memory.regions[0].size), 269 - MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL); 270 - if (region_err) { 271 - panic("MPU region initialization failure! %d", region_err); 272 - } else { 273 - pr_info("Using ARMv7 PMSA Compliant MPU. " 274 - "Region independence: %s, Max regions: %d\n", 275 - mpu_iside_independent() ? "Yes" : "No", 276 - mpu_max_regions()); 277 - } 278 - } 279 - #else 280 - static void adjust_lowmem_bounds_mpu(void) {} 281 - static void __init mpu_setup(void) {} 282 - #endif /* CONFIG_ARM_MPU */ 30 + #endif 283 31 284 32 #ifdef CONFIG_CPU_CP15 285 33 #ifdef CONFIG_CPU_HIGH_VECTOR
+484
arch/arm/mm/pmsa-v7.c
··· 1 + /* 2 + * Based on linux/arch/arm/mm/nommu.c 3 + * 4 + * ARM PMSAv7 supporting functions. 5 + */ 6 + 7 + #include <linux/bitops.h> 8 + #include <linux/memblock.h> 9 + 10 + #include <asm/cacheflush.h> 11 + #include <asm/cp15.h> 12 + #include <asm/cputype.h> 13 + #include <asm/mpu.h> 14 + #include <asm/sections.h> 15 + 16 + #include "mm.h" 17 + 18 + struct region { 19 + phys_addr_t base; 20 + phys_addr_t size; 21 + unsigned long subreg; 22 + }; 23 + 24 + static struct region __initdata mem[MPU_MAX_REGIONS]; 25 + #ifdef CONFIG_XIP_KERNEL 26 + static struct region __initdata xip[MPU_MAX_REGIONS]; 27 + #endif 28 + 29 + static unsigned int __initdata mpu_min_region_order; 30 + static unsigned int __initdata mpu_max_regions; 31 + 32 + static int __init __mpu_min_region_order(void); 33 + static int __init __mpu_max_regions(void); 34 + 35 + #ifndef CONFIG_CPU_V7M 36 + 37 + #define DRBAR __ACCESS_CP15(c6, 0, c1, 0) 38 + #define IRBAR __ACCESS_CP15(c6, 0, c1, 1) 39 + #define DRSR __ACCESS_CP15(c6, 0, c1, 2) 40 + #define IRSR __ACCESS_CP15(c6, 0, c1, 3) 41 + #define DRACR __ACCESS_CP15(c6, 0, c1, 4) 42 + #define IRACR __ACCESS_CP15(c6, 0, c1, 5) 43 + #define RNGNR __ACCESS_CP15(c6, 0, c2, 0) 44 + 45 + /* Region number */ 46 + static inline void rgnr_write(u32 v) 47 + { 48 + write_sysreg(v, RNGNR); 49 + } 50 + 51 + /* Data-side / unified region attributes */ 52 + 53 + /* Region access control register */ 54 + static inline void dracr_write(u32 v) 55 + { 56 + write_sysreg(v, DRACR); 57 + } 58 + 59 + /* Region size register */ 60 + static inline void drsr_write(u32 v) 61 + { 62 + write_sysreg(v, DRSR); 63 + } 64 + 65 + /* Region base address register */ 66 + static inline void drbar_write(u32 v) 67 + { 68 + write_sysreg(v, DRBAR); 69 + } 70 + 71 + static inline u32 drbar_read(void) 72 + { 73 + return read_sysreg(DRBAR); 74 + } 75 + /* Optional instruction-side region attributes */ 76 + 77 + /* I-side Region access control register */ 78 + static inline void iracr_write(u32 
v) 79 + { 80 + write_sysreg(v, IRACR); 81 + } 82 + 83 + /* I-side Region size register */ 84 + static inline void irsr_write(u32 v) 85 + { 86 + write_sysreg(v, IRSR); 87 + } 88 + 89 + /* I-side Region base address register */ 90 + static inline void irbar_write(u32 v) 91 + { 92 + write_sysreg(v, IRBAR); 93 + } 94 + 95 + static inline u32 irbar_read(void) 96 + { 97 + return read_sysreg(IRBAR); 98 + } 99 + 100 + #else 101 + 102 + static inline void rgnr_write(u32 v) 103 + { 104 + writel_relaxed(v, BASEADDR_V7M_SCB + MPU_RNR); 105 + } 106 + 107 + /* Data-side / unified region attributes */ 108 + 109 + /* Region access control register */ 110 + static inline void dracr_write(u32 v) 111 + { 112 + u32 rsr = readl_relaxed(BASEADDR_V7M_SCB + MPU_RASR) & GENMASK(15, 0); 113 + 114 + writel_relaxed((v << 16) | rsr, BASEADDR_V7M_SCB + MPU_RASR); 115 + } 116 + 117 + /* Region size register */ 118 + static inline void drsr_write(u32 v) 119 + { 120 + u32 racr = readl_relaxed(BASEADDR_V7M_SCB + MPU_RASR) & GENMASK(31, 16); 121 + 122 + writel_relaxed(v | racr, BASEADDR_V7M_SCB + MPU_RASR); 123 + } 124 + 125 + /* Region base address register */ 126 + static inline void drbar_write(u32 v) 127 + { 128 + writel_relaxed(v, BASEADDR_V7M_SCB + MPU_RBAR); 129 + } 130 + 131 + static inline u32 drbar_read(void) 132 + { 133 + return readl_relaxed(BASEADDR_V7M_SCB + MPU_RBAR); 134 + } 135 + 136 + /* ARMv7-M only supports a unified MPU, so I-side operations are nop */ 137 + 138 + static inline void iracr_write(u32 v) {} 139 + static inline void irsr_write(u32 v) {} 140 + static inline void irbar_write(u32 v) {} 141 + static inline unsigned long irbar_read(void) {return 0;} 142 + 143 + #endif 144 + 145 + static int __init mpu_present(void) 146 + { 147 + return ((read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA) == MMFR0_PMSAv7); 148 + } 149 + 150 + static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct region *region) 151 + { 152 + unsigned long subreg, bslots, sslots; 153 + 
phys_addr_t abase = base & ~(size - 1); 154 + phys_addr_t asize = base + size - abase; 155 + phys_addr_t p2size = 1 << __fls(asize); 156 + phys_addr_t bdiff, sdiff; 157 + 158 + if (p2size != asize) 159 + p2size *= 2; 160 + 161 + bdiff = base - abase; 162 + sdiff = p2size - asize; 163 + subreg = p2size / MPU_NR_SUBREGS; 164 + 165 + if ((bdiff % subreg) || (sdiff % subreg)) 166 + return false; 167 + 168 + bslots = bdiff / subreg; 169 + sslots = sdiff / subreg; 170 + 171 + if (bslots || sslots) { 172 + int i; 173 + 174 + if (subreg < MPU_MIN_SUBREG_SIZE) 175 + return false; 176 + 177 + if (bslots + sslots > MPU_NR_SUBREGS) 178 + return false; 179 + 180 + for (i = 0; i < bslots; i++) 181 + _set_bit(i, &region->subreg); 182 + 183 + for (i = 1; i <= sslots; i++) 184 + _set_bit(MPU_NR_SUBREGS - i, &region->subreg); 185 + } 186 + 187 + region->base = abase; 188 + region->size = p2size; 189 + 190 + return true; 191 + } 192 + 193 + static int __init allocate_region(phys_addr_t base, phys_addr_t size, 194 + unsigned int limit, struct region *regions) 195 + { 196 + int count = 0; 197 + phys_addr_t diff = size; 198 + int attempts = MPU_MAX_REGIONS; 199 + 200 + while (diff) { 201 + /* Try cover region as is (maybe with help of subregions) */ 202 + if (try_split_region(base, size, &regions[count])) { 203 + count++; 204 + base += size; 205 + diff -= size; 206 + size = diff; 207 + } else { 208 + /* 209 + * Maximum aligned region might overflow phys_addr_t 210 + * if "base" is 0. Hence we keep everything below 4G 211 + * until we take the smaller of the aligned region 212 + * size ("asize") and rounded region size ("p2size"), 213 + * one of which is guaranteed to be smaller than the 214 + * maximum physical address. 215 + */ 216 + phys_addr_t asize = (base - 1) ^ base; 217 + phys_addr_t p2size = (1 << __fls(diff)) - 1; 218 + 219 + size = asize < p2size ? 
asize + 1 : p2size + 1; 220 + } 221 + 222 + if (count > limit) 223 + break; 224 + 225 + if (!attempts) 226 + break; 227 + 228 + attempts--; 229 + } 230 + 231 + return count; 232 + } 233 + 234 + /* MPU initialisation functions */ 235 + void __init adjust_lowmem_bounds_mpu(void) 236 + { 237 + phys_addr_t specified_mem_size = 0, total_mem_size = 0; 238 + struct memblock_region *reg; 239 + bool first = true; 240 + phys_addr_t mem_start; 241 + phys_addr_t mem_end; 242 + unsigned int mem_max_regions; 243 + int num, i; 244 + 245 + if (!mpu_present()) 246 + return; 247 + 248 + /* Free-up MPU_PROBE_REGION */ 249 + mpu_min_region_order = __mpu_min_region_order(); 250 + 251 + /* How many regions are supported */ 252 + mpu_max_regions = __mpu_max_regions(); 253 + 254 + mem_max_regions = min((unsigned int)MPU_MAX_REGIONS, mpu_max_regions); 255 + 256 + /* We need to keep one slot for background region */ 257 + mem_max_regions--; 258 + 259 + #ifndef CONFIG_CPU_V7M 260 + /* ... and one for vectors */ 261 + mem_max_regions--; 262 + #endif 263 + 264 + #ifdef CONFIG_XIP_KERNEL 265 + /* plus some regions to cover XIP ROM */ 266 + num = allocate_region(CONFIG_XIP_PHYS_ADDR, __pa(_exiprom) - CONFIG_XIP_PHYS_ADDR, 267 + mem_max_regions, xip); 268 + 269 + mem_max_regions -= num; 270 + #endif 271 + 272 + for_each_memblock(memory, reg) { 273 + if (first) { 274 + phys_addr_t phys_offset = PHYS_OFFSET; 275 + 276 + /* 277 + * Initially only use memory continuous from 278 + * PHYS_OFFSET */ 279 + if (reg->base != phys_offset) 280 + panic("First memory bank must be contiguous from PHYS_OFFSET"); 281 + 282 + mem_start = reg->base; 283 + mem_end = reg->base + reg->size; 284 + specified_mem_size = reg->size; 285 + first = false; 286 + } else { 287 + /* 288 + * memblock auto merges contiguous blocks, remove 289 + * all blocks afterwards in one go (we can't remove 290 + * blocks separately while iterating) 291 + */ 292 + pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n", 293 + &mem_end, 
&reg->base); 294 + memblock_remove(reg->base, 0 - reg->base); 295 + break; 296 + } 297 + } 298 + 299 + num = allocate_region(mem_start, specified_mem_size, mem_max_regions, mem); 300 + 301 + for (i = 0; i < num; i++) { 302 + unsigned long subreg = mem[i].size / MPU_NR_SUBREGS; 303 + 304 + total_mem_size += mem[i].size - subreg * hweight_long(mem[i].subreg); 305 + 306 + pr_debug("MPU: base %pa size %pa disable subregions: %*pbl\n", 307 + &mem[i].base, &mem[i].size, MPU_NR_SUBREGS, &mem[i].subreg); 308 + } 309 + 310 + if (total_mem_size != specified_mem_size) { 311 + pr_warn("Truncating memory from %pa to %pa (MPU region constraints)", 312 + &specified_mem_size, &total_mem_size); 313 + memblock_remove(mem_start + total_mem_size, 314 + specified_mem_size - total_mem_size); 315 + } 316 + } 317 + 318 + static int __init __mpu_max_regions(void) 319 + { 320 + /* 321 + * We don't support a different number of I/D side regions so if we 322 + * have separate instruction and data memory maps then return 323 + * whichever side has a smaller number of supported regions. 
324 + */ 325 + u32 dregions, iregions, mpuir; 326 + 327 + mpuir = read_cpuid_mputype(); 328 + 329 + dregions = iregions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION; 330 + 331 + /* Check for separate d-side and i-side memory maps */ 332 + if (mpuir & MPUIR_nU) 333 + iregions = (mpuir & MPUIR_IREGION_SZMASK) >> MPUIR_IREGION; 334 + 335 + /* Use the smallest of the two maxima */ 336 + return min(dregions, iregions); 337 + } 338 + 339 + static int __init mpu_iside_independent(void) 340 + { 341 + /* MPUIR.nU specifies whether there is *not* a unified memory map */ 342 + return read_cpuid_mputype() & MPUIR_nU; 343 + } 344 + 345 + static int __init __mpu_min_region_order(void) 346 + { 347 + u32 drbar_result, irbar_result; 348 + 349 + /* We've kept a region free for this probing */ 350 + rgnr_write(MPU_PROBE_REGION); 351 + isb(); 352 + /* 353 + * As per ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum 354 + * region order 355 + */ 356 + drbar_write(0xFFFFFFFC); 357 + drbar_result = irbar_result = drbar_read(); 358 + drbar_write(0x0); 359 + /* If the MPU is non-unified, we use the larger of the two minima*/ 360 + if (mpu_iside_independent()) { 361 + irbar_write(0xFFFFFFFC); 362 + irbar_result = irbar_read(); 363 + irbar_write(0x0); 364 + } 365 + isb(); /* Ensure that MPU region operations have completed */ 366 + /* Return whichever result is larger */ 367 + 368 + return __ffs(max(drbar_result, irbar_result)); 369 + } 370 + 371 + static int __init mpu_setup_region(unsigned int number, phys_addr_t start, 372 + unsigned int size_order, unsigned int properties, 373 + unsigned int subregions, bool need_flush) 374 + { 375 + u32 size_data; 376 + 377 + /* We kept a region free for probing resolution of MPU regions*/ 378 + if (number > mpu_max_regions 379 + || number >= MPU_MAX_REGIONS) 380 + return -ENOENT; 381 + 382 + if (size_order > 32) 383 + return -ENOMEM; 384 + 385 + if (size_order < mpu_min_region_order) 386 + return -ENOMEM; 387 + 388 + /* Writing N to bits 5:1 
(RSR_SZ) specifies region size 2^N+1 */ 389 + size_data = ((size_order - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN; 390 + size_data |= subregions << MPU_RSR_SD; 391 + 392 + if (need_flush) 393 + flush_cache_all(); 394 + 395 + dsb(); /* Ensure all previous data accesses occur with old mappings */ 396 + rgnr_write(number); 397 + isb(); 398 + drbar_write(start); 399 + dracr_write(properties); 400 + isb(); /* Propagate properties before enabling region */ 401 + drsr_write(size_data); 402 + 403 + /* Check for independent I-side registers */ 404 + if (mpu_iside_independent()) { 405 + irbar_write(start); 406 + iracr_write(properties); 407 + isb(); 408 + irsr_write(size_data); 409 + } 410 + isb(); 411 + 412 + /* Store region info (we treat i/d side the same, so only store d) */ 413 + mpu_rgn_info.rgns[number].dracr = properties; 414 + mpu_rgn_info.rgns[number].drbar = start; 415 + mpu_rgn_info.rgns[number].drsr = size_data; 416 + 417 + mpu_rgn_info.used++; 418 + 419 + return 0; 420 + } 421 + 422 + /* 423 + * Set up default MPU regions, doing nothing if there is no MPU 424 + */ 425 + void __init mpu_setup(void) 426 + { 427 + int i, region = 0, err = 0; 428 + 429 + if (!mpu_present()) 430 + return; 431 + 432 + /* Setup MPU (order is important) */ 433 + 434 + /* Background */ 435 + err |= mpu_setup_region(region++, 0, 32, 436 + MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA, 437 + 0, false); 438 + 439 + #ifdef CONFIG_XIP_KERNEL 440 + /* ROM */ 441 + for (i = 0; i < ARRAY_SIZE(xip); i++) { 442 + /* 443 + * In case we overwrite RAM region we set earlier in 444 + * head-nommu.S (which is cachable) all subsequent 445 + * data access till we setup RAM bellow would be done 446 + * with BG region (which is uncachable), thus we need 447 + * to clean and invalidate cache. 
448 + */ 449 + bool need_flush = region == MPU_RAM_REGION; 450 + 451 + if (!xip[i].size) 452 + continue; 453 + 454 + err |= mpu_setup_region(region++, xip[i].base, ilog2(xip[i].size), 455 + MPU_AP_PL1RO_PL0NA | MPU_RGN_NORMAL, 456 + xip[i].subreg, need_flush); 457 + } 458 + #endif 459 + 460 + /* RAM */ 461 + for (i = 0; i < ARRAY_SIZE(mem); i++) { 462 + if (!mem[i].size) 463 + continue; 464 + 465 + err |= mpu_setup_region(region++, mem[i].base, ilog2(mem[i].size), 466 + MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL, 467 + mem[i].subreg, false); 468 + } 469 + 470 + /* Vectors */ 471 + #ifndef CONFIG_CPU_V7M 472 + err |= mpu_setup_region(region++, vectors_base, ilog2(2 * PAGE_SIZE), 473 + MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL, 474 + 0, false); 475 + #endif 476 + if (err) { 477 + panic("MPU region initialization failure! %d", err); 478 + } else { 479 + pr_info("Using ARMv7 PMSA Compliant MPU. " 480 + "Region independence: %s, Used %d of %d regions\n", 481 + mpu_iside_independent() ? "Yes" : "No", 482 + mpu_rgn_info.used, mpu_max_regions); 483 + } 484 + }
+20 -13
drivers/pcmcia/sa1111_generic.c
··· 63 63 #define IDX_IRQ_S1_READY_NINT (3) 64 64 #define IDX_IRQ_S1_CD_VALID (4) 65 65 #define IDX_IRQ_S1_BVD1_STSCHG (5) 66 + #define NUM_IRQS (6) 66 67 67 68 void sa1111_pcmcia_socket_state(struct soc_pcmcia_socket *skt, struct pcmcia_state *state) 68 69 { 69 70 struct sa1111_pcmcia_socket *s = to_skt(skt); 70 - unsigned long status = sa1111_readl(s->dev->mapbase + PCSR); 71 + u32 status = readl_relaxed(s->dev->mapbase + PCSR); 71 72 72 73 switch (skt->nr) { 73 74 case 0: ··· 96 95 int sa1111_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state) 97 96 { 98 97 struct sa1111_pcmcia_socket *s = to_skt(skt); 99 - unsigned int pccr_skt_mask, pccr_set_mask, val; 98 + u32 pccr_skt_mask, pccr_set_mask, val; 100 99 unsigned long flags; 101 100 102 101 switch (skt->nr) { ··· 124 123 pccr_set_mask |= PCCR_S0_FLT|PCCR_S1_FLT; 125 124 126 125 local_irq_save(flags); 127 - val = sa1111_readl(s->dev->mapbase + PCCR); 126 + val = readl_relaxed(s->dev->mapbase + PCCR); 128 127 val &= ~pccr_skt_mask; 129 128 val |= pccr_set_mask & pccr_skt_mask; 130 - sa1111_writel(val, s->dev->mapbase + PCCR); 129 + writel_relaxed(val, s->dev->mapbase + PCCR); 131 130 local_irq_restore(flags); 132 131 133 132 return 0; ··· 138 137 { 139 138 struct sa1111_pcmcia_socket *s; 140 139 struct clk *clk; 141 - int i, ret = 0; 140 + int i, ret = 0, irqs[NUM_IRQS]; 142 141 143 142 clk = devm_clk_get(&dev->dev, NULL); 144 143 if (IS_ERR(clk)) 145 144 return PTR_ERR(clk); 145 + 146 + for (i = 0; i < NUM_IRQS; i++) { 147 + irqs[i] = sa1111_get_irq(dev, i); 148 + if (irqs[i] <= 0) 149 + return irqs[i] ? 
: -ENXIO; 150 + } 146 151 147 152 ops->socket_state = sa1111_pcmcia_socket_state; 148 153 ··· 163 156 soc_pcmcia_init_one(&s->soc, ops, &dev->dev); 164 157 s->dev = dev; 165 158 if (s->soc.nr) { 166 - s->soc.socket.pci_irq = dev->irq[IDX_IRQ_S1_READY_NINT]; 167 - s->soc.stat[SOC_STAT_CD].irq = dev->irq[IDX_IRQ_S1_CD_VALID]; 159 + s->soc.socket.pci_irq = irqs[IDX_IRQ_S1_READY_NINT]; 160 + s->soc.stat[SOC_STAT_CD].irq = irqs[IDX_IRQ_S1_CD_VALID]; 168 161 s->soc.stat[SOC_STAT_CD].name = "SA1111 CF card detect"; 169 - s->soc.stat[SOC_STAT_BVD1].irq = dev->irq[IDX_IRQ_S1_BVD1_STSCHG]; 162 + s->soc.stat[SOC_STAT_BVD1].irq = irqs[IDX_IRQ_S1_BVD1_STSCHG]; 170 163 s->soc.stat[SOC_STAT_BVD1].name = "SA1111 CF BVD1"; 171 164 } else { 172 - s->soc.socket.pci_irq = dev->irq[IDX_IRQ_S0_READY_NINT]; 173 - s->soc.stat[SOC_STAT_CD].irq = dev->irq[IDX_IRQ_S0_CD_VALID]; 165 + s->soc.socket.pci_irq = irqs[IDX_IRQ_S0_READY_NINT]; 166 + s->soc.stat[SOC_STAT_CD].irq = irqs[IDX_IRQ_S0_CD_VALID]; 174 167 s->soc.stat[SOC_STAT_CD].name = "SA1111 PCMCIA card detect"; 175 - s->soc.stat[SOC_STAT_BVD1].irq = dev->irq[IDX_IRQ_S0_BVD1_STSCHG]; 168 + s->soc.stat[SOC_STAT_BVD1].irq = irqs[IDX_IRQ_S0_BVD1_STSCHG]; 176 169 s->soc.stat[SOC_STAT_BVD1].name = "SA1111 PCMCIA BVD1"; 177 170 } 178 171 ··· 208 201 /* 209 202 * Initialise the suspend state. 210 203 */ 211 - sa1111_writel(PCSSR_S0_SLEEP | PCSSR_S1_SLEEP, base + PCSSR); 212 - sa1111_writel(PCCR_S0_FLT | PCCR_S1_FLT, base + PCCR); 204 + writel_relaxed(PCSSR_S0_SLEEP | PCSSR_S1_SLEEP, base + PCSSR); 205 + writel_relaxed(PCCR_S0_FLT | PCCR_S1_FLT, base + PCCR); 213 206 214 207 ret = -ENODEV; 215 208 #ifdef CONFIG_SA1100_BADGE4
+2 -2
fs/Kconfig.binfmt
··· 34 34 35 35 config BINFMT_ELF_FDPIC 36 36 bool "Kernel support for FDPIC ELF binaries" 37 - default y 38 - depends on (FRV || BLACKFIN || (SUPERH32 && !MMU) || C6X) 37 + default y if !BINFMT_ELF 38 + depends on (ARM || FRV || BLACKFIN || (SUPERH32 && !MMU) || C6X) 39 39 select ELFCORE 40 40 help 41 41 ELF FDPIC binaries are based on ELF, but allow the individual load
+13 -2
fs/binfmt_elf.c
··· 51 51 #define user_siginfo_t siginfo_t 52 52 #endif 53 53 54 + /* That's for binfmt_elf_fdpic to deal with */ 55 + #ifndef elf_check_fdpic 56 + #define elf_check_fdpic(ex) false 57 + #endif 58 + 54 59 static int load_elf_binary(struct linux_binprm *bprm); 55 60 static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *, 56 61 int, int, unsigned long); ··· 546 541 if (interp_elf_ex->e_type != ET_EXEC && 547 542 interp_elf_ex->e_type != ET_DYN) 548 543 goto out; 549 - if (!elf_check_arch(interp_elf_ex)) 544 + if (!elf_check_arch(interp_elf_ex) || 545 + elf_check_fdpic(interp_elf_ex)) 550 546 goto out; 551 547 if (!interpreter->f_op->mmap) 552 548 goto out; ··· 724 718 goto out; 725 719 if (!elf_check_arch(&loc->elf_ex)) 726 720 goto out; 721 + if (elf_check_fdpic(&loc->elf_ex)) 722 + goto out; 727 723 if (!bprm->file->f_op->mmap) 728 724 goto out; 729 725 ··· 825 817 if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0) 826 818 goto out_free_dentry; 827 819 /* Verify the interpreter has a valid arch */ 828 - if (!elf_check_arch(&loc->interp_elf_ex)) 820 + if (!elf_check_arch(&loc->interp_elf_ex) || 821 + elf_check_fdpic(&loc->interp_elf_ex)) 829 822 goto out_free_dentry; 830 823 831 824 /* Load the interpreter program headers */ ··· 1198 1189 /* First of all, some simple consistency checks */ 1199 1190 if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 || 1200 1191 !elf_check_arch(&elf_ex) || !file->f_op->mmap) 1192 + goto out; 1193 + if (elf_check_fdpic(&elf_ex)) 1201 1194 goto out; 1202 1195 1203 1196 /* Now read in all of the header information */
+11 -2
fs/binfmt_elf_fdpic.c
··· 378 378 executable_stack); 379 379 if (retval < 0) 380 380 goto error; 381 + #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES 382 + retval = arch_setup_additional_pages(bprm, !!interpreter_name); 383 + if (retval < 0) 384 + goto error; 385 + #endif 381 386 #endif 382 387 383 388 /* load the executable and interpreter into memory */ ··· 836 831 if (phdr->p_vaddr >= seg->p_vaddr && 837 832 phdr->p_vaddr + phdr->p_memsz <= 838 833 seg->p_vaddr + seg->p_memsz) { 834 + Elf32_Dyn __user *dyn; 835 + Elf32_Sword d_tag; 836 + 839 837 params->dynamic_addr = 840 838 (phdr->p_vaddr - seg->p_vaddr) + 841 839 seg->addr; ··· 851 843 goto dynamic_error; 852 844 853 845 tmp = phdr->p_memsz / sizeof(Elf32_Dyn); 854 - if (((Elf32_Dyn *) 855 - params->dynamic_addr)[tmp - 1].d_tag != 0) 846 + dyn = (Elf32_Dyn __user *)params->dynamic_addr; 847 + __get_user(d_tag, &dyn[tmp - 1].d_tag); 848 + if (d_tag != 0) 856 849 goto dynamic_error; 857 850 break; 858 851 }