Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-next' of git://git.pengutronix.de/git/ukl/linux into devel-stable

Pull ARM-v7M support from Uwe Kleine-König:
"All but the last patch were in next since next-20130418 without issues.
The last patch fixes a problem in combination with

8164f7a (ARM: 7680/1: Detect support for SDIV/UDIV from ISAR0 register)

which triggers a WARN_ON without an implemented read_cpuid_ext.

The branch merges fine into v3.10-rc1 and I'd be happy if you pulled it
for 3.11-rc1. The only missing piece to be able to run a Cortex-M3 is
the irqchip driver that will go in via Thomas Gleixner and platform
specific stuff."

+742 -27
+2 -2
arch/arm/Kconfig
··· 9 9 select BUILDTIME_EXTABLE_SORT if MMU 10 10 select CPU_PM if (SUSPEND || CPU_IDLE) 11 11 select DCACHE_WORD_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && !CPU_BIG_ENDIAN && MMU 12 - select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI) 12 + select GENERIC_ATOMIC64 if (CPU_V7M || CPU_V6 || !CPU_32v6K || !AEABI) 13 13 select GENERIC_CLOCKEVENTS_BROADCAST if SMP 14 14 select GENERIC_IRQ_PROBE 15 15 select GENERIC_IRQ_SHOW ··· 1585 1585 1586 1586 config THUMB2_KERNEL 1587 1587 bool "Compile the kernel in Thumb-2 mode" if !CPU_THUMBONLY 1588 - depends on CPU_V7 && !CPU_V6 && !CPU_V6K 1588 + depends on (CPU_V7 || CPU_V7M) && !CPU_V6 && !CPU_V6K 1589 1589 default y if CPU_THUMBONLY 1590 1590 select AEABI 1591 1591 select ARM_ASM_UNIFIED
+1 -1
arch/arm/Kconfig-nommu
··· 28 28 config PROCESSOR_ID 29 29 hex 'Hard wire the processor ID' 30 30 default 0x00007700 31 - depends on !CPU_CP15 31 + depends on !(CPU_CP15 || CPU_V7M) 32 32 help 33 33 If processor has no CP15 register, this processor ID is 34 34 used instead of the auto-probing which utilizes the register.
+1
arch/arm/Makefile
··· 59 59 # Note that GCC does not numerically define an architecture version 60 60 # macro, but instead defines a whole series of macros which makes 61 61 # testing for a specific architecture or later rather impossible. 62 + arch-$(CONFIG_CPU_32v7M) :=-D__LINUX_ARM_ARCH__=7 -march=armv7-m -Wa,-march=armv7-m 62 63 arch-$(CONFIG_CPU_32v7) :=-D__LINUX_ARM_ARCH__=7 $(call cc-option,-march=armv7-a,-march=armv5t -Wa$(comma)-march=armv7-a) 63 64 arch-$(CONFIG_CPU_32v6) :=-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6,-march=armv5t -Wa$(comma)-march=armv6) 64 65 # Only override the compiler option if ARMv6. The ARMv6K extensions are
+16 -1
arch/arm/include/asm/assembler.h
··· 136 136 * assumes FIQs are enabled, and that the processor is in SVC mode. 137 137 */ 138 138 .macro save_and_disable_irqs, oldcpsr 139 + #ifdef CONFIG_CPU_V7M 140 + mrs \oldcpsr, primask 141 + #else 139 142 mrs \oldcpsr, cpsr 143 + #endif 140 144 disable_irq 141 145 .endm 142 146 ··· 154 150 * guarantee that this will preserve the flags. 155 151 */ 156 152 .macro restore_irqs_notrace, oldcpsr 153 + #ifdef CONFIG_CPU_V7M 154 + msr primask, \oldcpsr 155 + #else 157 156 msr cpsr_c, \oldcpsr 157 + #endif 158 158 .endm 159 159 160 160 .macro restore_irqs, oldcpsr ··· 237 229 #endif 238 230 .endm 239 231 240 - #ifdef CONFIG_THUMB2_KERNEL 232 + #if defined(CONFIG_CPU_V7M) 233 + /* 234 + * setmode is used to assert to be in svc mode during boot. For v7-M 235 + * this is done in __v7m_setup, so setmode can be empty here. 236 + */ 237 + .macro setmode, mode, reg 238 + .endm 239 + #elif defined(CONFIG_THUMB2_KERNEL) 241 240 .macro setmode, mode, reg 242 241 mov \reg, #\mode 243 242 msr cpsr_c, \reg
+42 -2
arch/arm/include/asm/cputype.h
··· 10 10 #define CPUID_TLBTYPE 3 11 11 #define CPUID_MPIDR 5 12 12 13 + #ifdef CONFIG_CPU_V7M 14 + #define CPUID_EXT_PFR0 0x40 15 + #define CPUID_EXT_PFR1 0x44 16 + #define CPUID_EXT_DFR0 0x48 17 + #define CPUID_EXT_AFR0 0x4c 18 + #define CPUID_EXT_MMFR0 0x50 19 + #define CPUID_EXT_MMFR1 0x54 20 + #define CPUID_EXT_MMFR2 0x58 21 + #define CPUID_EXT_MMFR3 0x5c 22 + #define CPUID_EXT_ISAR0 0x60 23 + #define CPUID_EXT_ISAR1 0x64 24 + #define CPUID_EXT_ISAR2 0x68 25 + #define CPUID_EXT_ISAR3 0x6c 26 + #define CPUID_EXT_ISAR4 0x70 27 + #define CPUID_EXT_ISAR5 0x74 28 + #else 13 29 #define CPUID_EXT_PFR0 "c1, 0" 14 30 #define CPUID_EXT_PFR1 "c1, 1" 15 31 #define CPUID_EXT_DFR0 "c1, 2" ··· 40 24 #define CPUID_EXT_ISAR3 "c2, 3" 41 25 #define CPUID_EXT_ISAR4 "c2, 4" 42 26 #define CPUID_EXT_ISAR5 "c2, 5" 27 + #endif 43 28 44 29 #define MPIDR_SMP_BITMASK (0x3 << 30) 45 30 #define MPIDR_SMP_VALUE (0x2 << 30) ··· 96 79 __val; \ 97 80 }) 98 81 99 - #else /* ifdef CONFIG_CPU_CP15 */ 82 + #elif defined(CONFIG_CPU_V7M) 83 + 84 + #include <asm/io.h> 85 + #include <asm/v7m.h> 86 + 87 + #define read_cpuid(reg) \ 88 + ({ \ 89 + WARN_ON_ONCE(1); \ 90 + 0; \ 91 + }) 92 + 93 + static inline unsigned int __attribute_const__ read_cpuid_ext(unsigned offset) 94 + { 95 + return readl(BASEADDR_V7M_SCB + offset); 96 + } 97 + 98 + #else /* ifdef CONFIG_CPU_CP15 / elif defined (CONFIG_CPU_V7M) */ 100 99 101 100 /* 102 101 * read_cpuid and read_cpuid_ext should only ever be called on machines that ··· 139 106 return read_cpuid(CPUID_ID); 140 107 } 141 108 142 - #else /* ifdef CONFIG_CPU_CP15 */ 109 + #elif defined(CONFIG_CPU_V7M) 110 + 111 + static inline unsigned int __attribute_const__ read_cpuid_id(void) 112 + { 113 + return readl(BASEADDR_V7M_SCB + V7M_SCB_CPUID); 114 + } 115 + 116 + #else /* ifdef CONFIG_CPU_CP15 / elif defined(CONFIG_CPU_V7M) */ 143 117 144 118 static inline unsigned int __attribute_const__ read_cpuid_id(void) 145 119 {
+27
arch/arm/include/asm/glue-cache.h
··· 117 117 # endif 118 118 #endif 119 119 120 + #if defined(CONFIG_CPU_V7M) 121 + # ifdef _CACHE 122 + # define MULTI_CACHE 1 123 + # else 124 + # define _CACHE nop 125 + # endif 126 + #endif 127 + 120 128 #if !defined(_CACHE) && !defined(MULTI_CACHE) 121 129 #error Unknown cache maintenance model 130 + #endif 131 + 132 + #ifndef __ASSEMBLER__ 133 + extern inline void nop_flush_icache_all(void) { } 134 + extern inline void nop_flush_kern_cache_all(void) { } 135 + extern inline void nop_flush_kern_cache_louis(void) { } 136 + extern inline void nop_flush_user_cache_all(void) { } 137 + extern inline void nop_flush_user_cache_range(unsigned long a, 138 + unsigned long b, unsigned int c) { } 139 + 140 + extern inline void nop_coherent_kern_range(unsigned long a, unsigned long b) { } 141 + extern inline int nop_coherent_user_range(unsigned long a, 142 + unsigned long b) { return 0; } 143 + extern inline void nop_flush_kern_dcache_area(void *a, size_t s) { } 144 + 145 + extern inline void nop_dma_flush_range(const void *a, const void *b) { } 146 + 147 + extern inline void nop_dma_map_area(const void *s, size_t l, int f) { } 148 + extern inline void nop_dma_unmap_area(const void *s, size_t l, int f) { } 122 149 #endif 123 150 124 151 #ifndef MULTI_CACHE
+8
arch/arm/include/asm/glue-df.h
··· 95 95 # endif 96 96 #endif 97 97 98 + #ifdef CONFIG_CPU_ABRT_NOMMU 99 + # ifdef CPU_DABORT_HANDLER 100 + # define MULTI_DABORT 1 101 + # else 102 + # define CPU_DABORT_HANDLER nommu_early_abort 103 + # endif 104 + #endif 105 + 98 106 #ifndef CPU_DABORT_HANDLER 99 107 #error Unknown data abort handler type 100 108 #endif
+9
arch/arm/include/asm/glue-proc.h
··· 230 230 # endif 231 231 #endif 232 232 233 + #ifdef CONFIG_CPU_V7M 234 + # ifdef CPU_NAME 235 + # undef MULTI_CPU 236 + # define MULTI_CPU 237 + # else 238 + # define CPU_NAME cpu_v7m 239 + # endif 240 + #endif 241 + 233 242 #ifndef MULTI_CPU 234 243 #define cpu_proc_init __glue(CPU_NAME,_proc_init) 235 244 #define cpu_proc_fin __glue(CPU_NAME,_proc_fin)
+16 -6
arch/arm/include/asm/irqflags.h
··· 8 8 /* 9 9 * CPU interrupt mask handling. 10 10 */ 11 + #ifdef CONFIG_CPU_V7M 12 + #define IRQMASK_REG_NAME_R "primask" 13 + #define IRQMASK_REG_NAME_W "primask" 14 + #define IRQMASK_I_BIT 1 15 + #else 16 + #define IRQMASK_REG_NAME_R "cpsr" 17 + #define IRQMASK_REG_NAME_W "cpsr_c" 18 + #define IRQMASK_I_BIT PSR_I_BIT 19 + #endif 20 + 11 21 #if __LINUX_ARM_ARCH__ >= 6 12 22 13 23 static inline unsigned long arch_local_irq_save(void) ··· 25 15 unsigned long flags; 26 16 27 17 asm volatile( 28 - " mrs %0, cpsr @ arch_local_irq_save\n" 18 + " mrs %0, " IRQMASK_REG_NAME_R " @ arch_local_irq_save\n" 29 19 " cpsid i" 30 20 : "=r" (flags) : : "memory", "cc"); 31 21 return flags; ··· 139 129 { 140 130 unsigned long flags; 141 131 asm volatile( 142 - " mrs %0, cpsr @ local_save_flags" 132 + " mrs %0, " IRQMASK_REG_NAME_R " @ local_save_flags" 143 133 : "=r" (flags) : : "memory", "cc"); 144 134 return flags; 145 135 } ··· 150 140 static inline void arch_local_irq_restore(unsigned long flags) 151 141 { 152 142 asm volatile( 153 - " msr cpsr_c, %0 @ local_irq_restore" 143 + " msr " IRQMASK_REG_NAME_W ", %0 @ local_irq_restore" 154 144 : 155 145 : "r" (flags) 156 146 : "memory", "cc"); ··· 158 148 159 149 static inline int arch_irqs_disabled_flags(unsigned long flags) 160 150 { 161 - return flags & PSR_I_BIT; 151 + return flags & IRQMASK_I_BIT; 162 152 } 163 153 164 - #endif 165 - #endif 154 + #endif /* ifdef __KERNEL__ */ 155 + #endif /* ifndef __ASM_ARM_IRQFLAGS_H */
+4
arch/arm/include/asm/ptrace.h
··· 45 45 */ 46 46 static inline int valid_user_regs(struct pt_regs *regs) 47 47 { 48 + #ifndef CONFIG_CPU_V7M 48 49 unsigned long mode = regs->ARM_cpsr & MODE_MASK; 49 50 50 51 /* ··· 68 67 regs->ARM_cpsr |= USR_MODE; 69 68 70 69 return 0; 70 + #else /* ifndef CONFIG_CPU_V7M */ 71 + return 1; 72 + #endif 71 73 } 72 74 73 75 static inline long regs_return_value(struct pt_regs *regs)
+1
arch/arm/include/asm/system_info.h
··· 11 11 #define CPU_ARCH_ARMv5TEJ 7 12 12 #define CPU_ARCH_ARMv6 8 13 13 #define CPU_ARCH_ARMv7 9 14 + #define CPU_ARCH_ARMv7M 10 14 15 15 16 #ifndef __ASSEMBLY__ 16 17
+44
arch/arm/include/asm/v7m.h
··· 1 + /* 2 + * Common defines for v7m cpus 3 + */ 4 + #define V7M_SCS_ICTR IOMEM(0xe000e004) 5 + #define V7M_SCS_ICTR_INTLINESNUM_MASK 0x0000000f 6 + 7 + #define BASEADDR_V7M_SCB IOMEM(0xe000ed00) 8 + 9 + #define V7M_SCB_CPUID 0x00 10 + 11 + #define V7M_SCB_ICSR 0x04 12 + #define V7M_SCB_ICSR_PENDSVSET (1 << 28) 13 + #define V7M_SCB_ICSR_PENDSVCLR (1 << 27) 14 + #define V7M_SCB_ICSR_RETTOBASE (1 << 11) 15 + 16 + #define V7M_SCB_VTOR 0x08 17 + 18 + #define V7M_SCB_SCR 0x10 19 + #define V7M_SCB_SCR_SLEEPDEEP (1 << 2) 20 + 21 + #define V7M_SCB_CCR 0x14 22 + #define V7M_SCB_CCR_STKALIGN (1 << 9) 23 + 24 + #define V7M_SCB_SHPR2 0x1c 25 + #define V7M_SCB_SHPR3 0x20 26 + 27 + #define V7M_SCB_SHCSR 0x24 28 + #define V7M_SCB_SHCSR_USGFAULTENA (1 << 18) 29 + #define V7M_SCB_SHCSR_BUSFAULTENA (1 << 17) 30 + #define V7M_SCB_SHCSR_MEMFAULTENA (1 << 16) 31 + 32 + #define V7M_xPSR_FRAMEPTRALIGN 0x00000200 33 + #define V7M_xPSR_EXCEPTIONNO 0x000001ff 34 + 35 + /* 36 + * When branching to an address that has bits [31:28] == 0xf an exception return 37 + * occurs. Bits [27:5] are reserved (SBOP). If the processor implements the FP 38 + * extension Bit [4] defines if the exception frame has space allocated for FP 39 + * state information, SBOP otherwise. Bit [3] defines the mode that is returned 40 + * to (0 -> handler mode; 1 -> thread mode). Bit [2] defines which sp is used 41 + * (0 -> msp; 1 -> psp). Bits [1:0] are fixed to 0b01. 42 + */ 43 + #define EXC_RET_STACK_MASK 0x00000004 44 + #define EXC_RET_THREADMODE_PROCESSSTACK 0xfffffffd
+27 -8
arch/arm/include/uapi/asm/ptrace.h
··· 34 34 35 35 /* 36 36 * PSR bits 37 + * Note on V7M there is no mode contained in the PSR 37 38 */ 38 39 #define USR26_MODE 0x00000000 39 40 #define FIQ26_MODE 0x00000001 40 41 #define IRQ26_MODE 0x00000002 41 42 #define SVC26_MODE 0x00000003 43 + #if defined(__KERNEL__) && defined(CONFIG_CPU_V7M) 44 + /* 45 + * Use 0 here to get code right that creates a userspace 46 + * or kernel space thread. 47 + */ 48 + #define USR_MODE 0x00000000 49 + #define SVC_MODE 0x00000000 50 + #else 42 51 #define USR_MODE 0x00000010 52 + #define SVC_MODE 0x00000013 53 + #endif 43 54 #define FIQ_MODE 0x00000011 44 55 #define IRQ_MODE 0x00000012 45 - #define SVC_MODE 0x00000013 46 56 #define ABT_MODE 0x00000017 47 57 #define HYP_MODE 0x0000001a 48 58 #define UND_MODE 0x0000001b 49 59 #define SYSTEM_MODE 0x0000001f 50 60 #define MODE32_BIT 0x00000010 51 61 #define MODE_MASK 0x0000001f 52 - #define PSR_T_BIT 0x00000020 53 - #define PSR_F_BIT 0x00000040 54 - #define PSR_I_BIT 0x00000080 55 - #define PSR_A_BIT 0x00000100 56 - #define PSR_E_BIT 0x00000200 57 - #define PSR_J_BIT 0x01000000 58 - #define PSR_Q_BIT 0x08000000 62 + 63 + #define V4_PSR_T_BIT 0x00000020 /* >= V4T, but not V7M */ 64 + #define V7M_PSR_T_BIT 0x01000000 65 + #if defined(__KERNEL__) && defined(CONFIG_CPU_V7M) 66 + #define PSR_T_BIT V7M_PSR_T_BIT 67 + #else 68 + /* for compatibility */ 69 + #define PSR_T_BIT V4_PSR_T_BIT 70 + #endif 71 + 72 + #define PSR_F_BIT 0x00000040 /* >= V4, but not V7M */ 73 + #define PSR_I_BIT 0x00000080 /* >= V4, but not V7M */ 74 + #define PSR_A_BIT 0x00000100 /* >= V6, but not V7M */ 75 + #define PSR_E_BIT 0x00000200 /* >= V6, but not V7M */ 76 + #define PSR_J_BIT 0x01000000 /* >= V5J, but not V7M */ 77 + #define PSR_Q_BIT 0x08000000 /* >= V5E, including V7M */ 59 78 #define PSR_V_BIT 0x10000000 60 79 #define PSR_C_BIT 0x20000000 61 80 #define PSR_Z_BIT 0x40000000
+7 -1
arch/arm/kernel/Makefile
··· 15 15 16 16 # Object file lists. 17 17 18 - obj-y := elf.o entry-armv.o entry-common.o irq.o opcodes.o \ 18 + obj-y := elf.o entry-common.o irq.o opcodes.o \ 19 19 process.o ptrace.o return_address.o sched_clock.o \ 20 20 setup.o signal.o stacktrace.o sys_arm.o time.o traps.o 21 21 22 22 obj-$(CONFIG_ATAGS) += atags_parse.o 23 23 obj-$(CONFIG_ATAGS_PROC) += atags_proc.o 24 24 obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o 25 + 26 + ifeq ($(CONFIG_CPU_V7M),y) 27 + obj-y += entry-v7m.o 28 + else 29 + obj-y += entry-armv.o 30 + endif 25 31 26 32 obj-$(CONFIG_OC_ETM) += etm.o 27 33 obj-$(CONFIG_CPU_IDLE) += cpuidle.o
+4
arch/arm/kernel/entry-common.S
··· 350 350 351 351 .align 5 352 352 ENTRY(vector_swi) 353 + #ifdef CONFIG_CPU_V7M 354 + v7m_exception_entry 355 + #else 353 356 sub sp, sp, #S_FRAME_SIZE 354 357 stmia sp, {r0 - r12} @ Calling r0 - r12 355 358 ARM( add r8, sp, #S_PC ) ··· 363 360 str lr, [sp, #S_PC] @ Save calling PC 364 361 str r8, [sp, #S_PSR] @ Save CPSR 365 362 str r0, [sp, #S_OLD_R0] @ Save OLD_R0 363 + #endif 366 364 zero_fp 367 365 368 366 /*
+124
arch/arm/kernel/entry-header.S
··· 5 5 #include <asm/asm-offsets.h> 6 6 #include <asm/errno.h> 7 7 #include <asm/thread_info.h> 8 + #include <asm/v7m.h> 8 9 9 10 @ Bad Abort numbers 10 11 @ ----------------- ··· 44 43 mcr p15, 0, \rtemp, c1, c0 45 44 #endif 46 45 .endm 46 + 47 + #ifdef CONFIG_CPU_V7M 48 + /* 49 + * ARMv7-M exception entry/exit macros. 50 + * 51 + * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are 52 + * automatically saved on the current stack (32 words) before 53 + * switching to the exception stack (SP_main). 54 + * 55 + * If exception is taken while in user mode, SP_main is 56 + * empty. Otherwise, SP_main is aligned to 64 bit automatically 57 + * (CCR.STKALIGN set). 58 + * 59 + * Linux assumes that the interrupts are disabled when entering an 60 + * exception handler and it may BUG if this is not the case. Interrupts 61 + * are disabled during entry and reenabled in the exit macro. 62 + * 63 + * v7m_exception_slow_exit is used when returning from SVC or PendSV. 64 + * When returning to kernel mode, we don't return from exception. 65 + */ 66 + .macro v7m_exception_entry 67 + @ determine the location of the registers saved by the core during 68 + @ exception entry. Depending on the mode the cpu was in when the 69 + @ exception happened that is either on the main or the process stack. 70 + @ Bit 2 of EXC_RETURN stored in the lr register specifies which stack 71 + @ was used. 72 + tst lr, #EXC_RET_STACK_MASK 73 + mrsne r12, psp 74 + moveq r12, sp 75 + 76 + @ we cannot rely on r0-r3 and r12 matching the value saved in the 77 + @ exception frame because of tail-chaining. So these have to be 78 + @ reloaded. 79 + ldmia r12!, {r0-r3} 80 + 81 + @ Linux expects to have irqs off. Do it here before taking stack space 82 + cpsid i 83 + 84 + sub sp, #S_FRAME_SIZE-S_IP 85 + stmdb sp!, {r0-r11} 86 + 87 + @ load saved r12, lr, return address and xPSR. 88 + @ r0-r7 are used for signals and never touched from now on. Clobbering 89 + r8-r12 is OK. 
90 + mov r9, r12 91 + ldmia r9!, {r8, r10-r12} 92 + 93 + @ calculate the original stack pointer value. 94 + @ r9 currently points to the memory location just above the auto saved 95 + @ xPSR. 96 + @ The cpu might automatically 8-byte align the stack. Bit 9 97 + @ of the saved xPSR specifies if stack aligning took place. In this case 98 + @ another 32-bit value is included in the stack. 99 + 100 + tst r12, V7M_xPSR_FRAMEPTRALIGN 101 + addne r9, r9, #4 102 + 103 + @ store saved r12 using str to have a register to hold the base for stm 104 + str r8, [sp, #S_IP] 105 + add r8, sp, #S_SP 106 + @ store r13-r15, xPSR 107 + stmia r8!, {r9-r12} 108 + @ store old_r0 109 + str r0, [r8] 110 + .endm 111 + 112 + /* 113 + * PENDSV and SVCALL are configured to have the same exception 114 + * priorities. As a kernel thread runs at SVCALL execution priority it 115 + * can never be preempted and so we will never have to return to a 116 + * kernel thread here. 117 + */ 118 + .macro v7m_exception_slow_exit ret_r0 119 + cpsid i 120 + ldr lr, =EXC_RET_THREADMODE_PROCESSSTACK 121 + 122 + @ read original r12, sp, lr, pc and xPSR 123 + add r12, sp, #S_IP 124 + ldmia r12, {r1-r5} 125 + 126 + @ an exception frame is always 8-byte aligned. To tell the hardware if 127 + @ the sp to be restored is aligned or not set bit 9 of the saved xPSR 128 + @ accordingly. 
129 + tst r2, #4 130 + subne r2, r2, #4 131 + orrne r5, V7M_xPSR_FRAMEPTRALIGN 132 + biceq r5, V7M_xPSR_FRAMEPTRALIGN 133 + 134 + @ write basic exception frame 135 + stmdb r2!, {r1, r3-r5} 136 + ldmia sp, {r1, r3-r5} 137 + .if \ret_r0 138 + stmdb r2!, {r0, r3-r5} 139 + .else 140 + stmdb r2!, {r1, r3-r5} 141 + .endif 142 + 143 + @ restore process sp 144 + msr psp, r2 145 + 146 + @ restore original r4-r11 147 + ldmia sp!, {r0-r11} 148 + 149 + @ restore main sp 150 + add sp, sp, #S_FRAME_SIZE-S_IP 151 + 152 + cpsie i 153 + bx lr 154 + .endm 155 + #endif /* CONFIG_CPU_V7M */ 47 156 48 157 @ 49 158 @ Store/load the USER SP and LR registers by switching to the SYS ··· 276 165 rfeia sp! 277 166 .endm 278 167 168 + #ifdef CONFIG_CPU_V7M 169 + /* 170 + * Note we don't need to do clrex here as clearing the local monitor is 171 + * part of each exception entry and exit sequence. 172 + */ 173 + .macro restore_user_regs, fast = 0, offset = 0 174 + .if \offset 175 + add sp, #\offset 176 + .endif 177 + v7m_exception_slow_exit ret_r0 = \fast 178 + .endm 179 + #else /* ifdef CONFIG_CPU_V7M */ 279 180 .macro restore_user_regs, fast = 0, offset = 0 280 181 clrex @ clear the exclusive monitor 281 182 mov r2, sp ··· 304 181 add sp, sp, #S_FRAME_SIZE - S_SP 305 182 movs pc, lr @ return & move spsr_svc into cpsr 306 183 .endm 184 + #endif /* ifdef CONFIG_CPU_V7M / else */ 307 185 308 186 .macro get_thread_info, rd 309 187 mov \rd, sp
+143
arch/arm/kernel/entry-v7m.S
··· 1 + /* 2 + * linux/arch/arm/kernel/entry-v7m.S 3 + * 4 + * Copyright (C) 2008 ARM Ltd. 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + * 10 + * Low-level vector interface routines for the ARMv7-M architecture 11 + */ 12 + #include <asm/memory.h> 13 + #include <asm/glue.h> 14 + #include <asm/thread_notify.h> 15 + #include <asm/v7m.h> 16 + 17 + #include <mach/entry-macro.S> 18 + 19 + #include "entry-header.S" 20 + 21 + #ifdef CONFIG_TRACE_IRQFLAGS 22 + #error "CONFIG_TRACE_IRQFLAGS not supported on the current ARMv7M implementation" 23 + #endif 24 + 25 + __invalid_entry: 26 + v7m_exception_entry 27 + adr r0, strerr 28 + mrs r1, ipsr 29 + mov r2, lr 30 + bl printk 31 + mov r0, sp 32 + bl show_regs 33 + 1: b 1b 34 + ENDPROC(__invalid_entry) 35 + 36 + strerr: .asciz "\nUnhandled exception: IPSR = %08lx LR = %08lx\n" 37 + 38 + .align 2 39 + __irq_entry: 40 + v7m_exception_entry 41 + 42 + @ 43 + @ Invoke the IRQ handler 44 + @ 45 + mrs r0, ipsr 46 + ldr r1, =V7M_xPSR_EXCEPTIONNO 47 + and r0, r1 48 + sub r0, #16 49 + mov r1, sp 50 + stmdb sp!, {lr} 51 + @ routine called with r0 = irq number, r1 = struct pt_regs * 52 + bl nvic_do_IRQ 53 + 54 + pop {lr} 55 + @ 56 + @ Check for any pending work if returning to user 57 + @ 58 + ldr r1, =BASEADDR_V7M_SCB 59 + ldr r0, [r1, V7M_SCB_ICSR] 60 + tst r0, V7M_SCB_ICSR_RETTOBASE 61 + beq 2f 62 + 63 + get_thread_info tsk 64 + ldr r2, [tsk, #TI_FLAGS] 65 + tst r2, #_TIF_WORK_MASK 66 + beq 2f @ no work pending 67 + mov r0, #V7M_SCB_ICSR_PENDSVSET 68 + str r0, [r1, V7M_SCB_ICSR] @ raise PendSV 69 + 70 + 2: 71 + @ registers r0-r3 and r12 are automatically restored on exception 72 + @ return. r4-r7 were not clobbered in v7m_exception_entry so for 73 + @ correctness they don't need to be restored. So only r8-r11 must be 74 + @ restored here. 
The easiest way to do so is to restore r0-r7, too. 75 + ldmia sp!, {r0-r11} 76 + add sp, #S_FRAME_SIZE-S_IP 77 + cpsie i 78 + bx lr 79 + ENDPROC(__irq_entry) 80 + 81 + __pendsv_entry: 82 + v7m_exception_entry 83 + 84 + ldr r1, =BASEADDR_V7M_SCB 85 + mov r0, #V7M_SCB_ICSR_PENDSVCLR 86 + str r0, [r1, V7M_SCB_ICSR] @ clear PendSV 87 + 88 + @ execute the pending work, including reschedule 89 + get_thread_info tsk 90 + mov why, #0 91 + b ret_to_user 92 + ENDPROC(__pendsv_entry) 93 + 94 + /* 95 + * Register switch for ARMv7-M processors. 96 + * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info 97 + * previous and next are guaranteed not to be the same. 98 + */ 99 + ENTRY(__switch_to) 100 + .fnstart 101 + .cantunwind 102 + add ip, r1, #TI_CPU_SAVE 103 + stmia ip!, {r4 - r11} @ Store most regs on stack 104 + str sp, [ip], #4 105 + str lr, [ip], #4 106 + mov r5, r0 107 + add r4, r2, #TI_CPU_SAVE 108 + ldr r0, =thread_notify_head 109 + mov r1, #THREAD_NOTIFY_SWITCH 110 + bl atomic_notifier_call_chain 111 + mov ip, r4 112 + mov r0, r5 113 + ldmia ip!, {r4 - r11} @ Load all regs saved previously 114 + ldr sp, [ip] 115 + ldr pc, [ip, #4]! 
116 + .fnend 117 + ENDPROC(__switch_to) 118 + 119 + .data 120 + .align 8 121 + /* 122 + * Vector table (64 words => 256 bytes natural alignment) 123 + */ 124 + ENTRY(vector_table) 125 + .long 0 @ 0 - Reset stack pointer 126 + .long __invalid_entry @ 1 - Reset 127 + .long __invalid_entry @ 2 - NMI 128 + .long __invalid_entry @ 3 - HardFault 129 + .long __invalid_entry @ 4 - MemManage 130 + .long __invalid_entry @ 5 - BusFault 131 + .long __invalid_entry @ 6 - UsageFault 132 + .long __invalid_entry @ 7 - Reserved 133 + .long __invalid_entry @ 8 - Reserved 134 + .long __invalid_entry @ 9 - Reserved 135 + .long __invalid_entry @ 10 - Reserved 136 + .long vector_swi @ 11 - SVCall 137 + .long __invalid_entry @ 12 - Debug Monitor 138 + .long __invalid_entry @ 13 - Reserved 139 + .long __pendsv_entry @ 14 - PendSV 140 + .long __invalid_entry @ 15 - SysTick 141 + .rept 64 - 16 142 + .long __irq_entry @ 16..64 - External Interrupts 143 + .endr
+7 -3
arch/arm/kernel/head-nommu.S
··· 19 19 #include <asm/asm-offsets.h> 20 20 #include <asm/cp15.h> 21 21 #include <asm/thread_info.h> 22 + #include <asm/v7m.h> 22 23 23 24 /* 24 25 * Kernel startup entry point. ··· 51 50 52 51 setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode 53 52 @ and irqs disabled 54 - #ifndef CONFIG_CPU_CP15 55 - ldr r9, =CONFIG_PROCESSOR_ID 56 - #else 53 + #if defined(CONFIG_CPU_CP15) 57 54 mrc p15, 0, r9, c0, c0 @ get processor id 55 + #elif defined(CONFIG_CPU_V7M) 56 + ldr r9, =BASEADDR_V7M_SCB 57 + ldr r9, [r9, V7M_SCB_CPUID] 58 + #else 59 + ldr r9, =CONFIG_PROCESSOR_ID 58 60 #endif 59 61 bl __lookup_processor_type @ r5=procinfo r9=cpuid 60 62 movs r10, r5 @ invalid processor (r5=0)?
+15 -2
arch/arm/kernel/setup.c
··· 128 128 u32 und[3]; 129 129 } ____cacheline_aligned; 130 130 131 + #ifndef CONFIG_CPU_V7M 131 132 static struct stack stacks[NR_CPUS]; 133 + #endif 132 134 133 135 char elf_platform[ELF_PLATFORM_SIZE]; 134 136 EXPORT_SYMBOL(elf_platform); ··· 209 207 "5TEJ", 210 208 "6TEJ", 211 209 "7", 212 - "?(11)", 210 + "7M", 213 211 "?(12)", 214 212 "?(13)", 215 213 "?(14)", ··· 218 216 "?(17)", 219 217 }; 220 218 219 + #ifdef CONFIG_CPU_V7M 220 + static int __get_cpu_architecture(void) 221 + { 222 + return CPU_ARCH_ARMv7M; 223 + } 224 + #else 221 225 static int __get_cpu_architecture(void) 222 226 { 223 227 int cpu_arch; ··· 256 248 257 249 return cpu_arch; 258 250 } 251 + #endif 259 252 260 253 int __pure cpu_architecture(void) 261 254 { ··· 302 293 { 303 294 unsigned int arch = cpu_architecture(); 304 295 305 - if (arch >= CPU_ARCH_ARMv6) { 296 + if (arch == CPU_ARCH_ARMv7M) { 297 + cacheid = 0; 298 + } else if (arch >= CPU_ARCH_ARMv6) { 306 299 unsigned int cachetype = read_cpuid_cachetype(); 307 300 if ((cachetype & (7 << 29)) == 4 << 29) { 308 301 /* ARMv7 register format */ ··· 403 392 */ 404 393 void notrace cpu_init(void) 405 394 { 395 + #ifndef CONFIG_CPU_V7M 406 396 unsigned int cpu = smp_processor_id(); 407 397 struct stack *stk = &stacks[cpu]; 408 398 ··· 454 442 "I" (offsetof(struct stack, und[0])), 455 443 PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE) 456 444 : "r14"); 445 + #endif 457 446 } 458 447 459 448 int __cpu_logical_map[NR_CPUS];
+8
arch/arm/kernel/traps.c
··· 812 812 813 813 void __init early_trap_init(void *vectors_base) 814 814 { 815 + #ifndef CONFIG_CPU_V7M 815 816 unsigned long vectors = (unsigned long)vectors_base; 816 817 extern char __stubs_start[], __stubs_end[]; 817 818 extern char __vectors_start[], __vectors_end[]; ··· 844 843 845 844 flush_icache_range(vectors, vectors + PAGE_SIZE); 846 845 modify_domain(DOMAIN_USER, DOMAIN_CLIENT); 846 + #else /* ifndef CONFIG_CPU_V7M */ 847 + /* 848 + * on V7-M there is no need to copy the vector table to a dedicated 849 + * memory area. The address is configurable and so a table in the kernel 850 + * image can be used. 851 + */ 852 + #endif 847 853 }
+20 -1
arch/arm/mm/Kconfig
··· 397 397 select CPU_PABRT_V7 398 398 select CPU_TLB_V7 if MMU 399 399 400 + # ARMv7M 401 + config CPU_V7M 402 + bool 403 + select CPU_32v7M 404 + select CPU_ABRT_NOMMU 405 + select CPU_CACHE_NOP 406 + select CPU_PABRT_LEGACY 407 + select CPU_THUMBONLY 408 + 400 409 config CPU_THUMBONLY 401 410 bool 402 411 # There are no CPUs available with MMU that don't implement an ARM ISA: ··· 448 439 bool 449 440 450 441 config CPU_32v7 442 + bool 443 + 444 + config CPU_32v7M 451 445 bool 452 446 453 447 # The abort model ··· 501 489 bool 502 490 503 491 config CPU_CACHE_V7 492 + bool 493 + 494 + config CPU_CACHE_NOP 504 495 bool 505 496 506 497 config CPU_CACHE_VIVT ··· 628 613 629 614 config ARM_THUMB 630 615 bool "Support Thumb user binaries" if !CPU_THUMBONLY 631 - depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON 616 + depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || \ 617 + CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || \ 618 + CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || \ 619 + CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || \ 620 + CPU_V7 || CPU_FEROCEON || CPU_V7M 632 621 default y 633 622 help 634 623 Say Y if you want to include kernel support for running user space
+2
arch/arm/mm/Makefile
··· 39 39 obj-$(CONFIG_CPU_CACHE_V6) += cache-v6.o 40 40 obj-$(CONFIG_CPU_CACHE_V7) += cache-v7.o 41 41 obj-$(CONFIG_CPU_CACHE_FA) += cache-fa.o 42 + obj-$(CONFIG_CPU_CACHE_NOP) += cache-nop.o 42 43 43 44 AFLAGS_cache-v6.o :=-Wa,-march=armv6 44 45 AFLAGS_cache-v7.o :=-Wa,-march=armv7-a ··· 88 87 obj-$(CONFIG_CPU_V6) += proc-v6.o 89 88 obj-$(CONFIG_CPU_V6K) += proc-v6.o 90 89 obj-$(CONFIG_CPU_V7) += proc-v7.o 90 + obj-$(CONFIG_CPU_V7M) += proc-v7m.o 91 91 92 92 AFLAGS_proc-v6.o :=-Wa,-march=armv6 93 93 AFLAGS_proc-v7.o :=-Wa,-march=armv7-a
+50
arch/arm/mm/cache-nop.S
··· 1 + /* 2 + * This program is free software; you can redistribute it and/or modify 3 + * it under the terms of the GNU General Public License version 2 as 4 + * published by the Free Software Foundation. 5 + */ 6 + #include <linux/linkage.h> 7 + #include <linux/init.h> 8 + 9 + #include "proc-macros.S" 10 + 11 + ENTRY(nop_flush_icache_all) 12 + mov pc, lr 13 + ENDPROC(nop_flush_icache_all) 14 + 15 + .globl nop_flush_kern_cache_all 16 + .equ nop_flush_kern_cache_all, nop_flush_icache_all 17 + 18 + .globl nop_flush_kern_cache_louis 19 + .equ nop_flush_kern_cache_louis, nop_flush_icache_all 20 + 21 + .globl nop_flush_user_cache_all 22 + .equ nop_flush_user_cache_all, nop_flush_icache_all 23 + 24 + .globl nop_flush_user_cache_range 25 + .equ nop_flush_user_cache_range, nop_flush_icache_all 26 + 27 + .globl nop_coherent_kern_range 28 + .equ nop_coherent_kern_range, nop_flush_icache_all 29 + 30 + ENTRY(nop_coherent_user_range) 31 + mov r0, 0 32 + mov pc, lr 33 + ENDPROC(nop_coherent_user_range) 34 + 35 + .globl nop_flush_kern_dcache_area 36 + .equ nop_flush_kern_dcache_area, nop_flush_icache_all 37 + 38 + .globl nop_dma_flush_range 39 + .equ nop_dma_flush_range, nop_flush_icache_all 40 + 41 + .globl nop_dma_map_area 42 + .equ nop_dma_map_area, nop_flush_icache_all 43 + 44 + .globl nop_dma_unmap_area 45 + .equ nop_dma_unmap_area, nop_flush_icache_all 46 + 47 + __INITDATA 48 + 49 + @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 50 + define_cache_functions nop
+7
arch/arm/mm/nommu.c
··· 20 20 21 21 void __init arm_mm_memblock_reserve(void) 22 22 { 23 + #ifndef CONFIG_CPU_V7M 23 24 /* 24 25 * Register the exception vector page. 25 26 * some architectures which the DRAM is the exception vector to trap, 26 27 * alloc_page breaks with error, although it is not NULL, but "0." 27 28 */ 28 29 memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE); 30 + #else /* ifndef CONFIG_CPU_V7M */ 31 + /* 32 + * There is no dedicated vector page on V7-M. So nothing needs to be 33 + * reserved here. 34 + */ 35 + #endif 29 36 } 30 37 31 38 void __init sanity_check_meminfo(void)
+157
arch/arm/mm/proc-v7m.S
··· 1 + /* 2 + * linux/arch/arm/mm/proc-v7m.S 3 + * 4 + * Copyright (C) 2008 ARM Ltd. 5 + * Copyright (C) 2001 Deep Blue Solutions Ltd. 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + * 11 + * This is the "shell" of the ARMv7-M processor support. 12 + */ 13 + #include <linux/linkage.h> 14 + #include <asm/assembler.h> 15 + #include <asm/v7m.h> 16 + #include "proc-macros.S" 17 + 18 + ENTRY(cpu_v7m_proc_init) 19 + mov pc, lr 20 + ENDPROC(cpu_v7m_proc_init) 21 + 22 + ENTRY(cpu_v7m_proc_fin) 23 + mov pc, lr 24 + ENDPROC(cpu_v7m_proc_fin) 25 + 26 + /* 27 + * cpu_v7m_reset(loc) 28 + * 29 + * Perform a soft reset of the system. Put the CPU into the 30 + * same state as it would be if it had been reset, and branch 31 + * to what would be the reset vector. 32 + * 33 + * - loc - location to jump to for soft reset 34 + */ 35 + .align 5 36 + ENTRY(cpu_v7m_reset) 37 + mov pc, r0 38 + ENDPROC(cpu_v7m_reset) 39 + 40 + /* 41 + * cpu_v7m_do_idle() 42 + * 43 + * Idle the processor (eg, wait for interrupt). 44 + * 45 + * IRQs are already disabled. 46 + */ 47 + ENTRY(cpu_v7m_do_idle) 48 + wfi 49 + mov pc, lr 50 + ENDPROC(cpu_v7m_do_idle) 51 + 52 + ENTRY(cpu_v7m_dcache_clean_area) 53 + mov pc, lr 54 + ENDPROC(cpu_v7m_dcache_clean_area) 55 + 56 + /* 57 + * There is no MMU, so here is nothing to do. 58 + */ 59 + ENTRY(cpu_v7m_switch_mm) 60 + mov pc, lr 61 + ENDPROC(cpu_v7m_switch_mm) 62 + 63 + .globl cpu_v7m_suspend_size 64 + .equ cpu_v7m_suspend_size, 0 65 + 66 + #ifdef CONFIG_ARM_CPU_SUSPEND 67 + ENTRY(cpu_v7m_do_suspend) 68 + mov pc, lr 69 + ENDPROC(cpu_v7m_do_suspend) 70 + 71 + ENTRY(cpu_v7m_do_resume) 72 + mov pc, lr 73 + ENDPROC(cpu_v7m_do_resume) 74 + #endif 75 + 76 + .section ".text.init", #alloc, #execinstr 77 + 78 + /* 79 + * __v7m_setup 80 + * 81 + * This should be able to cover all ARMv7-M cores. 
82 + */ 83 + __v7m_setup: 84 + @ Configure the vector table base address 85 + ldr r0, =BASEADDR_V7M_SCB 86 + ldr r12, =vector_table 87 + str r12, [r0, V7M_SCB_VTOR] 88 + 89 + @ enable UsageFault, BusFault and MemManage fault. 90 + ldr r5, [r0, #V7M_SCB_SHCSR] 91 + orr r5, #(V7M_SCB_SHCSR_USGFAULTENA | V7M_SCB_SHCSR_BUSFAULTENA | V7M_SCB_SHCSR_MEMFAULTENA) 92 + str r5, [r0, #V7M_SCB_SHCSR] 93 + 94 + @ Lower the priority of the SVC and PendSV exceptions 95 + mov r5, #0x80000000 96 + str r5, [r0, V7M_SCB_SHPR2] @ set SVC priority 97 + mov r5, #0x00800000 98 + str r5, [r0, V7M_SCB_SHPR3] @ set PendSV priority 99 + 100 + @ SVC to run the kernel in this mode 101 + adr r1, BSYM(1f) 102 + ldr r5, [r12, #11 * 4] @ read the SVC vector entry 103 + str r1, [r12, #11 * 4] @ write the temporary SVC vector entry 104 + mov r6, lr @ save LR 105 + mov r7, sp @ save SP 106 + ldr sp, =__v7m_setup_stack_top 107 + cpsie i 108 + svc #0 109 + 1: cpsid i 110 + str r5, [r12, #11 * 4] @ restore the original SVC vector entry 111 + mov lr, r6 @ restore LR 112 + mov sp, r7 @ restore SP 113 + 114 + @ Special-purpose control register 115 + mov r1, #1 116 + msr control, r1 @ Thread mode has unprivileged access 117 + 118 + @ Configure the System Control Register to ensure 8-byte stack alignment 119 + @ Note the STKALIGN bit is either RW or RAO. 120 + ldr r12, [r0, V7M_SCB_CCR] @ system control register 121 + orr r12, #V7M_SCB_CCR_STKALIGN 122 + str r12, [r0, V7M_SCB_CCR] 123 + mov pc, lr 124 + ENDPROC(__v7m_setup) 125 + 126 + define_processor_functions v7m, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1 127 + 128 + .section ".rodata" 129 + string cpu_arch_name, "armv7m" 130 + string cpu_elf_name "v7m" 131 + string cpu_v7m_name "ARMv7-M" 132 + 133 + .section ".proc.info.init", #alloc, #execinstr 134 + 135 + /* 136 + * Match any ARMv7-M processor core. 
137 + */ 138 + .type __v7m_proc_info, #object 139 + __v7m_proc_info: 140 + .long 0x000f0000 @ Required ID value 141 + .long 0x000f0000 @ Mask for ID 142 + .long 0 @ proc_info_list.__cpu_mm_mmu_flags 143 + .long 0 @ proc_info_list.__cpu_io_mmu_flags 144 + b __v7m_setup @ proc_info_list.__cpu_flush 145 + .long cpu_arch_name 146 + .long cpu_elf_name 147 + .long HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT 148 + .long cpu_v7m_name 149 + .long v7m_processor_functions @ proc_info_list.proc 150 + .long 0 @ proc_info_list.tlb 151 + .long 0 @ proc_info_list.user 152 + .long nop_cache_fns @ proc_info_list.cache 153 + .size __v7m_proc_info, . - __v7m_proc_info 154 + 155 + __v7m_setup_stack: 156 + .space 4 * 8 @ 8 registers 157 + __v7m_setup_stack_top: