Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: CPU support

This patch adds AArch64 CPU specific functionality. It assumes that the
implementation is generic to AArch64 and does not require specific
identification. Different CPU implementations may require the setting of
various ACTLR_EL1 bits but such information is not currently available
and it should ideally be pushed to firmware.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Tony Lindgren <tony@atomide.com>
Acked-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Olof Johansson <olof@lixom.net>
Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>

+523
+30
arch/arm64/include/asm/cputable.h
/*
 * arch/arm64/include/asm/cputable.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CPUTABLE_H
#define __ASM_CPUTABLE_H

/*
 * One entry in the CPU support table (see arch/arm64/kernel/cputable.c).
 * A CPU matches an entry when (MIDR_EL1 & cpu_id_mask) == cpu_id_val.
 */
struct cpu_info {
	unsigned int	cpu_id_val;	/* expected ID value after masking */
	unsigned int	cpu_id_mask;	/* mask applied to the CPU ID register */
	const char	*cpu_name;	/* human-readable processor name */
	unsigned long	(*cpu_setup)(void); /* CPU init; returns SCTLR_EL1 value */
};

/* Find the cpu_info entry matching the given CPU ID, or NULL-equivalent sentinel. */
extern struct cpu_info *lookup_processor_type(unsigned int);

#endif
+49
arch/arm64/include/asm/cputype.h
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CPUTYPE_H
#define __ASM_CPUTYPE_H

/*
 * System register names as strings, spliced into the MRS instruction
 * template in read_cpuid() below.
 */
#define ID_MIDR_EL1		"midr_el1"
#define ID_CTR_EL0		"ctr_el0"

#define ID_AA64PFR0_EL1		"id_aa64pfr0_el1"
#define ID_AA64DFR0_EL1		"id_aa64dfr0_el1"
#define ID_AA64AFR0_EL1		"id_aa64afr0_el1"
#define ID_AA64ISAR0_EL1	"id_aa64isar0_el1"
#define ID_AA64MMFR0_EL1	"id_aa64mmfr0_el1"

/*
 * Read an ID register by name (one of the string macros above).
 * Implemented as a GCC statement expression wrapping an MRS instruction.
 */
#define read_cpuid(reg) ({						\
	u64 __val;							\
	asm("mrs	%0, " reg : "=r" (__val));			\
	__val;								\
})

/*
 * The CPU ID never changes at run time, so we might as well tell the
 * compiler that it's constant.  Use this function to read the CPU ID
 * rather than reading processor_id or calling read_cpuid() directly.
 */
static inline u32 __attribute_const__ read_cpuid_id(void)
{
	return read_cpuid(ID_MIDR_EL1);
}

/* Read the cache type register (CTR_EL0); also constant at run time. */
static inline u32 __attribute_const__ read_cpuid_cachetype(void)
{
	return read_cpuid(ID_CTR_EL0);
}

#endif
+50
arch/arm64/include/asm/proc-fns.h
/*
 * Based on arch/arm/include/asm/proc-fns.h
 *
 * Copyright (C) 1997-1999 Russell King
 * Copyright (C) 2000 Deep Blue Solutions Ltd
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PROCFNS_H
#define __ASM_PROCFNS_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <asm/page.h>

struct mm_struct;

/* CPU-level operations, implemented in assembly in arch/arm64/mm/proc.S. */

/* Turn the CPU D-cache off. */
extern void cpu_cache_off(void);
/* Idle the processor (wait for interrupt). */
extern void cpu_do_idle(void);
/* Install pgd_phys into TTBR0_EL1, tagged with mm's ASID. */
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
/* Disable the MMU and jump to addr; does not return. */
extern void cpu_reset(unsigned long addr) __attribute__((noreturn));

#include <asm/memory.h>

/* Switch to mm's page tables; converts the pgd virtual address to physical. */
#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)

/*
 * Read the current user pgd from TTBR0_EL1.  The mask strips the ASID
 * (top 16 bits; cpu_do_switch_mm inserts it at bit 48) and the low
 * alignment bits, leaving the physical table address, which is then
 * converted to a kernel virtual pgd pointer.
 */
#define cpu_get_pgd()					\
({							\
	unsigned long pg;				\
	asm("mrs	%0, ttbr0_el1\n"		\
	    : "=r" (pg));				\
	pg &= ~0xffff000000003ffful;			\
	(pgd_t *)phys_to_virt(pg);			\
})

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* __ASM_PROCFNS_H */
+175
arch/arm64/include/asm/processor.h
/*
 * Based on arch/arm/include/asm/processor.h
 *
 * Copyright (C) 1995-1999 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l;})

#ifdef __KERNEL__

#include <linux/string.h>

#include <asm/fpsimd.h>
#include <asm/hw_breakpoint.h>
#include <asm/ptrace.h>
#include <asm/types.h>

/* NOTE(review): this inner #ifdef __KERNEL__ is nested inside the one
 * above and is therefore redundant. */
#ifdef __KERNEL__
#define STACK_TOP_MAX		TASK_SIZE_64
#ifdef CONFIG_COMPAT
#define AARCH32_VECTORS_BASE	0xffff0000
/* 32-bit tasks keep their stack below the AArch32 vectors page. */
#define STACK_TOP		(test_thread_flag(TIF_32BIT) ? \
				AARCH32_VECTORS_BASE : STACK_TOP_MAX)
#else
#define STACK_TOP		STACK_TOP_MAX
#endif /* CONFIG_COMPAT */
#endif /* __KERNEL__ */

/* Per-thread hardware debug state. */
struct debug_info {
	/* Have we suspended stepping by a debugger? */
	int			suspended_step;
	/* Allow breakpoints and watchpoints to be disabled for this thread. */
	int			bps_disabled;
	int			wps_disabled;
	/* Hardware breakpoints pinned to this task. */
	struct perf_event	*hbp_break[ARM_MAX_BRP];
	struct perf_event	*hbp_watch[ARM_MAX_WRP];
};

/* Callee-saved register state preserved across cpu_switch_to(). */
struct cpu_context {
	unsigned long x19;
	unsigned long x20;
	unsigned long x21;
	unsigned long x22;
	unsigned long x23;
	unsigned long x24;
	unsigned long x25;
	unsigned long x26;
	unsigned long x27;
	unsigned long x28;
	unsigned long fp;
	unsigned long sp;
	unsigned long pc;
};

struct thread_struct {
	struct cpu_context	cpu_context;	/* cpu context */
	unsigned long		tp_value;	/* thread pointer value */
	struct fpsimd_state	fpsimd_state;	/* FP/SIMD register state */
	unsigned long		fault_address;	/* fault info */
	struct debug_info	debug;		/* debugging */
};

#define INIT_THREAD  {	}

/*
 * Common part of starting a new user thread: zero the register state
 * and set the entry PC.  syscallno is set to ~0UL — presumably meaning
 * "no syscall in progress"; confirm against asm/ptrace.h.
 */
static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
{
	memset(regs, 0, sizeof(*regs));
	regs->syscallno = ~0UL;
	regs->pc = pc;
}

/*
 * Start a native (AArch64) user thread at pc with the given stack
 * pointer.  argc/argv/envp are read from the top of the new stack into
 * x0-x2.
 */
static inline void start_thread(struct pt_regs *regs, unsigned long pc,
				unsigned long sp)
{
	unsigned long *stack = (unsigned long *)sp;

	start_thread_common(regs, pc);
	regs->pstate = PSR_MODE_EL0t;
	regs->sp = sp;
	regs->regs[2] = stack[2];	/* x2 (envp) */
	regs->regs[1] = stack[1];	/* x1 (argv) */
	regs->regs[0] = stack[0];	/* x0 (argc) */
}

#ifdef CONFIG_COMPAT
/*
 * Start a compat (AArch32) user thread.  An odd pc selects Thumb state
 * via the T bit; the stack words are 32-bit here.
 */
static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
				       unsigned long sp)
{
	unsigned int *stack = (unsigned int *)sp;

	start_thread_common(regs, pc);
	regs->pstate = COMPAT_PSR_MODE_USR;
	if (pc & 1)
		regs->pstate |= COMPAT_PSR_T_BIT;
	regs->compat_sp = sp;
	regs->regs[2] = stack[2];	/* x2 (envp) */
	regs->regs[1] = stack[1];	/* x1 (argv) */
	regs->regs[0] = stack[0];	/* x0 (argc) */
}
#endif

/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk)	do { } while (0)

unsigned long get_wchan(struct task_struct *p);

#define cpu_relax()			barrier()

/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
					 struct task_struct *next);

/*
 * Create a new kernel thread
 */
extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* The pt_regs of task p live at the top of its kernel stack. */
#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk)	task_pt_regs(tsk)->pc
#define KSTK_ESP(tsk)	task_pt_regs(tsk)->sp

/*
 * Prefetching support
 */
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *ptr)
{
	asm volatile("prfm pldl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *ptr)
{
	asm volatile("prfm pstl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_SPINLOCK_PREFETCH
static inline void spin_lock_prefetch(const void *x)
{
	prefetchw(x);
}

#define HAVE_ARCH_PICK_MMAP_LAYOUT

#endif

#endif /* __ASM_PROCESSOR_H */
+33
arch/arm64/kernel/cputable.c
/*
 * arch/arm64/kernel/cputable.c
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>

#include <asm/cputable.h>

/* Generic CPU setup routine, implemented in arch/arm64/mm/proc.S. */
extern unsigned long __cpu_setup(void);

/*
 * Table of supported CPUs, terminated by an all-zero sentinel entry.
 * The single entry matches bits [19:16] of MIDR_EL1 being 0xf —
 * presumably the "architecture" field indicating an ID-register-scheme
 * (generic AArch64) CPU; confirm against the ARM ARM MIDR_EL1
 * description.
 */
struct cpu_info __initdata cpu_table[] = {
	{
		.cpu_id_val	= 0x000f0000,
		.cpu_id_mask	= 0x000f0000,
		.cpu_name	= "AArch64 Processor",
		.cpu_setup	= __cpu_setup,
	},
	{ /* Empty */ },
};
+186
arch/arm64/mm/proc.S
/*
 * Based on arch/arm/mm/proc.S
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>

#include "proc-macros.S"

#ifndef CONFIG_SMP
/* PTWs cacheable, inner/outer WBWA not shareable */
#define TCR_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA
#else
/* PTWs cacheable, inner/outer WBWA shareable */
#define TCR_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA | TCR_SHARED
#endif

/* Shift attribute value attr into the MAIR_EL1 byte slot for memory type mt. */
#define MAIR(attr, mt)	((attr) << ((mt) * 8))

/*
 * cpu_cache_off()
 *
 * Turn the CPU D-cache off.
 */
ENTRY(cpu_cache_off)
	mrs	x0, sctlr_el1
	bic	x0, x0, #1 << 2			// clear SCTLR.C
	msr	sctlr_el1, x0
	isb
	ret
ENDPROC(cpu_cache_off)

/*
 * cpu_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the same state
 * as it would be if it had been reset, and branch to what would be the
 * reset vector.  It must be executed with the flat identity mapping.
 *
 * - loc - location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_reset)
	mrs	x1, sctlr_el1
	bic	x1, x1, #1			// clear SCTLR.M
	msr	sctlr_el1, x1			// disable the MMU
	isb
	ret	x0				// branch to loc
ENDPROC(cpu_reset)

/*
 * cpu_do_idle()
 *
 * Idle the processor (wait for interrupt).
 */
ENTRY(cpu_do_idle)
	dsb	sy				// WFI may enter a low-power mode
	wfi
	ret
ENDPROC(cpu_do_idle)

/*
 * cpu_do_switch_mm(pgd_phys, tsk)
 *
 * Set the translation table base pointer to be pgd_phys.
 *
 * - pgd_phys - physical address of new TTB
 */
ENTRY(cpu_do_switch_mm)
	mmid	w1, x1				// get mm->context.id
	bfi	x0, x1, #48, #16		// set the ASID (TTBR0 bits 63:48)
	msr	ttbr0_el1, x0			// set TTBR0
	isb
	ret
ENDPROC(cpu_do_switch_mm)

cpu_name:
	.ascii	"AArch64 Processor"
	.align

	.section ".text.init", #alloc, #execinstr

/*
 * __cpu_setup
 *
 * Initialise the processor for turning the MMU on.  Return in x0 the
 * value of the SCTLR_EL1 register.
 */
ENTRY(__cpu_setup)
#ifdef CONFIG_SMP
	/* TODO: only do this for certain CPUs */
	/*
	 * Enable SMP/nAMP mode (ACTLR_EL1 bit 6).
	 */
	mrs	x0, actlr_el1
	tbnz	x0, #6, 1f			// already enabled?
	orr	x0, x0, #1 << 6
	msr	actlr_el1, x0
1:
#endif
	/*
	 * Preserve the link register across the function call.
	 */
	mov	x28, lr
	bl	__flush_dcache_all
	mov	lr, x28
	ic	iallu				// I+BTB cache invalidate
	dsb	sy

	mov	x0, #3 << 20
	msr	cpacr_el1, x0			// Enable FP/ASIMD
	mov	x0, #1
	msr	oslar_el1, x0			// Set the debug OS lock
	tlbi	vmalle1is			// invalidate I + D TLBs
	/*
	 * Memory region attributes for LPAE:
	 *
	 *   n = AttrIndx[2:0]
	 *			n	MAIR
	 *   DEVICE_nGnRnE	000	00000000
	 *   DEVICE_nGnRE	001	00000100
	 *   DEVICE_GRE		010	00001100
	 *   NORMAL_NC		011	01000100
	 *   NORMAL		100	11111111
	 */
	ldr	x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
		     MAIR(0x04, MT_DEVICE_nGnRE) | \
		     MAIR(0x0c, MT_DEVICE_GRE) | \
		     MAIR(0x44, MT_NORMAL_NC) | \
		     MAIR(0xff, MT_NORMAL)
	msr	mair_el1, x5
	/*
	 * Prepare SCTLR
	 */
	adr	x5, crval
	ldp	w5, w6, [x5]
	mrs	x0, sctlr_el1
	bic	x0, x0, x5			// clear bits
	orr	x0, x0, x6			// set bits
	/*
	 * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
	 * both user and kernel.
	 *
	 * NOTE(review): bit 31 of TCR_EL1 is set below without a named
	 * constant — confirm its meaning against the ARM ARM TCR_EL1
	 * description.
	 */
	ldr	x10, =TCR_TxSZ(VA_BITS) | TCR_FLAGS | TCR_IPS_40BIT | \
		      TCR_ASID16 | (1 << 31)
#ifdef CONFIG_ARM64_64K_PAGES
	orr	x10, x10, TCR_TG0_64K
	orr	x10, x10, TCR_TG1_64K
#endif
	msr	tcr_el1, x10
	ret					// return to head.S
ENDPROC(__cpu_setup)

	/*
	 * SCTLR_EL1 clear/set masks used above (bit legend reconstructed
	 * approximately; column alignment may differ from the original):
	 *
	 *                 n n            T
	 *       U E      WT T UD     US IHBS
	 *       CE0      XWHW CZ     ME TEEA   S
	 * .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
	 * 0011 0... 1101 ..0. ..0. 10.. .... .... < hardware reserved
	 * .... .100 .... 01.1 11.1 ..01 0001 1101 < software settings
	 */
	.type	crval, #object
crval:
	.word	0x030802e2			// clear
	.word	0x0405d11d			// set