Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

[ARM] armv7: add support for ARMv7 cores.

This patch adds support for the ARMv7 cores.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Authored by Catalin Marinas, committed by Russell King (bbe88886, 5b94f675)

8 files changed, 585 insertions(+)

arch/arm/mm/abort-ev7.S (new file, +32 lines)
#include <linux/linkage.h>
#include <asm/assembler.h>
/*
 * Function: v7_early_abort
 *
 * Params  : r2 = address of aborted instruction
 *         : r3 = saved SPSR
 *
 * Returns : r0 = address of abort
 *         : r1 = FSR, bit 11 = write
 *         : r2-r8 = corrupted
 *         : r9 = preserved
 *         : sp = pointer to registers
 *
 * Purpose : obtain information about current aborted instruction.
 */
        .align  5
ENTRY(v7_early_abort)
        /*
         * The effect of data aborts on on the exclusive access monitor are
         * UNPREDICTABLE. Do a CLREX to clear the state
         */
        clrex

        mrc     p15, 0, r1, c5, c0, 0           @ get FSR
        mrc     p15, 0, r0, c6, c0, 0           @ get FAR

        /*
         * V6 code adjusts the returned DFSR.
         * New designs should not need to patch up faults.
         */
        mov     pc, lr
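Note: the handler above hands back the fault address in r0 and the fault status register in r1, with bit 11 of the FSR flagging a write access (per the comment block). A minimal, host-runnable C sketch of how those two values could be interpreted; the function name and the sample values are made up for illustration, this is not kernel code:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

static void report_abort(uint32_t far, uint32_t fsr)
{
        int is_write = (fsr >> 11) & 1;         /* bit 11 = write, per the comment above */

        printf("data abort at 0x%08" PRIx32 " (%s), fsr=0x%08" PRIx32 "\n",
               far, is_write ? "write" : "read", fsr);
}

int main(void)
{
        report_abort(0xc0801000u, 0x00000805u); /* made-up example values */
        return 0;
}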
arch/arm/mm/cache-v7.S (new file, +253 lines)
/*
 *  linux/arch/arm/mm/cache-v7.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *  Copyright (C) 2005 ARM Ltd.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv7 processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>

#include "proc-macros.S"

/*
 * v7_flush_dcache_all()
 *
 * Flush the whole D-cache.
 *
 * Corrupted registers: r0-r5, r7, r9-r11
 *
 * - mm    - mm_struct describing address space
 */
ENTRY(v7_flush_dcache_all)
        mrc     p15, 1, r0, c0, c0, 1           @ read clidr
        ands    r3, r0, #0x7000000              @ extract loc from clidr
        mov     r3, r3, lsr #23                 @ left align loc bit field
        beq     finished                        @ if loc is 0, then no need to clean
        mov     r10, #0                         @ start clean at cache level 0
loop1:
        add     r2, r10, r10, lsr #1            @ work out 3x current cache level
        mov     r1, r0, lsr r2                  @ extract cache type bits from clidr
        and     r1, r1, #7                      @ mask of the bits for current cache only
        cmp     r1, #2                          @ see what cache we have at this level
        blt     skip                            @ skip if no cache, or just i-cache
        mcr     p15, 2, r10, c0, c0, 0          @ select current cache level in cssr
        isb                                     @ isb to sych the new cssr&csidr
        mrc     p15, 1, r1, c0, c0, 0           @ read the new csidr
        and     r2, r1, #7                      @ extract the length of the cache lines
        add     r2, r2, #4                      @ add 4 (line length offset)
        ldr     r4, =0x3ff
        ands    r4, r4, r1, lsr #3              @ find maximum number on the way size
        clz     r5, r4                          @ find bit position of way size increment
        ldr     r7, =0x7fff
        ands    r7, r7, r1, lsr #13             @ extract max number of the index size
loop2:
        mov     r9, r4                          @ create working copy of max way size
loop3:
        orr     r11, r10, r9, lsl r5            @ factor way and cache number into r11
        orr     r11, r11, r7, lsl r2            @ factor index number into r11
        mcr     p15, 0, r11, c7, c14, 2         @ clean & invalidate by set/way
        subs    r9, r9, #1                      @ decrement the way
        bge     loop3
        subs    r7, r7, #1                      @ decrement the index
        bge     loop2
skip:
        add     r10, r10, #2                    @ increment cache number
        cmp     r3, r10
        bgt     loop1
finished:
        mov     r10, #0                         @ swith back to cache level 0
        mcr     p15, 2, r10, c0, c0, 0          @ select current cache level in cssr
        isb
        mov     pc, lr

/*
 * v7_flush_cache_all()
 *
 * Flush the entire cache system.
 *  The data cache flush is now achieved using atomic clean / invalidates
 *  working outwards from L1 cache. This is done using Set/Way based cache
 *  maintainance instructions.
 *  The instruction cache can still be invalidated back to the point of
 *  unification in a single instruction.
 *
 */
ENTRY(v7_flush_kern_cache_all)
        stmfd   sp!, {r4-r5, r7, r9-r11, lr}
        bl      v7_flush_dcache_all
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 0           @ I+BTB cache invalidate
        ldmfd   sp!, {r4-r5, r7, r9-r11, lr}
        mov     pc, lr

/*
 * v7_flush_cache_all()
 *
 * Flush all TLB entries in a particular address space
 *
 * - mm    - mm_struct describing address space
 */
ENTRY(v7_flush_user_cache_all)
        /*FALLTHROUGH*/

/*
 * v7_flush_cache_range(start, end, flags)
 *
 * Flush a range of TLB entries in the specified address space.
 *
 * - start - start address (may not be aligned)
 * - end   - end address (exclusive, may not be aligned)
 * - flags - vm_area_struct flags describing address space
 *
 * It is assumed that:
 * - we have a VIPT cache.
 */
ENTRY(v7_flush_user_cache_range)
        mov     pc, lr

/*
 * v7_coherent_kern_range(start,end)
 *
 * Ensure that the I and D caches are coherent within specified
 * region.  This is typically used when code has been written to
 * a memory region, and will be executed.
 *
 * - start   - virtual start address of region
 * - end     - virtual end address of region
 *
 * It is assumed that:
 * - the Icache does not read data from the write buffer
 */
ENTRY(v7_coherent_kern_range)
        /* FALLTHROUGH */

/*
 * v7_coherent_user_range(start,end)
 *
 * Ensure that the I and D caches are coherent within specified
 * region.  This is typically used when code has been written to
 * a memory region, and will be executed.
 *
 * - start   - virtual start address of region
 * - end     - virtual end address of region
 *
 * It is assumed that:
 * - the Icache does not read data from the write buffer
 */
ENTRY(v7_coherent_user_range)
        dcache_line_size r2, r3
        sub     r3, r2, #1
        bic     r0, r0, r3
1:      mcr     p15, 0, r0, c7, c11, 1          @ clean D line to the point of unification
        dsb
        mcr     p15, 0, r0, c7, c5, 1           @ invalidate I line
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 6           @ invalidate BTB
        dsb
        isb
        mov     pc, lr

/*
 * v7_flush_kern_dcache_page(kaddr)
 *
 * Ensure that the data held in the page kaddr is written back
 * to the page in question.
 *
 * - kaddr   - kernel address (guaranteed to be page aligned)
 */
ENTRY(v7_flush_kern_dcache_page)
        dcache_line_size r2, r3
        add     r1, r0, #PAGE_SZ
1:
        mcr     p15, 0, r0, c7, c14, 1          @ clean & invalidate D line / unified line
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        dsb
        mov     pc, lr

/*
 * v7_dma_inv_range(start,end)
 *
 * Invalidate the data cache within the specified region; we will
 * be performing a DMA operation in this region and we want to
 * purge old data in the cache.
 *
 * - start   - virtual start address of region
 * - end     - virtual end address of region
 */
ENTRY(v7_dma_inv_range)
        dcache_line_size r2, r3
        sub     r3, r2, #1
        tst     r0, r3
        bic     r0, r0, r3
        mcrne   p15, 0, r0, c7, c14, 1          @ clean & invalidate D / U line

        tst     r1, r3
        bic     r1, r1, r3
        mcrne   p15, 0, r1, c7, c14, 1          @ clean & invalidate D / U line
1:
        mcr     p15, 0, r0, c7, c6, 1           @ invalidate D / U line
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        dsb
        mov     pc, lr

/*
 * v7_dma_clean_range(start,end)
 * - start   - virtual start address of region
 * - end     - virtual end address of region
 */
ENTRY(v7_dma_clean_range)
        dcache_line_size r2, r3
        sub     r3, r2, #1
        bic     r0, r0, r3
1:
        mcr     p15, 0, r0, c7, c10, 1          @ clean D / U line
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        dsb
        mov     pc, lr

/*
 * v7_dma_flush_range(start,end)
 * - start   - virtual start address of region
 * - end     - virtual end address of region
 */
ENTRY(v7_dma_flush_range)
        dcache_line_size r2, r3
        sub     r3, r2, #1
        bic     r0, r0, r3
1:
        mcr     p15, 0, r0, c7, c14, 1          @ clean & invalidate D / U line
        add     r0, r0, r2
        cmp     r0, r1
        blo     1b
        dsb
        mov     pc, lr

        __INITDATA

        .type   v7_cache_fns, #object
ENTRY(v7_cache_fns)
        .long   v7_flush_kern_cache_all
        .long   v7_flush_user_cache_all
        .long   v7_flush_user_cache_range
        .long   v7_coherent_kern_range
        .long   v7_coherent_user_range
        .long   v7_flush_kern_dcache_page
        .long   v7_dma_inv_range
        .long   v7_dma_clean_range
        .long   v7_dma_flush_range
        .size   v7_cache_fns, . - v7_cache_fns
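Note: the set/way walk in v7_flush_dcache_all is easier to follow in C. The sketch below is a host-runnable model under assumed example CLIDR/CCSIDR values; the CP15 level select/reads and the DCCISW operation are replaced by plain variables and a counter, so it mirrors only the loop structure, not the real kernel code:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Example registers: CLIDR says L1 has separate I and D caches and the
         * level of coherency is 1; CCSIDR describes a 4-way, 128-set cache with
         * 64-byte lines (LineSize encoding 2). Values are illustrative only. */
        uint32_t clidr  = (1u << 24) | 3u;
        uint32_t ccsidr = (127u << 13) | (3u << 3) | 2u;
        unsigned long ops = 0;

        unsigned loc = (clidr >> 24) & 7;                  /* "extract loc from clidr" */
        for (unsigned level = 0; level < loc; level++) {
                unsigned ctype = (clidr >> (3 * level)) & 7;
                if (ctype < 2)                             /* no cache, or I-cache only */
                        continue;

                /* the assembly selects the level in the CSSR and re-reads the CSIDR here */
                unsigned line_shift = (ccsidr & 7) + 4;    /* log2(line size in bytes) */
                unsigned max_way    = (ccsidr >> 3) & 0x3ff;
                unsigned max_set    = (ccsidr >> 13) & 0x7fff;
                unsigned way_shift  = max_way ? (unsigned)__builtin_clz(max_way) : 0;

                for (int set = (int)max_set; set >= 0; set--)
                        for (int way = (int)max_way; way >= 0; way--) {
                                uint32_t sw = (level << 1)
                                            | ((uint32_t)way << way_shift)
                                            | ((uint32_t)set << line_shift);
                                (void)sw;   /* assembly issues DCCISW (c7, c14, 2) here */
                                ops++;
                        }
        }
        printf("would issue %lu clean+invalidate-by-set/way operations\n", ops);
        return 0;
}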
arch/arm/mm/proc-macros.S (+12 lines)
···
         .word   \ucset
 #endif
         .endm
+
+/*
+ * cache_line_size - get the cache line size from the CSIDR register
+ * (available on ARMv7+). It assumes that the CSSR register was configured
+ * to access the L1 data cache CSIDR.
+ */
+       .macro  dcache_line_size, reg, tmp
+       mrc     p15, 1, \tmp, c0, c0, 0         @ read CSIDR
+       and     \tmp, \tmp, #7                  @ cache line size encoding
+       mov     \reg, #16                       @ size offset
+       mov     \reg, \reg, lsl \tmp            @ actual cache line size
+       .endm
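Note: the dcache_line_size macro turns the low three bits of the CSIDR into a line size in bytes (16 shifted left by the encoding). A tiny C restatement, using an assumed example register value in place of the CP15 read:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t csidr = (127u << 13) | (3u << 3) | 2u;  /* example value only */
        unsigned enc   = csidr & 7;                      /* "cache line size encoding" */
        unsigned bytes = 16u << enc;                     /* mov #16, then shift left */

        printf("D-cache line size: %u bytes\n", bytes);  /* 64 for this example */
        return 0;
}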
arch/arm/mm/proc-v7.S (new file, +262 lines)
/*
 *  linux/arch/arm/mm/proc-v7.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv7 processor support.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/elf.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>

#include "proc-macros.S"

#define TTB_C           (1 << 0)
#define TTB_S           (1 << 1)
#define TTB_RGN_OC_WT   (2 << 3)
#define TTB_RGN_OC_WB   (3 << 3)

ENTRY(cpu_v7_proc_init)
        mov     pc, lr

ENTRY(cpu_v7_proc_fin)
        mov     pc, lr

/*
 * cpu_v7_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * - loc   - location to jump to for soft reset
 *
 * It is assumed that:
 */
        .align  5
ENTRY(cpu_v7_reset)
        mov     pc, r0

/*
 * cpu_v7_do_idle()
 *
 * Idle the processor (eg, wait for interrupt).
 *
 * IRQs are already disabled.
 */
ENTRY(cpu_v7_do_idle)
        .long   0xe320f003                      @ ARM V7 WFI instruction
        mov     pc, lr

ENTRY(cpu_v7_dcache_clean_area)
#ifndef TLB_CAN_READ_FROM_L1_CACHE
        dcache_line_size r2, r3
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        add     r0, r0, r2
        subs    r1, r1, r2
        bhi     1b
        dsb
#endif
        mov     pc, lr

/*
 * cpu_v7_switch_mm(pgd_phys, tsk)
 *
 * Set the translation table base pointer to be pgd_phys
 *
 * - pgd_phys - physical address of new TTB
 *
 * It is assumed that:
 * - we are not using split page tables
 */
ENTRY(cpu_v7_switch_mm)
        mov     r2, #0
        ldr     r1, [r1, #MM_CONTEXT_ID]        @ get mm->context.id
        orr     r0, r0, #TTB_RGN_OC_WB          @ mark PTWs outer cacheable, WB
        mcr     p15, 0, r2, c13, c0, 1          @ set reserved context ID
        isb
1:      mcr     p15, 0, r0, c2, c0, 0           @ set TTB 0
        isb
        mcr     p15, 0, r1, c13, c0, 1          @ set context ID
        isb
        mov     pc, lr

/*
 * cpu_v7_set_pte_ext(ptep, pte)
 *
 * Set a level 2 translation table entry.
 *
 * - ptep  - pointer to level 2 translation table entry
 *           (hardware version is stored at -1024 bytes)
 * - pte   - PTE value to store
 * - ext   - value for extended PTE bits
 *
 * Permissions:
 *   YUWD  APX AP1 AP0     SVC     User
 *   0xxx   0   0   0      no acc  no acc
 *   100x   1   0   1      r/o     no acc
 *   10x0   1   0   1      r/o     no acc
 *   1011   0   0   1      r/w     no acc
 *   110x   0   1   0      r/w     r/o
 *   11x0   0   1   0      r/w     r/o
 *   1111   0   1   1      r/w     r/w
 */
ENTRY(cpu_v7_set_pte_ext)
        str     r1, [r0], #-2048                @ linux version

        bic     r3, r1, #0x000003f0
        bic     r3, r3, #0x00000003
        orr     r3, r3, r2
        orr     r3, r3, #PTE_EXT_AP0 | 2

        tst     r1, #L_PTE_WRITE
        tstne   r1, #L_PTE_DIRTY
        orreq   r3, r3, #PTE_EXT_APX

        tst     r1, #L_PTE_USER
        orrne   r3, r3, #PTE_EXT_AP1
        tstne   r3, #PTE_EXT_APX
        bicne   r3, r3, #PTE_EXT_APX | PTE_EXT_AP0

        tst     r1, #L_PTE_YOUNG
        biceq   r3, r3, #PTE_EXT_APX | PTE_EXT_AP_MASK

        tst     r1, #L_PTE_EXEC
        orreq   r3, r3, #PTE_EXT_XN

        tst     r1, #L_PTE_PRESENT
        moveq   r3, #0

        str     r3, [r0]
        mcr     p15, 0, r0, c7, c10, 1          @ flush_pte
        mov     pc, lr

cpu_v7_name:
        .ascii  "ARMv7 Processor"
        .align

        .section ".text.init", #alloc, #execinstr

/*
 * __v7_setup
 *
 * Initialise TLB, Caches, and MMU state ready to switch the MMU
 * on.  Return in r0 the new CP15 C1 control register setting.
 *
 * We automatically detect if we have a Harvard cache, and use the
 * Harvard cache control instructions insead of the unified cache
 * control instructions.
 *
 * This should be able to cover all ARMv7 cores.
 *
 * It is assumed that:
 * - cache type register is implemented
 */
__v7_setup:
        adr     r12, __v7_setup_stack           @ the local stack
        stmia   r12, {r0-r5, r7, r9, r11, lr}
        bl      v7_flush_dcache_all
        ldmia   r12, {r0-r5, r7, r9, r11, lr}
        mov     r10, #0
#ifdef HARVARD_CACHE
        mcr     p15, 0, r10, c7, c5, 0          @ I+BTB cache invalidate
#endif
        dsb
        mcr     p15, 0, r10, c8, c7, 0          @ invalidate I + D TLBs
        mcr     p15, 0, r10, c2, c0, 2          @ TTB control register
        orr     r4, r4, #TTB_RGN_OC_WB          @ mark PTWs outer cacheable, WB
        mcr     p15, 0, r4, c2, c0, 0           @ load TTB0
        mcr     p15, 0, r4, c2, c0, 1           @ load TTB1
        mov     r10, #0x1f                      @ domains 0, 1 = manager
        mcr     p15, 0, r10, c3, c0, 0          @ load domain access register
#ifndef CONFIG_CPU_L2CACHE_DISABLE
        @ L2 cache configuration in the L2 aux control register
        mrc     p15, 1, r10, c9, c0, 2
        bic     r10, r10, #(1 << 16)            @ L2 outer cache
        mcr     p15, 1, r10, c9, c0, 2
        @ L2 cache is enabled in the aux control register
        mrc     p15, 0, r10, c1, c0, 1
        orr     r10, r10, #2
        mcr     p15, 0, r10, c1, c0, 1
#endif
        mrc     p15, 0, r0, c1, c0, 0           @ read control register
        ldr     r10, cr1_clear                  @ get mask for bits to clear
        bic     r0, r0, r10                     @ clear bits them
        ldr     r10, cr1_set                    @ get mask for bits to set
        orr     r0, r0, r10                     @ set them
        mov     pc, lr                          @ return to head.S:__ret

        /*
         *          V X F   I D LR
         * .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM
         * rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx  < forced
         *          0 110       0011 1.00 .111 1101 < we want
         */
        .type   cr1_clear, #object
        .type   cr1_set, #object
cr1_clear:
        .word   0x0120c302
cr1_set:
        .word   0x00c0387d

__v7_setup_stack:
        .space  4 * 11                          @ 11 registers

        .type   v7_processor_functions, #object
ENTRY(v7_processor_functions)
        .word   v7_early_abort
        .word   cpu_v7_proc_init
        .word   cpu_v7_proc_fin
        .word   cpu_v7_reset
        .word   cpu_v7_do_idle
        .word   cpu_v7_dcache_clean_area
        .word   cpu_v7_switch_mm
        .word   cpu_v7_set_pte_ext
        .size   v7_processor_functions, . - v7_processor_functions

        .type   cpu_arch_name, #object
cpu_arch_name:
        .asciz  "armv7"
        .size   cpu_arch_name, . - cpu_arch_name

        .type   cpu_elf_name, #object
cpu_elf_name:
        .asciz  "v7"
        .size   cpu_elf_name, . - cpu_elf_name
        .align

        .section ".proc.info.init", #alloc, #execinstr

        /*
         * Match any ARMv7 processor core.
         */
        .type   __v7_proc_info, #object
__v7_proc_info:
        .long   0x000f0000              @ Required ID value
        .long   0x000f0000              @ Mask for ID
        .long   PMD_TYPE_SECT | \
                PMD_SECT_BUFFERABLE | \
                PMD_SECT_CACHEABLE | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
        .long   PMD_TYPE_SECT | \
                PMD_SECT_XN | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
        b       __v7_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
        .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
        .long   cpu_v7_name
        .long   v7_processor_functions
        .long   v6wbi_tlb_fns
        .long   v6_user_fns
        .long   v7_cache_fns
        .size   __v7_proc_info, . - __v7_proc_info
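Note: the end of __v7_setup builds the CP15 C1 control register value that is returned in r0 for head.S to write: clear the cr1_clear bits, then OR in the cr1_set bits. A small C restatement using the literal masks above; the starting register value is invented for the example:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
        const uint32_t cr1_clear = 0x0120c302u;  /* bits forced off, per the table above */
        const uint32_t cr1_set   = 0x00c0387du;  /* bits forced on */
        uint32_t cr = 0x00c5007au;               /* pretend CP15 c1, c0, 0 read; example only */

        cr &= ~cr1_clear;                        /* bic r0, r0, r10 */
        cr |=  cr1_set;                          /* orr r0, r0, r10 */

        printf("control register value returned in r0: 0x%08" PRIx32 "\n", cr);
        return 0;
}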
include/asm-arm/cacheflush.h (+8 lines)
···
 //# endif
 #endif

+#if defined(CONFIG_CPU_V7)
+//# ifdef _CACHE
+# define MULTI_CACHE 1
+//# else
+//# define _CACHE v7
+//# endif
+#endif
+
 #if !defined(_CACHE) && !defined(MULTI_CACHE)
 #error Unknown cache maintainence model
 #endif
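Note: defining MULTI_CACHE makes the cache maintenance calls dispatch through a table of function pointers selected at boot (populated from tables such as v7_cache_fns above) rather than binding directly to a single _CACHE-prefixed implementation. A hedged sketch of that pattern; the struct, variable, and macro names below are illustrative, not the exact ones used by this header:

/* Illustrative single-vs-multi cache dispatch, names are hypothetical. */
struct cache_fns_example {
        void (*flush_kern_all)(void);
        void (*coherent_user_range)(unsigned long start, unsigned long end);
        /* ...one pointer per entry in the v7_cache_fns table... */
};

#ifdef MULTI_CACHE
extern struct cache_fns_example cpu_cache_example;       /* filled from proc.info at boot */
#define example_flush_kern_all()  cpu_cache_example.flush_kern_all()
#else
extern void v7_flush_kern_cache_all(void);               /* direct call, single cache type */
#define example_flush_kern_all()  v7_flush_kern_cache_all()
#endif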
include/asm-arm/glue.h (+9 lines)
···
  *   v5tej_early - ARMv5 with Thumb and Java early abort handler
  *   xscale      - ARMv5 with Thumb with Xscale extensions
  *   v6_early    - ARMv6 generic early abort handler
+ *   v7_early    - ARMv7 generic early abort handler
  */
 #undef CPU_ABORT_HANDLER
 #undef MULTI_ABORT
···
 #  define MULTI_ABORT 1
 # else
 #  define CPU_ABORT_HANDLER v6_early_abort
+# endif
+#endif
+
+#ifdef CONFIG_CPU_ABRT_EV7
+# ifdef CPU_ABORT_HANDLER
+#  define MULTI_ABORT 1
+# else
+#  define CPU_ABORT_HANDLER v7_early_abort
 # endif
 #endif
include/asm-arm/proc-fns.h (+8 lines)
···
 #   define CPU_NAME cpu_v6
 #  endif
 # endif
+# ifdef CONFIG_CPU_V7
+#  ifdef CPU_NAME
+#   undef  MULTI_CPU
+#   define MULTI_CPU
+#  else
+#   define CPU_NAME cpu_v7
+#  endif
+# endif
 #endif

 #ifndef __ASSEMBLY__
include/asm-arm/system.h (+1 line)
···
 #define CPU_ARCH_ARMv5TE        6
 #define CPU_ARCH_ARMv5TEJ       7
 #define CPU_ARCH_ARMv6          8
+#define CPU_ARCH_ARMv7          9

 /*
  * CR1 bits (CP#15 CR1)
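Note: the CPU_ARCH_* constants are ordered, so architecture checks can be written as simple comparisons against the value detected at boot. A self-contained illustration; the helper function below is hypothetical, not a kernel API:

#include <stdio.h>

#define CPU_ARCH_ARMv6 8
#define CPU_ARCH_ARMv7 9

/* Hypothetical check: the set/way flush added in cache-v7.S relies on the
 * ARMv7 cache identification registers, so require at least ARMv7. */
static int has_v7_cache_ops(int cpu_arch)
{
        return cpu_arch >= CPU_ARCH_ARMv7;
}

int main(void)
{
        printf("ARMv6: %d, ARMv7: %d\n",
               has_v7_cache_ops(CPU_ARCH_ARMv6), has_v7_cache_ops(CPU_ARCH_ARMv7));
        return 0;
}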