Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: 7861/1: cacheflush: consolidate single-CPU ARMv7 cache disabling code

This code is becoming duplicated in many places. So let's consolidate
it into a handy macro that is known to be right and available for reuse.

Signed-off-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Dave Martin <Dave.Martin@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Authored by Nicolas Pitre and committed by Russell King.
39792c7c 3c8828f6

+52 -98
+46
arch/arm/include/asm/cacheflush.h
··· 435 435 #define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr)) 436 436 #define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr)) 437 437 438 + /* 439 + * Disabling cache access for one CPU in an ARMv7 SMP system is tricky. 440 + * To do so we must: 441 + * 442 + * - Clear the SCTLR.C bit to prevent further cache allocations 443 + * - Flush the desired level of cache 444 + * - Clear the ACTLR "SMP" bit to disable local coherency 445 + * 446 + * ... and so without any intervening memory access in between those steps, 447 + * not even to the stack. 448 + * 449 + * WARNING -- After this has been called: 450 + * 451 + * - No ldrex/strex (and similar) instructions must be used. 452 + * - The CPU is obviously no longer coherent with the other CPUs. 453 + * - This is unlikely to work as expected if Linux is running non-secure. 454 + * 455 + * Note: 456 + * 457 + * - This is known to apply to several ARMv7 processor implementations, 458 + * however some exceptions may exist. Caveat emptor. 459 + * 460 + * - The clobber list is dictated by the call to v7_flush_dcache_*. 461 + * fp is preserved to the stack explicitly prior to disabling the cache 462 + * since adding it to the clobber list is incompatible with having 463 + * CONFIG_FRAME_POINTER=y. ip is saved as well if ever r12-clobbering 464 + * trampolines are inserted by the linker and to keep sp 64-bit aligned. 
465 + */ 466 + #define v7_exit_coherency_flush(level) \ 467 + asm volatile( \ 468 + "stmfd sp!, {fp, ip} \n\t" \ 469 + "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \ 470 + "bic r0, r0, #"__stringify(CR_C)" \n\t" \ 471 + "mcr p15, 0, r0, c1, c0, 0 @ set SCTLR \n\t" \ 472 + "isb \n\t" \ 473 + "bl v7_flush_dcache_"__stringify(level)" \n\t" \ 474 + "clrex \n\t" \ 475 + "mrc p15, 0, r0, c1, c0, 1 @ get ACTLR \n\t" \ 476 + "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \ 477 + "mcr p15, 0, r0, c1, c0, 1 @ set ACTLR \n\t" \ 478 + "isb \n\t" \ 479 + "dsb \n\t" \ 480 + "ldmfd sp!, {fp, ip}" \ 481 + : : : "r0","r1","r2","r3","r4","r5","r6","r7", \ 482 + "r9","r10","lr","memory" ) 483 + 438 484 #endif
+4 -52
arch/arm/mach-vexpress/dcscb.c
··· 133 133 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) { 134 134 arch_spin_unlock(&dcscb_lock); 135 135 136 - /* 137 - * Flush all cache levels for this cluster. 138 - * 139 - * To do so we do: 140 - * - Clear the SCTLR.C bit to prevent further cache allocations 141 - * - Flush the whole cache 142 - * - Clear the ACTLR "SMP" bit to disable local coherency 143 - * 144 - * Let's do it in the safest possible way i.e. with 145 - * no memory access within the following sequence 146 - * including to the stack. 147 - * 148 - * Note: fp is preserved to the stack explicitly prior doing 149 - * this since adding it to the clobber list is incompatible 150 - * with having CONFIG_FRAME_POINTER=y. 151 - */ 152 - asm volatile( 153 - "str fp, [sp, #-4]! \n\t" 154 - "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t" 155 - "bic r0, r0, #"__stringify(CR_C)" \n\t" 156 - "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t" 157 - "isb \n\t" 158 - "bl v7_flush_dcache_all \n\t" 159 - "clrex \n\t" 160 - "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t" 161 - "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" 162 - "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t" 163 - "isb \n\t" 164 - "dsb \n\t" 165 - "ldr fp, [sp], #4" 166 - : : : "r0","r1","r2","r3","r4","r5","r6","r7", 167 - "r9","r10","lr","memory"); 136 + /* Flush all cache levels for this cluster. */ 137 + v7_exit_coherency_flush(all); 168 138 169 139 /* 170 140 * This is a harmless no-op. On platforms with a real ··· 153 183 } else { 154 184 arch_spin_unlock(&dcscb_lock); 155 185 156 - /* 157 - * Flush the local CPU cache. 158 - * Let's do it in the safest possible way as above. 159 - */ 160 - asm volatile( 161 - "str fp, [sp, #-4]! \n\t" 162 - "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t" 163 - "bic r0, r0, #"__stringify(CR_C)" \n\t" 164 - "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t" 165 - "isb \n\t" 166 - "bl v7_flush_dcache_louis \n\t" 167 - "clrex \n\t" 168 - "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t" 169 - "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" 170 - "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t" 171 - "isb \n\t" 172 - "dsb \n\t" 173 - "ldr fp, [sp], #4" 174 - : : : "r0","r1","r2","r3","r4","r5","r6","r7", 175 - "r9","r10","lr","memory"); 186 + /* Disable and flush the local CPU cache. */ 187 + v7_exit_coherency_flush(louis); 176 188 } 177 189 178 190 __mcpm_cpu_down(cpu, cluster);
+2 -46
arch/arm/mach-vexpress/tc2_pm.c
··· 156 156 : : "r" (0x400) ); 157 157 } 158 158 159 - /* 160 - * We need to disable and flush the whole (L1 and L2) cache. 161 - * Let's do it in the safest possible way i.e. with 162 - * no memory access within the following sequence 163 - * including the stack. 164 - * 165 - * Note: fp is preserved to the stack explicitly prior doing 166 - * this since adding it to the clobber list is incompatible 167 - * with having CONFIG_FRAME_POINTER=y. 168 - */ 169 - asm volatile( 170 - "str fp, [sp, #-4]! \n\t" 171 - "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t" 172 - "bic r0, r0, #"__stringify(CR_C)" \n\t" 173 - "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t" 174 - "isb \n\t" 175 - "bl v7_flush_dcache_all \n\t" 176 - "clrex \n\t" 177 - "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t" 178 - "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" 179 - "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t" 180 - "isb \n\t" 181 - "dsb \n\t" 182 - "ldr fp, [sp], #4" 183 - : : : "r0","r1","r2","r3","r4","r5","r6","r7", 184 - "r9","r10","lr","memory"); 159 + v7_exit_coherency_flush(all); 185 160 186 161 cci_disable_port_by_cpu(mpidr); 187 162 ··· 172 197 173 198 arch_spin_unlock(&tc2_pm_lock); 174 199 175 - /* 176 - * We need to disable and flush only the L1 cache. 177 - * Let's do it in the safest possible way as above. 178 - */ 179 - asm volatile( 180 - "str fp, [sp, #-4]! \n\t" 181 - "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t" 182 - "bic r0, r0, #"__stringify(CR_C)" \n\t" 183 - "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t" 184 - "isb \n\t" 185 - "bl v7_flush_dcache_louis \n\t" 186 - "clrex \n\t" 187 - "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t" 188 - "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" 189 - "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t" 190 - "isb \n\t" 191 - "dsb \n\t" 192 - "ldr fp, [sp], #4" 193 - : : : "r0","r1","r2","r3","r4","r5","r6","r7", 194 - "r9","r10","lr","memory"); 200 + v7_exit_coherency_flush(louis); 195 201 } 196 202 197 203 __mcpm_cpu_down(cpu, cluster);