Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/cache: add cache flush operation for various e500

Various e500 cores have different cache architectures, so they
need different cache flush operations. Therefore, add a callback
function cpu_flush_caches to the struct cpu_spec. The cache flush
operation for the specific kind of e500 is selected at init time.
The callback function will flush all caches inside the current cpu.

Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
Signed-off-by: Tang Yuantian <Yuantian.Tang@freescale.com>
Signed-off-by: Scott Wood <oss@buserror.net>

authored by

Chenhui Zhao and committed by
Scott Wood
e7affb1d ebb9d30a

+128 -78
-2
arch/powerpc/include/asm/cacheflush.h
··· 30 30 #define flush_dcache_mmap_lock(mapping) do { } while (0) 31 31 #define flush_dcache_mmap_unlock(mapping) do { } while (0) 32 32 33 - extern void __flush_disable_L1(void); 34 - 35 33 extern void flush_icache_range(unsigned long, unsigned long); 36 34 extern void flush_icache_user_range(struct vm_area_struct *vma, 37 35 struct page *page, unsigned long addr,
+8
arch/powerpc/include/asm/cputable.h
··· 43 43 extern int machine_check_e200(struct pt_regs *regs); 44 44 extern int machine_check_47x(struct pt_regs *regs); 45 45 46 + extern void cpu_down_flush_e500v2(void); 47 + extern void cpu_down_flush_e500mc(void); 48 + extern void cpu_down_flush_e5500(void); 49 + extern void cpu_down_flush_e6500(void); 50 + 46 51 /* NOTE WELL: Update identify_cpu() if fields are added or removed! */ 47 52 struct cpu_spec { 48 53 /* CPU is matched via (PVR & pvr_mask) == pvr_value */ ··· 63 58 /* cache line sizes */ 64 59 unsigned int icache_bsize; 65 60 unsigned int dcache_bsize; 61 + 62 + /* flush caches inside the current cpu */ 63 + void (*cpu_down_flush)(void); 66 64 67 65 /* number of performance monitor counters */ 68 66 unsigned int num_pmcs;
+1
arch/powerpc/kernel/asm-offsets.c
··· 376 376 DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features)); 377 377 DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup)); 378 378 DEFINE(CPU_SPEC_RESTORE, offsetof(struct cpu_spec, cpu_restore)); 379 + DEFINE(CPU_DOWN_FLUSH, offsetof(struct cpu_spec, cpu_down_flush)); 379 380 380 381 DEFINE(pbe_address, offsetof(struct pbe, address)); 381 382 DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
+112
arch/powerpc/kernel/cpu_setup_fsl_booke.S
··· 13 13 * 14 14 */ 15 15 16 + #include <asm/page.h> 16 17 #include <asm/processor.h> 17 18 #include <asm/cputable.h> 18 19 #include <asm/ppc_asm.h> 19 20 #include <asm/mmu-book3e.h> 20 21 #include <asm/asm-offsets.h> 22 + #include <asm/mpc85xx.h> 21 23 22 24 _GLOBAL(__e500_icache_setup) 23 25 mfspr r0, SPRN_L1CSR1 ··· 235 233 mtlr r5 236 234 blr 237 235 #endif 236 + 237 + /* flush L1 data cache, it can apply to e500v2, e500mc and e5500 */ 238 + _GLOBAL(flush_dcache_L1) 239 + mfmsr r10 240 + wrteei 0 241 + 242 + mfspr r3,SPRN_L1CFG0 243 + rlwinm r5,r3,9,3 /* Extract cache block size */ 244 + twlgti r5,1 /* Only 32 and 64 byte cache blocks 245 + * are currently defined. 246 + */ 247 + li r4,32 248 + subfic r6,r5,2 /* r6 = log2(1KiB / cache block size) - 249 + * log2(number of ways) 250 + */ 251 + slw r5,r4,r5 /* r5 = cache block size */ 252 + 253 + rlwinm r7,r3,0,0xff /* Extract number of KiB in the cache */ 254 + mulli r7,r7,13 /* An 8-way cache will require 13 255 + * loads per set. 256 + */ 257 + slw r7,r7,r6 258 + 259 + /* save off HID0 and set DCFA */ 260 + mfspr r8,SPRN_HID0 261 + ori r9,r8,HID0_DCFA@l 262 + mtspr SPRN_HID0,r9 263 + isync 264 + 265 + LOAD_REG_IMMEDIATE(r6, KERNELBASE) 266 + mr r4, r6 267 + mtctr r7 268 + 269 + 1: lwz r3,0(r4) /* Load... */ 270 + add r4,r4,r5 271 + bdnz 1b 272 + 273 + msync 274 + mr r4, r6 275 + mtctr r7 276 + 277 + 1: dcbf 0,r4 /* ...and flush. */ 278 + add r4,r4,r5 279 + bdnz 1b 280 + 281 + /* restore HID0 */ 282 + mtspr SPRN_HID0,r8 283 + isync 284 + 285 + wrtee r10 286 + 287 + blr 288 + 289 + has_L2_cache: 290 + /* skip L2 cache on P2040/P2040E as they have no L2 cache */ 291 + mfspr r3, SPRN_SVR 292 + /* shift right by 8 bits and clear E bit of SVR */ 293 + rlwinm r4, r3, 24, ~0x800 294 + 295 + lis r3, SVR_P2040@h 296 + ori r3, r3, SVR_P2040@l 297 + cmpw r4, r3 298 + beq 1f 299 + 300 + li r3, 1 301 + blr 302 + 1: 303 + li r3, 0 304 + blr 305 + 306 + /* flush backside L2 cache */ 307 + flush_backside_L2_cache: 308 + mflr r10 309 + bl has_L2_cache 310 + mtlr r10 311 + cmpwi r3, 0 312 + beq 2f 313 + 314 + /* Flush the L2 cache */ 315 + mfspr r3, SPRN_L2CSR0 316 + ori r3, r3, L2CSR0_L2FL@l 317 + msync 318 + isync 319 + mtspr SPRN_L2CSR0,r3 320 + isync 321 + 322 + /* check if it is complete */ 323 + 1: mfspr r3,SPRN_L2CSR0 324 + andi. r3, r3, L2CSR0_L2FL@l 325 + bne 1b 326 + 2: 327 + blr 328 + 329 + _GLOBAL(cpu_down_flush_e500v2) 330 + mflr r0 331 + bl flush_dcache_L1 332 + mtlr r0 333 + blr 334 + 335 + _GLOBAL(cpu_down_flush_e500mc) 336 + _GLOBAL(cpu_down_flush_e5500) 337 + mflr r0 338 + bl flush_dcache_L1 339 + bl flush_backside_L2_cache 340 + mtlr r0 341 + blr 342 + 343 + /* L1 Data Cache of e6500 contains no modified data, no flush is required */ 344 + _GLOBAL(cpu_down_flush_e6500) 345 + blr
+4
arch/powerpc/kernel/cputable.c
··· 2050 2050 .cpu_setup = __setup_cpu_e500v2, 2051 2051 .machine_check = machine_check_e500, 2052 2052 .platform = "ppc8548", 2053 + .cpu_down_flush = cpu_down_flush_e500v2, 2053 2054 }, 2054 2055 #else 2055 2056 { /* e500mc */ ··· 2070 2069 .cpu_setup = __setup_cpu_e500mc, 2071 2070 .machine_check = machine_check_e500mc, 2072 2071 .platform = "ppce500mc", 2072 + .cpu_down_flush = cpu_down_flush_e500mc, 2073 2073 }, 2074 2074 #endif /* CONFIG_PPC_E500MC */ 2075 2075 #endif /* CONFIG_PPC32 */ ··· 2095 2093 #endif 2096 2094 .machine_check = machine_check_e500mc, 2097 2095 .platform = "ppce5500", 2096 + .cpu_down_flush = cpu_down_flush_e5500, 2098 2097 }, 2099 2098 { /* e6500 */ 2100 2099 .pvr_mask = 0xffff0000, ··· 2118 2115 #endif 2119 2116 .machine_check = machine_check_e500mc, 2120 2117 .platform = "ppce6500", 2118 + .cpu_down_flush = cpu_down_flush_e6500, 2121 2119 }, 2122 2120 #endif /* CONFIG_PPC_E500MC */ 2123 2121 #ifdef CONFIG_PPC32
-74
arch/powerpc/kernel/head_fsl_booke.S
··· 1037 1037 isync /* Force context change */ 1038 1038 blr 1039 1039 1040 - _GLOBAL(flush_dcache_L1) 1041 - mfspr r3,SPRN_L1CFG0 1042 - 1043 - rlwinm r5,r3,9,3 /* Extract cache block size */ 1044 - twlgti r5,1 /* Only 32 and 64 byte cache blocks 1045 - * are currently defined. 1046 - */ 1047 - li r4,32 1048 - subfic r6,r5,2 /* r6 = log2(1KiB / cache block size) - 1049 - * log2(number of ways) 1050 - */ 1051 - slw r5,r4,r5 /* r5 = cache block size */ 1052 - 1053 - rlwinm r7,r3,0,0xff /* Extract number of KiB in the cache */ 1054 - mulli r7,r7,13 /* An 8-way cache will require 13 1055 - * loads per set. 1056 - */ 1057 - slw r7,r7,r6 1058 - 1059 - /* save off HID0 and set DCFA */ 1060 - mfspr r8,SPRN_HID0 1061 - ori r9,r8,HID0_DCFA@l 1062 - mtspr SPRN_HID0,r9 1063 - isync 1064 - 1065 - lis r4,KERNELBASE@h 1066 - mtctr r7 1067 - 1068 - 1: lwz r3,0(r4) /* Load... */ 1069 - add r4,r4,r5 1070 - bdnz 1b 1071 - 1072 - msync 1073 - lis r4,KERNELBASE@h 1074 - mtctr r7 1075 - 1076 - 1: dcbf 0,r4 /* ...and flush. */ 1077 - add r4,r4,r5 1078 - bdnz 1b 1079 - 1080 - /* restore HID0 */ 1081 - mtspr SPRN_HID0,r8 1082 - isync 1083 - 1084 - blr 1085 - 1086 - /* Flush L1 d-cache, invalidate and disable d-cache and i-cache */ 1087 - _GLOBAL(__flush_disable_L1) 1088 - mflr r10 1089 - bl flush_dcache_L1 /* Flush L1 d-cache */ 1090 - mtlr r10 1091 - 1092 - mfspr r4, SPRN_L1CSR0 /* Invalidate and disable d-cache */ 1093 - li r5, 2 1094 - rlwimi r4, r5, 0, 3 1095 - 1096 - msync 1097 - isync 1098 - mtspr SPRN_L1CSR0, r4 1099 - isync 1100 - 1101 - 1: mfspr r4, SPRN_L1CSR0 /* Wait for the invalidate to finish */ 1102 - andi. r4, r4, 2 1103 - bne 1b 1104 - 1105 - mfspr r4, SPRN_L1CSR1 /* Invalidate and disable i-cache */ 1106 - li r5, 2 1107 - rlwimi r4, r5, 0, 3 1108 - 1109 - mtspr SPRN_L1CSR1, r4 1110 - isync 1111 - 1112 - blr 1113 - 1114 1040 #ifdef CONFIG_SMP 1115 1041 /* When we get here, r24 needs to hold the CPU # */ 1116 1042 .globl __secondary_start
+3 -2
arch/powerpc/platforms/85xx/smp.c
··· 139 139 140 140 mtspr(SPRN_TCR, 0); 141 141 142 - __flush_disable_L1(); 142 + cur_cpu_spec->cpu_down_flush(); 143 + 143 144 tmp = (mfspr(SPRN_HID0) & ~(HID0_DOZE|HID0_SLEEP)) | HID0_NAP; 144 145 mtspr(SPRN_HID0, tmp); 145 146 isync(); ··· 360 359 local_irq_disable(); 361 360 362 361 if (secondary) { 363 - __flush_disable_L1(); 362 + cur_cpu_spec->cpu_down_flush(); 364 363 atomic_inc(&kexec_down_cpus); 365 364 /* loop forever */ 366 365 while (1);