Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

MIPS: Loongson: Get rid of Loongson 2 #ifdefery all over arch/mips.

It was ugly.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

+170 -130
+5 -4
arch/mips/include/asm/cacheops.h
··· 20 20 #define Index_Load_Tag_D 0x05 21 21 #define Index_Store_Tag_I 0x08 22 22 #define Index_Store_Tag_D 0x09 23 - #if defined(CONFIG_CPU_LOONGSON2) 24 - #define Hit_Invalidate_I 0x00 25 - #else 26 23 #define Hit_Invalidate_I 0x10 27 - #endif 28 24 #define Hit_Invalidate_D 0x11 29 25 #define Hit_Writeback_Inv_D 0x15 30 26 ··· 79 83 #define Index_Store_Data_I 0x1c 80 84 #define Index_Store_Data_D 0x1d 81 85 #define Index_Store_Data_S 0x1f 86 + 87 + /* 88 + * Loongson2-specific cacheops 89 + */ 90 + #define Hit_Invalidate_I_Loongson23 0x00 82 91 83 92 #endif /* __ASM_CACHEOPS_H */
+30 -11
arch/mips/include/asm/r4kcache.h
··· 15 15 #include <asm/asm.h> 16 16 #include <asm/cacheops.h> 17 17 #include <asm/cpu-features.h> 18 + #include <asm/cpu-type.h> 18 19 #include <asm/mipsmtregs.h> 19 20 20 21 /* ··· 163 162 static inline void flush_icache_line(unsigned long addr) 164 163 { 165 164 __iflush_prologue 166 - cache_op(Hit_Invalidate_I, addr); 165 + switch (boot_cpu_type()) { 166 + case CPU_LOONGSON2: 167 + cache_op(Hit_Invalidate_I_Loongson23, addr); 168 + break; 169 + 170 + default: 171 + cache_op(Hit_Invalidate_I, addr); 172 + break; 173 + } 167 174 __iflush_epilogue 168 175 } 169 176 ··· 217 208 */ 218 209 static inline void protected_flush_icache_line(unsigned long addr) 219 210 { 220 - protected_cache_op(Hit_Invalidate_I, addr); 211 + switch (boot_cpu_type()) { 212 + case CPU_LOONGSON2: 213 + protected_cache_op(Hit_Invalidate_I_Loongson23, addr); 214 + break; 215 + 216 + default: 217 + protected_cache_op(Hit_Invalidate_I, addr); 218 + break; 219 + } 221 220 } 222 221 223 222 /* ··· 429 412 __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128) 430 413 431 414 /* build blast_xxx_range, protected_blast_xxx_range */ 432 - #define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \ 433 - static inline void prot##blast_##pfx##cache##_range(unsigned long start, \ 415 + #define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \ 416 + static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \ 434 417 unsigned long end) \ 435 418 { \ 436 419 unsigned long lsize = cpu_##desc##_line_size(); \ ··· 449 432 __##pfx##flush_epilogue \ 450 433 } 451 434 452 - __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_) 453 - __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_) 454 - __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_) 455 - __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, ) 456 - __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, ) 435 + __BUILD_BLAST_CACHE_RANGE(d, 
dcache, Hit_Writeback_Inv_D, protected_, ) 436 + __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, ) 437 + __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, ) 438 + __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson23, \ 439 + protected_, loongson23_) 440 + __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , ) 441 + __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , ) 457 442 /* blast_inv_dcache_range */ 458 - __BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, ) 459 - __BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, ) 443 + __BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , ) 444 + __BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , ) 460 445 461 446 #endif /* _ASM_R4KCACHE_H */
+31 -21
arch/mips/mm/c-r4k.c
··· 346 346 347 347 static inline void local_r4k___flush_cache_all(void * args) 348 348 { 349 - #if defined(CONFIG_CPU_LOONGSON2) 350 - r4k_blast_scache(); 351 - return; 352 - #endif 353 - r4k_blast_dcache(); 354 - r4k_blast_icache(); 355 - 356 349 switch (current_cpu_type()) { 350 + case CPU_LOONGSON2: 357 351 case CPU_R4000SC: 358 352 case CPU_R4000MC: 359 353 case CPU_R4400SC: ··· 355 361 case CPU_R10000: 356 362 case CPU_R12000: 357 363 case CPU_R14000: 364 + /* 365 + * These caches are inclusive caches, that is, if something 366 + * is not cached in the S-cache, we know it also won't be 367 + * in one of the primary caches. 368 + */ 358 369 r4k_blast_scache(); 370 + break; 371 + 372 + default: 373 + r4k_blast_dcache(); 374 + r4k_blast_icache(); 375 + break; 359 376 } 360 377 } 361 378 ··· 577 572 578 573 if (end - start > icache_size) 579 574 r4k_blast_icache(); 580 - else 581 - protected_blast_icache_range(start, end); 575 + else { 576 + switch (boot_cpu_type()) { 577 + case CPU_LOONGSON2: 578 + protected_loongson23_blast_icache_range(start, end); 579 + break; 580 + 581 + default: 582 + protected_blast_icache_range(start, end); 583 + break; 584 + } 585 + } 582 586 } 583 587 584 588 static inline void local_r4k_flush_icache_range_ipi(void *args) ··· 1123 1109 case CPU_ALCHEMY: 1124 1110 c->icache.flags |= MIPS_CACHE_IC_F_DC; 1125 1111 break; 1126 - } 1127 1112 1128 - #ifdef CONFIG_CPU_LOONGSON2 1129 - /* 1130 - * LOONGSON2 has 4 way icache, but when using indexed cache op, 1131 - * one op will act on all 4 ways 1132 - */ 1133 - c->icache.ways = 1; 1134 - #endif 1113 + case CPU_LOONGSON2: 1114 + /* 1115 + * LOONGSON2 has 4 way icache, but when using indexed cache op, 1116 + * one op will act on all 4 ways 1117 + */ 1118 + c->icache.ways = 1; 1119 + } 1135 1120 1136 1121 printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n", 1137 1122 icache_size >> 10, ··· 1206 1193 return 1; 1207 1194 } 1208 1195 1209 - #if defined(CONFIG_CPU_LOONGSON2) 1210 
1196 static void __init loongson2_sc_init(void) 1211 1197 { 1212 1198 struct cpuinfo_mips *c = &current_cpu_data; ··· 1221 1209 1222 1210 c->options |= MIPS_CPU_INCLUSIVE_CACHES; 1223 1211 } 1224 - #endif 1225 1212 1226 1213 extern int r5k_sc_init(void); 1227 1214 extern int rm7k_sc_init(void); ··· 1270 1259 #endif 1271 1260 return; 1272 1261 1273 - #if defined(CONFIG_CPU_LOONGSON2) 1274 1262 case CPU_LOONGSON2: 1275 1263 loongson2_sc_init(); 1276 1264 return; 1277 - #endif 1265 + 1278 1266 case CPU_XLP: 1279 1267 /* don't need to worry about L2, fully coherent */ 1280 1268 return;
+21 -16
arch/mips/mm/tlb-r4k.c
··· 52 52 53 53 #endif /* CONFIG_MIPS_MT_SMTC */ 54 54 55 - #if defined(CONFIG_CPU_LOONGSON2) 56 55 /* 57 56 * LOONGSON2 has a 4 entry itlb which is a subset of dtlb, 58 57 * unfortrunately, itlb is not totally transparent to software. 59 58 */ 60 - #define FLUSH_ITLB write_c0_diag(4); 59 + static inline void flush_itlb(void) 60 + { 61 + switch (current_cpu_type()) { 62 + case CPU_LOONGSON2: 63 + write_c0_diag(4); 64 + break; 65 + default: 66 + break; 67 + } 68 + } 61 69 62 - #define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC) write_c0_diag(4); } 63 - 64 - #else 65 - 66 - #define FLUSH_ITLB 67 - #define FLUSH_ITLB_VM(vma) 68 - 69 - #endif 70 + static inline void flush_itlb_vm(struct vm_area_struct *vma) 71 + { 72 + if (vma->vm_flags & VM_EXEC) 73 + flush_itlb(); 74 + } 70 75 71 76 void local_flush_tlb_all(void) 72 77 { ··· 98 93 } 99 94 tlbw_use_hazard(); 100 95 write_c0_entryhi(old_ctx); 101 - FLUSH_ITLB; 96 + flush_itlb(); 102 97 EXIT_CRITICAL(flags); 103 98 } 104 99 EXPORT_SYMBOL(local_flush_tlb_all); ··· 160 155 } else { 161 156 drop_mmu_context(mm, cpu); 162 157 } 163 - FLUSH_ITLB; 158 + flush_itlb(); 164 159 EXIT_CRITICAL(flags); 165 160 } 166 161 } ··· 202 197 } else { 203 198 local_flush_tlb_all(); 204 199 } 205 - FLUSH_ITLB; 200 + flush_itlb(); 206 201 EXIT_CRITICAL(flags); 207 202 } 208 203 ··· 235 230 236 231 finish: 237 232 write_c0_entryhi(oldpid); 238 - FLUSH_ITLB_VM(vma); 233 + flush_itlb_vm(vma); 239 234 EXIT_CRITICAL(flags); 240 235 } 241 236 } ··· 267 262 tlbw_use_hazard(); 268 263 } 269 264 write_c0_entryhi(oldpid); 270 - FLUSH_ITLB; 265 + flush_itlb(); 271 266 EXIT_CRITICAL(flags); 272 267 } 273 268 ··· 340 335 tlb_write_indexed(); 341 336 } 342 337 tlbw_use_hazard(); 343 - FLUSH_ITLB_VM(vma); 338 + flush_itlb_vm(vma); 344 339 EXIT_CRITICAL(flags); 345 340 } 346 341
+83 -78
arch/mips/mm/tlbex.c
··· 1311 1311 * need three, with the second nop'ed and the third being 1312 1312 * unused. 1313 1313 */ 1314 - /* Loongson2 ebase is different than r4k, we have more space */ 1315 - #if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2) 1316 - if ((p - tlb_handler) > 64) 1317 - panic("TLB refill handler space exceeded"); 1318 - #else 1319 - if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1) 1320 - || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3) 1321 - && uasm_insn_has_bdelay(relocs, 1322 - tlb_handler + MIPS64_REFILL_INSNS - 3))) 1323 - panic("TLB refill handler space exceeded"); 1324 - #endif 1325 - 1326 - /* 1327 - * Now fold the handler in the TLB refill handler space. 1328 - */ 1329 - #if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2) 1330 - f = final_handler; 1331 - /* Simplest case, just copy the handler. */ 1332 - uasm_copy_handler(relocs, labels, tlb_handler, p, f); 1333 - final_len = p - tlb_handler; 1334 - #else /* CONFIG_64BIT */ 1335 - f = final_handler + MIPS64_REFILL_INSNS; 1336 - if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) { 1337 - /* Just copy the handler. */ 1338 - uasm_copy_handler(relocs, labels, tlb_handler, p, f); 1339 - final_len = p - tlb_handler; 1340 - } else { 1314 + switch (boot_cpu_type()) { 1315 + default: 1316 + if (sizeof(long) == 4) { 1317 + case CPU_LOONGSON2: 1318 + /* Loongson2 ebase is different than r4k, we have more space */ 1319 + if ((p - tlb_handler) > 64) 1320 + panic("TLB refill handler space exceeded"); 1321 + /* 1322 + * Now fold the handler in the TLB refill handler space. 1323 + */ 1324 + f = final_handler; 1325 + /* Simplest case, just copy the handler. 
*/ 1326 + uasm_copy_handler(relocs, labels, tlb_handler, p, f); 1327 + final_len = p - tlb_handler; 1328 + break; 1329 + } else { 1330 + if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1) 1331 + || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3) 1332 + && uasm_insn_has_bdelay(relocs, 1333 + tlb_handler + MIPS64_REFILL_INSNS - 3))) 1334 + panic("TLB refill handler space exceeded"); 1335 + /* 1336 + * Now fold the handler in the TLB refill handler space. 1337 + */ 1338 + f = final_handler + MIPS64_REFILL_INSNS; 1339 + if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) { 1340 + /* Just copy the handler. */ 1341 + uasm_copy_handler(relocs, labels, tlb_handler, p, f); 1342 + final_len = p - tlb_handler; 1343 + } else { 1341 1344 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1342 - const enum label_id ls = label_tlb_huge_update; 1345 + const enum label_id ls = label_tlb_huge_update; 1343 1346 #else 1344 - const enum label_id ls = label_vmalloc; 1347 + const enum label_id ls = label_vmalloc; 1345 1348 #endif 1346 - u32 *split; 1347 - int ov = 0; 1348 - int i; 1349 + u32 *split; 1350 + int ov = 0; 1351 + int i; 1349 1352 1350 - for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++) 1351 - ; 1352 - BUG_ON(i == ARRAY_SIZE(labels)); 1353 - split = labels[i].addr; 1353 + for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++) 1354 + ; 1355 + BUG_ON(i == ARRAY_SIZE(labels)); 1356 + split = labels[i].addr; 1354 1357 1355 - /* 1356 - * See if we have overflown one way or the other. 1357 - */ 1358 - if (split > tlb_handler + MIPS64_REFILL_INSNS || 1359 - split < p - MIPS64_REFILL_INSNS) 1360 - ov = 1; 1358 + /* 1359 + * See if we have overflown one way or the other. 1360 + */ 1361 + if (split > tlb_handler + MIPS64_REFILL_INSNS || 1362 + split < p - MIPS64_REFILL_INSNS) 1363 + ov = 1; 1361 1364 1362 - if (ov) { 1363 - /* 1364 - * Split two instructions before the end. One 1365 - * for the branch and one for the instruction 1366 - * in the delay slot. 
1367 - */ 1368 - split = tlb_handler + MIPS64_REFILL_INSNS - 2; 1365 + if (ov) { 1366 + /* 1367 + * Split two instructions before the end. One 1368 + * for the branch and one for the instruction 1369 + * in the delay slot. 1370 + */ 1371 + split = tlb_handler + MIPS64_REFILL_INSNS - 2; 1369 1372 1370 - /* 1371 - * If the branch would fall in a delay slot, 1372 - * we must back up an additional instruction 1373 - * so that it is no longer in a delay slot. 1374 - */ 1375 - if (uasm_insn_has_bdelay(relocs, split - 1)) 1376 - split--; 1377 - } 1378 - /* Copy first part of the handler. */ 1379 - uasm_copy_handler(relocs, labels, tlb_handler, split, f); 1380 - f += split - tlb_handler; 1373 + /* 1374 + * If the branch would fall in a delay slot, 1375 + * we must back up an additional instruction 1376 + * so that it is no longer in a delay slot. 1377 + */ 1378 + if (uasm_insn_has_bdelay(relocs, split - 1)) 1379 + split--; 1380 + } 1381 + /* Copy first part of the handler. */ 1382 + uasm_copy_handler(relocs, labels, tlb_handler, split, f); 1383 + f += split - tlb_handler; 1381 1384 1382 - if (ov) { 1383 - /* Insert branch. */ 1384 - uasm_l_split(&l, final_handler); 1385 - uasm_il_b(&f, &r, label_split); 1386 - if (uasm_insn_has_bdelay(relocs, split)) 1387 - uasm_i_nop(&f); 1388 - else { 1389 - uasm_copy_handler(relocs, labels, 1390 - split, split + 1, f); 1391 - uasm_move_labels(labels, f, f + 1, -1); 1392 - f++; 1393 - split++; 1385 + if (ov) { 1386 + /* Insert branch. */ 1387 + uasm_l_split(&l, final_handler); 1388 + uasm_il_b(&f, &r, label_split); 1389 + if (uasm_insn_has_bdelay(relocs, split)) 1390 + uasm_i_nop(&f); 1391 + else { 1392 + uasm_copy_handler(relocs, labels, 1393 + split, split + 1, f); 1394 + uasm_move_labels(labels, f, f + 1, -1); 1395 + f++; 1396 + split++; 1397 + } 1398 + } 1399 + 1400 + /* Copy the rest of the handler. 
*/ 1401 + uasm_copy_handler(relocs, labels, split, p, final_handler); 1402 + final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) + 1403 + (p - split); 1394 1404 } 1395 1405 } 1396 - 1397 - /* Copy the rest of the handler. */ 1398 - uasm_copy_handler(relocs, labels, split, p, final_handler); 1399 - final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) + 1400 - (p - split); 1406 + break; 1401 1407 } 1402 - #endif /* CONFIG_64BIT */ 1403 1408 1404 1409 uasm_resolve_relocs(relocs, labels); 1405 1410 pr_debug("Wrote TLB refill handler (%u instructions).\n",