Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

[MIPS] Sibyte: Replace SB1 cache code with standard R4000 class cache code.

It may not be perfect yet, but the SB1 code is badly broken and has
horrible performance issues.

Downside: This seriously breaks support for pass 1 parts of the BCM1250,
where indexed cacheops don't work quite reliably, but I seem to be the
last one on the planet with a pass 1 part anyway.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
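
The downside above turns on the difference between indexed and hit CACHE operations: an
indexed op selects a cache line purely by the set/way bits of the address and needs no
TLB mapping, while a hit op only acts when the given address is actually resident in the
cache. The generic r4k code relies on indexed ops, which is why pass 1 BCM1250 parts with
unreliable indexed cacheops lose out. The sketch below is editorial illustration, not part
of the commit; the op values match the standard include/asm-mips/cacheops.h encodings, and
the helper names are made up.

/*
 * Editorial sketch, not part of this commit.  Op encodings are the
 * standard cacheops.h values; the helpers are hypothetical.
 */
#define Index_Invalidate_I	0x00	/* select an I-cache line by index */
#define Hit_Invalidate_I	0x10	/* act only if the address hits */

static inline void icache_inv_indexed(unsigned long index)
{
	/* No address translation involved; whatever line sits at this
	   set/way gets invalidated.  This is what c-r4k.c relies on. */
	__asm__ __volatile__(
	"	.set	push		\n"
	"	.set	mips3		\n"
	"	cache	%0, 0(%1)	\n"
	"	.set	pop		\n"
	: : "i" (Index_Invalidate_I), "r" (index));
}

static inline void icache_inv_hit(unsigned long vaddr)
{
	/* Invalidates only if vaddr is currently present in the I-cache. */
	__asm__ __volatile__(
	"	.set	push		\n"
	"	.set	mips3		\n"
	"	cache	%0, 0(%1)	\n"
	"	.set	pop		\n"
	: : "i" (Hit_Invalidate_I), "r" (vaddr));
}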

+54 -577
-8
arch/mips/kernel/cpu-probe.c
···
 {
 	decode_configs(c);
 
-	/*
-	 * For historical reasons the SB1 comes with it's own variant of
-	 * cache code which eventually will be folded into c-r4k.c.  Until
-	 * then we pretend it's got it's own cache architecture.
-	 */
-	c->options &= ~MIPS_CPU_4K_CACHE;
-	c->options |= MIPS_CPU_SB1_CACHE;
-
 	switch (c->processor_id & 0xff00) {
 	case PRID_IMP_SB1:
 		c->cputype = CPU_SB1;
+6
arch/mips/kernel/traps.c
···
 	flush_icache_range(ebase + offset, ebase + offset + size);
 }
 
+static char panic_null_cerr[] __initdata =
+	"Trying to set NULL cache error exception handler";
+
 /* Install uncached CPU exception handler */
 void __init set_uncached_handler (unsigned long offset, void *addr, unsigned long size)
 {
···
 #ifdef CONFIG_64BIT
 	unsigned long uncached_ebase = TO_UNCAC(ebase);
 #endif
+
+	if (!addr)
+		panic(panic_null_cerr);
 
 	memcpy((void *)(uncached_ebase + offset), addr, size);
 }
+1 -1
arch/mips/mm/Makefile
···
 obj-$(CONFIG_CPU_R8000)		+= c-r4k.o cex-gen.o pg-r4k.o tlb-r8k.o
 obj-$(CONFIG_CPU_RM7000)	+= c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
 obj-$(CONFIG_CPU_RM9000)	+= c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
-obj-$(CONFIG_CPU_SB1)		+= c-sb1.o cerr-sb1.o cex-sb1.o pg-sb1.o \
+obj-$(CONFIG_CPU_SB1)		+= c-r4k.o cerr-sb1.o cex-sb1.o pg-sb1.o \
 				   tlb-r4k.o
 obj-$(CONFIG_CPU_TX39XX)	+= c-tx39.o pg-r4k.o tlb-r3k.o
 obj-$(CONFIG_CPU_TX49XX)	+= c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
+18 -4
arch/mips/mm/c-r4k.c
···
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/linkage.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/bitops.h>
···
 	switch (c->cputype) {
 	case CPU_20KC:
 	case CPU_25KF:
+	case CPU_SB1:
+	case CPU_SB1A:
 		c->dcache.flags |= MIPS_CACHE_PINDEX;
+		break;
+
 	case CPU_R10000:
 	case CPU_R12000:
 	case CPU_R14000:
-	case CPU_SB1:
 		break;
+
 	case CPU_24K:
 	case CPU_34K:
 	case CPU_74K:
···
 {
 	extern void build_clear_page(void);
 	extern void build_copy_page(void);
-	extern char except_vec2_generic;
+	extern char __weak except_vec2_generic;
+	extern char __weak except_vec2_sb1;
 	struct cpuinfo_mips *c = &current_cpu_data;
 
-	/* Default cache error handler for R4000 and R5000 family */
-	set_uncached_handler (0x100, &except_vec2_generic, 0x80);
+	switch (c->cputype) {
+	case CPU_SB1:
+	case CPU_SB1A:
+		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
+		break;
+
+	default:
+		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
+		break;
+	}
 
 	probe_pcache();
 	setup_scache();
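
A side note on the __weak annotations in the hunk above (the macro itself is added to
include/asm-mips/linkage.h further down): declaring except_vec2_sb1 as a weak extern lets
c-r4k.c reference the SB1 cache error vector even in configurations where cerr-sb1.o is
not built; the unresolved weak reference simply becomes address zero, and the NULL check
added to set_uncached_handler() in traps.c turns that into a clean panic instead of
installing a bogus handler. A minimal user-space sketch of the same linker behaviour,
with hypothetical symbol names:

/* Editorial sketch, not from the kernel tree: a weak extern reference
 * resolves to address 0 when no object file defines the symbol. */
#include <stdio.h>

extern char optional_handler __attribute__((weak));

int main(void)
{
	if (&optional_handler)
		printf("handler linked in at %p\n", (void *)&optional_handler);
	else
		printf("handler missing; the kernel would panic here\n");
	return 0;
}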
-535
arch/mips/mm/c-sb1.c
···
-/*
- * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
- * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org)
- * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
- * Copyright (C) 2004 Maciej W. Rozycki
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- */
-#include <linux/init.h>
-#include <linux/hardirq.h>
-
-#include <asm/asm.h>
-#include <asm/bootinfo.h>
-#include <asm/cacheops.h>
-#include <asm/cpu.h>
-#include <asm/mipsregs.h>
-#include <asm/mmu_context.h>
-#include <asm/uaccess.h>
-
-extern void sb1_dma_init(void);
-
-/* These are probed at ld_mmu time */
-static unsigned long icache_size;
-static unsigned long dcache_size;
-
-static unsigned short icache_line_size;
-static unsigned short dcache_line_size;
-
-static unsigned int icache_index_mask;
-static unsigned int dcache_index_mask;
-
-static unsigned short icache_assoc;
-static unsigned short dcache_assoc;
-
-static unsigned short icache_sets;
-static unsigned short dcache_sets;
-
-static unsigned int icache_range_cutoff;
-static unsigned int dcache_range_cutoff;
-
-static inline void sb1_on_each_cpu(void (*func) (void *info), void *info,
-	int retry, int wait)
-{
-	preempt_disable();
-	smp_call_function(func, info, retry, wait);
-	func(info);
-	preempt_enable();
-}
-
-/*
- * The dcache is fully coherent to the system, with one
- * big caveat:  the instruction stream.  In other words,
- * if we miss in the icache, and have dirty data in the
- * L1 dcache, then we'll go out to memory (or the L2) and
- * get the not-as-recent data.
- *
- * So the only time we have to flush the dcache is when
- * we're flushing the icache.  Since the L2 is fully
- * coherent to everything, including I/O, we never have
- * to flush it
- */
-
-#define cache_set_op(op, addr)					\
-	__asm__ __volatile__(					\
-	"	.set	noreorder		\n"		\
-	"	.set	mips64\n\t		\n"		\
-	"	cache	%0, (0<<13)(%1)		\n"		\
-	"	cache	%0, (1<<13)(%1)		\n"		\
-	"	cache	%0, (2<<13)(%1)		\n"		\
-	"	cache	%0, (3<<13)(%1)		\n"		\
-	"	.set	mips0			\n"		\
-	"	.set	reorder"				\
-	:							\
-	: "i" (op), "r" (addr))
-
-#define sync()							\
-	__asm__ __volatile(					\
-	"	.set	mips64\n\t		\n"		\
-	"	sync				\n"		\
-	"	.set	mips0")
-
-#define mispredict()						\
-	__asm__ __volatile__(					\
-	"	bnezl  $0, 1f		\n" /* Force mispredict */ \
-	"1:				\n");
-
-/*
- * Writeback and invalidate the entire dcache
- */
-static inline void __sb1_writeback_inv_dcache_all(void)
-{
-	unsigned long addr = 0;
-
-	while (addr < dcache_line_size * dcache_sets) {
-		cache_set_op(Index_Writeback_Inv_D, addr);
-		addr += dcache_line_size;
-	}
-}
-
-/*
- * Writeback and invalidate a range of the dcache.  The addresses are
- * virtual, and since we're using index ops and bit 12 is part of both
- * the virtual frame and physical index, we have to clear both sets
- * (bit 12 set and cleared).
- */
-static inline void __sb1_writeback_inv_dcache_range(unsigned long start,
-	unsigned long end)
-{
-	unsigned long index;
-
-	start &= ~(dcache_line_size - 1);
-	end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1);
-
-	while (start != end) {
-		index = start & dcache_index_mask;
-		cache_set_op(Index_Writeback_Inv_D, index);
-		cache_set_op(Index_Writeback_Inv_D, index ^ (1<<12));
-		start += dcache_line_size;
-	}
-	sync();
-}
-
-/*
- * Writeback and invalidate a range of the dcache.  With physical
- * addresseses, we don't have to worry about possible bit 12 aliasing.
- * XXXKW is it worth turning on KX and using hit ops with xkphys?
- */
-static inline void __sb1_writeback_inv_dcache_phys_range(unsigned long start,
-	unsigned long end)
-{
-	start &= ~(dcache_line_size - 1);
-	end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1);
-
-	while (start != end) {
-		cache_set_op(Index_Writeback_Inv_D, start & dcache_index_mask);
-		start += dcache_line_size;
-	}
-	sync();
-}
-
-
-/*
- * Invalidate the entire icache
- */
-static inline void __sb1_flush_icache_all(void)
-{
-	unsigned long addr = 0;
-
-	while (addr < icache_line_size * icache_sets) {
-		cache_set_op(Index_Invalidate_I, addr);
-		addr += icache_line_size;
-	}
-}
-
-/*
- * Invalidate a range of the icache.  The addresses are virtual, and
- * the cache is virtually indexed and tagged.  However, we don't
- * necessarily have the right ASID context, so use index ops instead
- * of hit ops.
- */
-static inline void __sb1_flush_icache_range(unsigned long start,
-	unsigned long end)
-{
-	start &= ~(icache_line_size - 1);
-	end = (end + icache_line_size - 1) & ~(icache_line_size - 1);
-
-	while (start != end) {
-		cache_set_op(Index_Invalidate_I, start & icache_index_mask);
-		start += icache_line_size;
-	}
-	mispredict();
-	sync();
-}
-
-/*
- * Flush the icache for a given physical page.  Need to writeback the
- * dcache first, then invalidate the icache.  If the page isn't
- * executable, nothing is required.
- */
-static void local_sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
-{
-	int cpu = smp_processor_id();
-
-#ifndef CONFIG_SMP
-	if (!(vma->vm_flags & VM_EXEC))
-		return;
-#endif
-
-	__sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE);
-
-	/*
-	 * Bumping the ASID is probably cheaper than the flush ...
-	 */
-	if (vma->vm_mm == current->active_mm) {
-		if (cpu_context(cpu, vma->vm_mm) != 0)
-			drop_mmu_context(vma->vm_mm, cpu);
-	} else
-		__sb1_flush_icache_range(addr, addr + PAGE_SIZE);
-}
-
-#ifdef CONFIG_SMP
-struct flush_cache_page_args {
-	struct vm_area_struct *vma;
-	unsigned long addr;
-	unsigned long pfn;
-};
-
-static void sb1_flush_cache_page_ipi(void *info)
-{
-	struct flush_cache_page_args *args = info;
-
-	local_sb1_flush_cache_page(args->vma, args->addr, args->pfn);
-}
-
-/* Dirty dcache could be on another CPU, so do the IPIs */
-static void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
-{
-	struct flush_cache_page_args args;
-
-	if (!(vma->vm_flags & VM_EXEC))
-		return;
-
-	addr &= PAGE_MASK;
-	args.vma = vma;
-	args.addr = addr;
-	args.pfn = pfn;
-	sb1_on_each_cpu(sb1_flush_cache_page_ipi, (void *) &args, 1, 1);
-}
-#else
-void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
-	__attribute__((alias("local_sb1_flush_cache_page")));
-#endif
-
-#ifdef CONFIG_SMP
-static void sb1_flush_cache_data_page_ipi(void *info)
-{
-	unsigned long start = (unsigned long)info;
-
-	__sb1_writeback_inv_dcache_range(start, start + PAGE_SIZE);
-}
-
-static void sb1_flush_cache_data_page(unsigned long addr)
-{
-	if (in_atomic())
-		__sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE);
-	else
-		on_each_cpu(sb1_flush_cache_data_page_ipi, (void *) addr, 1, 1);
-}
-#else
-
-static void local_sb1_flush_cache_data_page(unsigned long addr)
-{
-	__sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE);
-}
-
-void sb1_flush_cache_data_page(unsigned long)
-	__attribute__((alias("local_sb1_flush_cache_data_page")));
-#endif
-
-/*
- * Invalidate all caches on this CPU
- */
-static void __used local_sb1___flush_cache_all(void)
-{
-	__sb1_writeback_inv_dcache_all();
-	__sb1_flush_icache_all();
-}
-
-#ifdef CONFIG_SMP
-void sb1___flush_cache_all_ipi(void *ignored)
-	__attribute__((alias("local_sb1___flush_cache_all")));
-
-static void sb1___flush_cache_all(void)
-{
-	sb1_on_each_cpu(sb1___flush_cache_all_ipi, 0, 1, 1);
-}
-#else
-void sb1___flush_cache_all(void)
-	__attribute__((alias("local_sb1___flush_cache_all")));
-#endif
-
-/*
- * When flushing a range in the icache, we have to first writeback
- * the dcache for the same range, so new ifetches will see any
- * data that was dirty in the dcache.
- *
- * The start/end arguments are Kseg addresses (possibly mapped Kseg).
- */
-
-static void local_sb1_flush_icache_range(unsigned long start,
-	unsigned long end)
-{
-	/* Just wb-inv the whole dcache if the range is big enough */
-	if ((end - start) > dcache_range_cutoff)
-		__sb1_writeback_inv_dcache_all();
-	else
-		__sb1_writeback_inv_dcache_range(start, end);
-
-	/* Just flush the whole icache if the range is big enough */
-	if ((end - start) > icache_range_cutoff)
-		__sb1_flush_icache_all();
-	else
-		__sb1_flush_icache_range(start, end);
-}
-
-#ifdef CONFIG_SMP
-struct flush_icache_range_args {
-	unsigned long start;
-	unsigned long end;
-};
-
-static void sb1_flush_icache_range_ipi(void *info)
-{
-	struct flush_icache_range_args *args = info;
-
-	local_sb1_flush_icache_range(args->start, args->end);
-}
-
-void sb1_flush_icache_range(unsigned long start, unsigned long end)
-{
-	struct flush_icache_range_args args;
-
-	args.start = start;
-	args.end = end;
-	sb1_on_each_cpu(sb1_flush_icache_range_ipi, &args, 1, 1);
-}
-#else
-void sb1_flush_icache_range(unsigned long start, unsigned long end)
-	__attribute__((alias("local_sb1_flush_icache_range")));
-#endif
-
-/*
- * A signal trampoline must fit into a single cacheline.
- */
-static void local_sb1_flush_cache_sigtramp(unsigned long addr)
-{
-	cache_set_op(Index_Writeback_Inv_D, addr & dcache_index_mask);
-	cache_set_op(Index_Writeback_Inv_D, (addr ^ (1<<12)) & dcache_index_mask);
-	cache_set_op(Index_Invalidate_I, addr & icache_index_mask);
-	mispredict();
-}
-
-#ifdef CONFIG_SMP
-static void sb1_flush_cache_sigtramp_ipi(void *info)
-{
-	unsigned long iaddr = (unsigned long) info;
-	local_sb1_flush_cache_sigtramp(iaddr);
-}
-
-static void sb1_flush_cache_sigtramp(unsigned long addr)
-{
-	sb1_on_each_cpu(sb1_flush_cache_sigtramp_ipi, (void *) addr, 1, 1);
-}
-#else
-void sb1_flush_cache_sigtramp(unsigned long addr)
-	__attribute__((alias("local_sb1_flush_cache_sigtramp")));
-#endif
-
-
-/*
- * Anything that just flushes dcache state can be ignored, as we're always
- * coherent in dcache space.  This is just a dummy function that all the
- * nop'ed routines point to
- */
-static void sb1_nop(void)
-{
-}
-
-/*
- * Cache set values (from the mips64 spec)
- * 0 - 64
- * 1 - 128
- * 2 - 256
- * 3 - 512
- * 4 - 1024
- * 5 - 2048
- * 6 - 4096
- * 7 - Reserved
- */
-
-static unsigned int decode_cache_sets(unsigned int config_field)
-{
-	if (config_field == 7) {
-		/* JDCXXX - Find a graceful way to abort. */
-		return 0;
-	}
-	return (1<<(config_field + 6));
-}
-
-/*
- * Cache line size values (from the mips64 spec)
- * 0 - No cache present.
- * 1 - 4 bytes
- * 2 - 8 bytes
- * 3 - 16 bytes
- * 4 - 32 bytes
- * 5 - 64 bytes
- * 6 - 128 bytes
- * 7 - Reserved
- */
-
-static unsigned int decode_cache_line_size(unsigned int config_field)
-{
-	if (config_field == 0) {
-		return 0;
-	} else if (config_field == 7) {
-		/* JDCXXX - Find a graceful way to abort. */
-		return 0;
-	}
-	return (1<<(config_field + 1));
-}
-
-/*
- * Relevant bits of the config1 register format (from the MIPS32/MIPS64 specs)
- *
- * 24:22 Icache sets per way
- * 21:19 Icache line size
- * 18:16 Icache Associativity
- * 15:13 Dcache sets per way
- * 12:10 Dcache line size
- * 9:7   Dcache Associativity
- */
-
-static char *way_string[] = {
-	"direct mapped", "2-way", "3-way", "4-way",
-	"5-way", "6-way", "7-way", "8-way",
-};
-
-static __init void probe_cache_sizes(void)
-{
-	u32 config1;
-
-	config1 = read_c0_config1();
-	icache_line_size = decode_cache_line_size((config1 >> 19) & 0x7);
-	dcache_line_size = decode_cache_line_size((config1 >> 10) & 0x7);
-	icache_sets = decode_cache_sets((config1 >> 22) & 0x7);
-	dcache_sets = decode_cache_sets((config1 >> 13) & 0x7);
-	icache_assoc = ((config1 >> 16) & 0x7) + 1;
-	dcache_assoc = ((config1 >> 7) & 0x7) + 1;
-	icache_size = icache_line_size * icache_sets * icache_assoc;
-	dcache_size = dcache_line_size * dcache_sets * dcache_assoc;
-	/* Need to remove non-index bits for index ops */
-	icache_index_mask = (icache_sets - 1) * icache_line_size;
-	dcache_index_mask = (dcache_sets - 1) * dcache_line_size;
-	/*
-	 * These are for choosing range (index ops) versus all.
-	 * icache flushes all ways for each set, so drop icache_assoc.
-	 * dcache flushes all ways and each setting of bit 12 for each
-	 * index, so drop dcache_assoc and halve the dcache_sets.
-	 */
-	icache_range_cutoff = icache_sets * icache_line_size;
-	dcache_range_cutoff = (dcache_sets / 2) * icache_line_size;
-
-	printk("Primary instruction cache %ldkB, %s, linesize %d bytes.\n",
-	       icache_size >> 10, way_string[icache_assoc - 1],
-	       icache_line_size);
-	printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
-	       dcache_size >> 10, way_string[dcache_assoc - 1],
-	       dcache_line_size);
-}
-
-/*
- * This is called from cache.c.  We have to set up all the
- * memory management function pointers, as well as initialize
- * the caches and tlbs
- */
-void __init sb1_cache_init(void)
-{
-	extern char except_vec2_sb1;
-
-	/* Special cache error handler for SB1 */
-	set_uncached_handler (0x100, &except_vec2_sb1, 0x80);
-
-	probe_cache_sizes();
-
-#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
-	sb1_dma_init();
-#endif
-
-	/*
-	 * None of these are needed for the SB1 - the Dcache is
-	 * physically indexed and tagged, so no virtual aliasing can
-	 * occur
-	 */
-	flush_cache_range = (void *) sb1_nop;
-	flush_cache_mm = (void (*)(struct mm_struct *))sb1_nop;
-	flush_cache_all = sb1_nop;
-
-	/* These routines are for Icache coherence with the Dcache */
-	flush_icache_range = sb1_flush_icache_range;
-	flush_icache_all = __sb1_flush_icache_all; /* local only */
-
-	/* This implies an Icache flush too, so can't be nop'ed */
-	flush_cache_page = sb1_flush_cache_page;
-
-	flush_cache_sigtramp = sb1_flush_cache_sigtramp;
-	local_flush_data_cache_page = (void *) sb1_nop;
-	flush_data_cache_page = sb1_flush_cache_data_page;
-
-	/* Full flush */
-	__flush_cache_all = sb1___flush_cache_all;
-
-	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);
-
-	/*
-	 * This is the only way to force the update of K0 to complete
-	 * before subsequent instruction fetch.
-	 */
-	__asm__ __volatile__(
-		".set push			\n"
-	"	.set noat			\n"
-	"	.set noreorder			\n"
-	"	.set mips3			\n"
-	"	" STR(PTR_LA) "	$1, 1f		\n"
-	"	" STR(MTC0) "	$1, $14		\n"
-	"	eret				\n"
-	"1:	.set pop"
-	:
-	:
-	: "memory");
-
-	local_sb1___flush_cache_all();
-}
+2 -7
arch/mips/mm/cache.c
···
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 1994 - 2003, 07 by Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
  * Copyright (C) 2007 MIPS Technologies, Inc.
  */
 #include <linux/fs.h>
 #include <linux/fcntl.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/linkage.h>
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
···
 		extern void __weak tx39_cache_init(void);
 
 		tx39_cache_init();
-		return;
-	}
-	if (cpu_has_sb1_cache) {
-		extern void __weak sb1_cache_init(void);
-
-		sb1_cache_init();
 		return;
 	}
 
+8
arch/mips/mm/pg-sb1.c
···
 
 EXPORT_SYMBOL(clear_page);
 EXPORT_SYMBOL(copy_page);
+
+void __init build_clear_page(void)
+{
+}
+
+void __init build_copy_page(void)
+{
+}
-3
include/asm-mips/cpu-features.h
···
 #ifndef cpu_has_tx39_cache
 #define cpu_has_tx39_cache	(cpu_data[0].options & MIPS_CPU_TX39_CACHE)
 #endif
-#ifndef cpu_has_sb1_cache
-#define cpu_has_sb1_cache	(cpu_data[0].options & MIPS_CPU_SB1_CACHE)
-#endif
 #ifndef cpu_has_fpu
 #define cpu_has_fpu		(current_cpu_data.options & MIPS_CPU_FPU)
 #define raw_cpu_has_fpu		(raw_current_cpu_data.options & MIPS_CPU_FPU)
+17 -18
include/asm-mips/cpu.h
···
 #define MIPS_CPU_3K_CACHE	0x00000004 /* R3000-style caches */
 #define MIPS_CPU_4K_CACHE	0x00000008 /* R4000-style caches */
 #define MIPS_CPU_TX39_CACHE	0x00000010 /* TX3900-style caches */
-#define MIPS_CPU_SB1_CACHE	0x00000020 /* SB1-style caches */
-#define MIPS_CPU_FPU		0x00000040 /* CPU has FPU */
-#define MIPS_CPU_32FPR		0x00000080 /* 32 dbl. prec. FP registers */
-#define MIPS_CPU_COUNTER	0x00000100 /* Cycle count/compare */
-#define MIPS_CPU_WATCH		0x00000200 /* watchpoint registers */
-#define MIPS_CPU_DIVEC		0x00000400 /* dedicated interrupt vector */
-#define MIPS_CPU_VCE		0x00000800 /* virt. coherence conflict possible */
-#define MIPS_CPU_CACHE_CDEX_P	0x00001000 /* Create_Dirty_Exclusive CACHE op */
-#define MIPS_CPU_CACHE_CDEX_S	0x00002000 /* ... same for seconary cache ... */
-#define MIPS_CPU_MCHECK		0x00004000 /* Machine check exception */
-#define MIPS_CPU_EJTAG		0x00008000 /* EJTAG exception */
-#define MIPS_CPU_NOFPUEX	0x00010000 /* no FPU exception */
-#define MIPS_CPU_LLSC		0x00020000 /* CPU has ll/sc instructions */
-#define MIPS_CPU_INCLUSIVE_CACHES	0x00040000 /* P-cache subset enforced */
-#define MIPS_CPU_PREFETCH	0x00080000 /* CPU has usable prefetch */
-#define MIPS_CPU_VINT		0x00100000 /* CPU supports MIPSR2 vectored interrupts */
-#define MIPS_CPU_VEIC		0x00200000 /* CPU supports MIPSR2 external interrupt controller mode */
-#define MIPS_CPU_ULRI		0x00400000 /* CPU has ULRI feature */
+#define MIPS_CPU_FPU		0x00000020 /* CPU has FPU */
+#define MIPS_CPU_32FPR		0x00000040 /* 32 dbl. prec. FP registers */
+#define MIPS_CPU_COUNTER	0x00000080 /* Cycle count/compare */
+#define MIPS_CPU_WATCH		0x00000100 /* watchpoint registers */
+#define MIPS_CPU_DIVEC		0x00000200 /* dedicated interrupt vector */
+#define MIPS_CPU_VCE		0x00000400 /* virt. coherence conflict possible */
+#define MIPS_CPU_CACHE_CDEX_P	0x00000800 /* Create_Dirty_Exclusive CACHE op */
+#define MIPS_CPU_CACHE_CDEX_S	0x00001000 /* ... same for seconary cache ... */
+#define MIPS_CPU_MCHECK		0x00002000 /* Machine check exception */
+#define MIPS_CPU_EJTAG		0x00004000 /* EJTAG exception */
+#define MIPS_CPU_NOFPUEX	0x00008000 /* no FPU exception */
+#define MIPS_CPU_LLSC		0x00010000 /* CPU has ll/sc instructions */
+#define MIPS_CPU_INCLUSIVE_CACHES	0x00020000 /* P-cache subset enforced */
+#define MIPS_CPU_PREFETCH	0x00040000 /* CPU has usable prefetch */
+#define MIPS_CPU_VINT		0x00080000 /* CPU supports MIPSR2 vectored interrupts */
+#define MIPS_CPU_VEIC		0x00100000 /* CPU supports MIPSR2 external interrupt controller mode */
+#define MIPS_CPU_ULRI		0x00200000 /* CPU has ULRI feature */
 
 /*
  * CPU ASE encodings
+2
include/asm-mips/linkage.h
···
 #include <asm/asm.h>
 #endif
 
+#define __weak	__attribute__((weak))
+
 #endif
-1
include/asm-mips/mach-cobalt/cpu-feature-overrides.h
···
 #define cpu_has_3k_cache	0
 #define cpu_has_4k_cache	1
 #define cpu_has_tx39_cache	0
-#define cpu_has_sb1_cache	0
 #define cpu_has_fpu		1
 #define cpu_has_32fpr		1
 #define cpu_has_counter		1