Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
arch/mips/mm/c-sb1.c at v2.6.22-rc3
/*
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
 * Copyright (C) 2004 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/init.h>
#include <linux/hardirq.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>

extern void sb1_dma_init(void);

/* These are probed at ld_mmu time */
static unsigned long icache_size;
static unsigned long dcache_size;

static unsigned short icache_line_size;
static unsigned short dcache_line_size;

static unsigned int icache_index_mask;
static unsigned int dcache_index_mask;

static unsigned short icache_assoc;
static unsigned short dcache_assoc;

static unsigned short icache_sets;
static unsigned short dcache_sets;

static unsigned int icache_range_cutoff;
static unsigned int dcache_range_cutoff;

static inline void sb1_on_each_cpu(void (*func) (void *info), void *info,
	int retry, int wait)
{
	preempt_disable();
	smp_call_function(func, info, retry, wait);
	func(info);
	preempt_enable();
}

/*
 * The dcache is fully coherent to the system, with one
 * big caveat: the instruction stream.  In other words,
 * if we miss in the icache, and have dirty data in the
 * L1 dcache, then we'll go out to memory (or the L2) and
 * get the not-as-recent data.
 *
 * So the only time we have to flush the dcache is when
 * we're flushing the icache.  Since the L2 is fully
 * coherent to everything, including I/O, we never have
 * to flush it.
 */

#define cache_set_op(op, addr) \
	__asm__ __volatile__( \
	" .set noreorder \n" \
	" .set mips64\n\t \n" \
	" cache %0, (0<<13)(%1) \n" \
	" cache %0, (1<<13)(%1) \n" \
	" cache %0, (2<<13)(%1) \n" \
	" cache %0, (3<<13)(%1) \n" \
	" .set mips0 \n" \
	" .set reorder" \
	: \
	: "i" (op), "r" (addr))

#define sync() \
	__asm__ __volatile( \
	" .set mips64\n\t \n" \
	" sync \n" \
	" .set mips0")

#define mispredict() \
	__asm__ __volatile__( \
	" bnezl $0, 1f \n" /* Force mispredict */ \
	"1: \n");

/*
 * Writeback and invalidate the entire dcache
 */
static inline void __sb1_writeback_inv_dcache_all(void)
{
	unsigned long addr = 0;

	while (addr < dcache_line_size * dcache_sets) {
		cache_set_op(Index_Writeback_Inv_D, addr);
		addr += dcache_line_size;
	}
}
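/*
 * Editor's sketch (not in the original file): the loop above walks one
 * way's worth of indices, and each cache_set_op() hits that index in all
 * four ways via its hard-coded (0..3)<<13 offsets.  Assuming the SB1's
 * usual 32 KB, 4-way dcache with 32-byte lines (256 sets per way), that
 * is 256 index ops covering 8 KB of index space, as this hypothetical
 * helper makes explicit:
 */
static inline unsigned long __attribute__((unused))
sb1_dcache_way_span_sketch(void)
{
	/* 256 sets * 32-byte lines = 8 KB per way, matching the 1<<13
	   way stride assumed by cache_set_op() */
	return (unsigned long)dcache_sets * dcache_line_size;
}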
/*
 * Writeback and invalidate a range of the dcache.  The addresses are
 * virtual, and since we're using index ops and bit 12 is part of both
 * the virtual frame and physical index, we have to clear both sets
 * (bit 12 set and cleared).
 */
static inline void __sb1_writeback_inv_dcache_range(unsigned long start,
	unsigned long end)
{
	unsigned long index;

	start &= ~(dcache_line_size - 1);
	end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1);

	while (start != end) {
		index = start & dcache_index_mask;
		cache_set_op(Index_Writeback_Inv_D, index);
		cache_set_op(Index_Writeback_Inv_D, index ^ (1<<12));
		start += dcache_line_size;
	}
	sync();
}

/*
 * Writeback and invalidate a range of the dcache.  With physical
 * addresses, we don't have to worry about possible bit 12 aliasing.
 * XXXKW is it worth turning on KX and using hit ops with xkphys?
 */
static inline void __sb1_writeback_inv_dcache_phys_range(unsigned long start,
	unsigned long end)
{
	start &= ~(dcache_line_size - 1);
	end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1);

	while (start != end) {
		cache_set_op(Index_Writeback_Inv_D, start & dcache_index_mask);
		start += dcache_line_size;
	}
	sync();
}


/*
 * Invalidate the entire icache
 */
static inline void __sb1_flush_icache_all(void)
{
	unsigned long addr = 0;

	while (addr < icache_line_size * icache_sets) {
		cache_set_op(Index_Invalidate_I, addr);
		addr += icache_line_size;
	}
}

/*
 * Invalidate a range of the icache.  The addresses are virtual, and
 * the cache is virtually indexed and tagged.  However, we don't
 * necessarily have the right ASID context, so use index ops instead
 * of hit ops.
 */
static inline void __sb1_flush_icache_range(unsigned long start,
	unsigned long end)
{
	start &= ~(icache_line_size - 1);
	end = (end + icache_line_size - 1) & ~(icache_line_size - 1);

	while (start != end) {
		cache_set_op(Index_Invalidate_I, start & icache_index_mask);
		start += icache_line_size;
	}
	mispredict();
	sync();
}
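/*
 * Editor's sketch (not in the original file): why
 * __sb1_writeback_inv_dcache_range() above flushes both settings of bit
 * 12.  With 4 KB pages and an 8 KB way, bit 12 of the cache index comes
 * from the page number, so the line for one physical address can sit at
 * either of two indices depending on its virtual mapping.  A hedged
 * illustration (helper name is hypothetical) of the two candidate
 * indices for a virtual address:
 */
static inline void __attribute__((unused))
sb1_dcache_alias_indices_sketch(unsigned long vaddr,
	unsigned long *idx0, unsigned long *idx1)
{
	unsigned long index = vaddr & dcache_index_mask;

	*idx0 = index;			/* index with vaddr's own bit 12 */
	*idx1 = index ^ (1<<12);	/* the alias, bit 12 flipped */
}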
/*
 * Flush the icache for a given physical page.  Need to writeback the
 * dcache first, then invalidate the icache.  If the page isn't
 * executable, nothing is required.
 */
static void local_sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
{
	int cpu = smp_processor_id();

#ifndef CONFIG_SMP
	if (!(vma->vm_flags & VM_EXEC))
		return;
#endif

	__sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE);

	/*
	 * Bumping the ASID is probably cheaper than the flush ...
	 */
	if (vma->vm_mm == current->active_mm) {
		if (cpu_context(cpu, vma->vm_mm) != 0)
			drop_mmu_context(vma->vm_mm, cpu);
	} else
		__sb1_flush_icache_range(addr, addr + PAGE_SIZE);
}

#ifdef CONFIG_SMP
struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static void sb1_flush_cache_page_ipi(void *info)
{
	struct flush_cache_page_args *args = info;

	local_sb1_flush_cache_page(args->vma, args->addr, args->pfn);
}

/* Dirty dcache could be on another CPU, so do the IPIs */
static void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	if (!(vma->vm_flags & VM_EXEC))
		return;

	addr &= PAGE_MASK;
	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;
	sb1_on_each_cpu(sb1_flush_cache_page_ipi, (void *) &args, 1, 1);
}
#else
void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
	__attribute__((alias("local_sb1_flush_cache_page")));
#endif

#ifdef CONFIG_SMP
static void sb1_flush_cache_data_page_ipi(void *info)
{
	unsigned long start = (unsigned long)info;

	__sb1_writeback_inv_dcache_range(start, start + PAGE_SIZE);
}

static void sb1_flush_cache_data_page(unsigned long addr)
{
	if (in_atomic())
		__sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE);
	else
		on_each_cpu(sb1_flush_cache_data_page_ipi, (void *) addr, 1, 1);
}
#else

static void local_sb1_flush_cache_data_page(unsigned long addr)
{
	__sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE);
}

void sb1_flush_cache_data_page(unsigned long)
	__attribute__((alias("local_sb1_flush_cache_data_page")));
#endif

/*
 * Invalidate all caches on this CPU
 */
static void __attribute_used__ local_sb1___flush_cache_all(void)
{
	__sb1_writeback_inv_dcache_all();
	__sb1_flush_icache_all();
}

#ifdef CONFIG_SMP
void sb1___flush_cache_all_ipi(void *ignored)
	__attribute__((alias("local_sb1___flush_cache_all")));

static void sb1___flush_cache_all(void)
{
	sb1_on_each_cpu(sb1___flush_cache_all_ipi, 0, 1, 1);
}
#else
void sb1___flush_cache_all(void)
	__attribute__((alias("local_sb1___flush_cache_all")));
#endif
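/*
 * Editor's sketch (not in the original file): the SMP pattern this file
 * uses over and over.  A local routine is wrapped in a void-pointer IPI
 * trampoline and broadcast with sb1_on_each_cpu(), which also runs it on
 * the calling CPU; on !CONFIG_SMP the wrapper is just an alias.  All
 * sb1_example_* names below are hypothetical:
 */
#if 0	/* illustrative only */
static void sb1_example_flush_ipi(void *info)
{
	unsigned long addr = (unsigned long) info;

	local_sb1_example_flush(addr);	/* hypothetical local op */
}

static void sb1_example_flush(unsigned long addr)
{
	/* retry = 1, wait = 1: block until every CPU has flushed */
	sb1_on_each_cpu(sb1_example_flush_ipi, (void *) addr, 1, 1);
}
#endif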
/*
 * When flushing a range in the icache, we have to first writeback
 * the dcache for the same range, so new ifetches will see any
 * data that was dirty in the dcache.
 *
 * The start/end arguments are Kseg addresses (possibly mapped Kseg).
 */

static void local_sb1_flush_icache_range(unsigned long start,
	unsigned long end)
{
	/* Just wb-inv the whole dcache if the range is big enough */
	if ((end - start) > dcache_range_cutoff)
		__sb1_writeback_inv_dcache_all();
	else
		__sb1_writeback_inv_dcache_range(start, end);

	/* Just flush the whole icache if the range is big enough */
	if ((end - start) > icache_range_cutoff)
		__sb1_flush_icache_all();
	else
		__sb1_flush_icache_range(start, end);
}

#ifdef CONFIG_SMP
struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

static void sb1_flush_icache_range_ipi(void *info)
{
	struct flush_icache_range_args *args = info;

	local_sb1_flush_icache_range(args->start, args->end);
}

void sb1_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;
	sb1_on_each_cpu(sb1_flush_icache_range_ipi, &args, 1, 1);
}
#else
void sb1_flush_icache_range(unsigned long start, unsigned long end)
	__attribute__((alias("local_sb1_flush_icache_range")));
#endif

/*
 * A signal trampoline must fit into a single cacheline.
 */
static void local_sb1_flush_cache_sigtramp(unsigned long addr)
{
	cache_set_op(Index_Writeback_Inv_D, addr & dcache_index_mask);
	cache_set_op(Index_Writeback_Inv_D, (addr ^ (1<<12)) & dcache_index_mask);
	cache_set_op(Index_Invalidate_I, addr & icache_index_mask);
	mispredict();
}

#ifdef CONFIG_SMP
static void sb1_flush_cache_sigtramp_ipi(void *info)
{
	unsigned long iaddr = (unsigned long) info;
	local_sb1_flush_cache_sigtramp(iaddr);
}

static void sb1_flush_cache_sigtramp(unsigned long addr)
{
	sb1_on_each_cpu(sb1_flush_cache_sigtramp_ipi, (void *) addr, 1, 1);
}
#else
void sb1_flush_cache_sigtramp(unsigned long addr)
	__attribute__((alias("local_sb1_flush_cache_sigtramp")));
#endif


/*
 * Anything that just flushes dcache state can be ignored, as we're always
 * coherent in dcache space.  This is just a dummy function that all the
 * nop'ed routines point to.
 */
static void sb1_nop(void)
{
}

/*
 * Cache set values (from the mips64 spec)
 * 0 - 64
 * 1 - 128
 * 2 - 256
 * 3 - 512
 * 4 - 1024
 * 5 - 2048
 * 6 - 4096
 * 7 - Reserved
 */

static unsigned int decode_cache_sets(unsigned int config_field)
{
	if (config_field == 7) {
		/* JDCXXX - Find a graceful way to abort. */
		return 0;
	}
	return (1<<(config_field + 6));
}
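/*
 * Editor's sketch (not in the original file): the decode above is simply
 * 1 << (field + 6), so for example:
 *
 *	decode_cache_sets(0) ->   64 sets per way
 *	decode_cache_sets(2) ->  256 sets per way (e.g. a 32 KB 4-way
 *	                          cache with 32-byte lines)
 *	decode_cache_sets(6) -> 4096 sets per way
 */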
/*
 * Cache line size values (from the mips64 spec)
 * 0 - No cache present.
 * 1 - 4 bytes
 * 2 - 8 bytes
 * 3 - 16 bytes
 * 4 - 32 bytes
 * 5 - 64 bytes
 * 6 - 128 bytes
 * 7 - Reserved
 */

static unsigned int decode_cache_line_size(unsigned int config_field)
{
	if (config_field == 0) {
		return 0;
	} else if (config_field == 7) {
		/* JDCXXX - Find a graceful way to abort. */
		return 0;
	}
	return (1<<(config_field + 1));
}

/*
 * Relevant bits of the config1 register format (from the MIPS32/MIPS64 specs)
 *
 * 24:22 Icache sets per way
 * 21:19 Icache line size
 * 18:16 Icache Associativity
 * 15:13 Dcache sets per way
 * 12:10 Dcache line size
 *  9:7  Dcache Associativity
 */

static char *way_string[] = {
	"direct mapped", "2-way", "3-way", "4-way",
	"5-way", "6-way", "7-way", "8-way",
};

static __init void probe_cache_sizes(void)
{
	u32 config1;

	config1 = read_c0_config1();
	icache_line_size = decode_cache_line_size((config1 >> 19) & 0x7);
	dcache_line_size = decode_cache_line_size((config1 >> 10) & 0x7);
	icache_sets = decode_cache_sets((config1 >> 22) & 0x7);
	dcache_sets = decode_cache_sets((config1 >> 13) & 0x7);
	icache_assoc = ((config1 >> 16) & 0x7) + 1;
	dcache_assoc = ((config1 >> 7) & 0x7) + 1;
	icache_size = icache_line_size * icache_sets * icache_assoc;
	dcache_size = dcache_line_size * dcache_sets * dcache_assoc;
	/* Need to remove non-index bits for index ops */
	icache_index_mask = (icache_sets - 1) * icache_line_size;
	dcache_index_mask = (dcache_sets - 1) * dcache_line_size;
	/*
	 * These are for choosing range (index ops) versus all.
	 * icache flushes all ways for each set, so drop icache_assoc.
	 * dcache flushes all ways and each setting of bit 12 for each
	 * index, so drop dcache_assoc and halve the dcache_sets.
	 */
	icache_range_cutoff = icache_sets * icache_line_size;
	dcache_range_cutoff = (dcache_sets / 2) * dcache_line_size;

	printk("Primary instruction cache %ldkB, %s, linesize %d bytes.\n",
	       icache_size >> 10, way_string[icache_assoc - 1],
	       icache_line_size);
	printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
	       dcache_size >> 10, way_string[dcache_assoc - 1],
	       dcache_line_size);
}
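/*
 * Editor's sketch (not in the original file): probe_cache_sizes() worked
 * through for a hypothetical config1 whose icache fields read sets=2,
 * line=4, assoc=3:
 *
 *	icache_line_size    = 1 << (4 + 1)  =  32 bytes
 *	icache_sets         = 1 << (2 + 6)  = 256 per way
 *	icache_assoc        = 3 + 1         =   4 ways
 *	icache_size         = 32 * 256 * 4  = 32768 (32 kB)
 *	icache_index_mask   = (256 - 1) * 32 = 0x1fe0
 *	icache_range_cutoff = 256 * 32      = 8192 bytes
 */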
/*
 * This is called from cache.c.  We have to set up all the
 * memory management function pointers, as well as initialize
 * the caches and tlbs.
 */
void sb1_cache_init(void)
{
	extern char except_vec2_sb1;

	/* Special cache error handler for SB1 */
	set_uncached_handler(0x100, &except_vec2_sb1, 0x80);

	probe_cache_sizes();

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
	sb1_dma_init();
#endif

	/*
	 * None of these are needed for the SB1 - the Dcache is
	 * physically indexed and tagged, so no virtual aliasing can
	 * occur.
	 */
	flush_cache_range = (void *) sb1_nop;
	flush_cache_mm = (void (*)(struct mm_struct *))sb1_nop;
	flush_cache_all = sb1_nop;

	/* These routines are for Icache coherence with the Dcache */
	flush_icache_range = sb1_flush_icache_range;
	flush_icache_all = __sb1_flush_icache_all; /* local only */

	/* This implies an Icache flush too, so can't be nop'ed */
	flush_cache_page = sb1_flush_cache_page;

	flush_cache_sigtramp = sb1_flush_cache_sigtramp;
	local_flush_data_cache_page = (void *) sb1_nop;
	flush_data_cache_page = sb1_flush_cache_data_page;

	/* Full flush */
	__flush_cache_all = sb1___flush_cache_all;

	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

	/*
	 * This is the only way to force the update of K0 to complete
	 * before subsequent instruction fetch.
	 */
	__asm__ __volatile__(
		".set push \n"
	"	.set noat \n"
	"	.set noreorder \n"
	"	.set mips3 \n"
	"	" STR(PTR_LA) " $1, 1f \n"
	"	" STR(MTC0) " $1, $14 \n"
	"	eret \n"
	"1:	.set pop"
	:
	:
	: "memory");

	local_sb1___flush_cache_all();
}
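/*
 * Editor's sketch (not in the original file): on a 64-bit kernel the
 * STR(PTR_LA)/STR(MTC0) strings in the sequence above expand roughly to
 *
 *	dla	$1, 1f		# load the address of label 1
 *	dmtc0	$1, $14		# $14 is the CP0 EPC register
 *	eret			# resume at 1f
 *
 * so instruction fetch restarts at the label with the Config.K0
 * cacheability change guaranteed visible, since eret serializes the
 * pipeline.
 */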