Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.19-rc4 (509 lines, 14 kB)
/*
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
 * Copyright (C) 2004 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>

extern void sb1_dma_init(void);

/* These are probed at ld_mmu time */
static unsigned long icache_size;
static unsigned long dcache_size;

static unsigned short icache_line_size;
static unsigned short dcache_line_size;

static unsigned int icache_index_mask;
static unsigned int dcache_index_mask;

static unsigned short icache_assoc;
static unsigned short dcache_assoc;

static unsigned short icache_sets;
static unsigned short dcache_sets;

static unsigned int icache_range_cutoff;
static unsigned int dcache_range_cutoff;

static inline void sb1_on_each_cpu(void (*func) (void *info), void *info,
                                   int retry, int wait)
{
        preempt_disable();
        smp_call_function(func, info, retry, wait);
        func(info);
        preempt_enable();
}

/*
 * The dcache is fully coherent to the system, with one
 * big caveat: the instruction stream.  In other words,
 * if we miss in the icache, and have dirty data in the
 * L1 dcache, then we'll go out to memory (or the L2) and
 * get the not-as-recent data.
 *
 * So the only time we have to flush the dcache is when
 * we're flushing the icache.  Since the L2 is fully
 * coherent to everything, including I/O, we never have
 * to flush it
 */

#define cache_set_op(op, addr)                                  \
        __asm__ __volatile__(                                   \
        "       .set noreorder                          \n"     \
        "       .set mips64\n\t                         \n"     \
        "       cache %0, (0<<13)(%1)                   \n"     \
        "       cache %0, (1<<13)(%1)                   \n"     \
        "       cache %0, (2<<13)(%1)                   \n"     \
        "       cache %0, (3<<13)(%1)                   \n"     \
        "       .set mips0                              \n"     \
        "       .set reorder"                                   \
        :                                                       \
        : "i" (op), "r" (addr))

#define sync()                                                  \
        __asm__ __volatile(                                     \
        "       .set mips64\n\t                         \n"     \
        "       sync                                    \n"     \
        "       .set mips0")

#define mispredict()                                            \
        __asm__ __volatile__(                                   \
        "       bnezl $0, 1f            \n" /* Force mispredict */ \
        "1:                             \n");

/*
 * Writeback and invalidate the entire dcache
 */
static inline void __sb1_writeback_inv_dcache_all(void)
{
        unsigned long addr = 0;

        while (addr < dcache_line_size * dcache_sets) {
                cache_set_op(Index_Writeback_Inv_D, addr);
                addr += dcache_line_size;
        }
}
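/*
 * Illustrative note (not in the original file; assumes the 32 kB,
 * 4-way, 32-byte-line L1 caches the SB1 core is commonly documented
 * with): dcache_sets is then 256, so one way spans 256 * 32 = 8 kB and
 * the loop above walks index addresses 0, 32, ..., 8160.  Each
 * cache_set_op() issues four cache instructions 8 kB (one way) apart,
 * so every line in every way is written back and invalidated.
 */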
/*
 * Writeback and invalidate a range of the dcache.  The addresses are
 * virtual, and since we're using index ops and bit 12 is part of both
 * the virtual frame and physical index, we have to clear both sets
 * (bit 12 set and cleared).
 */
static inline void __sb1_writeback_inv_dcache_range(unsigned long start,
        unsigned long end)
{
        unsigned long index;

        start &= ~(dcache_line_size - 1);
        end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1);

        while (start != end) {
                index = start & dcache_index_mask;
                cache_set_op(Index_Writeback_Inv_D, index);
                cache_set_op(Index_Writeback_Inv_D, index ^ (1<<12));
                start += dcache_line_size;
        }
        sync();
}

/*
 * Writeback and invalidate a range of the dcache.  With physical
 * addresses, we don't have to worry about possible bit 12 aliasing.
 * XXXKW is it worth turning on KX and using hit ops with xkphys?
 */
static inline void __sb1_writeback_inv_dcache_phys_range(unsigned long start,
        unsigned long end)
{
        start &= ~(dcache_line_size - 1);
        end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1);

        while (start != end) {
                cache_set_op(Index_Writeback_Inv_D, start & dcache_index_mask);
                start += dcache_line_size;
        }
        sync();
}
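/*
 * Worked example (illustrative, assuming 4 kB pages and the 8 kB
 * per-way geometry above): dcache_index_mask is 255 * 32 = 0x1fe0, so
 * the index covers address bits 5..12.  Bits 5..11 come from the page
 * offset and are identical in the virtual and physical address, but
 * bit 12 comes from the page number, so a physical line may be cached
 * under either value of bit 12 depending on the virtual mapping.
 * Hence the pair of index ops, on index and index ^ (1<<12), in
 * __sb1_writeback_inv_dcache_range() above.
 */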
/*
 * Invalidate the entire icache
 */
static inline void __sb1_flush_icache_all(void)
{
        unsigned long addr = 0;

        while (addr < icache_line_size * icache_sets) {
                cache_set_op(Index_Invalidate_I, addr);
                addr += icache_line_size;
        }
}

/*
 * Invalidate a range of the icache.  The addresses are virtual, and
 * the cache is virtually indexed and tagged.  However, we don't
 * necessarily have the right ASID context, so use index ops instead
 * of hit ops.
 */
static inline void __sb1_flush_icache_range(unsigned long start,
        unsigned long end)
{
        start &= ~(icache_line_size - 1);
        end = (end + icache_line_size - 1) & ~(icache_line_size - 1);

        while (start != end) {
                cache_set_op(Index_Invalidate_I, start & icache_index_mask);
                start += icache_line_size;
        }
        mispredict();
        sync();
}

/*
 * Flush the icache for a given physical page.  Need to writeback the
 * dcache first, then invalidate the icache.  If the page isn't
 * executable, nothing is required.
 */
static void local_sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
{
        int cpu = smp_processor_id();

#ifndef CONFIG_SMP
        if (!(vma->vm_flags & VM_EXEC))
                return;
#endif

        __sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE);

        /*
         * Bumping the ASID is probably cheaper than the flush ...
         */
        if (vma->vm_mm == current->active_mm) {
                if (cpu_context(cpu, vma->vm_mm) != 0)
                        drop_mmu_context(vma->vm_mm, cpu);
        } else
                __sb1_flush_icache_range(addr, addr + PAGE_SIZE);
}

#ifdef CONFIG_SMP
struct flush_cache_page_args {
        struct vm_area_struct *vma;
        unsigned long addr;
        unsigned long pfn;
};

static void sb1_flush_cache_page_ipi(void *info)
{
        struct flush_cache_page_args *args = info;

        local_sb1_flush_cache_page(args->vma, args->addr, args->pfn);
}

/* Dirty dcache could be on another CPU, so do the IPIs */
static void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
{
        struct flush_cache_page_args args;

        if (!(vma->vm_flags & VM_EXEC))
                return;

        addr &= PAGE_MASK;
        args.vma = vma;
        args.addr = addr;
        args.pfn = pfn;
        sb1_on_each_cpu(sb1_flush_cache_page_ipi, (void *) &args, 1, 1);
}
#else
void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
        __attribute__((alias("local_sb1_flush_cache_page")));
#endif


/*
 * Invalidate all caches on this CPU
 */
static void __attribute_used__ local_sb1___flush_cache_all(void)
{
        __sb1_writeback_inv_dcache_all();
        __sb1_flush_icache_all();
}

#ifdef CONFIG_SMP
void sb1___flush_cache_all_ipi(void *ignored)
        __attribute__((alias("local_sb1___flush_cache_all")));

static void sb1___flush_cache_all(void)
{
        sb1_on_each_cpu(sb1___flush_cache_all_ipi, 0, 1, 1);
}
#else
void sb1___flush_cache_all(void)
        __attribute__((alias("local_sb1___flush_cache_all")));
#endif

/*
 * When flushing a range in the icache, we have to first writeback
 * the dcache for the same range, so new ifetches will see any
 * data that was dirty in the dcache.
 *
 * The start/end arguments are Kseg addresses (possibly mapped Kseg).
 */

static void local_sb1_flush_icache_range(unsigned long start,
        unsigned long end)
{
        /* Just wb-inv the whole dcache if the range is big enough */
        if ((end - start) > dcache_range_cutoff)
                __sb1_writeback_inv_dcache_all();
        else
                __sb1_writeback_inv_dcache_range(start, end);

        /* Just flush the whole icache if the range is big enough */
        if ((end - start) > icache_range_cutoff)
                __sb1_flush_icache_all();
        else
                __sb1_flush_icache_range(start, end);
}

#ifdef CONFIG_SMP
struct flush_icache_range_args {
        unsigned long start;
        unsigned long end;
};

static void sb1_flush_icache_range_ipi(void *info)
{
        struct flush_icache_range_args *args = info;

        local_sb1_flush_icache_range(args->start, args->end);
}

void sb1_flush_icache_range(unsigned long start, unsigned long end)
{
        struct flush_icache_range_args args;

        args.start = start;
        args.end = end;
        sb1_on_each_cpu(sb1_flush_icache_range_ipi, &args, 1, 1);
}
#else
void sb1_flush_icache_range(unsigned long start, unsigned long end)
        __attribute__((alias("local_sb1_flush_icache_range")));
#endif
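/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * that writes instructions through the dcache, e.g. a module loader or
 * a JIT, must flush the range before executing it:
 *
 *      memcpy(code, image, len);
 *      flush_icache_range((unsigned long) code,
 *                         (unsigned long) code + len);
 *
 * On SB1 this lands in sb1_flush_icache_range() above, which
 * broadcasts the flush to every CPU because stale icache lines may
 * exist on any of them.
 */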
/*
 * A signal trampoline must fit into a single cacheline.
 */
static void local_sb1_flush_cache_sigtramp(unsigned long addr)
{
        cache_set_op(Index_Writeback_Inv_D, addr & dcache_index_mask);
        cache_set_op(Index_Writeback_Inv_D, (addr ^ (1<<12)) & dcache_index_mask);
        cache_set_op(Index_Invalidate_I, addr & icache_index_mask);
        mispredict();
}

#ifdef CONFIG_SMP
static void sb1_flush_cache_sigtramp_ipi(void *info)
{
        unsigned long iaddr = (unsigned long) info;
        local_sb1_flush_cache_sigtramp(iaddr);
}

static void sb1_flush_cache_sigtramp(unsigned long addr)
{
        sb1_on_each_cpu(sb1_flush_cache_sigtramp_ipi, (void *) addr, 1, 1);
}
#else
void sb1_flush_cache_sigtramp(unsigned long addr)
        __attribute__((alias("local_sb1_flush_cache_sigtramp")));
#endif


/*
 * Anything that just flushes dcache state can be ignored, as we're always
 * coherent in dcache space.  This is just a dummy function that all the
 * nop'ed routines point to
 */
static void sb1_nop(void)
{
}

/*
 * Cache set values (from the mips64 spec)
 * 0 - 64
 * 1 - 128
 * 2 - 256
 * 3 - 512
 * 4 - 1024
 * 5 - 2048
 * 6 - 4096
 * 7 - Reserved
 */

static unsigned int decode_cache_sets(unsigned int config_field)
{
        if (config_field == 7) {
                /* JDCXXX - Find a graceful way to abort. */
                return 0;
        }
        return (1<<(config_field + 6));
}

/*
 * Cache line size values (from the mips64 spec)
 * 0 - No cache present.
 * 1 - 4 bytes
 * 2 - 8 bytes
 * 3 - 16 bytes
 * 4 - 32 bytes
 * 5 - 64 bytes
 * 6 - 128 bytes
 * 7 - Reserved
 */

static unsigned int decode_cache_line_size(unsigned int config_field)
{
        if (config_field == 0) {
                return 0;
        } else if (config_field == 7) {
                /* JDCXXX - Find a graceful way to abort. */
                return 0;
        }
        return (1<<(config_field + 1));
}
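/*
 * Worked example (illustrative; the field values assume the 32 kB,
 * 4-way, 32-byte-line caches the SB1 core is commonly documented
 * with): a line size field of 4 decodes to 1 << (4 + 1) = 32 bytes, a
 * sets field of 2 decodes to 1 << (2 + 6) = 256 sets per way, and an
 * associativity field of 3 means 3 + 1 = 4 ways, giving
 * 32 * 256 * 4 = 32768 bytes of cache.
 */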
/*
 * Relevant bits of the config1 register format (from the MIPS32/MIPS64 specs)
 *
 * 24:22 Icache sets per way
 * 21:19 Icache line size
 * 18:16 Icache Associativity
 * 15:13 Dcache sets per way
 * 12:10 Dcache line size
 *  9:7  Dcache Associativity
 */

static char *way_string[] = {
        "direct mapped", "2-way", "3-way", "4-way",
        "5-way", "6-way", "7-way", "8-way",
};

static __init void probe_cache_sizes(void)
{
        u32 config1;

        config1 = read_c0_config1();
        icache_line_size = decode_cache_line_size((config1 >> 19) & 0x7);
        dcache_line_size = decode_cache_line_size((config1 >> 10) & 0x7);
        icache_sets = decode_cache_sets((config1 >> 22) & 0x7);
        dcache_sets = decode_cache_sets((config1 >> 13) & 0x7);
        icache_assoc = ((config1 >> 16) & 0x7) + 1;
        dcache_assoc = ((config1 >> 7) & 0x7) + 1;
        icache_size = icache_line_size * icache_sets * icache_assoc;
        dcache_size = dcache_line_size * dcache_sets * dcache_assoc;
        /* Need to remove non-index bits for index ops */
        icache_index_mask = (icache_sets - 1) * icache_line_size;
        dcache_index_mask = (dcache_sets - 1) * dcache_line_size;
        /*
         * These are for choosing range (index ops) versus all.
         * icache flushes all ways for each set, so drop icache_assoc.
         * dcache flushes all ways and each setting of bit 12 for each
         * index, so drop dcache_assoc and halve the dcache_sets.
         */
        icache_range_cutoff = icache_sets * icache_line_size;
        dcache_range_cutoff = (dcache_sets / 2) * dcache_line_size;

        printk("Primary instruction cache %ldkB, %s, linesize %d bytes.\n",
               icache_size >> 10, way_string[icache_assoc - 1],
               icache_line_size);
        printk("Primary data cache %ldkB, %s, linesize %d bytes.\n",
               dcache_size >> 10, way_string[dcache_assoc - 1],
               dcache_line_size);
}

/*
 * This is called from cache.c.  We have to set up all the
 * memory management function pointers, as well as initialize
 * the caches and tlbs
 */
void sb1_cache_init(void)
{
        extern char except_vec2_sb1;

        /* Special cache error handler for SB1 */
        set_uncached_handler(0x100, &except_vec2_sb1, 0x80);

        probe_cache_sizes();

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
        sb1_dma_init();
#endif

        /*
         * None of these are needed for the SB1 - the Dcache is
         * physically indexed and tagged, so no virtual aliasing can
         * occur
         */
        flush_cache_range = (void *) sb1_nop;
        flush_cache_mm = (void (*)(struct mm_struct *))sb1_nop;
        flush_cache_all = sb1_nop;

        /* These routines are for Icache coherence with the Dcache */
        flush_icache_range = sb1_flush_icache_range;
        flush_icache_all = __sb1_flush_icache_all; /* local only */

        /* This implies an Icache flush too, so can't be nop'ed */
        flush_cache_page = sb1_flush_cache_page;

        flush_cache_sigtramp = sb1_flush_cache_sigtramp;
        local_flush_data_cache_page = (void *) sb1_nop;
        flush_data_cache_page = (void *) sb1_nop;

        /* Full flush */
        __flush_cache_all = sb1___flush_cache_all;

        change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

        /*
         * This is the only way to force the update of K0 to complete
         * before subsequent instruction fetch.
         */
        __asm__ __volatile__(
                ".set push                      \n"
                "       .set noat               \n"
                "       .set noreorder          \n"
                "       .set mips3              \n"
                "       " STR(PTR_LA) " $1, 1f  \n"
                "       " STR(MTC0) " $1, $14   \n"
                "       eret                    \n"
                "1:     .set pop"
                :
                :
                : "memory");

        flush_cache_all();
}
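/*
 * Added commentary on the sequence above (not in the original file):
 * PTR_LA loads the address of the local label 1 into $1 (hence
 * .set noat), MTC0 writes it to CP0 register 14 (EPC), and eret jumps
 * to EPC.  Because eret is a serializing instruction, the change to
 * the Config K0 cacheability field is complete before the next
 * instruction fetch, which a plain branch to the label would not
 * guarantee.
 */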