Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, at v2.6.25 (1281 lines, 34 kB)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
 * Copyright (C) 2005, 2007  Maciej W. Rozycki
 * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me away haha
 * they're coming to take me away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>

#include <asm/mmu_context.h>
#include <asm/war.h>

#include "uasm.h"

static inline int r45k_bvahwbug(void)
{
        /* XXX: We should probe for the presence of this bug, but we don't. */
        return 0;
}

static inline int r4k_250MHZhwbug(void)
{
        /* XXX: We should probe for the presence of this bug, but we don't. */
        return 0;
}

static inline int __maybe_unused bcm1250_m3_war(void)
{
        return BCM1250_M3_WAR;
}

static inline int __maybe_unused r10000_llsc_war(void)
{
        return R10000_LLSC_WAR;
}

/*
 * Found by experiment: At least some revisions of the 4kc throw a
 * machine check exception under some circumstances, triggered by
 * invalid values in the index register.  Delaying the tlbp instruction
 * until after the next branch, plus adding an additional nop in front
 * of tlbwi/tlbwr avoids the invalid index register values.  Nobody
 * knows why; it's not an issue caused by the core RTL.
 */
static int __cpuinit m4kc_tlbp_war(void)
{
        return (current_cpu_data.processor_id & 0xffff00) ==
               (PRID_COMP_MIPS | PRID_IMP_4KC);
}

/* Handle labels (which must be positive integers). */
enum label_id {
        label_second_part = 1,
        label_leave,
#ifdef MODULE_START
        label_module_alloc,
#endif
        label_vmalloc,
        label_vmalloc_done,
        label_tlbw_hazard,
        label_split,
        label_nopage_tlbl,
        label_nopage_tlbs,
        label_nopage_tlbm,
        label_smp_pgtable_change,
        label_r3000_write_probe_fail,
};

UASM_L_LA(_second_part)
UASM_L_LA(_leave)
#ifdef MODULE_START
UASM_L_LA(_module_alloc)
#endif
UASM_L_LA(_vmalloc)
UASM_L_LA(_vmalloc_done)
UASM_L_LA(_tlbw_hazard)
UASM_L_LA(_split)
UASM_L_LA(_nopage_tlbl)
UASM_L_LA(_nopage_tlbs)
UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)

/*
 * For debug purposes.
 */
static inline void dump_handler(const u32 *handler, int count)
{
        int i;

        pr_debug("\t.set push\n");
        pr_debug("\t.set noreorder\n");

        for (i = 0; i < count; i++)
                pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);

        pr_debug("\t.set pop\n");
}

/* The only general purpose registers allowed in TLB handlers. */
#define K0              26
#define K1              27

/* Some CP0 registers */
#define C0_INDEX        0, 0
#define C0_ENTRYLO0     2, 0
#define C0_TCBIND       2, 2
#define C0_ENTRYLO1     3, 0
#define C0_CONTEXT      4, 0
#define C0_BADVADDR     8, 0
#define C0_ENTRYHI      10, 0
#define C0_EPC          14, 0
#define C0_XCONTEXT     20, 0

#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
#endif

/* The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
static u32 tlb_handler[128] __cpuinitdata;

/* simply assume worst case size for labels and relocs */
static struct uasm_label labels[128] __cpuinitdata;
static struct uasm_reloc relocs[128] __cpuinitdata;

/*
 * The R3000 TLB handler is simple.
 */
static void __cpuinit build_r3000_tlb_refill_handler(void)
{
        long pgdc = (long)pgd_current;
        u32 *p;

        memset(tlb_handler, 0, sizeof(tlb_handler));
        p = tlb_handler;

        uasm_i_mfc0(&p, K0, C0_BADVADDR);
        uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
        uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
        uasm_i_srl(&p, K0, K0, 22); /* load delay */
        uasm_i_sll(&p, K0, K0, 2);
        uasm_i_addu(&p, K1, K1, K0);
        uasm_i_mfc0(&p, K0, C0_CONTEXT);
        uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
        uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
        uasm_i_addu(&p, K1, K1, K0);
        uasm_i_lw(&p, K0, 0, K1);
        uasm_i_nop(&p); /* load delay */
        uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
        uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
        uasm_i_tlbwr(&p); /* cp0 delay */
        uasm_i_jr(&p, K1);
        uasm_i_rfe(&p); /* branch delay */

        if (p > tlb_handler + 32)
                panic("TLB refill handler space exceeded");

        pr_debug("Wrote TLB refill handler (%u instructions).\n",
                 (unsigned int)(p - tlb_handler));

        memcpy((void *)ebase, tlb_handler, 0x80);

        dump_handler((u32 *)ebase, 32);
}

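/*
 * For illustration only: reconstructed from the uasm_i_* calls above
 * (not emitted verbatim), the synthesized R3000 refill handler is
 * roughly equivalent to
 *
 *      mfc0    k0, c0_badvaddr
 *      lui     k1, %hi(pgd_current)
 *      lw      k1, %lo(pgd_current)(k1)
 *      srl     k0, k0, 22              # pgd index
 *      sll     k0, k0, 2
 *      addu    k1, k1, k0
 *      mfc0    k0, c0_context
 *      lw      k1, 0(k1)               # pgd entry
 *      andi    k0, k0, 0xffc           # pte offset from c0_context
 *      addu    k1, k1, k0
 *      lw      k0, 0(k1)               # pte
 *      nop
 *      mtc0    k0, c0_entrylo0
 *      mfc0    k1, c0_epc
 *      tlbwr
 *      jr      k1
 *      rfe
 *
 * The interleaving hides the R3000's cp0 and load delay slots, which
 * is why the BadVAddr/Context reads are separated from their consumers.
 */
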
/*
 * The R4000 TLB handler is much more complicated. We have two
 * consecutive handler areas with 32 instructions space each.
 * Since they aren't used at the same time, we can overflow into the
 * other one.  To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
static u32 final_handler[64] __cpuinitdata;

/*
 * Hazards
 *
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *      stalling_instruction
 *      TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction. This is not really correct as the stalling instruction
 * can modify the address used to access the JTLB. The failure symptom is that
 * the TLBP instruction will use an address created for the stalling instruction
 * and not the address held in C0_ENHI and thus report the wrong results.
 *
 * The software work-around is to not allow the instruction preceding the TLBP
 * to stall - make it a NOP or some other instruction guaranteed not to stall.
 *
 * Errata 2 will not be fixed.  This erratum is also present on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
{
        switch (current_cpu_type()) {
        /* Found by experiment: R4600 v2.0 needs this, too. */
        case CPU_R4600:
        case CPU_R5000:
        case CPU_R5000A:
        case CPU_NEVADA:
                uasm_i_nop(p);
                uasm_i_tlbp(p);
                break;

        default:
                uasm_i_tlbp(p);
                break;
        }
}

/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceding mtc0 and for the following eret.
 */
enum tlb_write_entry { tlb_random, tlb_indexed };

static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
                                            struct uasm_reloc **r,
                                            enum tlb_write_entry wmode)
{
        void (*tlbw)(u32 **) = NULL;

        switch (wmode) {
        case tlb_random: tlbw = uasm_i_tlbwr; break;
        case tlb_indexed: tlbw = uasm_i_tlbwi; break;
        }

        if (cpu_has_mips_r2) {
                uasm_i_ehb(p);
                tlbw(p);
                return;
        }

        switch (current_cpu_type()) {
        case CPU_R4000PC:
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400PC:
        case CPU_R4400SC:
        case CPU_R4400MC:
                /*
                 * This branch uses up a mtc0 hazard nop slot and saves
                 * two nops after the tlbw instruction.
                 */
                uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
                tlbw(p);
                uasm_l_tlbw_hazard(l, *p);
                uasm_i_nop(p);
                break;

        case CPU_R4600:
        case CPU_R4700:
        case CPU_R5000:
        case CPU_R5000A:
                uasm_i_nop(p);
                tlbw(p);
                uasm_i_nop(p);
                break;

        case CPU_R4300:
        case CPU_5KC:
        case CPU_TX49XX:
        case CPU_AU1000:
        case CPU_AU1100:
        case CPU_AU1500:
        case CPU_AU1550:
        case CPU_AU1200:
        case CPU_AU1210:
        case CPU_AU1250:
        case CPU_PR4450:
                uasm_i_nop(p);
                tlbw(p);
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
        case CPU_4KC:
        case CPU_4KEC:
        case CPU_SB1:
        case CPU_SB1A:
        case CPU_4KSC:
        case CPU_20KC:
        case CPU_25KF:
        case CPU_BCM3302:
        case CPU_BCM4710:
        case CPU_LOONGSON2:
                if (m4kc_tlbp_war())
                        uasm_i_nop(p);
                tlbw(p);
                break;

        case CPU_NEVADA:
                uasm_i_nop(p); /* QED specifies 2 nops hazard */
                /*
                 * This branch uses up a mtc0 hazard nop slot and saves
                 * a nop after the tlbw instruction.
                 */
                uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
                tlbw(p);
                uasm_l_tlbw_hazard(l, *p);
                break;

        case CPU_RM7000:
                uasm_i_nop(p);
                uasm_i_nop(p);
                uasm_i_nop(p);
                uasm_i_nop(p);
                tlbw(p);
                break;

        case CPU_RM9000:
                /*
                 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
                 * use of the JTLB for instructions should not occur for 4
                 * cpu cycles and use for data translations should not occur
                 * for 3 cpu cycles.
                 */
                uasm_i_ssnop(p);
                uasm_i_ssnop(p);
                uasm_i_ssnop(p);
                uasm_i_ssnop(p);
                tlbw(p);
                uasm_i_ssnop(p);
                uasm_i_ssnop(p);
                uasm_i_ssnop(p);
                uasm_i_ssnop(p);
                break;

        case CPU_VR4111:
        case CPU_VR4121:
        case CPU_VR4122:
        case CPU_VR4181:
        case CPU_VR4181A:
                uasm_i_nop(p);
                uasm_i_nop(p);
                tlbw(p);
                uasm_i_nop(p);
                uasm_i_nop(p);
                break;

        case CPU_VR4131:
        case CPU_VR4133:
        case CPU_R5432:
                uasm_i_nop(p);
                uasm_i_nop(p);
                tlbw(p);
                break;

        default:
                panic("No TLB refill handler yet (CPU type: %d)",
                      current_cpu_data.cputype);
                break;
        }
}

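/*
 * A note on the bgezl trick above (an observation, not from any
 * manual): bgezl with rs = $0 tests 0 >= 0 and is therefore always
 * taken, so the tlbw placed in its delay slot always executes.  The
 * idea appears to be that the branch instruction itself occupies the
 * slot that would otherwise need a nop after the preceding mtc0, and
 * that the taken branch's pipeline refetch at label_tlbw_hazard
 * absorbs hazard cycles around the tlbw, saving explicit nops.
 */
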
#ifdef CONFIG_64BIT
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static void __cpuinit
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                 unsigned int tmp, unsigned int ptr)
{
        long pgdc = (long)pgd_current;

        /*
         * The vmalloc handling is not in the hotpath.
         */
        uasm_i_dmfc0(p, tmp, C0_BADVADDR);
#ifdef MODULE_START
        uasm_il_bltz(p, r, tmp, label_module_alloc);
#else
        uasm_il_bltz(p, r, tmp, label_vmalloc);
#endif
        /* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */

#ifdef CONFIG_SMP
# ifdef CONFIG_MIPS_MT_SMTC
        /*
         * SMTC uses TCBind value as "CPU" index
         */
        uasm_i_mfc0(p, ptr, C0_TCBIND);
        uasm_i_dsrl(p, ptr, ptr, 19);
# else
        /*
         * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
         * stored in CONTEXT.
         */
        uasm_i_dmfc0(p, ptr, C0_CONTEXT);
        uasm_i_dsrl(p, ptr, ptr, 23);
# endif
        UASM_i_LA_mostly(p, tmp, pgdc);
        uasm_i_daddu(p, ptr, ptr, tmp);
        uasm_i_dmfc0(p, tmp, C0_BADVADDR);
        uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#else
        UASM_i_LA_mostly(p, ptr, pgdc);
        uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#endif

        uasm_l_vmalloc_done(l, *p);

        if (PGDIR_SHIFT - 3 < 32)               /* get pgd offset in bytes */
                uasm_i_dsrl(p, tmp, tmp, PGDIR_SHIFT-3);
        else
                uasm_i_dsrl32(p, tmp, tmp, PGDIR_SHIFT - 3 - 32);

        uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
        uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
        uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
        uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
        uasm_i_dsrl(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
        uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
        uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
}

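/*
 * Worked example (illustrative, assuming 4 KB pages and the default
 * single-page pgd/pmd tables, i.e. PGDIR_SHIFT = 30, PMD_SHIFT = 21,
 * PTRS_PER_PGD = PTRS_PER_PMD = 512): for a faulting address va the
 * code above computes
 *
 *      pgd entry     = *(pgd_base + ((va >> 30) & 511) * 8)
 *      pmd entry ptr =  pgd entry + ((va >> 21) & 511) * 8
 *
 * The "dsrl tmp, PGDIR_SHIFT - 3" plus "andi tmp, (PTRS_PER_PGD-1) << 3"
 * pair extracts the index and scales it to a byte offset (8-byte
 * entries) in one go, avoiding a separate shift-left.
 */
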
/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static void __cpuinit
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                        unsigned int bvaddr, unsigned int ptr)
{
        long swpd = (long)swapper_pg_dir;

#ifdef MODULE_START
        long modd = (long)module_pg_dir;

        uasm_l_module_alloc(l, *p);
        /*
         * Assumption:
         * VMALLOC_START >= 0xc000000000000000UL
         * MODULE_START  >= 0xe000000000000000UL
         */
        UASM_i_SLL(p, ptr, bvaddr, 2);
        uasm_il_bgez(p, r, ptr, label_vmalloc);

        if (uasm_in_compat_space_p(MODULE_START) &&
            !uasm_rel_lo(MODULE_START)) {
                uasm_i_lui(p, ptr, uasm_rel_hi(MODULE_START)); /* delay slot */
        } else {
                /* unlikely configuration */
                uasm_i_nop(p); /* delay slot */
                UASM_i_LA(p, ptr, MODULE_START);
        }
        uasm_i_dsubu(p, bvaddr, bvaddr, ptr);

        if (uasm_in_compat_space_p(modd) && !uasm_rel_lo(modd)) {
                uasm_il_b(p, r, label_vmalloc_done);
                uasm_i_lui(p, ptr, uasm_rel_hi(modd));
        } else {
                UASM_i_LA_mostly(p, ptr, modd);
                uasm_il_b(p, r, label_vmalloc_done);
                if (uasm_in_compat_space_p(modd))
                        uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(modd));
                else
                        uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(modd));
        }

        uasm_l_vmalloc(l, *p);
        if (uasm_in_compat_space_p(MODULE_START) &&
            !uasm_rel_lo(MODULE_START) &&
            MODULE_START << 32 == VMALLOC_START)
                uasm_i_dsll32(p, ptr, ptr, 0); /* typical case */
        else
                UASM_i_LA(p, ptr, VMALLOC_START);
#else
        uasm_l_vmalloc(l, *p);
        UASM_i_LA(p, ptr, VMALLOC_START);
#endif
        uasm_i_dsubu(p, bvaddr, bvaddr, ptr);

        if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
                uasm_il_b(p, r, label_vmalloc_done);
                uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
        } else {
                UASM_i_LA_mostly(p, ptr, swpd);
                uasm_il_b(p, r, label_vmalloc_done);
                if (uasm_in_compat_space_p(swpd))
                        uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
                else
                        uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
        }
}

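/*
 * A note on uasm_in_compat_space_p() as used above (see uasm.h): it
 * tests whether an address is representable as a sign-extended 32-bit
 * value, in which case a two-instruction lui/addiu sequence suffices
 * to load it.  The branches above exploit that to keep the common
 * swapper_pg_dir case at its minimal length, falling back to building
 * a full 64-bit address only for unusual memory layouts.
 */
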
#else /* !CONFIG_64BIT */

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
static void __cpuinit __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
        long pgdc = (long)pgd_current;

        /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * SMTC uses TCBind value as "CPU" index
         */
        uasm_i_mfc0(p, ptr, C0_TCBIND);
        UASM_i_LA_mostly(p, tmp, pgdc);
        uasm_i_srl(p, ptr, ptr, 19);
#else
        /*
         * smp_processor_id() << 3 is stored in CONTEXT.
         */
        uasm_i_mfc0(p, ptr, C0_CONTEXT);
        UASM_i_LA_mostly(p, tmp, pgdc);
        uasm_i_srl(p, ptr, ptr, 23);
#endif
        uasm_i_addu(p, ptr, tmp, ptr);
#else
        UASM_i_LA_mostly(p, ptr, pgdc);
#endif
        uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
        uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
        uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
        uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
        uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}

#endif /* !CONFIG_64BIT */

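/*
 * Design note (an observation, not from the original source): on SMP
 * the CP0 Context register is repurposed as per-CPU storage.  Each CPU
 * parks an identifier there at boot, and the handlers above shift it
 * into a byte offset into the pgd_current[] array, so a handler
 * running on any CPU finds that CPU's current pgd without having to
 * discover its own number or touch per-CPU data structures.
 */
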
static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
{
        unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
        unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);

        switch (current_cpu_type()) {
        case CPU_VR41XX:
        case CPU_VR4111:
        case CPU_VR4121:
        case CPU_VR4122:
        case CPU_VR4131:
        case CPU_VR4181:
        case CPU_VR4181A:
        case CPU_VR4133:
                shift += 2;
                break;

        default:
                break;
        }

        if (shift)
                UASM_i_SRL(p, ctx, ctx, shift);
        uasm_i_andi(p, ctx, ctx, mask);
}

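/*
 * Worked example (illustrative, assuming 4 KB pages and 32-bit PTEs,
 * i.e. PAGE_SHIFT = 12, PTE_T_LOG2 = 2, PTRS_PER_PTE = 1024): shift
 * evaluates to 4 - 3 + 0 = 1 and mask to 511 << 3 = 0xff8.  The
 * hardware places BadVPN2 (va >> 13) at bit 4 of c0_context, so
 * shifting right by 1 turns that into an offset in 8-byte even/odd
 * pte pairs, and the mask keeps exactly the in-page-table bits.
 */
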
static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
        /*
         * Bug workaround for the Nevada. It seems as if under certain
         * circumstances the move from cp0_context might produce a
         * bogus result when the mfc0 instruction and its consumer are
         * in a different cacheline or a load instruction, probably any
         * memory reference, is between them.
         */
        switch (current_cpu_type()) {
        case CPU_NEVADA:
                UASM_i_LW(p, ptr, 0, ptr);
                GET_CONTEXT(p, tmp); /* get context reg */
                break;

        default:
                GET_CONTEXT(p, tmp); /* get context reg */
                UASM_i_LW(p, ptr, 0, ptr);
                break;
        }

        build_adjust_context(p, tmp);
        UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}

static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
                                           unsigned int ptep)
{
        /*
         * 64bit address support (36bit on a 32bit CPU) in a 32bit
         * kernel is a special case. Only a few CPUs use it.
         */
#ifdef CONFIG_64BIT_PHYS_ADDR
        if (cpu_has_64bits) {
                uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
                uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
                uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
                uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
                uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
                uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
        } else {
                int pte_off_even = sizeof(pte_t) / 2;
                int pte_off_odd = pte_off_even + sizeof(pte_t);

                /* The pte entries are pre-shifted */
                uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
                uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
                uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
                uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
        }
#else
        UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
        UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
        if (r45k_bvahwbug())
                build_tlb_probe_entry(p);
        UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
        if (r4k_250MHZhwbug())
                uasm_i_mtc0(p, 0, C0_ENTRYLO0);
        uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
        UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
        if (r45k_bvahwbug())
                uasm_i_mfc0(p, tmp, C0_INDEX);
        if (r4k_250MHZhwbug())
                uasm_i_mtc0(p, 0, C0_ENTRYLO1);
        uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
#endif
}

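/*
 * Why the ">> 6" works (background, not from this file): in the
 * default MIPS software pte layout the physical frame number and the
 * hardware G/V/D/C bits sit six bits to the left of their EntryLo
 * positions, with the software-only bits (present, accessed, ...)
 * parked in the low six bits.  A single right-shift therefore discards
 * the software bits and drops everything else into EntryLo format,
 * which is why a refill costs one shift rather than a mask-and-or
 * sequence.
 */
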
static void __cpuinit build_r4000_tlb_refill_handler(void)
{
        u32 *p = tlb_handler;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        u32 *f;
        unsigned int final_len;

        memset(tlb_handler, 0, sizeof(tlb_handler));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));
        memset(final_handler, 0, sizeof(final_handler));

        /*
         * create the plain linear handler
         */
        if (bcm1250_m3_war()) {
                UASM_i_MFC0(&p, K0, C0_BADVADDR);
                UASM_i_MFC0(&p, K1, C0_ENTRYHI);
                uasm_i_xor(&p, K0, K0, K1);
                UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
                uasm_il_bnez(&p, &r, K0, label_leave);
                /* No need for uasm_i_nop */
        }

#ifdef CONFIG_64BIT
        build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
        build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

        build_get_ptep(&p, K0, K1);
        build_update_entries(&p, K0, K1);
        build_tlb_write_entry(&p, &l, &r, tlb_random);
        uasm_l_leave(&l, p);
        uasm_i_eret(&p); /* return from trap */

#ifdef CONFIG_64BIT
        build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
#endif

        /*
         * Overflow check: For the 64bit handler, we need at least one
         * free instruction slot for the wrap-around branch. In worst
         * case, if the intended insertion point is a delay slot, we
         * need three, with the second nop'ed and the third being
         * unused.
         */
        /* Loongson2 ebase is different from r4k, we have more space */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
        if ((p - tlb_handler) > 64)
                panic("TLB refill handler space exceeded");
#else
        if (((p - tlb_handler) > 63)
            || (((p - tlb_handler) > 61)
                && uasm_insn_has_bdelay(relocs, tlb_handler + 29)))
                panic("TLB refill handler space exceeded");
#endif

        /*
         * Now fold the handler in the TLB refill handler space.
         */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
        f = final_handler;
        /* Simplest case, just copy the handler. */
        uasm_copy_handler(relocs, labels, tlb_handler, p, f);
        final_len = p - tlb_handler;
#else /* CONFIG_64BIT */
        f = final_handler + 32;
        if ((p - tlb_handler) <= 32) {
                /* Just copy the handler. */
                uasm_copy_handler(relocs, labels, tlb_handler, p, f);
                final_len = p - tlb_handler;
        } else {
                u32 *split = tlb_handler + 30;

                /*
                 * Find the split point.
                 */
                if (uasm_insn_has_bdelay(relocs, split - 1))
                        split--;

                /* Copy first part of the handler. */
                uasm_copy_handler(relocs, labels, tlb_handler, split, f);
                f += split - tlb_handler;

                /* Insert branch. */
                uasm_l_split(&l, final_handler);
                uasm_il_b(&f, &r, label_split);
                if (uasm_insn_has_bdelay(relocs, split))
                        uasm_i_nop(&f);
                else {
                        uasm_copy_handler(relocs, labels, split, split + 1, f);
                        uasm_move_labels(labels, f, f + 1, -1);
                        f++;
                        split++;
                }

                /* Copy the rest of the handler. */
                uasm_copy_handler(relocs, labels, split, p, final_handler);
                final_len = (f - (final_handler + 32)) + (p - split);
        }
#endif /* CONFIG_64BIT */

        uasm_resolve_relocs(relocs, labels);
        pr_debug("Wrote TLB refill handler (%u instructions).\n",
                 final_len);

        memcpy((void *)ebase, final_handler, 0x100);

        dump_handler((u32 *)ebase, 64);
}

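/*
 * Layout sketch for the 64-bit fold above (illustrative): the handler
 * proper lives in the second 32-instruction area (final_handler + 32,
 * i.e. ebase + 0x080, the XTLB refill vector), and when it exceeds 32
 * instructions the overflow is parked in the then-unused first area,
 * reached by a branch:
 *
 *      ebase + 0x000:  split: remainder of the handler
 *      ebase + 0x080:  first ~30 instructions
 *                      b split  (+ branch delay slot)
 *
 * The split point backs up one instruction if it would separate a
 * branch from its delay slot, which is what the
 * uasm_insn_has_bdelay() check is for.
 */
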
/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);

/*
 * 128 instructions for the fastpath handler is generous and should
 * never be exceeded.
 */
#define FASTPATH_SIZE 128

u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;

static void __cpuinit
iPTE_LW(u32 **p, struct uasm_label **l, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
        if (cpu_has_64bits)
                uasm_i_lld(p, pte, 0, ptr);
        else
# endif
                UASM_i_LL(p, pte, 0, ptr);
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
        if (cpu_has_64bits)
                uasm_i_ld(p, pte, 0, ptr);
        else
# endif
                UASM_i_LW(p, pte, 0, ptr);
#endif
}

static void __cpuinit
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
        unsigned int mode)
{
#ifdef CONFIG_64BIT_PHYS_ADDR
        unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif

        uasm_i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
        if (cpu_has_64bits)
                uasm_i_scd(p, pte, 0, ptr);
        else
# endif
                UASM_i_SC(p, pte, 0, ptr);

        if (r10000_llsc_war())
                uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
        else
                uasm_il_beqz(p, r, pte, label_smp_pgtable_change);

# ifdef CONFIG_64BIT_PHYS_ADDR
        if (!cpu_has_64bits) {
                /* no uasm_i_nop needed */
                uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
                uasm_i_ori(p, pte, pte, hwmode);
                uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
                uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
                /* no uasm_i_nop needed */
                uasm_i_lw(p, pte, 0, ptr);
        } else
                uasm_i_nop(p);
# else
        uasm_i_nop(p);
# endif
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
        if (cpu_has_64bits)
                uasm_i_sd(p, pte, 0, ptr);
        else
# endif
                UASM_i_SW(p, pte, 0, ptr);

# ifdef CONFIG_64BIT_PHYS_ADDR
        if (!cpu_has_64bits) {
                uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
                uasm_i_ori(p, pte, pte, hwmode);
                uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
                uasm_i_lw(p, pte, 0, ptr);
        }
# endif
#endif
}

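/*
 * On SMP the iPTE_LW/iPTE_SW pair above emits a classic ll/sc update
 * loop.  Reconstructed for illustration, the generated code is roughly
 *
 * smp_pgtable_change:
 *      ll      pte, 0(ptr)             # emitted by iPTE_LW
 *      ...                             # present/writable checks
 *      ori     pte, pte, mode
 *      sc      pte, 0(ptr)
 *      beqz    pte, smp_pgtable_change # lost the race, retry
 *
 * so a concurrent update of the same pte simply makes the store
 * conditional fail and the handler re-examine the fresh value.
 */
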
/*
 * Check if PTE is present, if not then jump to LABEL. PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with its original value.
 */
static void __cpuinit
build_pte_present(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                  unsigned int pte, unsigned int ptr, enum label_id lid)
{
        uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
        uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
        uasm_il_bnez(p, r, pte, lid);
        iPTE_LW(p, l, pte, ptr);
}

/* Make PTE valid, store result in PTR. */
static void __cpuinit
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
                 unsigned int ptr)
{
        unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

        iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be written to, if not branch to LABEL. Regardless,
 * restore PTE with its value from PTR when done.
 */
static void __cpuinit
build_pte_writable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                   unsigned int pte, unsigned int ptr, enum label_id lid)
{
        uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
        uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
        uasm_il_bnez(p, r, pte, lid);
        iPTE_LW(p, l, pte, ptr);
}

/*
 * Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
static void __cpuinit
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
                 unsigned int ptr)
{
        unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
                             | _PAGE_DIRTY);

        iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be modified, if not branch to LABEL. Regardless,
 * restore PTE with its value from PTR when done.
 */
static void __cpuinit
build_pte_modifiable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                     unsigned int pte, unsigned int ptr, enum label_id lid)
{
        uasm_i_andi(p, pte, pte, _PAGE_WRITE);
        uasm_il_beqz(p, r, pte, lid);
        iPTE_LW(p, l, pte, ptr);
}

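/*
 * The andi/xori idiom above, spelled out (an explanatory note): andi
 * isolates the required bits, e.g. _PAGE_PRESENT | _PAGE_READ, and
 * xori then inverts exactly those bits, so the result is zero iff all
 * of them were set.  The bnez therefore falls through only for a
 * present, readable pte, and the iPTE_LW sitting in the branch delay
 * slot reloads the original pte value for whichever path is taken.
 */
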
/*
 * R3000 style TLB load/store/modify handlers.
 */

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
static void __cpuinit
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
        uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
        uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
        uasm_i_tlbwi(p);
        uasm_i_jr(p, tmp);
        uasm_i_rfe(p); /* branch delay */
}

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate.  This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill.  Then it returns.
 */
static void __cpuinit
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
                             struct uasm_reloc **r, unsigned int pte,
                             unsigned int tmp)
{
        uasm_i_mfc0(p, tmp, C0_INDEX);
        uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
        uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
        uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
        uasm_i_tlbwi(p); /* cp0 delay */
        uasm_i_jr(p, tmp);
        uasm_i_rfe(p); /* branch delay */
        uasm_l_r3000_write_probe_fail(l, *p);
        uasm_i_tlbwr(p); /* cp0 delay */
        uasm_i_jr(p, tmp);
        uasm_i_rfe(p); /* branch delay */
}

static void __cpuinit
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
                                   unsigned int ptr)
{
        long pgdc = (long)pgd_current;

        uasm_i_mfc0(p, pte, C0_BADVADDR);
        uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
        uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
        uasm_i_srl(p, pte, pte, 22); /* load delay */
        uasm_i_sll(p, pte, pte, 2);
        uasm_i_addu(p, ptr, ptr, pte);
        uasm_i_mfc0(p, pte, C0_CONTEXT);
        uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
        uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
        uasm_i_addu(p, ptr, ptr, pte);
        uasm_i_lw(p, pte, 0, ptr);
        uasm_i_tlbp(p); /* load delay */
}

static void __cpuinit build_r3000_tlb_load_handler(void)
{
        u32 *p = handle_tlbl;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;

        memset(handle_tlbl, 0, sizeof(handle_tlbl));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        build_r3000_tlbchange_handler_head(&p, K0, K1);
        build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
        uasm_i_nop(&p); /* load delay */
        build_make_valid(&p, &r, K0, K1);
        build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

        uasm_l_nopage_tlbl(&l, p);
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
        uasm_i_nop(&p);

        if ((p - handle_tlbl) > FASTPATH_SIZE)
                panic("TLB load handler fastpath space exceeded");

        uasm_resolve_relocs(relocs, labels);
        pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
                 (unsigned int)(p - handle_tlbl));

        dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}

static void __cpuinit build_r3000_tlb_store_handler(void)
{
        u32 *p = handle_tlbs;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;

        memset(handle_tlbs, 0, sizeof(handle_tlbs));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        build_r3000_tlbchange_handler_head(&p, K0, K1);
        build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
        uasm_i_nop(&p); /* load delay */
        build_make_write(&p, &r, K0, K1);
        build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

        uasm_l_nopage_tlbs(&l, p);
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
        uasm_i_nop(&p);

        if ((p - handle_tlbs) > FASTPATH_SIZE)
                panic("TLB store handler fastpath space exceeded");

        uasm_resolve_relocs(relocs, labels);
        pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
                 (unsigned int)(p - handle_tlbs));

        dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}

static void __cpuinit build_r3000_tlb_modify_handler(void)
{
        u32 *p = handle_tlbm;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;

        memset(handle_tlbm, 0, sizeof(handle_tlbm));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        build_r3000_tlbchange_handler_head(&p, K0, K1);
        build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
        uasm_i_nop(&p); /* load delay */
        build_make_write(&p, &r, K0, K1);
        build_r3000_pte_reload_tlbwi(&p, K0, K1);

        uasm_l_nopage_tlbm(&l, p);
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
        uasm_i_nop(&p);

        if ((p - handle_tlbm) > FASTPATH_SIZE)
                panic("TLB modify handler fastpath space exceeded");

        uasm_resolve_relocs(relocs, labels);
        pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
                 (unsigned int)(p - handle_tlbm));

        dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}

/*
 * R4000 style TLB load/store/modify handlers.
 */
static void __cpuinit
build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
                                   struct uasm_reloc **r, unsigned int pte,
                                   unsigned int ptr)
{
#ifdef CONFIG_64BIT
        build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
#else
        build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
#endif

        UASM_i_MFC0(p, pte, C0_BADVADDR);
        UASM_i_LW(p, ptr, 0, ptr);
        UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
        uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
        UASM_i_ADDU(p, ptr, ptr, pte);

#ifdef CONFIG_SMP
        uasm_l_smp_pgtable_change(l, *p);
#endif
        iPTE_LW(p, l, pte, ptr); /* get even pte */
        if (!m4kc_tlbp_war())
                build_tlb_probe_entry(p);
}

static void __cpuinit
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
                                   struct uasm_reloc **r, unsigned int tmp,
                                   unsigned int ptr)
{
        uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
        uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
        build_update_entries(p, tmp, ptr);
        build_tlb_write_entry(p, l, r, tlb_indexed);
        uasm_l_leave(l, *p);
        uasm_i_eret(p); /* return from trap */

#ifdef CONFIG_64BIT
        build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
#endif
}

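/*
 * A note on the ori/xori pair in the tail above (explanatory): PTEs
 * are handled as even/odd pairs because each TLB entry maps two pages
 * via EntryLo0/EntryLo1.  ori sets the sizeof(pte_t) bit and xori then
 * clears it again, which rounds ptr down to the even pte of the pair
 * regardless of which of the two pages faulted, so that
 * build_update_entries() can always load both halves from ptr and
 * ptr + sizeof(pte_t).
 */
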
static void __cpuinit build_r4000_tlb_load_handler(void)
{
        u32 *p = handle_tlbl;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;

        memset(handle_tlbl, 0, sizeof(handle_tlbl));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        if (bcm1250_m3_war()) {
                UASM_i_MFC0(&p, K0, C0_BADVADDR);
                UASM_i_MFC0(&p, K1, C0_ENTRYHI);
                uasm_i_xor(&p, K0, K0, K1);
                UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
                uasm_il_bnez(&p, &r, K0, label_leave);
                /* No need for uasm_i_nop */
        }

        build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
        build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
        if (m4kc_tlbp_war())
                build_tlb_probe_entry(&p);
        build_make_valid(&p, &r, K0, K1);
        build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

        uasm_l_nopage_tlbl(&l, p);
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
        uasm_i_nop(&p);

        if ((p - handle_tlbl) > FASTPATH_SIZE)
                panic("TLB load handler fastpath space exceeded");

        uasm_resolve_relocs(relocs, labels);
        pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
                 (unsigned int)(p - handle_tlbl));

        dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}

static void __cpuinit build_r4000_tlb_store_handler(void)
{
        u32 *p = handle_tlbs;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;

        memset(handle_tlbs, 0, sizeof(handle_tlbs));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
        build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
        if (m4kc_tlbp_war())
                build_tlb_probe_entry(&p);
        build_make_write(&p, &r, K0, K1);
        build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

        uasm_l_nopage_tlbs(&l, p);
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
        uasm_i_nop(&p);

        if ((p - handle_tlbs) > FASTPATH_SIZE)
                panic("TLB store handler fastpath space exceeded");

        uasm_resolve_relocs(relocs, labels);
        pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
                 (unsigned int)(p - handle_tlbs));

        dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}

static void __cpuinit build_r4000_tlb_modify_handler(void)
{
        u32 *p = handle_tlbm;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;

        memset(handle_tlbm, 0, sizeof(handle_tlbm));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
        build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
        if (m4kc_tlbp_war())
                build_tlb_probe_entry(&p);
        /* Present and writable bits set, set accessed and dirty bits. */
        build_make_write(&p, &r, K0, K1);
        build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

        uasm_l_nopage_tlbm(&l, p);
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
        uasm_i_nop(&p);

        if ((p - handle_tlbm) > FASTPATH_SIZE)
                panic("TLB modify handler fastpath space exceeded");

        uasm_resolve_relocs(relocs, labels);
        pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
                 (unsigned int)(p - handle_tlbm));

        dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}

void __cpuinit build_tlb_refill_handler(void)
{
        /*
         * The refill handler is generated per-CPU; multi-node systems
         * may have local storage for it. The other handlers are only
         * needed once.
         */
        static int run_once = 0;

        switch (current_cpu_type()) {
        case CPU_R2000:
        case CPU_R3000:
        case CPU_R3000A:
        case CPU_R3081E:
        case CPU_TX3912:
        case CPU_TX3922:
        case CPU_TX3927:
                build_r3000_tlb_refill_handler();
                if (!run_once) {
                        build_r3000_tlb_load_handler();
                        build_r3000_tlb_store_handler();
                        build_r3000_tlb_modify_handler();
                        run_once++;
                }
                break;

        case CPU_R6000:
        case CPU_R6000A:
                panic("No R6000 TLB refill handler yet");
                break;

        case CPU_R8000:
                panic("No R8000 TLB refill handler yet");
                break;

        default:
                build_r4000_tlb_refill_handler();
                if (!run_once) {
                        build_r4000_tlb_load_handler();
                        build_r4000_tlb_store_handler();
                        build_r4000_tlb_modify_handler();
                        run_once++;
                }
        }
}

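/*
 * Note (explanatory, not from the original source): run_once is
 * function-local static state, so the load/store/modify fastpaths are
 * synthesized only the first time through, while the refill handler is
 * regenerated on every call, matching the per-CPU comment above.
 */
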
void __cpuinit flush_tlb_handlers(void)
{
        flush_icache_range((unsigned long)handle_tlbl,
                           (unsigned long)handle_tlbl + sizeof(handle_tlbl));
        flush_icache_range((unsigned long)handle_tlbs,
                           (unsigned long)handle_tlbs + sizeof(handle_tlbs));
        flush_icache_range((unsigned long)handle_tlbm,
                           (unsigned long)handle_tlbm + sizeof(handle_tlbm));
}