Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/e6500: TLB miss handler with hardware tablewalk support

There are a few things that make the existing hw tablewalk handlers
unsuitable for e6500:

- Indirect entries go in TLB1 (though the resulting direct entries go in
TLB0).

- It has threads, but no "tlbsrx." -- so we need a spinlock and
a normal "tlbsx". Because we need this lock, hardware tablewalk
is mandatory on e6500 unless we want to add spinlock+tlbsx to
the normal bolted TLB miss handler.

- TLB1 has no HES (nor next-victim hint) so we need software round robin
(TODO: integrate this round robin data with hugetlb/KVM)

- The existing tablewalk handlers map half of a page table at a time,
because IBM hardware has a fixed 1MiB indirect page size. e6500
has variable size indirect entries, with a minimum of 2MiB.
So we can't do the half-page indirect mapping, and even if we
could it would be less efficient than mapping the full page.

- Like on e5500, the linear mapping is bolted, so we don't need the
overhead of supporting nested tlb misses.

Note that hardware tablewalk does not work in rev1 of e6500.
We do not expect to support e6500 rev1 in mainline Linux.

Signed-off-by: Scott Wood <scottwood@freescale.com>
Cc: Mihai Caraman <mihai.caraman@freescale.com>

+325 -35
+13
arch/powerpc/include/asm/mmu-book3e.h
··· 286 286 extern int mmu_linear_psize; 287 287 extern int mmu_vmemmap_psize; 288 288 289 + struct tlb_core_data { 290 + /* For software way selection, as on Freescale TLB1 */ 291 + u8 esel_next, esel_max, esel_first; 292 + 293 + /* Per-core spinlock for e6500 TLB handlers (no tlbsrx.) */ 294 + u8 lock; 295 + }; 296 + 289 297 #ifdef CONFIG_PPC64 290 298 extern unsigned long linear_map_top; 299 + extern int book3e_htw_mode; 300 + 301 + #define PPC_HTW_NONE 0 302 + #define PPC_HTW_IBM 1 303 + #define PPC_HTW_E6500 2 291 304 292 305 /* 293 306 * 64-bit booke platforms don't load the tlb in the tlb miss handler code.
+10 -9
arch/powerpc/include/asm/mmu.h
··· 180 180 #define MMU_PAGE_64K_AP 3 /* "Admixed pages" (hash64 only) */ 181 181 #define MMU_PAGE_256K 4 182 182 #define MMU_PAGE_1M 5 183 - #define MMU_PAGE_4M 6 184 - #define MMU_PAGE_8M 7 185 - #define MMU_PAGE_16M 8 186 - #define MMU_PAGE_64M 9 187 - #define MMU_PAGE_256M 10 188 - #define MMU_PAGE_1G 11 189 - #define MMU_PAGE_16G 12 190 - #define MMU_PAGE_64G 13 183 + #define MMU_PAGE_2M 6 184 + #define MMU_PAGE_4M 7 185 + #define MMU_PAGE_8M 8 186 + #define MMU_PAGE_16M 9 187 + #define MMU_PAGE_64M 10 188 + #define MMU_PAGE_256M 11 189 + #define MMU_PAGE_1G 12 190 + #define MMU_PAGE_16G 13 191 + #define MMU_PAGE_64G 14 191 192 192 - #define MMU_PAGE_COUNT 14 193 + #define MMU_PAGE_COUNT 15 193 194 194 195 #if defined(CONFIG_PPC_STD_MMU_64) 195 196 /* 64-bit classic hash table MMU */
+6
arch/powerpc/include/asm/paca.h
··· 113 113 /* Keep pgd in the same cacheline as the start of extlb */ 114 114 pgd_t *pgd __attribute__((aligned(0x80))); /* Current PGD */ 115 115 pgd_t *kernel_pgd; /* Kernel PGD */ 116 + 117 + /* Shared by all threads of a core -- points to tcd of first thread */ 118 + struct tlb_core_data *tcd_ptr; 119 + 116 120 /* We can have up to 3 levels of reentrancy in the TLB miss handler */ 117 121 u64 extlb[3][EX_TLB_SIZE / sizeof(u64)]; 118 122 u64 exmc[8]; /* used for machine checks */ ··· 127 123 void *mc_kstack; 128 124 void *crit_kstack; 129 125 void *dbg_kstack; 126 + 127 + struct tlb_core_data tcd; 130 128 #endif /* CONFIG_PPC_BOOK3E */ 131 129 132 130 mm_context_t context;
+9
arch/powerpc/kernel/asm-offsets.c
··· 203 203 DEFINE(PACA_MC_STACK, offsetof(struct paca_struct, mc_kstack)); 204 204 DEFINE(PACA_CRIT_STACK, offsetof(struct paca_struct, crit_kstack)); 205 205 DEFINE(PACA_DBG_STACK, offsetof(struct paca_struct, dbg_kstack)); 206 + DEFINE(PACA_TCD_PTR, offsetof(struct paca_struct, tcd_ptr)); 207 + 208 + DEFINE(TCD_ESEL_NEXT, 209 + offsetof(struct tlb_core_data, esel_next)); 210 + DEFINE(TCD_ESEL_MAX, 211 + offsetof(struct tlb_core_data, esel_max)); 212 + DEFINE(TCD_ESEL_FIRST, 213 + offsetof(struct tlb_core_data, esel_first)); 214 + DEFINE(TCD_LOCK, offsetof(struct tlb_core_data, lock)); 206 215 #endif /* CONFIG_PPC_BOOK3E */ 207 216 208 217 #ifdef CONFIG_PPC_STD_MMU_64
+5
arch/powerpc/kernel/paca.c
··· 160 160 #ifdef CONFIG_PPC_STD_MMU_64 161 161 new_paca->slb_shadow_ptr = init_slb_shadow(cpu); 162 162 #endif /* CONFIG_PPC_STD_MMU_64 */ 163 + 164 + #ifdef CONFIG_PPC_BOOK3E 165 + /* For now -- if we have threads this will be adjusted later */ 166 + new_paca->tcd_ptr = &new_paca->tcd; 167 + #endif 163 168 } 164 169 165 170 /* Put the paca pointer into r13 and SPRG_PACA */
+31
arch/powerpc/kernel/setup_64.c
··· 97 97 int icache_bsize; 98 98 int ucache_bsize; 99 99 100 + #if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP) 101 + static void setup_tlb_core_data(void) 102 + { 103 + int cpu; 104 + 105 + for_each_possible_cpu(cpu) { 106 + int first = cpu_first_thread_sibling(cpu); 107 + 108 + paca[cpu].tcd_ptr = &paca[first].tcd; 109 + 110 + /* 111 + * If we have threads, we need either tlbsrx. 112 + * or e6500 tablewalk mode, or else TLB handlers 113 + * will be racy and could produce duplicate entries. 114 + */ 115 + if (smt_enabled_at_boot >= 2 && 116 + !mmu_has_feature(MMU_FTR_USE_TLBRSRV) && 117 + book3e_htw_mode != PPC_HTW_E6500) { 118 + /* Should we panic instead? */ 119 + WARN_ONCE("%s: unsupported MMU configuration -- expect problems\n", 120 + __func__); 121 + } 122 + } 123 + } 124 + #else 125 + static void setup_tlb_core_data(void) 126 + { 127 + } 128 + #endif 129 + 100 130 #ifdef CONFIG_SMP 101 131 102 132 static char *smt_enabled_cmdline; ··· 475 445 476 446 smp_setup_cpu_maps(); 477 447 check_smt_enabled(); 448 + setup_tlb_core_data(); 478 449 479 450 #ifdef CONFIG_SMP 480 451 /* Release secondary cpus out of their spinloops at 0x60 now that
+7
arch/powerpc/mm/fsl_booke_mmu.c
··· 52 52 #include <asm/smp.h> 53 53 #include <asm/machdep.h> 54 54 #include <asm/setup.h> 55 + #include <asm/paca.h> 55 56 56 57 #include "mmu_decl.h" 57 58 ··· 191 190 phys += cam_sz; 192 191 } 193 192 tlbcam_index = i; 193 + 194 + #ifdef CONFIG_PPC64 195 + get_paca()->tcd.esel_next = i; 196 + get_paca()->tcd.esel_max = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; 197 + get_paca()->tcd.esel_first = i; 198 + #endif 194 199 195 200 return amount_mapped; 196 201 }
+6
arch/powerpc/mm/mem.c
··· 307 307 308 308 void __init mem_init(void) 309 309 { 310 + /* 311 + * book3s is limited to 16 page sizes due to encoding this in 312 + * a 4-bit field for slices. 313 + */ 314 + BUILD_BUG_ON(MMU_PAGE_COUNT > 16); 315 + 310 316 #ifdef CONFIG_SWIOTLB 311 317 swiotlb_init(0); 312 318 #endif
+171
arch/powerpc/mm/tlb_low_64e.S
··· 239 239 beq tlb_miss_common_bolted
240 240 b itlb_miss_kernel_bolted
241 241
242 + /*
243 + * TLB miss handling for e6500 and derivatives, using hardware tablewalk.
244 + *
245 + * Linear mapping is bolted: no virtual page table or nested TLB misses
246 + * Indirect entries in TLB1, hardware loads resulting direct entries
247 + * into TLB0
248 + * No HES or NV hint on TLB1, so we need to do software round-robin
249 + * No tlbsrx. so we need a spinlock, and we have to deal
250 + * with MAS-damage caused by tlbsx
251 + * 4K pages only
252 + */
253 +
254 + START_EXCEPTION(instruction_tlb_miss_e6500)
255 + tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0
256 +
257 + ld r11,PACA_TCD_PTR(r13)
258 + srdi. r15,r16,60 /* get region */
259 + ori r16,r16,1
260 +
261 + TLB_MISS_STATS_SAVE_INFO_BOLTED
262 + bne tlb_miss_kernel_e6500 /* user/kernel test */
263 +
264 + b tlb_miss_common_e6500
265 +
266 + START_EXCEPTION(data_tlb_miss_e6500)
267 + tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR
268 +
269 + ld r11,PACA_TCD_PTR(r13)
270 + srdi. r15,r16,60 /* get region */
271 + rldicr r16,r16,0,62
272 +
273 + TLB_MISS_STATS_SAVE_INFO_BOLTED
274 + bne tlb_miss_kernel_e6500 /* user vs kernel check */
275 +
276 + /*
277 + * This is the guts of the TLB miss handler for e6500 and derivatives.
278 + * We are entered with:
279 + *
280 + * r16 = page of faulting address (low bit 0 if data, 1 if instruction)
281 + * r15 = crap (free to use)
282 + * r14 = page table base
283 + * r13 = PACA
284 + * r11 = tlb_per_core ptr
285 + * r10 = crap (free to use)
286 + */
287 + tlb_miss_common_e6500:
288 + /*
289 + * Search if we already have an indirect entry for that virtual
290 + * address, and if we do, bail out.
291 + *
292 + * MAS6:IND should be already set based on MAS4
293 + */
294 + addi r10,r11,TCD_LOCK
295 + 1: lbarx r15,0,r10
296 + cmpdi r15,0
297 + bne 2f
298 + li r15,1
299 + stbcx. r15,0,r10
300 + bne 1b
301 + .subsection 1
302 + 2: lbz r15,0(r10)
303 + cmpdi r15,0
304 + bne 2b
305 + b 1b
306 + .previous
307 +
308 + mfspr r15,SPRN_MAS2
309 +
310 + tlbsx 0,r16
311 + mfspr r10,SPRN_MAS1
312 + andis. r10,r10,MAS1_VALID@h
313 + bne tlb_miss_done_e6500
314 +
315 + /* Undo MAS-damage from the tlbsx */
316 + mfspr r10,SPRN_MAS1
317 + oris r10,r10,MAS1_VALID@h
318 + mtspr SPRN_MAS1,r10
319 + mtspr SPRN_MAS2,r15
320 +
321 + /* Now, we need to walk the page tables. First check if we are in
322 + * range.
323 + */
324 + rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
325 + bne- tlb_miss_fault_e6500
326 +
327 + rldicl r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
328 + cmpldi cr0,r14,0
329 + clrrdi r15,r15,3
330 + beq- tlb_miss_fault_e6500 /* No PGDIR, bail */
331 + ldx r14,r14,r15 /* grab pgd entry */
332 +
333 + rldicl r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
334 + clrrdi r15,r15,3
335 + cmpdi cr0,r14,0
336 + bge tlb_miss_fault_e6500 /* Bad pgd entry or hugepage; bail */
337 + ldx r14,r14,r15 /* grab pud entry */
338 +
339 + rldicl r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
340 + clrrdi r15,r15,3
341 + cmpdi cr0,r14,0
342 + bge tlb_miss_fault_e6500
343 + ldx r14,r14,r15 /* Grab pmd entry */
344 +
345 + mfspr r10,SPRN_MAS0
346 + cmpdi cr0,r14,0
347 + bge tlb_miss_fault_e6500
348 +
349 + /* Now we build the MAS for a 2M indirect page:
350 + *
351 + * MAS 0 : ESEL needs to be filled by software round-robin
352 + * MAS 1 : Fully set up
353 + * - PID already updated by caller if necessary
354 + * - TSIZE for now is base ind page size always
355 + * - TID already cleared if necessary
356 + * MAS 2 : Default not 2M-aligned, need to be redone
357 + * MAS 3+7 : Needs to be done
358 + */
359 +
360 + ori r14,r14,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
361 + mtspr SPRN_MAS7_MAS3,r14
362 +
363 + clrrdi r15,r16,21 /* make EA 2M-aligned */
364 + mtspr SPRN_MAS2,r15
365 +
366 + lbz r15,TCD_ESEL_NEXT(r11)
367 + lbz r16,TCD_ESEL_MAX(r11)
368 + lbz r14,TCD_ESEL_FIRST(r11)
369 + rlwimi r10,r15,16,0x00ff0000 /* insert esel_next into MAS0 */
370 + addi r15,r15,1 /* increment esel_next */
371 + mtspr SPRN_MAS0,r10
372 + cmpw r15,r16
373 + iseleq r15,r14,r15 /* if next == last use first */
374 + stb r15,TCD_ESEL_NEXT(r11)
375 +
376 + tlbwe
377 +
378 + tlb_miss_done_e6500:
379 + .macro tlb_unlock_e6500
380 + li r15,0
381 + isync
382 + stb r15,TCD_LOCK(r11)
383 + .endm
384 +
385 + tlb_unlock_e6500
386 + TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
387 + tlb_epilog_bolted
388 + rfi
389 +
390 + tlb_miss_kernel_e6500:
391 + mfspr r10,SPRN_MAS1
392 + ld r14,PACA_KERNELPGD(r13)
393 + cmpldi cr0,r15,8 /* Check for vmalloc region */
394 + rlwinm r10,r10,0,16,1 /* Clear TID */
395 + mtspr SPRN_MAS1,r10
396 + beq+ tlb_miss_common_e6500
397 +
398 + tlb_miss_fault_e6500:
399 + tlb_unlock_e6500
400 + /* We need to check if it was an instruction miss */
401 + andi. r16,r16,1
402 + bne itlb_miss_fault_e6500
403 + dtlb_miss_fault_e6500:
404 + TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
405 + tlb_epilog_bolted
406 + b exc_data_storage_book3e
407 + itlb_miss_fault_e6500:
408 + TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
409 + tlb_epilog_bolted
410 + b exc_instruction_storage_book3e
411 +
412 +
242 413 /**********************************************************************
243 414 * *
244 415 * TLB miss handling for Book3E with TLB reservation and HES support *
+67 -26
arch/powerpc/mm/tlb_nohash.c
··· 43 43 #include <asm/tlb.h> 44 44 #include <asm/code-patching.h> 45 45 #include <asm/hugetlb.h> 46 + #include <asm/paca.h> 46 47 47 48 #include "mmu_decl.h" 48 49 ··· 58 57 [MMU_PAGE_4K] = { 59 58 .shift = 12, 60 59 .enc = BOOK3E_PAGESZ_4K, 60 + }, 61 + [MMU_PAGE_2M] = { 62 + .shift = 21, 63 + .enc = BOOK3E_PAGESZ_2M, 61 64 }, 62 65 [MMU_PAGE_4M] = { 63 66 .shift = 22, ··· 141 136 int mmu_linear_psize; /* Page size used for the linear mapping */ 142 137 int mmu_pte_psize; /* Page size used for PTE pages */ 143 138 int mmu_vmemmap_psize; /* Page size used for the virtual mem map */ 144 - int book3e_htw_enabled; /* Is HW tablewalk enabled ? */ 139 + int book3e_htw_mode; /* HW tablewalk? Value is PPC_HTW_* */ 145 140 unsigned long linear_map_top; /* Top of linear mapping */ 146 141 147 142 #endif /* CONFIG_PPC64 */ ··· 382 377 { 383 378 int tsize = mmu_psize_defs[mmu_pte_psize].enc; 384 379 385 - if (book3e_htw_enabled) { 380 + if (book3e_htw_mode != PPC_HTW_NONE) { 386 381 unsigned long start = address & PMD_MASK; 387 382 unsigned long end = address + PMD_SIZE; 388 383 unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift; ··· 435 430 def = &mmu_psize_defs[psize]; 436 431 shift = def->shift; 437 432 438 - if (shift == 0) 433 + if (shift == 0 || shift & 1) 439 434 continue; 440 435 441 436 /* adjust to be in terms of 4^shift Kb */ ··· 445 440 def->flags |= MMU_PAGE_SIZE_DIRECT; 446 441 } 447 442 448 - goto no_indirect; 443 + goto out; 449 444 } 450 445 451 446 if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) { 452 - u32 tlb1ps = mfspr(SPRN_TLB1PS); 447 + u32 tlb1cfg, tlb1ps; 448 + 449 + tlb0cfg = mfspr(SPRN_TLB0CFG); 450 + tlb1cfg = mfspr(SPRN_TLB1CFG); 451 + tlb1ps = mfspr(SPRN_TLB1PS); 452 + eptcfg = mfspr(SPRN_EPTCFG); 453 + 454 + if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT)) 455 + book3e_htw_mode = PPC_HTW_E6500; 456 + 457 + /* 458 + * We expect 4K subpage size and unrestricted indirect size. 
459 + * The lack of a restriction on indirect size is a Freescale 460 + * extension, indicated by PSn = 0 but SPSn != 0. 461 + */ 462 + if (eptcfg != 2) 463 + book3e_htw_mode = PPC_HTW_NONE; 453 464 454 465 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { 455 466 struct mmu_psize_def *def = &mmu_psize_defs[psize]; 456 467 457 468 if (tlb1ps & (1U << (def->shift - 10))) { 458 469 def->flags |= MMU_PAGE_SIZE_DIRECT; 470 + 471 + if (book3e_htw_mode && psize == MMU_PAGE_2M) 472 + def->flags |= MMU_PAGE_SIZE_INDIRECT; 459 473 } 460 474 } 461 475 462 - goto no_indirect; 476 + goto out; 463 477 } 464 478 #endif 465 479 ··· 495 471 } 496 472 497 473 /* Indirect page sizes supported ? */ 498 - if ((tlb0cfg & TLBnCFG_IND) == 0) 499 - goto no_indirect; 474 + if ((tlb0cfg & TLBnCFG_IND) == 0 || 475 + (tlb0cfg & TLBnCFG_PT) == 0) 476 + goto out; 477 + 478 + book3e_htw_mode = PPC_HTW_IBM; 500 479 501 480 /* Now, we only deal with one IND page size for each 502 481 * direct size. Hopefully all implementations today are ··· 524 497 def->ind = ps + 10; 525 498 } 526 499 } 527 - no_indirect: 528 500 501 + out: 529 502 /* Cleanup array and print summary */ 530 503 pr_info("MMU: Supported page sizes\n"); 531 504 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { ··· 547 520 548 521 static void setup_mmu_htw(void) 549 522 { 550 - /* Check if HW tablewalk is present, and if yes, enable it by: 551 - * 552 - * - patching the TLB miss handlers to branch to the 553 - * one dedicates to it 554 - * 555 - * - setting the global book3e_htw_enabled 556 - */ 557 - unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG); 523 + /* 524 + * If we want to use HW tablewalk, enable it by patching the TLB miss 525 + * handlers to branch to the one dedicated to it. 
526 + */
558 527
559 - if ((tlb0cfg & TLBnCFG_IND) &&
560 - (tlb0cfg & TLBnCFG_PT)) {
528 + switch (book3e_htw_mode) {
529 + case PPC_HTW_IBM:
561 530 patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
562 531 patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
563 - book3e_htw_enabled = 1;
532 + break;
533 + case PPC_HTW_E6500:
534 + patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
535 + patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
536 + break;
537 + }
564 538 pr_info("MMU: Book3E HW tablewalk %s\n",
565 - book3e_htw_enabled ? "enabled" : "not supported");
539 + book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
566 540 }
567 541
568 542 /*
··· 603 576 /* Set MAS4 based on page table setting */
604 577
605 578 mas4 = 0x4 << MAS4_WIMGED_SHIFT;
606 - if (book3e_htw_enabled) {
607 - mas4 |= mas4 | MAS4_INDD;
579 + switch (book3e_htw_mode) {
580 + case PPC_HTW_E6500:
581 + mas4 |= MAS4_INDD;
582 + mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
583 + mas4 |= MAS4_TLBSELD(1);
584 + mmu_pte_psize = MMU_PAGE_2M;
585 + break;
586 +
587 + case PPC_HTW_IBM:
588 + mas4 |= MAS4_INDD;
608 589 #ifdef CONFIG_PPC_64K_PAGES
609 590 mas4 |= BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
610 591 mmu_pte_psize = MMU_PAGE_256M;
··· 620 585 mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
621 586 mmu_pte_psize = MMU_PAGE_1M;
622 587 #endif
623 - } else {
588 + break;
589 +
590 + case PPC_HTW_NONE:
624 591 #ifdef CONFIG_PPC_64K_PAGES
625 592 mas4 |= BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
626 593 #else
627 594 mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
628 595 #endif
629 596 mmu_pte_psize = mmu_virtual_psize;
597 + break;
630 598 }
631 599 mtspr(SPRN_MAS4, mas4);
632 600
··· 649 611 /* limit memory so we dont have linear faults */
650 612 memblock_enforce_memory_limit(linear_map_top);
651 613
652 - patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
653 - patch_exception(0x1e0, exc_instruction_tlb_miss_bolted_book3e);
614 + if (book3e_htw_mode == PPC_HTW_NONE) {
615 + patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
616 + patch_exception(0x1e0,
617 + exc_instruction_tlb_miss_bolted_book3e);
618 + }
654 619 }
655 620
656 621