
powerpc: Add TLB management code for 64-bit Book3E

This adds the TLB miss handler assembly and the low level TLB flush routines,
along with the necessary hooks for dealing with our virtual page tables
or indirect TLB entries that need to be flushed when PTE pages are freed.

There is currently no support for hugetlbfs.
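For reference, the trick being hooked up here is that the PTEs appear linearly
mapped in a dedicated "virtual page table" region (the faulting address's region
ID with bit 0x1 set), so the first-level miss handler can locate the PTE for any
faulting address with pure shift/mask arithmetic. A minimal C model of that
address construction, assuming 4K pages and 8-byte PTEs (the helper name is
illustrative, not part of the patch):

#include <stdint.h>

#define PAGE_SHIFT	12

/* Build the address of the PTE for 'ea' inside virtual page table
 * space: keep the top 4 region bits, set the low region bit that
 * marks page-table space, and index 8-byte PTEs below it.
 */
static uint64_t vpte_address(uint64_t ea)
{
	uint64_t region = (ea >> 60) | 0x1;			/* page-table region */
	uint64_t index  = (ea & 0x0fffffffffffffffull) >> PAGE_SHIFT;

	return (region << 60) | (index << 3);			/* 8 bytes per PTE */
}

This mirrors what normal_tlb_miss does with rldicl/clrrdi/or in the assembly
below, and what tlb_flush_pgtable() computes when a PTE page is freed.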

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

+1055 -5
+3
arch/powerpc/include/asm/mmu-40x.h
···
 
 #endif /* !__ASSEMBLY__ */
 
+#define mmu_virtual_psize	MMU_PAGE_4K
+#define mmu_linear_psize	MMU_PAGE_256M
+
 #endif /* _ASM_POWERPC_MMU_40X_H_ */
+6
arch/powerpc/include/asm/mmu-44x.h
···
 
 #if (PAGE_SHIFT == 12)
 #define PPC44x_TLBE_SIZE	PPC44x_TLB_4K
+#define mmu_virtual_psize	MMU_PAGE_4K
 #elif (PAGE_SHIFT == 14)
 #define PPC44x_TLBE_SIZE	PPC44x_TLB_16K
+#define mmu_virtual_psize	MMU_PAGE_16K
 #elif (PAGE_SHIFT == 16)
 #define PPC44x_TLBE_SIZE	PPC44x_TLB_64K
+#define mmu_virtual_psize	MMU_PAGE_64K
 #elif (PAGE_SHIFT == 18)
 #define PPC44x_TLBE_SIZE	PPC44x_TLB_256K
+#define mmu_virtual_psize	MMU_PAGE_256K
 #else
 #error "Unsupported PAGE_SIZE"
 #endif
+
+#define mmu_linear_psize	MMU_PAGE_256M
 
 #define PPC44x_PGD_OFF_SHIFT	(32 - PGDIR_SHIFT + PGD_T_LOG2)
 #define PPC44x_PGD_OFF_MASK_BIT	(PGDIR_SHIFT - PGD_T_LOG2)
+3
arch/powerpc/include/asm/mmu-8xx.h
···
 } mm_context_t;
 #endif /* !__ASSEMBLY__ */
 
+#define mmu_virtual_psize	MMU_PAGE_4K
+#define mmu_linear_psize	MMU_PAGE_8M
+
 #endif /* _ASM_POWERPC_MMU_8XX_H_ */
+6
arch/powerpc/include/asm/mmu-hash32.h
···
 
 #endif /* !__ASSEMBLY__ */
 
+/* We happily ignore the smaller BATs on 601, we don't actually use
+ * those definitions on hash32 at the moment anyway
+ */
+#define mmu_virtual_psize	MMU_PAGE_4K
+#define mmu_linear_psize	MMU_PAGE_256M
+
 #endif /* _ASM_POWERPC_MMU_HASH32_H_ */
+8
arch/powerpc/include/asm/mmu_context.h
···
 	tsk->thread.pgdir = next->pgd;
 #endif /* CONFIG_PPC32 */
 
+	/* 64-bit Book3E keeps track of current PGD in the PACA */
+#ifdef CONFIG_PPC_BOOK3E_64
+	get_paca()->pgd = next->pgd;
+#endif
 	/* Nothing else to do if we aren't actually switching */
 	if (prev == next)
 		return;
···
 static inline void enter_lazy_tlb(struct mm_struct *mm,
 				  struct task_struct *tsk)
 {
+	/* 64-bit Book3E keeps track of current PGD in the PACA */
+#ifdef CONFIG_PPC_BOOK3E_64
+	get_paca()->pgd = NULL;
+#endif
 }
 
 #endif /* __KERNEL__ */
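The asm TLB miss handlers rely on the user and kernel PGD pointers sitting next
to each other in the PACA (PACAPGD and PACA_KERNELPGD in the assembly), which is
why keeping get_paca()->pgd in sync on context switch matters. A sketch of the
layout being assumed; the real fields live in struct paca_struct and this struct
is illustrative only:

struct paca_layout_sketch {
	void *pgd;		/* PACAPGD: current user PGD, NULL when lazy */
	void *kernel_pgd;	/* PACA_KERNELPGD: swapper_pg_dir, at offset +8 */
};

That +8 adjacency is what lets the second-level handler turn "0 for user, 8 for
kernel" directly into a load offset.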
+4
arch/powerpc/kernel/setup_64.c
···
 #include <asm/udbg.h>
 #include <asm/kexec.h>
 #include <asm/swiotlb.h>
+#include <asm/mmu_context.h>
 
 #include "setup.h"
 
···
 {
 	local_paca = &paca[cpu];
 	mtspr(SPRN_SPRG_PACA, local_paca);
+#ifdef CONFIG_PPC_BOOK3E
+	mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
+#endif
 }
 
 /*
+13 -1
arch/powerpc/mm/mmu_decl.h
···
 #else /* CONFIG_40x || CONFIG_8xx */
 extern void _tlbil_all(void);
 extern void _tlbil_pid(unsigned int pid);
+#ifdef CONFIG_PPC_BOOK3E
+extern void _tlbil_pid_noind(unsigned int pid);
+#else
 #define _tlbil_pid_noind(pid)	_tlbil_pid(pid)
+#endif
 #endif /* !(CONFIG_40x || CONFIG_8xx) */
 
 /*
···
 {
 	asm volatile ("tlbie %0; sync" : : "r" (address) : "memory");
 }
-#else /* CONFIG_8xx */
+#elif defined(CONFIG_PPC_BOOK3E)
+extern void _tlbil_va(unsigned long address, unsigned int pid,
+		      unsigned int tsize, unsigned int ind);
+#else
 extern void __tlbil_va(unsigned long address, unsigned int pid);
 static inline void _tlbil_va(unsigned long address, unsigned int pid,
 			     unsigned int tsize, unsigned int ind)
···
  * implementation. When that becomes the case, this will be
  * an extern.
  */
+#ifdef CONFIG_PPC_BOOK3E
+extern void _tlbivax_bcast(unsigned long address, unsigned int pid,
+			   unsigned int tsize, unsigned int ind);
+#else
 static inline void _tlbivax_bcast(unsigned long address, unsigned int pid,
 				  unsigned int tsize, unsigned int ind)
 {
 	BUG();
 }
+#endif
 
 #else /* CONFIG_PPC_MMU_NOHASH */
 
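A hedged usage sketch of the widened interface: on Book3E the extra arguments
select the translation size (a BOOK3E_PAGESZ_* code, as returned by
mmu_get_tsize() later in this patch) and whether indirect (IND) entries are
targeted; on other nohash flavours the inline simply ignores them. Values below
are illustrative:

/* Invalidate one direct 4K translation for this PID */
_tlbil_va(vmaddr, pid, BOOK3E_PAGESZ_4K, 0);

/* Invalidate the indirect (IND) entry covering the same EA,
 * sized for a PTE page (1M with 4K base pages)
 */
_tlbil_va(vmaddr, pid, BOOK3E_PAGESZ_1M, 1);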
+734
arch/powerpc/mm/tlb_low_64e.S
···
+/*
+ * Low level TLB miss handlers for Book3E
+ *
+ * Copyright (C) 2008-2009
+ *     Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/processor.h>
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/cputable.h>
+#include <asm/pgtable.h>
+#include <asm/reg.h>
+#include <asm/exception-64e.h>
+#include <asm/ppc-opcode.h>
+
+#ifdef CONFIG_PPC_64K_PAGES
+#define VPTE_PMD_SHIFT	(PTE_INDEX_SIZE+1)
+#else
+#define VPTE_PMD_SHIFT	(PTE_INDEX_SIZE)
+#endif
+#define VPTE_PUD_SHIFT	(VPTE_PMD_SHIFT + PMD_INDEX_SIZE)
+#define VPTE_PGD_SHIFT	(VPTE_PUD_SHIFT + PUD_INDEX_SIZE)
+#define VPTE_INDEX_SIZE (VPTE_PGD_SHIFT + PGD_INDEX_SIZE)
+
+
+/**********************************************************************
+ *                                                                    *
+ * TLB miss handling for Book3E with TLB reservation and HES support  *
+ *                                                                    *
+ **********************************************************************/
+
+
+/* Data TLB miss */
+	START_EXCEPTION(data_tlb_miss)
+	TLB_MISS_PROLOG
+
+	/* Now we handle the fault proper. We only save DEAR in the normal
+	 * fault case since that's the only interesting value here.
+	 * We could probably also optimize by not saving SRR0/1 in the
+	 * linear mapping case but I'll leave that for later
+	 */
+	mfspr	r14,SPRN_ESR
+	mfspr	r16,SPRN_DEAR		/* get faulting address */
+	srdi	r15,r16,60		/* get region */
+	cmpldi	cr0,r15,0xc		/* linear mapping ? */
+	TLB_MISS_STATS_SAVE_INFO
+	beq	tlb_load_linear		/* yes -> go to linear map load */
+
+	/* The page tables are mapped virtually linear. At this point, though,
+	 * we don't know whether we are trying to fault in a first level
+	 * virtual address or a virtual page table address. We can get that
+	 * from bit 0x1 of the region ID which we have set for a page table
+	 */
+	andi.	r10,r15,0x1
+	bne-	virt_page_table_tlb_miss
+
+	std	r14,EX_TLB_ESR(r12);	/* save ESR */
+	std	r16,EX_TLB_DEAR(r12);	/* save DEAR */
+
+	/* We need _PAGE_PRESENT and _PAGE_ACCESSED set */
+	li	r11,_PAGE_PRESENT
+	oris	r11,r11,_PAGE_ACCESSED@h
+
+	/* We do the user/kernel test for the PID here along with the RW test
+	 */
+	cmpldi	cr0,r15,0		/* Check for user region */
+
+	/* We pre-test some combination of permissions to avoid double
+	 * faults:
+	 *
+	 * We move the ESR:ST bit into the position of _PAGE_BAP_SW in the PTE
+	 * ESR_ST       is 0x00800000
+	 * _PAGE_BAP_SW is 0x00000010
+	 * So the shift is >> 19. This tests for supervisor writeability.
+	 * If the page happens to be supervisor writeable and not user
+	 * writeable, we will take a new fault later, but that should be
+	 * a rare enough case.
+	 *
+	 * We also move ESR_ST in _PAGE_DIRTY position
+	 * _PAGE_DIRTY is 0x00001000 so the shift is >> 11
+	 *
+	 * MAS1 is preset for all we need except for TID that needs to
+	 * be cleared for kernel translations
+	 */
+	rlwimi	r11,r14,32-19,27,27
+	rlwimi	r11,r14,32-16,19,19
+	beq	normal_tlb_miss
+	/* XXX replace the RMW cycles with immediate loads + writes */
+1:	mfspr	r10,SPRN_MAS1
+	cmpldi	cr0,r15,8		/* Check for vmalloc region */
+	rlwinm	r10,r10,0,16,1		/* Clear TID */
+	mtspr	SPRN_MAS1,r10
+	beq+	normal_tlb_miss
+
+	/* We got a crappy address, just fault with whatever DEAR and ESR
+	 * are here
+	 */
+	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
+	TLB_MISS_EPILOG_ERROR
+	b	exc_data_storage_book3e
+
+/* Instruction TLB miss */
+	START_EXCEPTION(instruction_tlb_miss)
+	TLB_MISS_PROLOG
+
+	/* If we take a recursive fault, the second level handler may need
+	 * to know whether we are handling a data or instruction fault in
+	 * order to get to the right store fault handler. We provide that
+	 * info by writing a crazy value in ESR in our exception frame
+	 */
+	li	r14,-1	/* store to exception frame is done later */
+
+	/* Now we handle the fault proper. We only save DEAR in the non
+	 * linear mapping case since we know the linear mapping case will
+	 * not re-enter. We could indeed optimize and also not save SRR0/1
+	 * in the linear mapping case but I'll leave that for later
+	 *
+	 * Faulting address is SRR0 which is already in r16
+	 */
+	srdi	r15,r16,60		/* get region */
+	cmpldi	cr0,r15,0xc		/* linear mapping ? */
+	TLB_MISS_STATS_SAVE_INFO
+	beq	tlb_load_linear		/* yes -> go to linear map load */
+
+	/* We do the user/kernel test for the PID here along with the RW test
+	 */
+	li	r11,_PAGE_PRESENT|_PAGE_HWEXEC	/* Base perm */
+	oris	r11,r11,_PAGE_ACCESSED@h
+
+	cmpldi	cr0,r15,0		/* Check for user region */
+	std	r14,EX_TLB_ESR(r12)	/* write crazy -1 to frame */
+	beq	normal_tlb_miss
+	/* XXX replace the RMW cycles with immediate loads + writes */
+1:	mfspr	r10,SPRN_MAS1
+	cmpldi	cr0,r15,8		/* Check for vmalloc region */
+	rlwinm	r10,r10,0,16,1		/* Clear TID */
+	mtspr	SPRN_MAS1,r10
+	beq+	normal_tlb_miss
+
+	/* We got a crappy address, just fault */
+	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
+	TLB_MISS_EPILOG_ERROR
+	b	exc_instruction_storage_book3e
+
+/*
+ * This is the guts of the first-level TLB miss handler for direct
+ * misses. We are entered with:
+ *
+ * r16 = faulting address
+ * r15 = region ID
+ * r14 = crap (free to use)
+ * r13 = PACA
+ * r12 = TLB exception frame in PACA
+ * r11 = PTE permission mask
+ * r10 = crap (free to use)
+ */
+normal_tlb_miss:
+	/* So we first construct the page table address. We do that by
+	 * shifting the bottom of the address (not the region ID) by
+	 * PAGE_SHIFT-3, clearing the bottom 3 bits (get a PTE ptr) and
+	 * or'ing the fourth high bit.
+	 *
+	 * NOTE: For 64K pages, we do things slightly differently in
+	 * order to handle the weird page table format used by linux
+	 */
+	ori	r10,r15,0x1
+#ifdef CONFIG_PPC_64K_PAGES
+	/* For the top bits, 16 bytes per PTE */
+	rldicl	r14,r16,64-(PAGE_SHIFT-4),PAGE_SHIFT-4+4
+	/* Now create the bottom bits as 0 in position 0x8000 and
+	 * the rest calculated for 8 bytes per PTE
+	 */
+	rldicl	r15,r16,64-(PAGE_SHIFT-3),64-15
+	/* Insert the bottom bits in */
+	rlwimi	r14,r15,0,16,31
+#else
+	rldicl	r14,r16,64-(PAGE_SHIFT-3),PAGE_SHIFT-3+4
+#endif
+	sldi	r15,r10,60
+	clrrdi	r14,r14,3
+	or	r10,r15,r14
+
+	/* Set the TLB reservation and search for an existing entry. Then load
+	 * the entry.
+	 */
+	PPC_TLBSRX_DOT(0,r16)
+	ld	r14,0(r10)
+	beq	normal_tlb_miss_done
+
+finish_normal_tlb_miss:
+	/* Check if required permissions are met */
+	andc.	r15,r11,r14
+	bne-	normal_tlb_miss_access_fault
+
+	/* Now we build the MAS:
+	 *
+	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
+	 * MAS 1   :	Almost fully setup
+	 *               - PID already updated by caller if necessary
+	 *               - TSIZE need change if !base page size, not
+	 *                 yet implemented for now
+	 * MAS 2   :	Defaults not useful, need to be redone
+	 * MAS 3+7 :	Needs to be done
+	 *
+	 * TODO: mix up code below for better scheduling
+	 */
+	clrrdi	r11,r16,12		/* Clear low crap in EA */
+	rlwimi	r11,r14,32-19,27,31	/* Insert WIMGE */
+	mtspr	SPRN_MAS2,r11
+
+	/* Check page size, if not standard, update MAS1 */
+	rldicl	r11,r14,64-8,64-8
+#ifdef CONFIG_PPC_64K_PAGES
+	cmpldi	cr0,r11,BOOK3E_PAGESZ_64K
+#else
+	cmpldi	cr0,r11,BOOK3E_PAGESZ_4K
+#endif
+	beq-	1f
+	mfspr	r11,SPRN_MAS1
+	rlwimi	r11,r14,31,21,24
+	rlwinm	r11,r11,0,21,19
+	mtspr	SPRN_MAS1,r11
+1:
+	/* Move RPN in position */
+	rldicr	r11,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT
+	clrldi	r15,r11,12		/* Clear crap at the top */
+	rlwimi	r15,r14,32-8,22,25	/* Move in U bits */
+	rlwimi	r15,r14,32-2,26,31	/* Move in BAP bits */
+
+	/* Mask out SW and UW if !DIRTY (XXX optimize this !) */
+	andi.	r11,r14,_PAGE_DIRTY
+	bne	1f
+	li	r11,MAS3_SW|MAS3_UW
+	andc	r15,r15,r11
+1:	mtspr	SPRN_MAS7_MAS3,r15
+
+	tlbwe
+
+normal_tlb_miss_done:
+	/* We don't bother with restoring DEAR or ESR since we know we are
+	 * level 0 and just going back to userland. They are only needed
+	 * if you are going to take an access fault
+	 */
+	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
+	TLB_MISS_EPILOG_SUCCESS
+	rfi
+
+normal_tlb_miss_access_fault:
+	/* We need to check if it was an instruction miss */
+	andi.	r10,r11,_PAGE_HWEXEC
+	bne	1f
+	ld	r14,EX_TLB_DEAR(r12)
+	ld	r15,EX_TLB_ESR(r12)
+	mtspr	SPRN_DEAR,r14
+	mtspr	SPRN_ESR,r15
+	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
+	TLB_MISS_EPILOG_ERROR
+	b	exc_data_storage_book3e
+1:	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
+	TLB_MISS_EPILOG_ERROR
+	b	exc_instruction_storage_book3e
+
+
+/*
+ * This is the guts of the second-level TLB miss handler for direct
+ * misses. We are entered with:
+ *
+ * r16 = virtual page table faulting address
+ * r15 = region (top 4 bits of address)
+ * r14 = crap (free to use)
+ * r13 = PACA
+ * r12 = TLB exception frame in PACA
+ * r11 = crap (free to use)
+ * r10 = crap (free to use)
+ *
+ * Note that this should only ever be called as a second level handler
+ * with the current scheme when using SW load.
+ * That means we can always get the original fault DEAR at
+ * EX_TLB_DEAR-EX_TLB_SIZE(r12)
+ *
+ * It can be re-entered by the linear mapping miss handler. However, to
+ * avoid too much complication, it will restart the whole fault at level
+ * 0 so we don't care too much about clobbers
+ *
+ * XXX That code was written back when we couldn't clobber r14. We can now,
+ * so we could probably optimize things a bit
+ */
+virt_page_table_tlb_miss:
+	/* Are we hitting a kernel page table ? */
+	andi.	r10,r15,0x8
+
+	/* The cool thing now is that r10 contains 0 for user and 8 for kernel,
+	 * and we happen to have the swapper_pg_dir at offset 8 from the user
+	 * pgdir in the PACA :-).
+	 */
+	add	r11,r10,r13
+
+	/* If kernel, we need to clear MAS1 TID */
+	beq	1f
+	/* XXX replace the RMW cycles with immediate loads + writes */
+	mfspr	r10,SPRN_MAS1
+	rlwinm	r10,r10,0,16,1			/* Clear TID */
+	mtspr	SPRN_MAS1,r10
+1:
+	/* Search if we already have a TLB entry for that virtual address, and
+	 * if we do, bail out.
+	 */
+	PPC_TLBSRX_DOT(0,r16)
+	beq	virt_page_table_tlb_miss_done
+
+	/* Now, we need to walk the page tables. First check if we are in
+	 * range.
+	 */
+	rldicl.	r10,r16,64-(VPTE_INDEX_SIZE+3),VPTE_INDEX_SIZE+3+4
+	bne-	virt_page_table_tlb_miss_fault
+
+	/* Get the PGD pointer */
+	ld	r15,PACAPGD(r11)
+	cmpldi	cr0,r15,0
+	beq-	virt_page_table_tlb_miss_fault
+
+	/* Get to PGD entry */
+	rldicl	r11,r16,64-VPTE_PGD_SHIFT,64-PGD_INDEX_SIZE-3
+	clrrdi	r10,r11,3
+	ldx	r15,r10,r15
+	cmpldi	cr0,r15,0
+	beq	virt_page_table_tlb_miss_fault
+
+#ifndef CONFIG_PPC_64K_PAGES
+	/* Get to PUD entry */
+	rldicl	r11,r16,64-VPTE_PUD_SHIFT,64-PUD_INDEX_SIZE-3
+	clrrdi	r10,r11,3
+	ldx	r15,r10,r15
+	cmpldi	cr0,r15,0
+	beq	virt_page_table_tlb_miss_fault
+#endif /* CONFIG_PPC_64K_PAGES */
+
+	/* Get to PMD entry */
+	rldicl	r11,r16,64-VPTE_PMD_SHIFT,64-PMD_INDEX_SIZE-3
+	clrrdi	r10,r11,3
+	ldx	r15,r10,r15
+	cmpldi	cr0,r15,0
+	beq	virt_page_table_tlb_miss_fault
+
+	/* Ok, we're all right, we can now create a kernel translation for
+	 * a 4K or 64K page from r16 -> r15.
+	 */
+	/* Now we build the MAS:
+	 *
+	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
+	 * MAS 1   :	Almost fully setup
+	 *               - PID already updated by caller if necessary
+	 *               - TSIZE for now is base page size always
+	 * MAS 2   :	Use defaults
+	 * MAS 3+7 :	Needs to be done
+	 *
+	 * So we only do MAS 2 and 3 for now...
+	 */
+	clrldi	r11,r15,4		/* remove region ID from RPN */
+	ori	r10,r11,1		/* Or-in SR */
+	mtspr	SPRN_MAS7_MAS3,r10
+
+	tlbwe
+
+virt_page_table_tlb_miss_done:
+
+	/* We have overridden MAS2:EPN but currently our primary TLB miss
+	 * handler will always restore it so that should not be an issue,
+	 * if we ever optimize the primary handler to not write MAS2 on
+	 * some cases, we'll have to restore MAS2:EPN here based on the
+	 * original fault's DEAR. If we do that we have to modify the
+	 * ITLB miss handler to also store SRR0 in the exception frame
+	 * as DEAR.
+	 *
+	 * However, one nasty thing we did is we cleared the reservation
+	 * (well, potentially we did). We do a trick here: if we are not
+	 * a level 0 exception (we interrupted the TLB miss) we offset the
+	 * return address by -4 in order to replay the tlbsrx instruction
+	 * there
+	 */
+	subf	r10,r13,r12
+	cmpldi	cr0,r10,PACA_EXTLB+EX_TLB_SIZE
+	bne-	1f
+	ld	r11,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
+	addi	r10,r11,-4
+	std	r10,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
+1:
+	/* Return to caller, normal case */
+	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_OK);
+	TLB_MISS_EPILOG_SUCCESS
+	rfi
+
+virt_page_table_tlb_miss_fault:
+	/* If we fault here, things are a little bit tricky. We need to call
+	 * either the data or instruction store fault, and we need to retrieve
+	 * the original fault address and ESR (for data).
+	 *
+	 * The thing is, we know that in normal circumstances, this is
+	 * always called as a second level tlb miss for SW load or as a first
+	 * level TLB miss for HW load, so we should be able to peek at the
+	 * relevant information in the first exception frame in the PACA.
+	 *
+	 * However, we do need to double check that, because we may just hit
+	 * a stray kernel pointer or a userland attack trying to hit those
+	 * areas. If that is the case, we do a data fault. (We can't get here
+	 * from an instruction tlb miss anyway).
+	 *
+	 * Note also that when going to a fault, we must unwind the previous
+	 * level as well. Since we are doing that, we don't need to clear or
+	 * restore the TLB reservation either.
+	 */
+	subf	r10,r13,r12
+	cmpldi	cr0,r10,PACA_EXTLB+EX_TLB_SIZE
+	bne-	virt_page_table_tlb_miss_whacko_fault
+
+	/* We dig the original DEAR and ESR from slot 0 */
+	ld	r15,EX_TLB_DEAR+PACA_EXTLB(r13)
+	ld	r16,EX_TLB_ESR+PACA_EXTLB(r13)
+
+	/* We check for the "special" ESR value for instruction faults */
+	cmpdi	cr0,r16,-1
+	beq	1f
+	mtspr	SPRN_DEAR,r15
+	mtspr	SPRN_ESR,r16
+	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_PT_FAULT);
+	TLB_MISS_EPILOG_ERROR
+	b	exc_data_storage_book3e
+1:	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_PT_FAULT);
+	TLB_MISS_EPILOG_ERROR
+	b	exc_instruction_storage_book3e
+
+virt_page_table_tlb_miss_whacko_fault:
+	/* The linear fault will restart everything so ESR and DEAR will
+	 * not have been clobbered, let's just fault with what we have
+	 */
+	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_FAULT);
+	TLB_MISS_EPILOG_ERROR
+	b	exc_data_storage_book3e
+
+
+/**************************************************************
+ *                                                            *
+ * TLB miss handling for Book3E with hw page table support    *
+ *                                                            *
+ **************************************************************/
+
+
+/* Data TLB miss */
+	START_EXCEPTION(data_tlb_miss_htw)
+	TLB_MISS_PROLOG
+
+	/* Now we handle the fault proper. We only save DEAR in the normal
+	 * fault case since that's the only interesting value here.
+	 * We could probably also optimize by not saving SRR0/1 in the
+	 * linear mapping case but I'll leave that for later
+	 */
+	mfspr	r14,SPRN_ESR
+	mfspr	r16,SPRN_DEAR		/* get faulting address */
+	srdi	r11,r16,60		/* get region */
+	cmpldi	cr0,r11,0xc		/* linear mapping ? */
+	TLB_MISS_STATS_SAVE_INFO
+	beq	tlb_load_linear		/* yes -> go to linear map load */
+
+	/* We do the user/kernel test for the PID here along with the RW test
+	 */
+	cmpldi	cr0,r11,0		/* Check for user region */
+	ld	r15,PACAPGD(r13)	/* Load user pgdir */
+	beq	htw_tlb_miss
+
+	/* XXX replace the RMW cycles with immediate loads + writes */
+1:	mfspr	r10,SPRN_MAS1
+	cmpldi	cr0,r11,8		/* Check for vmalloc region */
+	rlwinm	r10,r10,0,16,1		/* Clear TID */
+	mtspr	SPRN_MAS1,r10
+	ld	r15,PACA_KERNELPGD(r13)	/* Load kernel pgdir */
+	beq+	htw_tlb_miss
+
+	/* We got a crappy address, just fault with whatever DEAR and ESR
+	 * are here
+	 */
+	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
+	TLB_MISS_EPILOG_ERROR
+	b	exc_data_storage_book3e
+
+/* Instruction TLB miss */
+	START_EXCEPTION(instruction_tlb_miss_htw)
+	TLB_MISS_PROLOG
+
+	/* If we take a recursive fault, the second level handler may need
+	 * to know whether we are handling a data or instruction fault in
+	 * order to get to the right store fault handler. We provide that
+	 * info by keeping a crazy value for ESR in r14
+	 */
+	li	r14,-1	/* store to exception frame is done later */
+
+	/* Now we handle the fault proper. We only save DEAR in the non
+	 * linear mapping case since we know the linear mapping case will
+	 * not re-enter. We could indeed optimize and also not save SRR0/1
+	 * in the linear mapping case but I'll leave that for later
+	 *
+	 * Faulting address is SRR0 which is already in r16
+	 */
+	srdi	r11,r16,60		/* get region */
+	cmpldi	cr0,r11,0xc		/* linear mapping ? */
+	TLB_MISS_STATS_SAVE_INFO
+	beq	tlb_load_linear		/* yes -> go to linear map load */
+
+	/* We do the user/kernel test for the PID here along with the RW test
+	 */
+	cmpldi	cr0,r11,0		/* Check for user region */
+	ld	r15,PACAPGD(r13)	/* Load user pgdir */
+	beq	htw_tlb_miss
+
+	/* XXX replace the RMW cycles with immediate loads + writes */
+1:	mfspr	r10,SPRN_MAS1
+	cmpldi	cr0,r11,8		/* Check for vmalloc region */
+	rlwinm	r10,r10,0,16,1		/* Clear TID */
+	mtspr	SPRN_MAS1,r10
+	ld	r15,PACA_KERNELPGD(r13)	/* Load kernel pgdir */
+	beq+	htw_tlb_miss
+
+	/* We got a crappy address, just fault */
+	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
+	TLB_MISS_EPILOG_ERROR
+	b	exc_instruction_storage_book3e
+
+
+/*
+ * This is the guts of the second-level TLB miss handler for direct
+ * misses. We are entered with:
+ *
+ * r16 = virtual page table faulting address
+ * r15 = PGD pointer
+ * r14 = ESR
+ * r13 = PACA
+ * r12 = TLB exception frame in PACA
+ * r11 = crap (free to use)
+ * r10 = crap (free to use)
+ *
+ * It can be re-entered by the linear mapping miss handler. However, to
+ * avoid too much complication, it will save/restore things for us
+ */
+htw_tlb_miss:
+	/* Search if we already have a TLB entry for that virtual address, and
+	 * if we do, bail out.
+	 *
+	 * MAS1:IND should be already set based on MAS4
+	 */
+	PPC_TLBSRX_DOT(0,r16)
+	beq	htw_tlb_miss_done
+
+	/* Now, we need to walk the page tables. First check if we are in
+	 * range.
+	 */
+	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
+	bne-	htw_tlb_miss_fault
+
+	/* Get the PGD pointer */
+	cmpldi	cr0,r15,0
+	beq-	htw_tlb_miss_fault
+
+	/* Get to PGD entry */
+	rldicl	r11,r16,64-(PGDIR_SHIFT-3),64-PGD_INDEX_SIZE-3
+	clrrdi	r10,r11,3
+	ldx	r15,r10,r15
+	cmpldi	cr0,r15,0
+	beq	htw_tlb_miss_fault
+
+#ifndef CONFIG_PPC_64K_PAGES
+	/* Get to PUD entry */
+	rldicl	r11,r16,64-(PUD_SHIFT-3),64-PUD_INDEX_SIZE-3
+	clrrdi	r10,r11,3
+	ldx	r15,r10,r15
+	cmpldi	cr0,r15,0
+	beq	htw_tlb_miss_fault
+#endif /* CONFIG_PPC_64K_PAGES */
+
+	/* Get to PMD entry */
+	rldicl	r11,r16,64-(PMD_SHIFT-3),64-PMD_INDEX_SIZE-3
+	clrrdi	r10,r11,3
+	ldx	r15,r10,r15
+	cmpldi	cr0,r15,0
+	beq	htw_tlb_miss_fault
+
+	/* Ok, we're all right, we can now create an indirect entry for
+	 * a 1M or 256M page.
+	 *
+	 * The last trick is now that because we use "half" pages for
+	 * the HTW (1M IND is 2K and 256M IND is 32K) we need to account
+	 * for an added LSB bit to the RPN. For 64K pages, there is no
+	 * problem as we already use 32K arrays (half PTE pages), but for
+	 * 4K pages we need to extract a bit from the virtual address and
+	 * insert it into the "PA52" bit of the RPN.
+	 */
+#ifndef CONFIG_PPC_64K_PAGES
+	rlwimi	r15,r16,32-9,20,20
+#endif
+	/* Now we build the MAS:
+	 *
+	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
+	 * MAS 1   :	Almost fully setup
+	 *               - PID already updated by caller if necessary
+	 *               - TSIZE for now is base ind page size always
+	 * MAS 2   :	Use defaults
+	 * MAS 3+7 :	Needs to be done
+	 */
+#ifdef CONFIG_PPC_64K_PAGES
+	ori	r10,r15,(BOOK3E_PAGESZ_64K << MAS3_SPSIZE_SHIFT)
+#else
+	ori	r10,r15,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
+#endif
+	mtspr	SPRN_MAS7_MAS3,r10
+
+	tlbwe
+
+htw_tlb_miss_done:
+	/* We don't bother with restoring DEAR or ESR since we know we are
+	 * level 0 and just going back to userland. They are only needed
+	 * if you are going to take an access fault
+	 */
+	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_OK)
+	TLB_MISS_EPILOG_SUCCESS
+	rfi
+
+htw_tlb_miss_fault:
+	/* We need to check if it was an instruction miss. We know this
+	 * though because r14 would contain -1
+	 */
+	cmpdi	cr0,r14,-1
+	beq	1f
+	mtspr	SPRN_DEAR,r16
+	mtspr	SPRN_ESR,r14
+	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_PT_FAULT)
+	TLB_MISS_EPILOG_ERROR
+	b	exc_data_storage_book3e
+1:	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_PT_FAULT)
+	TLB_MISS_EPILOG_ERROR
+	b	exc_instruction_storage_book3e
+
+/*
+ * This is the guts of the "any" level TLB miss handler for kernel linear
+ * mapping misses. We are entered with:
+ *
+ * r16 = faulting address
+ * r15 = crap (free to use)
+ * r14 = ESR (data) or -1 (instruction)
+ * r13 = PACA
+ * r12 = TLB exception frame in PACA
+ * r11 = crap (free to use)
+ * r10 = crap (free to use)
+ *
+ * In addition we know that we will not re-enter, so in theory, we could
+ * use a simpler epilog not restoring SRR0/1 etc... but we'll do that later.
+ *
+ * We also need to be careful about MAS registers here & TLB reservation,
+ * as we know we'll have clobbered them if we interrupt the main TLB miss
+ * handlers in which case we probably want to do a full restart at level
+ * 0 rather than saving / restoring the MAS.
+ *
+ * Note: If we care about performance of that core, we can easily shuffle
+ * a few things around
+ */
+tlb_load_linear:
+	/* For now, we assume the linear mapping is contiguous and stops at
+	 * linear_map_top. We also assume the size is a multiple of 1G, thus
+	 * we only use 1G pages for now. That might have to be changed in a
+	 * final implementation, especially when dealing with hypervisors
+	 */
+	ld	r11,PACATOC(r13)
+	ld	r11,linear_map_top@got(r11)
+	ld	r10,0(r11)
+	cmpld	cr0,r10,r16
+	bge	tlb_load_linear_fault
+
+	/* MAS1 needs a whole new setup. */
+	li	r15,(BOOK3E_PAGESZ_1GB<<MAS1_TSIZE_SHIFT)
+	oris	r15,r15,MAS1_VALID@h	/* MAS1 needs V and TSIZE */
+	mtspr	SPRN_MAS1,r15
+
+	/* Already somebody there ? */
+	PPC_TLBSRX_DOT(0,r16)
+	beq	tlb_load_linear_done
+
+	/* Now we build the remaining MAS. MAS0 and 2 should be fine
+	 * with their defaults, which leaves us with MAS 3 and 7. The
+	 * mapping is linear, so we just take the address, clear the
+	 * region bits, and or in the permission bits which are currently
+	 * hard wired
+	 */
+	clrrdi	r10,r16,30		/* 1G page index */
+	clrldi	r10,r10,4		/* clear region bits */
+	ori	r10,r10,MAS3_SR|MAS3_SW|MAS3_SX
+	mtspr	SPRN_MAS7_MAS3,r10
+
+	tlbwe
+
+tlb_load_linear_done:
+	/* We use the "error" epilog for success as we do want to
+	 * restore to the initial faulting context, whatever it was.
+	 * We do that because we can't resume a fault within a TLB
+	 * miss handler, due to MAS and TLB reservation being clobbered.
+	 */
+	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_LINEAR)
+	TLB_MISS_EPILOG_ERROR
+	rfi
+
+tlb_load_linear_fault:
+	/* We keep the DEAR and ESR around, this shouldn't have happened */
+	cmpdi	cr0,r14,-1
+	beq	1f
+	TLB_MISS_EPILOG_ERROR_SPECIAL
+	b	exc_data_storage_book3e
+1:	TLB_MISS_EPILOG_ERROR_SPECIAL
+	b	exc_instruction_storage_book3e
+
+
+#ifdef CONFIG_BOOK3E_MMU_TLB_STATS
+.tlb_stat_inc:
+1:	ldarx	r8,0,r9
+	addi	r8,r8,1
+	stdcx.	r8,0,r9
+	bne-	1b
+	blr
+#endif
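As a reading aid, the dispatch at the top of the miss handlers above can be
modelled in C as follows (region codes as used in the assembly: 0xc linear map,
0x8 vmalloc, 0x0 user, low bit set for virtual page table space; the function
is illustrative only, not part of the patch):

#include <stdint.h>

enum miss_target { LINEAR, VPTE, NORMAL, FAULT };

static enum miss_target classify_miss(uint64_t ea)
{
	uint64_t region = ea >> 60;

	if (region == 0xc)
		return LINEAR;	/* tlb_load_linear */
	if (region & 0x1)
		return VPTE;	/* virt_page_table_tlb_miss */
	if (region == 0x0 || region == 0x8)
		return NORMAL;	/* user or vmalloc -> normal_tlb_miss */
	return FAULT;		/* crappy address -> storage interrupt */
}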
+199 -4
arch/powerpc/mm/tlb_nohash.c
···
  *
  *  -- BenH
  *
- * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
- *                IBM Corp.
+ * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
+ *                     IBM Corp.
  *
  * Derived from arch/ppc/mm/init.c:
  *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
···
 #include <linux/pagemap.h>
 #include <linux/preempt.h>
 #include <linux/spinlock.h>
+#include <linux/lmb.h>
 
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
+#include <asm/code-patching.h>
 
 #include "mmu_decl.h"
+
+#ifdef CONFIG_PPC_BOOK3E
+struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
+	[MMU_PAGE_4K] = {
+		.shift	= 12,
+		.enc	= BOOK3E_PAGESZ_4K,
+	},
+	[MMU_PAGE_16K] = {
+		.shift	= 14,
+		.enc	= BOOK3E_PAGESZ_16K,
+	},
+	[MMU_PAGE_64K] = {
+		.shift	= 16,
+		.enc	= BOOK3E_PAGESZ_64K,
+	},
+	[MMU_PAGE_1M] = {
+		.shift	= 20,
+		.enc	= BOOK3E_PAGESZ_1M,
+	},
+	[MMU_PAGE_16M] = {
+		.shift	= 24,
+		.enc	= BOOK3E_PAGESZ_16M,
+	},
+	[MMU_PAGE_256M] = {
+		.shift	= 28,
+		.enc	= BOOK3E_PAGESZ_256M,
+	},
+	[MMU_PAGE_1G] = {
+		.shift	= 30,
+		.enc	= BOOK3E_PAGESZ_1GB,
+	},
+};
+static inline int mmu_get_tsize(int psize)
+{
+	return mmu_psize_defs[psize].enc;
+}
+#else
+static inline int mmu_get_tsize(int psize)
+{
+	/* This isn't used on !Book3E for now */
+	return 0;
+}
+#endif
+
+/* The variables below are currently only used on 64-bit Book3E
+ * though this will probably be made common with other nohash
+ * implementations at some point
+ */
+#ifdef CONFIG_PPC64
+
+int mmu_linear_psize;		/* Page size used for the linear mapping */
+int mmu_pte_psize;		/* Page size used for PTE pages */
+int book3e_htw_enabled;		/* Is HW tablewalk enabled ? */
+unsigned long linear_map_top;	/* Top of linear mapping */
+
+#endif /* CONFIG_PPC64 */
 
 /*
  * Base TLB flushing operations:
···
 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
 	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
-			       0 /* tsize unused for now */, 0);
+			       mmu_get_tsize(mmu_virtual_psize), 0);
 }
 EXPORT_SYMBOL(local_flush_tlb_page);
···
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 {
 	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
-			 0 /* tsize unused for now */, 0);
+			 mmu_get_tsize(mmu_virtual_psize), 0);
 }
 EXPORT_SYMBOL(flush_tlb_page);
···
 	/* Push out batch of freed page tables */
 	pte_free_finish();
 }
+
+/*
+ * Below are functions specific to the 64-bit variant of Book3E though that
+ * may change in the future
+ */
+
+#ifdef CONFIG_PPC64
+
+/*
+ * Handling of virtual linear page tables or indirect TLB entries
+ * flushing when PTE pages are freed
+ */
+void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
+{
+	int tsize = mmu_psize_defs[mmu_pte_psize].enc;
+
+	if (book3e_htw_enabled) {
+		unsigned long start = address & PMD_MASK;
+		unsigned long end = address + PMD_SIZE;
+		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
+
+		/* This isn't the most optimal, ideally we would factor out the
+		 * while preempt & CPU mask mucking around, or even the IPI but
+		 * it will do for now
+		 */
+		while (start < end) {
+			__flush_tlb_page(tlb->mm, start, tsize, 1);
+			start += size;
+		}
+	} else {
+		unsigned long rmask = 0xf000000000000000ul;
+		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
+		unsigned long vpte = address & ~rmask;
+
+#ifdef CONFIG_PPC_64K_PAGES
+		vpte = (vpte >> (PAGE_SHIFT - 4)) & ~0xfffful;
+#else
+		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
+#endif
+		vpte |= rid;
+		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
+	}
+}
+
+/*
+ * Early initialization of the MMU TLB code
+ */
+static void __early_init_mmu(int boot_cpu)
+{
+	extern unsigned int interrupt_base_book3e;
+	extern unsigned int exc_data_tlb_miss_htw_book3e;
+	extern unsigned int exc_instruction_tlb_miss_htw_book3e;
+
+	unsigned int *ibase = &interrupt_base_book3e;
+	unsigned int mas4;
+
+	/* XXX This will have to be decided at runtime, but right
+	 * now our boot and TLB miss code hard wires it
+	 */
+	mmu_linear_psize = MMU_PAGE_1G;
+
+	/* Check if HW tablewalk is present, and if yes, enable it by:
+	 *
+	 * - patching the TLB miss handlers to branch to the
+	 *   one dedicated to it
+	 *
+	 * - setting the global book3e_htw_enabled
+	 *
+	 * - Set MAS4:INDD and default page size
+	 */
+
+	/* XXX This code only checks for TLB 0 capabilities and doesn't
+	 *     check what page size combos are supported by the HW. It
+	 *     also doesn't handle the case where a separate array holds
+	 *     the IND entries from the array loaded by the PT.
+	 */
+	if (boot_cpu) {
+		unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG);
+
+		/* Check if HW loader is supported */
+		if ((tlb0cfg & TLBnCFG_IND) &&
+		    (tlb0cfg & TLBnCFG_PT)) {
+			patch_branch(ibase + (0x1c0 / 4),
+				     (unsigned long)&exc_data_tlb_miss_htw_book3e, 0);
+			patch_branch(ibase + (0x1e0 / 4),
+				     (unsigned long)&exc_instruction_tlb_miss_htw_book3e, 0);
+			book3e_htw_enabled = 1;
+		}
+		pr_info("MMU: Book3E Page Tables %s\n",
+			book3e_htw_enabled ? "Enabled" : "Disabled");
+	}
+
+	/* Set MAS4 based on page table setting */
+
+	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
+	if (book3e_htw_enabled) {
+		mas4 |= MAS4_INDD;
+#ifdef CONFIG_PPC_64K_PAGES
+		mas4 |=	BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
+		mmu_pte_psize = MMU_PAGE_256M;
+#else
+		mas4 |=	BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
+		mmu_pte_psize = MMU_PAGE_1M;
+#endif
+	} else {
+#ifdef CONFIG_PPC_64K_PAGES
+		mas4 |=	BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
+#else
+		mas4 |=	BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
+#endif
+		mmu_pte_psize = mmu_virtual_psize;
+	}
+	mtspr(SPRN_MAS4, mas4);
+
+	/* Set the global containing the top of the linear mapping
+	 * for use by the TLB miss code
+	 */
+	linear_map_top = lmb_end_of_DRAM();
+
+	/* A sync won't hurt us after mucking around with
+	 * the MMU configuration
+	 */
+	mb();
+}
+
+void __init early_init_mmu(void)
+{
+	__early_init_mmu(1);
+}
+
+void __cpuinit early_init_mmu_secondary(void)
+{
+	__early_init_mmu(0);
+}
+
+#endif /* CONFIG_PPC64 */
+79
arch/powerpc/mm/tlb_nohash_low.S
···
 	isync
 1:	wrtee	r10
 	blr
+#elif defined(CONFIG_PPC_BOOK3E)
+/*
+ * New Book3E (>= 2.06) implementation
+ *
+ * Note: We may be able to get away without the interrupt masking stuff
+ * if we save/restore MAS6 on exceptions that might modify it
+ */
+_GLOBAL(_tlbil_pid)
+	slwi	r4,r3,MAS6_SPID_SHIFT
+	mfmsr	r10
+	wrteei	0
+	mtspr	SPRN_MAS6,r4
+	PPC_TLBILX_PID(0,0)
+	wrtee	r10
+	msync
+	isync
+	blr
+
+_GLOBAL(_tlbil_pid_noind)
+	slwi	r4,r3,MAS6_SPID_SHIFT
+	mfmsr	r10
+	ori	r4,r4,MAS6_SIND
+	wrteei	0
+	mtspr	SPRN_MAS6,r4
+	PPC_TLBILX_PID(0,0)
+	wrtee	r10
+	msync
+	isync
+	blr
+
+_GLOBAL(_tlbil_all)
+	PPC_TLBILX_ALL(0,0)
+	msync
+	isync
+	blr
+
+_GLOBAL(_tlbil_va)
+	mfmsr	r10
+	wrteei	0
+	cmpwi	cr0,r6,0
+	slwi	r4,r4,MAS6_SPID_SHIFT
+	rlwimi	r4,r5,MAS6_ISIZE_SHIFT,MAS6_ISIZE_MASK
+	beq	1f
+	rlwimi	r4,r6,MAS6_SIND_SHIFT,MAS6_SIND
+1:	mtspr	SPRN_MAS6,r4		/* assume AS=0 for now */
+	PPC_TLBILX_VA(0,r3)
+	msync
+	isync
+	wrtee	r10
+	blr
+
+_GLOBAL(_tlbivax_bcast)
+	mfmsr	r10
+	wrteei	0
+	cmpwi	cr0,r6,0
+	slwi	r4,r4,MAS6_SPID_SHIFT
+	rlwimi	r4,r5,MAS6_ISIZE_SHIFT,MAS6_ISIZE_MASK
+	beq	1f
+	rlwimi	r4,r6,MAS6_SIND_SHIFT,MAS6_SIND
+1:	mtspr	SPRN_MAS6,r4		/* assume AS=0 for now */
+	PPC_TLBIVAX(0,r3)
+	eieio
+	tlbsync
+	sync
+	wrtee	r10
+	blr
+
+_GLOBAL(set_context)
+#ifdef CONFIG_BDI_SWITCH
+	/* Context switch the PTE pointer for the Abatron BDI2000.
+	 * The PGDIR is the second parameter.
+	 */
+	lis	r5, abatron_pteptrs@h
+	ori	r5, r5, abatron_pteptrs@l
+	stw	r4, 0x4(r5)
+#endif
+	mtspr	SPRN_PID,r3
+	isync			/* Force context change */
+	blr
 #else
 #error Unsupported processor type !
 #endif
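For readers mapping the register usage above back to C: per the 64-bit ELF ABI
the first arguments arrive in r3..r6, so the Book3E entry points line up with
the prototypes declared in mmu_decl.h earlier in this patch:

/* r3 = address (EA to invalidate)
 * r4 = pid     (search PID, shifted into MAS6[SPID])
 * r5 = tsize   (BOOK3E_PAGESZ_* code, inserted into MAS6[ISIZE])
 * r6 = ind     (non-zero sets MAS6[SIND] to target indirect entries)
 */
void _tlbil_va(unsigned long address, unsigned int pid,
	       unsigned int tsize, unsigned int ind);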