Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

MIPS: Retrieve ASID masks using function accepting struct cpuinfo_mips

In preparation for supporting variable ASID masks, retrieve ASID masks
using functions in asm/cpu-info.h which accept struct cpuinfo_mips. This
will allow those functions to determine the ASID mask based upon the CPU
in a later patch. This also allows for the r3k & r8k cases to be handled
in Kconfig, which is arguably cleaner than the previous #ifdefs.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/13210/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

authored by

Paul Burton and committed by
Ralf Baechle
4edf00a4 f1b711c6

+92 -61
+11
arch/mips/Kconfig
··· 2449 2449 config CPU_R4400_WORKAROUNDS 2450 2450 bool 2451 2451 2452 + config MIPS_ASID_SHIFT 2453 + int 2454 + default 6 if CPU_R3000 || CPU_TX39XX 2455 + default 4 if CPU_R8000 2456 + default 0 2457 + 2458 + config MIPS_ASID_BITS 2459 + int 2460 + default 6 if CPU_R3000 || CPU_TX39XX 2461 + default 8 2462 + 2452 2463 # 2453 2464 # - Highmem only makes sense for the 32-bit kernel. 2454 2465 # - The current highmem code will only work properly on physically indexed
+10
arch/mips/include/asm/cpu-info.h
··· 132 132 # define cpu_vpe_id(cpuinfo) ({ (void)cpuinfo; 0; }) 133 133 #endif 134 134 135 + static inline unsigned long cpu_asid_inc(void) 136 + { 137 + return 1 << CONFIG_MIPS_ASID_SHIFT; 138 + } 139 + 140 + static inline unsigned long cpu_asid_mask(struct cpuinfo_mips *cpuinfo) 141 + { 142 + return ((1 << CONFIG_MIPS_ASID_BITS) - 1) << CONFIG_MIPS_ASID_SHIFT; 143 + } 144 + 135 145 #endif /* __ASM_CPU_INFO_H */
+24 -29
arch/mips/include/asm/mmu_context.h
··· 65 65 back_to_back_c0_hazard(); \ 66 66 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) 67 67 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/ 68 - #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) 69 - 70 - #define ASID_INC 0x40 71 - #define ASID_MASK 0xfc0 72 - 73 - #elif defined(CONFIG_CPU_R8000) 74 - 75 - #define ASID_INC 0x10 76 - #define ASID_MASK 0xff0 77 - 78 - #else /* FIXME: not correct for R6000 */ 79 - 80 - #define ASID_INC 0x1 81 - #define ASID_MASK 0xff 82 - 83 - #endif 84 - 85 - #define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) 86 - #define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK) 87 - #define asid_cache(cpu) (cpu_data[cpu].asid_cache) 88 - 89 - static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 90 - { 91 - } 92 68 93 69 /* 94 70 * All unused by hardware upper bits will be considered 95 71 * as a software asid extension. 96 72 */ 97 - #define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1))) 98 - #define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1) 73 + static unsigned long asid_version_mask(unsigned int cpu) 74 + { 75 + unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]); 76 + 77 + return ~(asid_mask | (asid_mask - 1)); 78 + } 79 + 80 + static unsigned long asid_first_version(unsigned int cpu) 81 + { 82 + return ~asid_version_mask(cpu) + 1; 83 + } 84 + 85 + #define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) 86 + #define asid_cache(cpu) (cpu_data[cpu].asid_cache) 87 + #define cpu_asid(cpu, mm) \ 88 + (cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu])) 89 + 90 + static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 91 + { 92 + } 93 + 99 94 100 95 /* Normal, classic MIPS get_new_mmu_context */ 101 96 static inline void ··· 99 104 extern void kvm_local_flush_tlb_all(void); 100 105 unsigned long asid = asid_cache(cpu); 101 106 102 - if (! 
((asid += ASID_INC) & ASID_MASK) ) { 107 + if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) { 103 108 if (cpu_has_vtag_icache) 104 109 flush_icache_all(); 105 110 #ifdef CONFIG_KVM ··· 108 113 local_flush_tlb_all(); /* start new asid cycle */ 109 114 #endif 110 115 if (!asid) /* fix version if needed */ 111 - asid = ASID_FIRST_VERSION; 116 + asid = asid_first_version(cpu); 112 117 } 113 118 114 119 cpu_context(cpu, mm) = asid_cache(cpu) = asid; ··· 140 145 141 146 htw_stop(); 142 147 /* Check if our ASID is of an older version and thus invalid */ 143 - if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) 148 + if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & asid_version_mask(cpu)) 144 149 get_new_mmu_context(next, cpu); 145 150 write_c0_entryhi(cpu_asid(cpu, next)); 146 151 TLBMISS_HANDLER_SETUP_PGD(next->pgd);
+1 -1
arch/mips/kernel/traps.c
··· 2136 2136 } 2137 2137 2138 2138 if (!cpu_data[cpu].asid_cache) 2139 - cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; 2139 + cpu_data[cpu].asid_cache = asid_first_version(cpu); 2140 2140 2141 2141 atomic_inc(&init_mm.mm_count); 2142 2142 current->active_mm = &init_mm;
+19 -11
arch/mips/kvm/tlb.c
··· 49 49 50 50 uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) 51 51 { 52 - return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK; 52 + int cpu = smp_processor_id(); 53 + 54 + return vcpu->arch.guest_kernel_asid[cpu] & 55 + cpu_asid_mask(&cpu_data[cpu]); 53 56 } 54 57 55 58 uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) 56 59 { 57 - return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK; 60 + int cpu = smp_processor_id(); 61 + 62 + return vcpu->arch.guest_user_asid[cpu] & 63 + cpu_asid_mask(&cpu_data[cpu]); 58 64 } 59 65 60 66 inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu) ··· 84 78 old_pagemask = read_c0_pagemask(); 85 79 86 80 kvm_info("HOST TLBs:\n"); 87 - kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK); 81 + kvm_info("ASID: %#lx\n", read_c0_entryhi() & 82 + cpu_asid_mask(&current_cpu_data)); 88 83 89 84 for (i = 0; i < current_cpu_data.tlbsize; i++) { 90 85 write_c0_index(i); ··· 571 564 { 572 565 unsigned long asid = asid_cache(cpu); 573 566 574 - asid += ASID_INC; 575 - if (!(asid & ASID_MASK)) { 567 + asid += cpu_asid_inc(); 568 + if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) { 576 569 if (cpu_has_vtag_icache) 577 570 flush_icache_all(); 578 571 579 572 kvm_local_flush_tlb_all(); /* start new asid cycle */ 580 573 581 574 if (!asid) /* fix version if needed */ 582 - asid = ASID_FIRST_VERSION; 575 + asid = asid_first_version(cpu); 583 576 } 584 577 585 578 cpu_context(cpu, mm) = asid_cache(cpu) = asid; ··· 634 627 /* Restore ASID once we are scheduled back after preemption */ 635 628 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 636 629 { 630 + unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]); 637 631 unsigned long flags; 638 632 int newasid = 0; 639 633 ··· 645 637 local_irq_save(flags); 646 638 647 639 if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) & 648 - ASID_VERSION_MASK) { 640 + asid_version_mask(cpu)) { 649 641 kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu); 650 642 vcpu->arch.guest_kernel_asid[cpu] = 651 643 vcpu->arch.guest_kernel_mm.context.asid[cpu]; ··· 680 672 */ 681 673 if (current->flags & PF_VCPU) { 682 674 write_c0_entryhi(vcpu->arch. 683 - preempt_entryhi & ASID_MASK); 675 + preempt_entryhi & asid_mask); 684 676 ehb(); 685 677 } 686 678 } else { ··· 695 687 if (KVM_GUEST_KERNEL_MODE(vcpu)) 696 688 write_c0_entryhi(vcpu->arch. 697 689 guest_kernel_asid[cpu] & 698 - ASID_MASK); 690 + asid_mask); 699 691 else 700 692 write_c0_entryhi(vcpu->arch. 701 693 guest_user_asid[cpu] & 702 - ASID_MASK); 694 + asid_mask); 703 695 ehb(); 704 696 } 705 697 } ··· 729 721 kvm_mips_callbacks->vcpu_get_regs(vcpu); 730 722 731 723 if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) & 732 - ASID_VERSION_MASK)) { 724 + asid_version_mask(cpu))) { 733 725 kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__, 734 726 cpu_context(cpu, current->mm)); 735 727 drop_mmu_context(current->mm, cpu);
+6 -4
arch/mips/lib/dump_tlb.c
··· 73 73 unsigned long s_entryhi, entryhi, asid; 74 74 unsigned long long entrylo0, entrylo1, pa; 75 75 unsigned int s_index, s_pagemask, pagemask, c0, c1, i; 76 + unsigned long asidmask = cpu_asid_mask(&current_cpu_data); 77 + int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4); 76 78 #ifdef CONFIG_32BIT 77 79 bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA); 78 80 int pwidth = xpa ? 11 : 8; ··· 88 86 s_pagemask = read_c0_pagemask(); 89 87 s_entryhi = read_c0_entryhi(); 90 88 s_index = read_c0_index(); 91 - asid = s_entryhi & 0xff; 89 + asid = s_entryhi & asidmask; 92 90 93 91 for (i = first; i <= last; i++) { 94 92 write_c0_index(i); ··· 117 115 * due to duplicate TLB entry. 118 116 */ 119 117 if (!((entrylo0 | entrylo1) & ENTRYLO_G) && 120 - (entryhi & 0xff) != asid) 118 + (entryhi & asidmask) != asid) 121 119 continue; 122 120 123 121 /* ··· 128 126 c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT; 129 127 c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT; 130 128 131 - printk("va=%0*lx asid=%02lx\n", 129 + printk("va=%0*lx asid=%0*lx\n", 132 130 vwidth, (entryhi & ~0x1fffUL), 133 - entryhi & 0xff); 131 + asidwidth, entryhi & asidmask); 134 132 /* RI/XI are in awkward places, so mask them off separately */ 135 133 pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI); 136 134 if (xpa)
+5 -4
arch/mips/lib/r3k_dump_tlb.c
··· 29 29 { 30 30 int i; 31 31 unsigned int asid; 32 - unsigned long entryhi, entrylo0; 32 + unsigned long entryhi, entrylo0, asid_mask; 33 33 34 - asid = read_c0_entryhi() & ASID_MASK; 34 + asid_mask = cpu_asid_mask(&current_cpu_data); 35 + asid = read_c0_entryhi() & asid_mask; 35 36 36 37 for (i = first; i <= last; i++) { 37 38 write_c0_index(i<<8); ··· 47 46 /* Unused entries have a virtual address of KSEG0. */ 48 47 if ((entryhi & PAGE_MASK) != KSEG0 && 49 48 (entrylo0 & R3K_ENTRYLO_G || 50 - (entryhi & ASID_MASK) == asid)) { 49 + (entryhi & asid_mask) == asid)) { 51 50 /* 52 51 * Only print entries in use 53 52 */ ··· 56 55 printk("va=%08lx asid=%08lx" 57 56 " [pa=%06lx n=%d d=%d v=%d g=%d]", 58 57 entryhi & PAGE_MASK, 59 - entryhi & ASID_MASK, 58 + entryhi & asid_mask, 60 59 entrylo0 & PAGE_MASK, 61 60 (entrylo0 & R3K_ENTRYLO_N) ? 1 : 0, 62 61 (entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
+14 -10
arch/mips/mm/tlb-r3k.c
··· 43 43 { 44 44 unsigned long old_ctx; 45 45 46 - old_ctx = read_c0_entryhi() & ASID_MASK; 46 + old_ctx = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data); 47 47 write_c0_entrylo0(0); 48 48 while (entry < current_cpu_data.tlbsize) { 49 49 write_c0_index(entry << 8); ··· 81 81 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, 82 82 unsigned long end) 83 83 { 84 + unsigned long asid_mask = cpu_asid_mask(&current_cpu_data); 84 85 struct mm_struct *mm = vma->vm_mm; 85 86 int cpu = smp_processor_id(); 86 87 ··· 90 89 91 90 #ifdef DEBUG_TLB 92 91 printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", 93 - cpu_context(cpu, mm) & ASID_MASK, start, end); 92 + cpu_context(cpu, mm) & asid_mask, start, end); 94 93 #endif 95 94 local_irq_save(flags); 96 95 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 97 96 if (size <= current_cpu_data.tlbsize) { 98 - int oldpid = read_c0_entryhi() & ASID_MASK; 99 - int newpid = cpu_context(cpu, mm) & ASID_MASK; 97 + int oldpid = read_c0_entryhi() & asid_mask; 98 + int newpid = cpu_context(cpu, mm) & asid_mask; 100 99 start &= PAGE_MASK; 101 100 end += PAGE_SIZE - 1; ··· 160 159 161 160 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) 162 161 { 162 + unsigned long asid_mask = cpu_asid_mask(&current_cpu_data); 163 163 int cpu = smp_processor_id(); 164 164 165 165 if (cpu_context(cpu, vma->vm_mm) != 0) { ··· 170 168 #ifdef DEBUG_TLB 171 169 printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page); 172 170 #endif 173 - newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK; 171 + newpid = cpu_context(cpu, vma->vm_mm) & asid_mask; 174 172 page &= PAGE_MASK; 175 173 local_irq_save(flags); 176 - oldpid = read_c0_entryhi() & ASID_MASK; 174 + oldpid = read_c0_entryhi() & asid_mask; 177 175 write_c0_entryhi(page | newpid); 178 176 BARRIER; 179 177 tlb_probe(); ··· 192 190 193 191 void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) 194 192 { 193 + unsigned long asid_mask = cpu_asid_mask(&current_cpu_data); 195 194 unsigned long flags; 196 195 int idx, pid; ··· 202 199 if (current->active_mm != vma->vm_mm) 203 200 return; 204 201 205 - pid = read_c0_entryhi() & ASID_MASK; 202 + pid = read_c0_entryhi() & asid_mask; 206 203 207 204 #ifdef DEBUG_TLB 208 - if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) { 205 + if ((pid != (cpu_context(cpu, vma->vm_mm) & asid_mask)) || (cpu_context(cpu, vma->vm_mm) == 0)) { 209 206 printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n", 210 207 (cpu_context(cpu, vma->vm_mm)), pid); 211 208 } ··· 231 228 void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, 232 229 unsigned long entryhi, unsigned long pagemask) 233 230 { 231 + unsigned long asid_mask = cpu_asid_mask(&current_cpu_data); 234 232 unsigned long flags; 235 233 unsigned long old_ctx; 236 234 static unsigned long wired = 0; ··· 247 243 248 244 local_irq_save(flags); 249 245 /* Save old context and create impossible VPN2 value */ 250 - old_ctx = read_c0_entryhi() & ASID_MASK; 246 + old_ctx = read_c0_entryhi() & asid_mask; 251 247 old_pagemask = read_c0_pagemask(); 252 248 w = read_c0_wired(); 253 249 write_c0_wired(w + 1); ··· 270 266 #endif 271 267 272 268 local_irq_save(flags); 273 - old_ctx = read_c0_entryhi() & ASID_MASK; 269 + old_ctx = read_c0_entryhi() & asid_mask; 274 270 write_c0_entrylo0(entrylo0); 275 271 write_c0_entryhi(entryhi); 276 272 write_c0_index(wired);
+1 -1
arch/mips/mm/tlb-r4k.c
··· 304 304 local_irq_save(flags); 305 305 306 306 htw_stop(); 307 - pid = read_c0_entryhi() & ASID_MASK; 307 + pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data); 308 308 address &= (PAGE_MASK << 1); 309 309 write_c0_entryhi(address | pid); 310 310 pgdp = pgd_offset(vma->vm_mm, address);
+1 -1
arch/mips/mm/tlb-r8k.c
··· 194 194 if (current->active_mm != vma->vm_mm) 195 195 return; 196 196 197 - pid = read_c0_entryhi() & ASID_MASK; 197 + pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data); 198 198 199 199 local_irq_save(flags); 200 200 address &= PAGE_MASK;