Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

MIPS: KVM: remove shadow_tlb code

The kvm_mips_init_shadow_tlb() function is called from
kvm_arch_vcpu_init() and initialises entries 0 to
current_cpu_data.tlbsize-1 of the virtual cpu's shadow_tlb[64] array.

However newer cores with FTLBs can have a tlbsize > 64, for example the
ProAptiv I'm testing on has a total tlbsize of 576. This causes
kvm_mips_init_shadow_tlb() to overflow the shadow_tlb[64] array and
overwrite the comparecount_timer among other things, causing a lock up
when starting a KVM guest.

Aside from kvm_mips_init_shadow_tlb() which only initialises it, the
shadow_tlb[64] array is only actually used by the following functions:
- kvm_shadow_tlb_put() & kvm_shadow_tlb_load()
These are never called. The only call sites are #if 0'd out.
- kvm_mips_dump_shadow_tlbs()
This is never called.

It was originally added for trap & emulate, but turned out to be
unnecessary so it was disabled.

So instead of fixing the shadow_tlb initialisation code, let's just
remove the shadow_tlb[64] array and the above functions entirely. The
only functional change here is the removal of broken shadow_tlb
initialisation. The rest just deletes dead code.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: Gleb Natapov <gleb@redhat.com>
Cc: kvm@vger.kernel.org
Cc: Sanjay Lal <sanjayl@kymasys.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: John Crispin <blogic@openwrt.org>
Patchwork: http://patchwork.linux-mips.org/patch/6384/

Authored by James Hogan; committed by Ralf Baechle.
08596b0a e36059e5

-138
-7
arch/mips/include/asm/kvm_host.h
··· 391 391 uint32_t guest_kernel_asid[NR_CPUS]; 392 392 struct mm_struct guest_kernel_mm, guest_user_mm; 393 393 394 - struct kvm_mips_tlb shadow_tlb[NR_CPUS][KVM_MIPS_GUEST_TLB_SIZE]; 395 - 396 - 397 394 struct hrtimer comparecount_timer; 398 395 399 396 int last_sched_cpu; ··· 526 529 527 530 extern void kvm_mips_dump_host_tlbs(void); 528 531 extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu); 529 - extern void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu); 530 532 extern void kvm_mips_flush_host_tlb(int skip_kseg0); 531 533 extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi); 532 534 extern int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index); ··· 537 541 unsigned long gva); 538 542 extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, 539 543 struct kvm_vcpu *vcpu); 540 - extern void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu); 541 - extern void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu); 542 544 extern void kvm_local_flush_tlb_all(void); 543 - extern void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu); 544 545 extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu); 545 546 extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu); 546 547 extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);
-1
arch/mips/kvm/kvm_mips.c
··· 1001 1001 hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC, 1002 1002 HRTIMER_MODE_REL); 1003 1003 vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup; 1004 - kvm_mips_init_shadow_tlb(vcpu); 1005 1004 return 0; 1006 1005 } 1007 1006
-130
arch/mips/kvm/kvm_tlb.c
··· 145 145 } 146 146 } 147 147 148 - void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu) 149 - { 150 - int i; 151 - volatile struct kvm_mips_tlb tlb; 152 - 153 - printk("Shadow TLBs:\n"); 154 - for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { 155 - tlb = vcpu->arch.shadow_tlb[smp_processor_id()][i]; 156 - printk("TLB%c%3d Hi 0x%08lx ", 157 - (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*', 158 - i, tlb.tlb_hi); 159 - printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ", 160 - (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), 161 - (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ', 162 - (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ', 163 - (tlb.tlb_lo0 >> 3) & 7); 164 - printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n", 165 - (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), 166 - (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ', 167 - (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ', 168 - (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); 169 - } 170 - } 171 - 172 148 static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) 173 149 { 174 150 int srcu_idx, err = 0; ··· 631 655 cpu_context(cpu, mm) = asid_cache(cpu) = asid; 632 656 } 633 657 634 - void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu) 635 - { 636 - unsigned long flags; 637 - unsigned long old_entryhi; 638 - unsigned long old_pagemask; 639 - int entry = 0; 640 - int cpu = smp_processor_id(); 641 - 642 - local_irq_save(flags); 643 - 644 - old_entryhi = read_c0_entryhi(); 645 - old_pagemask = read_c0_pagemask(); 646 - 647 - for (entry = 0; entry < current_cpu_data.tlbsize; entry++) { 648 - write_c0_index(entry); 649 - mtc0_tlbw_hazard(); 650 - tlb_read(); 651 - tlbw_use_hazard(); 652 - 653 - vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = read_c0_entryhi(); 654 - vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = read_c0_entrylo0(); 655 - vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = read_c0_entrylo1(); 656 - vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = read_c0_pagemask(); 657 - } 658 - 659 - write_c0_entryhi(old_entryhi); 660 - write_c0_pagemask(old_pagemask); 661 - 
mtc0_tlbw_hazard(); 662 - 663 - local_irq_restore(flags); 664 - 665 - } 666 - 667 - void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu) 668 - { 669 - unsigned long flags; 670 - unsigned long old_ctx; 671 - int entry; 672 - int cpu = smp_processor_id(); 673 - 674 - local_irq_save(flags); 675 - 676 - old_ctx = read_c0_entryhi(); 677 - 678 - for (entry = 0; entry < current_cpu_data.tlbsize; entry++) { 679 - write_c0_entryhi(vcpu->arch.shadow_tlb[cpu][entry].tlb_hi); 680 - mtc0_tlbw_hazard(); 681 - write_c0_entrylo0(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0); 682 - write_c0_entrylo1(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1); 683 - 684 - write_c0_index(entry); 685 - mtc0_tlbw_hazard(); 686 - 687 - tlb_write_indexed(); 688 - tlbw_use_hazard(); 689 - } 690 - 691 - tlbw_use_hazard(); 692 - write_c0_entryhi(old_ctx); 693 - mtc0_tlbw_hazard(); 694 - local_irq_restore(flags); 695 - } 696 - 697 - 698 658 void kvm_local_flush_tlb_all(void) 699 659 { 700 660 unsigned long flags; ··· 657 745 mtc0_tlbw_hazard(); 658 746 659 747 local_irq_restore(flags); 660 - } 661 - 662 - void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu) 663 - { 664 - int cpu, entry; 665 - 666 - for_each_possible_cpu(cpu) { 667 - for (entry = 0; entry < current_cpu_data.tlbsize; entry++) { 668 - vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = 669 - UNIQUE_ENTRYHI(entry); 670 - vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = 0x0; 671 - vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = 0x0; 672 - vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = 673 - read_c0_pagemask(); 674 - #ifdef DEBUG 675 - kvm_debug 676 - ("shadow_tlb[%d][%d]: tlb_hi: %#lx, lo0: %#lx, lo1: %#lx\n", 677 - cpu, entry, 678 - vcpu->arch.shadow_tlb[cpu][entry].tlb_hi, 679 - vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0, 680 - vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1); 681 - #endif 682 - } 683 - } 684 748 } 685 749 686 750 /* Restore ASID once we are scheduled back after preemption */ ··· 695 807 kvm_info("[%d->%d]KVM VCPU[%d] switch\n", 696 808 
vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id); 697 809 } 698 - 699 - /* Only reload shadow host TLB if new ASIDs haven't been allocated */ 700 - #if 0 701 - if ((atomic_read(&kvm_mips_instance) > 1) && !newasid) { 702 - kvm_mips_flush_host_tlb(0); 703 - kvm_shadow_tlb_load(vcpu); 704 - } 705 - #endif 706 810 707 811 if (!newasid) { 708 812 /* If we preempted while the guest was executing, then reload the pre-empted ASID */ ··· 740 860 741 861 vcpu->arch.preempt_entryhi = read_c0_entryhi(); 742 862 vcpu->arch.last_sched_cpu = cpu; 743 - 744 - #if 0 745 - if ((atomic_read(&kvm_mips_instance) > 1)) { 746 - kvm_shadow_tlb_put(vcpu); 747 - } 748 - #endif 749 863 750 864 if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) & 751 865 ASID_VERSION_MASK)) { ··· 802 928 } 803 929 804 930 EXPORT_SYMBOL(kvm_local_flush_tlb_all); 805 - EXPORT_SYMBOL(kvm_shadow_tlb_put); 806 931 EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault); 807 932 EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault); 808 - EXPORT_SYMBOL(kvm_mips_init_shadow_tlb); 809 933 EXPORT_SYMBOL(kvm_mips_dump_host_tlbs); 810 934 EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault); 811 935 EXPORT_SYMBOL(kvm_mips_host_tlb_lookup); ··· 811 939 EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup); 812 940 EXPORT_SYMBOL(kvm_mips_host_tlb_inv); 813 941 EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa); 814 - EXPORT_SYMBOL(kvm_shadow_tlb_load); 815 - EXPORT_SYMBOL(kvm_mips_dump_shadow_tlbs); 816 942 EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs); 817 943 EXPORT_SYMBOL(kvm_get_inst); 818 944 EXPORT_SYMBOL(kvm_arch_vcpu_load);