Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: Remove 440 support

The 440 target hasn't been functioning properly for a few releases, and
the fact that I was the only one to fix a very serious bug indicates to
me that nobody else has been using it either.

Furthermore, KVM on 440 is slow to the point of being unusable.

We don't have to carry along completely unused code. Remove 440 and give
us one less thing to worry about.

Signed-off-by: Alexander Graf <agraf@suse.de>

+2 -1204
-2
Documentation/powerpc/00-INDEX
··· 17 17 - Documentation on the firmware assisted dump mechanism "fadump". 18 18 hvcs.txt 19 19 - IBM "Hypervisor Virtual Console Server" Installation Guide 20 - kvm_440.txt 21 - - Various notes on the implementation of KVM for PowerPC 440. 22 20 mpc52xx.txt 23 21 - Linux 2.6.x on MPC52xx family 24 22 pmu-ebb.txt
-41
Documentation/powerpc/kvm_440.txt
··· 1 - Hollis Blanchard <hollisb@us.ibm.com> 2 - 15 Apr 2008 3 - 4 - Various notes on the implementation of KVM for PowerPC 440: 5 - 6 - To enforce isolation, host userspace, guest kernel, and guest userspace all 7 - run at user privilege level. Only the host kernel runs in supervisor mode. 8 - Executing privileged instructions in the guest traps into KVM (in the host 9 - kernel), where we decode and emulate them. Through this technique, unmodified 10 - 440 Linux kernels can be run (slowly) as guests. Future performance work will 11 - focus on reducing the overhead and frequency of these traps. 12 - 13 - The usual code flow is started from userspace invoking an "run" ioctl, which 14 - causes KVM to switch into guest context. We use IVPR to hijack the host 15 - interrupt vectors while running the guest, which allows us to direct all 16 - interrupts to kvmppc_handle_interrupt(). At this point, we could either 17 - - handle the interrupt completely (e.g. emulate "mtspr SPRG0"), or 18 - - let the host interrupt handler run (e.g. when the decrementer fires), or 19 - - return to host userspace (e.g. when the guest performs device MMIO) 20 - 21 - Address spaces: We take advantage of the fact that Linux doesn't use the AS=1 22 - address space (in host or guest), which gives us virtual address space to use 23 - for guest mappings. While the guest is running, the host kernel remains mapped 24 - in AS=0, but the guest can only use AS=1 mappings. 25 - 26 - TLB entries: The TLB entries covering the host linear mapping remain 27 - present while running the guest. This reduces the overhead of lightweight 28 - exits, which are handled by KVM running in the host kernel. 
We keep three 29 - copies of the TLB: 30 - - guest TLB: contents of the TLB as the guest sees it 31 - - shadow TLB: the TLB that is actually in hardware while guest is running 32 - - host TLB: to restore TLB state when context switching guest -> host 33 - When a TLB miss occurs because a mapping was not present in the shadow TLB, 34 - but was present in the guest TLB, KVM handles the fault without invoking the 35 - guest. Large guest pages are backed by multiple 4KB shadow pages through this 36 - mechanism. 37 - 38 - IO: MMIO and DCR accesses are emulated by userspace. We use virtio for network 39 - and block IO, so those drivers must be enabled in the guest. It's possible 40 - that some qemu device emulation (e.g. e1000 or rtl8139) may also work with 41 - little effort.
+1 -3
arch/powerpc/Kconfig.debug
··· 202 202 203 203 config PPC_EARLY_DEBUG_44x 204 204 bool "Early serial debugging for IBM/AMCC 44x CPUs" 205 - # PPC_EARLY_DEBUG on 440 leaves AS=1 mappings above the TLB high water 206 - # mark, which doesn't work with current 440 KVM. 207 - depends on 44x && !KVM 205 + depends on 44x 208 206 help 209 207 Select this to enable early debugging for IBM 44x chips via the 210 208 inbuilt serial port. If you enable this, ensure you set
-1
arch/powerpc/configs/ppc44x_defconfig
··· 127 127 # CONFIG_CRYPTO_ANSI_CPRNG is not set 128 128 # CONFIG_CRYPTO_HW is not set 129 129 CONFIG_VIRTUALIZATION=y 130 - CONFIG_KVM_440=y
-67
arch/powerpc/include/asm/kvm_44x.h
··· 1 - /* 2 - * This program is free software; you can redistribute it and/or modify 3 - * it under the terms of the GNU General Public License, version 2, as 4 - * published by the Free Software Foundation. 5 - * 6 - * This program is distributed in the hope that it will be useful, 7 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 8 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 9 - * GNU General Public License for more details. 10 - * 11 - * You should have received a copy of the GNU General Public License 12 - * along with this program; if not, write to the Free Software 13 - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 14 - * 15 - * Copyright IBM Corp. 2008 16 - * 17 - * Authors: Hollis Blanchard <hollisb@us.ibm.com> 18 - */ 19 - 20 - #ifndef __ASM_44X_H__ 21 - #define __ASM_44X_H__ 22 - 23 - #include <linux/kvm_host.h> 24 - 25 - #define PPC44x_TLB_SIZE 64 26 - 27 - /* If the guest is expecting it, this can be as large as we like; we'd just 28 - * need to find some way of advertising it. */ 29 - #define KVM44x_GUEST_TLB_SIZE 64 30 - 31 - struct kvmppc_44x_tlbe { 32 - u32 tid; /* Only the low 8 bits are used. */ 33 - u32 word0; 34 - u32 word1; 35 - u32 word2; 36 - }; 37 - 38 - struct kvmppc_44x_shadow_ref { 39 - struct page *page; 40 - u16 gtlb_index; 41 - u8 writeable; 42 - u8 tid; 43 - }; 44 - 45 - struct kvmppc_vcpu_44x { 46 - /* Unmodified copy of the guest's TLB. */ 47 - struct kvmppc_44x_tlbe guest_tlb[KVM44x_GUEST_TLB_SIZE]; 48 - 49 - /* References to guest pages in the hardware TLB. */ 50 - struct kvmppc_44x_shadow_ref shadow_refs[PPC44x_TLB_SIZE]; 51 - 52 - /* State of the shadow TLB at guest context switch time. 
*/ 53 - struct kvmppc_44x_tlbe shadow_tlb[PPC44x_TLB_SIZE]; 54 - u8 shadow_tlb_mod[PPC44x_TLB_SIZE]; 55 - 56 - struct kvm_vcpu vcpu; 57 - }; 58 - 59 - static inline struct kvmppc_vcpu_44x *to_44x(struct kvm_vcpu *vcpu) 60 - { 61 - return container_of(vcpu, struct kvmppc_vcpu_44x, vcpu); 62 - } 63 - 64 - void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu); 65 - void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu); 66 - 67 - #endif /* __ASM_44X_H__ */
-1
arch/powerpc/include/asm/kvm_asm.h
··· 33 33 /* IVPR must be 64KiB-aligned. */ 34 34 #define VCPU_SIZE_ORDER 4 35 35 #define VCPU_SIZE_LOG (VCPU_SIZE_ORDER + 12) 36 - #define VCPU_TLB_PGSZ PPC44x_TLB_64K 37 36 #define VCPU_SIZE_BYTES (1<<VCPU_SIZE_LOG) 38 37 39 38 #define BOOKE_INTERRUPT_CRITICAL 0
-3
arch/powerpc/include/asm/kvm_host.h
··· 49 49 #define KVM_NR_IRQCHIPS 1 50 50 #define KVM_IRQCHIP_NUM_PINS 256 51 51 52 - #if !defined(CONFIG_KVM_440) 53 52 #include <linux/mmu_notifier.h> 54 53 55 54 #define KVM_ARCH_WANT_MMU_NOTIFIER ··· 60 61 extern int kvm_age_hva(struct kvm *kvm, unsigned long hva); 61 62 extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); 62 63 extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); 63 - 64 - #endif 65 64 66 65 #define HPTEG_CACHE_NUM (1 << 15) 67 66 #define HPTEG_HASH_BITS_PTE 13
-237
arch/powerpc/kvm/44x.c
··· 1 - /* 2 - * This program is free software; you can redistribute it and/or modify 3 - * it under the terms of the GNU General Public License, version 2, as 4 - * published by the Free Software Foundation. 5 - * 6 - * This program is distributed in the hope that it will be useful, 7 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 8 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 9 - * GNU General Public License for more details. 10 - * 11 - * You should have received a copy of the GNU General Public License 12 - * along with this program; if not, write to the Free Software 13 - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 14 - * 15 - * Copyright IBM Corp. 2008 16 - * 17 - * Authors: Hollis Blanchard <hollisb@us.ibm.com> 18 - */ 19 - 20 - #include <linux/kvm_host.h> 21 - #include <linux/slab.h> 22 - #include <linux/err.h> 23 - #include <linux/export.h> 24 - #include <linux/module.h> 25 - #include <linux/miscdevice.h> 26 - 27 - #include <asm/reg.h> 28 - #include <asm/cputable.h> 29 - #include <asm/tlbflush.h> 30 - #include <asm/kvm_44x.h> 31 - #include <asm/kvm_ppc.h> 32 - 33 - #include "44x_tlb.h" 34 - #include "booke.h" 35 - 36 - static void kvmppc_core_vcpu_load_44x(struct kvm_vcpu *vcpu, int cpu) 37 - { 38 - kvmppc_booke_vcpu_load(vcpu, cpu); 39 - kvmppc_44x_tlb_load(vcpu); 40 - } 41 - 42 - static void kvmppc_core_vcpu_put_44x(struct kvm_vcpu *vcpu) 43 - { 44 - kvmppc_44x_tlb_put(vcpu); 45 - kvmppc_booke_vcpu_put(vcpu); 46 - } 47 - 48 - int kvmppc_core_check_processor_compat(void) 49 - { 50 - int r; 51 - 52 - if (strncmp(cur_cpu_spec->platform, "ppc440", 6) == 0) 53 - r = 0; 54 - else 55 - r = -ENOTSUPP; 56 - 57 - return r; 58 - } 59 - 60 - int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) 61 - { 62 - struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); 63 - struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[0]; 64 - int i; 65 - 66 - tlbe->tid = 0; 67 - tlbe->word0 = PPC44x_TLB_16M | 
PPC44x_TLB_VALID; 68 - tlbe->word1 = 0; 69 - tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR; 70 - 71 - tlbe++; 72 - tlbe->tid = 0; 73 - tlbe->word0 = 0xef600000 | PPC44x_TLB_4K | PPC44x_TLB_VALID; 74 - tlbe->word1 = 0xef600000; 75 - tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR 76 - | PPC44x_TLB_I | PPC44x_TLB_G; 77 - 78 - /* Since the guest can directly access the timebase, it must know the 79 - * real timebase frequency. Accordingly, it must see the state of 80 - * CCR1[TCS]. */ 81 - /* XXX CCR1 doesn't exist on all 440 SoCs. */ 82 - vcpu->arch.ccr1 = mfspr(SPRN_CCR1); 83 - 84 - for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) 85 - vcpu_44x->shadow_refs[i].gtlb_index = -1; 86 - 87 - vcpu->arch.cpu_type = KVM_CPU_440; 88 - vcpu->arch.pvr = mfspr(SPRN_PVR); 89 - 90 - return 0; 91 - } 92 - 93 - /* 'linear_address' is actually an encoding of AS|PID|EADDR . */ 94 - int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, 95 - struct kvm_translation *tr) 96 - { 97 - int index; 98 - gva_t eaddr; 99 - u8 pid; 100 - u8 as; 101 - 102 - eaddr = tr->linear_address; 103 - pid = (tr->linear_address >> 32) & 0xff; 104 - as = (tr->linear_address >> 40) & 0x1; 105 - 106 - index = kvmppc_44x_tlb_index(vcpu, eaddr, pid, as); 107 - if (index == -1) { 108 - tr->valid = 0; 109 - return 0; 110 - } 111 - 112 - tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr); 113 - /* XXX what does "writeable" and "usermode" even mean? 
*/ 114 - tr->valid = 1; 115 - 116 - return 0; 117 - } 118 - 119 - static int kvmppc_core_get_sregs_44x(struct kvm_vcpu *vcpu, 120 - struct kvm_sregs *sregs) 121 - { 122 - return kvmppc_get_sregs_ivor(vcpu, sregs); 123 - } 124 - 125 - static int kvmppc_core_set_sregs_44x(struct kvm_vcpu *vcpu, 126 - struct kvm_sregs *sregs) 127 - { 128 - return kvmppc_set_sregs_ivor(vcpu, sregs); 129 - } 130 - 131 - static int kvmppc_get_one_reg_44x(struct kvm_vcpu *vcpu, u64 id, 132 - union kvmppc_one_reg *val) 133 - { 134 - return -EINVAL; 135 - } 136 - 137 - static int kvmppc_set_one_reg_44x(struct kvm_vcpu *vcpu, u64 id, 138 - union kvmppc_one_reg *val) 139 - { 140 - return -EINVAL; 141 - } 142 - 143 - static struct kvm_vcpu *kvmppc_core_vcpu_create_44x(struct kvm *kvm, 144 - unsigned int id) 145 - { 146 - struct kvmppc_vcpu_44x *vcpu_44x; 147 - struct kvm_vcpu *vcpu; 148 - int err; 149 - 150 - vcpu_44x = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); 151 - if (!vcpu_44x) { 152 - err = -ENOMEM; 153 - goto out; 154 - } 155 - 156 - vcpu = &vcpu_44x->vcpu; 157 - err = kvm_vcpu_init(vcpu, kvm, id); 158 - if (err) 159 - goto free_vcpu; 160 - 161 - vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO); 162 - if (!vcpu->arch.shared) 163 - goto uninit_vcpu; 164 - 165 - return vcpu; 166 - 167 - uninit_vcpu: 168 - kvm_vcpu_uninit(vcpu); 169 - free_vcpu: 170 - kmem_cache_free(kvm_vcpu_cache, vcpu_44x); 171 - out: 172 - return ERR_PTR(err); 173 - } 174 - 175 - static void kvmppc_core_vcpu_free_44x(struct kvm_vcpu *vcpu) 176 - { 177 - struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); 178 - 179 - free_page((unsigned long)vcpu->arch.shared); 180 - kvm_vcpu_uninit(vcpu); 181 - kmem_cache_free(kvm_vcpu_cache, vcpu_44x); 182 - } 183 - 184 - static int kvmppc_core_init_vm_44x(struct kvm *kvm) 185 - { 186 - return 0; 187 - } 188 - 189 - static void kvmppc_core_destroy_vm_44x(struct kvm *kvm) 190 - { 191 - } 192 - 193 - static struct kvmppc_ops kvm_ops_44x = { 194 - .get_sregs = 
kvmppc_core_get_sregs_44x, 195 - .set_sregs = kvmppc_core_set_sregs_44x, 196 - .get_one_reg = kvmppc_get_one_reg_44x, 197 - .set_one_reg = kvmppc_set_one_reg_44x, 198 - .vcpu_load = kvmppc_core_vcpu_load_44x, 199 - .vcpu_put = kvmppc_core_vcpu_put_44x, 200 - .vcpu_create = kvmppc_core_vcpu_create_44x, 201 - .vcpu_free = kvmppc_core_vcpu_free_44x, 202 - .mmu_destroy = kvmppc_mmu_destroy_44x, 203 - .init_vm = kvmppc_core_init_vm_44x, 204 - .destroy_vm = kvmppc_core_destroy_vm_44x, 205 - .emulate_op = kvmppc_core_emulate_op_44x, 206 - .emulate_mtspr = kvmppc_core_emulate_mtspr_44x, 207 - .emulate_mfspr = kvmppc_core_emulate_mfspr_44x, 208 - }; 209 - 210 - static int __init kvmppc_44x_init(void) 211 - { 212 - int r; 213 - 214 - r = kvmppc_booke_init(); 215 - if (r) 216 - goto err_out; 217 - 218 - r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_44x), 0, THIS_MODULE); 219 - if (r) 220 - goto err_out; 221 - kvm_ops_44x.owner = THIS_MODULE; 222 - kvmppc_pr_ops = &kvm_ops_44x; 223 - 224 - err_out: 225 - return r; 226 - } 227 - 228 - static void __exit kvmppc_44x_exit(void) 229 - { 230 - kvmppc_pr_ops = NULL; 231 - kvmppc_booke_exit(); 232 - } 233 - 234 - module_init(kvmppc_44x_init); 235 - module_exit(kvmppc_44x_exit); 236 - MODULE_ALIAS_MISCDEV(KVM_MINOR); 237 - MODULE_ALIAS("devname:kvm");
-194
arch/powerpc/kvm/44x_emulate.c
··· 1 - /* 2 - * This program is free software; you can redistribute it and/or modify 3 - * it under the terms of the GNU General Public License, version 2, as 4 - * published by the Free Software Foundation. 5 - * 6 - * This program is distributed in the hope that it will be useful, 7 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 8 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 9 - * GNU General Public License for more details. 10 - * 11 - * You should have received a copy of the GNU General Public License 12 - * along with this program; if not, write to the Free Software 13 - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 14 - * 15 - * Copyright IBM Corp. 2008 16 - * 17 - * Authors: Hollis Blanchard <hollisb@us.ibm.com> 18 - */ 19 - 20 - #include <asm/kvm_ppc.h> 21 - #include <asm/dcr.h> 22 - #include <asm/dcr-regs.h> 23 - #include <asm/disassemble.h> 24 - #include <asm/kvm_44x.h> 25 - #include "timing.h" 26 - 27 - #include "booke.h" 28 - #include "44x_tlb.h" 29 - 30 - #define XOP_MFDCRX 259 31 - #define XOP_MFDCR 323 32 - #define XOP_MTDCRX 387 33 - #define XOP_MTDCR 451 34 - #define XOP_TLBSX 914 35 - #define XOP_ICCCI 966 36 - #define XOP_TLBWE 978 37 - 38 - static int emulate_mtdcr(struct kvm_vcpu *vcpu, int rs, int dcrn) 39 - { 40 - /* emulate some access in kernel */ 41 - switch (dcrn) { 42 - case DCRN_CPR0_CONFIG_ADDR: 43 - vcpu->arch.cpr0_cfgaddr = kvmppc_get_gpr(vcpu, rs); 44 - return EMULATE_DONE; 45 - default: 46 - vcpu->run->dcr.dcrn = dcrn; 47 - vcpu->run->dcr.data = kvmppc_get_gpr(vcpu, rs); 48 - vcpu->run->dcr.is_write = 1; 49 - vcpu->arch.dcr_is_write = 1; 50 - vcpu->arch.dcr_needed = 1; 51 - kvmppc_account_exit(vcpu, DCR_EXITS); 52 - return EMULATE_DO_DCR; 53 - } 54 - } 55 - 56 - static int emulate_mfdcr(struct kvm_vcpu *vcpu, int rt, int dcrn) 57 - { 58 - /* The guest may access CPR0 registers to determine the timebase 59 - * frequency, and it must know the real host frequency 
because it 60 - * can directly access the timebase registers. 61 - * 62 - * It would be possible to emulate those accesses in userspace, 63 - * but userspace can really only figure out the end frequency. 64 - * We could decompose that into the factors that compute it, but 65 - * that's tricky math, and it's easier to just report the real 66 - * CPR0 values. 67 - */ 68 - switch (dcrn) { 69 - case DCRN_CPR0_CONFIG_ADDR: 70 - kvmppc_set_gpr(vcpu, rt, vcpu->arch.cpr0_cfgaddr); 71 - break; 72 - case DCRN_CPR0_CONFIG_DATA: 73 - local_irq_disable(); 74 - mtdcr(DCRN_CPR0_CONFIG_ADDR, 75 - vcpu->arch.cpr0_cfgaddr); 76 - kvmppc_set_gpr(vcpu, rt, 77 - mfdcr(DCRN_CPR0_CONFIG_DATA)); 78 - local_irq_enable(); 79 - break; 80 - default: 81 - vcpu->run->dcr.dcrn = dcrn; 82 - vcpu->run->dcr.data = 0; 83 - vcpu->run->dcr.is_write = 0; 84 - vcpu->arch.dcr_is_write = 0; 85 - vcpu->arch.io_gpr = rt; 86 - vcpu->arch.dcr_needed = 1; 87 - kvmppc_account_exit(vcpu, DCR_EXITS); 88 - return EMULATE_DO_DCR; 89 - } 90 - 91 - return EMULATE_DONE; 92 - } 93 - 94 - int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu, 95 - unsigned int inst, int *advance) 96 - { 97 - int emulated = EMULATE_DONE; 98 - int dcrn = get_dcrn(inst); 99 - int ra = get_ra(inst); 100 - int rb = get_rb(inst); 101 - int rc = get_rc(inst); 102 - int rs = get_rs(inst); 103 - int rt = get_rt(inst); 104 - int ws = get_ws(inst); 105 - 106 - switch (get_op(inst)) { 107 - case 31: 108 - switch (get_xop(inst)) { 109 - 110 - case XOP_MFDCR: 111 - emulated = emulate_mfdcr(vcpu, rt, dcrn); 112 - break; 113 - 114 - case XOP_MFDCRX: 115 - emulated = emulate_mfdcr(vcpu, rt, 116 - kvmppc_get_gpr(vcpu, ra)); 117 - break; 118 - 119 - case XOP_MTDCR: 120 - emulated = emulate_mtdcr(vcpu, rs, dcrn); 121 - break; 122 - 123 - case XOP_MTDCRX: 124 - emulated = emulate_mtdcr(vcpu, rs, 125 - kvmppc_get_gpr(vcpu, ra)); 126 - break; 127 - 128 - case XOP_TLBWE: 129 - emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws); 130 - break; 
131 - 132 - case XOP_TLBSX: 133 - emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc); 134 - break; 135 - 136 - case XOP_ICCCI: 137 - break; 138 - 139 - default: 140 - emulated = EMULATE_FAIL; 141 - } 142 - 143 - break; 144 - 145 - default: 146 - emulated = EMULATE_FAIL; 147 - } 148 - 149 - if (emulated == EMULATE_FAIL) 150 - emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance); 151 - 152 - return emulated; 153 - } 154 - 155 - int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong spr_val) 156 - { 157 - int emulated = EMULATE_DONE; 158 - 159 - switch (sprn) { 160 - case SPRN_PID: 161 - kvmppc_set_pid(vcpu, spr_val); break; 162 - case SPRN_MMUCR: 163 - vcpu->arch.mmucr = spr_val; break; 164 - case SPRN_CCR0: 165 - vcpu->arch.ccr0 = spr_val; break; 166 - case SPRN_CCR1: 167 - vcpu->arch.ccr1 = spr_val; break; 168 - default: 169 - emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val); 170 - } 171 - 172 - return emulated; 173 - } 174 - 175 - int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val) 176 - { 177 - int emulated = EMULATE_DONE; 178 - 179 - switch (sprn) { 180 - case SPRN_PID: 181 - *spr_val = vcpu->arch.pid; break; 182 - case SPRN_MMUCR: 183 - *spr_val = vcpu->arch.mmucr; break; 184 - case SPRN_CCR0: 185 - *spr_val = vcpu->arch.ccr0; break; 186 - case SPRN_CCR1: 187 - *spr_val = vcpu->arch.ccr1; break; 188 - default: 189 - emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val); 190 - } 191 - 192 - return emulated; 193 - } 194 -
-528
arch/powerpc/kvm/44x_tlb.c
··· 1 - /* 2 - * This program is free software; you can redistribute it and/or modify 3 - * it under the terms of the GNU General Public License, version 2, as 4 - * published by the Free Software Foundation. 5 - * 6 - * This program is distributed in the hope that it will be useful, 7 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 8 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 9 - * GNU General Public License for more details. 10 - * 11 - * You should have received a copy of the GNU General Public License 12 - * along with this program; if not, write to the Free Software 13 - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 14 - * 15 - * Copyright IBM Corp. 2007 16 - * 17 - * Authors: Hollis Blanchard <hollisb@us.ibm.com> 18 - */ 19 - 20 - #include <linux/types.h> 21 - #include <linux/string.h> 22 - #include <linux/kvm.h> 23 - #include <linux/kvm_host.h> 24 - #include <linux/highmem.h> 25 - 26 - #include <asm/tlbflush.h> 27 - #include <asm/mmu-44x.h> 28 - #include <asm/kvm_ppc.h> 29 - #include <asm/kvm_44x.h> 30 - #include "timing.h" 31 - 32 - #include "44x_tlb.h" 33 - #include "trace.h" 34 - 35 - #ifndef PPC44x_TLBE_SIZE 36 - #define PPC44x_TLBE_SIZE PPC44x_TLB_4K 37 - #endif 38 - 39 - #define PAGE_SIZE_4K (1<<12) 40 - #define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1)) 41 - 42 - #define PPC44x_TLB_UATTR_MASK \ 43 - (PPC44x_TLB_U0|PPC44x_TLB_U1|PPC44x_TLB_U2|PPC44x_TLB_U3) 44 - #define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW) 45 - #define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW) 46 - 47 - #ifdef DEBUG 48 - void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu) 49 - { 50 - struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); 51 - struct kvmppc_44x_tlbe *tlbe; 52 - int i; 53 - 54 - printk("vcpu %d TLB dump:\n", vcpu->vcpu_id); 55 - printk("| %2s | %3s | %8s | %8s | %8s |\n", 56 - "nr", "tid", "word0", "word1", "word2"); 57 - 58 - for (i = 0; i < 
ARRAY_SIZE(vcpu_44x->guest_tlb); i++) { 59 - tlbe = &vcpu_44x->guest_tlb[i]; 60 - if (tlbe->word0 & PPC44x_TLB_VALID) 61 - printk(" G%2d | %02X | %08X | %08X | %08X |\n", 62 - i, tlbe->tid, tlbe->word0, tlbe->word1, 63 - tlbe->word2); 64 - } 65 - } 66 - #endif 67 - 68 - static inline void kvmppc_44x_tlbie(unsigned int index) 69 - { 70 - /* 0 <= index < 64, so the V bit is clear and we can use the index as 71 - * word0. */ 72 - asm volatile( 73 - "tlbwe %[index], %[index], 0\n" 74 - : 75 - : [index] "r"(index) 76 - ); 77 - } 78 - 79 - static inline void kvmppc_44x_tlbre(unsigned int index, 80 - struct kvmppc_44x_tlbe *tlbe) 81 - { 82 - asm volatile( 83 - "tlbre %[word0], %[index], 0\n" 84 - "mfspr %[tid], %[sprn_mmucr]\n" 85 - "andi. %[tid], %[tid], 0xff\n" 86 - "tlbre %[word1], %[index], 1\n" 87 - "tlbre %[word2], %[index], 2\n" 88 - : [word0] "=r"(tlbe->word0), 89 - [word1] "=r"(tlbe->word1), 90 - [word2] "=r"(tlbe->word2), 91 - [tid] "=r"(tlbe->tid) 92 - : [index] "r"(index), 93 - [sprn_mmucr] "i"(SPRN_MMUCR) 94 - : "cc" 95 - ); 96 - } 97 - 98 - static inline void kvmppc_44x_tlbwe(unsigned int index, 99 - struct kvmppc_44x_tlbe *stlbe) 100 - { 101 - unsigned long tmp; 102 - 103 - asm volatile( 104 - "mfspr %[tmp], %[sprn_mmucr]\n" 105 - "rlwimi %[tmp], %[tid], 0, 0xff\n" 106 - "mtspr %[sprn_mmucr], %[tmp]\n" 107 - "tlbwe %[word0], %[index], 0\n" 108 - "tlbwe %[word1], %[index], 1\n" 109 - "tlbwe %[word2], %[index], 2\n" 110 - : [tmp] "=&r"(tmp) 111 - : [word0] "r"(stlbe->word0), 112 - [word1] "r"(stlbe->word1), 113 - [word2] "r"(stlbe->word2), 114 - [tid] "r"(stlbe->tid), 115 - [index] "r"(index), 116 - [sprn_mmucr] "i"(SPRN_MMUCR) 117 - ); 118 - } 119 - 120 - static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode) 121 - { 122 - /* We only care about the guest's permission and user bits. 
*/ 123 - attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_UATTR_MASK; 124 - 125 - if (!usermode) { 126 - /* Guest is in supervisor mode, so we need to translate guest 127 - * supervisor permissions into user permissions. */ 128 - attrib &= ~PPC44x_TLB_USER_PERM_MASK; 129 - attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3; 130 - } 131 - 132 - /* Make sure host can always access this memory. */ 133 - attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW; 134 - 135 - /* WIMGE = 0b00100 */ 136 - attrib |= PPC44x_TLB_M; 137 - 138 - return attrib; 139 - } 140 - 141 - /* Load shadow TLB back into hardware. */ 142 - void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu) 143 - { 144 - struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); 145 - int i; 146 - 147 - for (i = 0; i <= tlb_44x_hwater; i++) { 148 - struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i]; 149 - 150 - if (get_tlb_v(stlbe) && get_tlb_ts(stlbe)) 151 - kvmppc_44x_tlbwe(i, stlbe); 152 - } 153 - } 154 - 155 - static void kvmppc_44x_tlbe_set_modified(struct kvmppc_vcpu_44x *vcpu_44x, 156 - unsigned int i) 157 - { 158 - vcpu_44x->shadow_tlb_mod[i] = 1; 159 - } 160 - 161 - /* Save hardware TLB to the vcpu, and invalidate all guest mappings. */ 162 - void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu) 163 - { 164 - struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); 165 - int i; 166 - 167 - for (i = 0; i <= tlb_44x_hwater; i++) { 168 - struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i]; 169 - 170 - if (vcpu_44x->shadow_tlb_mod[i]) 171 - kvmppc_44x_tlbre(i, stlbe); 172 - 173 - if (get_tlb_v(stlbe) && get_tlb_ts(stlbe)) 174 - kvmppc_44x_tlbie(i); 175 - } 176 - } 177 - 178 - 179 - /* Search the guest TLB for a matching entry. */ 180 - int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid, 181 - unsigned int as) 182 - { 183 - struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); 184 - int i; 185 - 186 - /* XXX Replace loop with fancy data structures. 
*/ 187 - for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) { 188 - struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i]; 189 - unsigned int tid; 190 - 191 - if (eaddr < get_tlb_eaddr(tlbe)) 192 - continue; 193 - 194 - if (eaddr > get_tlb_end(tlbe)) 195 - continue; 196 - 197 - tid = get_tlb_tid(tlbe); 198 - if (tid && (tid != pid)) 199 - continue; 200 - 201 - if (!get_tlb_v(tlbe)) 202 - continue; 203 - 204 - if (get_tlb_ts(tlbe) != as) 205 - continue; 206 - 207 - return i; 208 - } 209 - 210 - return -1; 211 - } 212 - 213 - gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index, 214 - gva_t eaddr) 215 - { 216 - struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); 217 - struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index]; 218 - unsigned int pgmask = get_tlb_bytes(gtlbe) - 1; 219 - 220 - return get_tlb_raddr(gtlbe) | (eaddr & pgmask); 221 - } 222 - 223 - int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) 224 - { 225 - unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); 226 - 227 - return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); 228 - } 229 - 230 - int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) 231 - { 232 - unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS); 233 - 234 - return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); 235 - } 236 - 237 - void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu) 238 - { 239 - } 240 - 241 - void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu) 242 - { 243 - } 244 - 245 - static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x, 246 - unsigned int stlb_index) 247 - { 248 - struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[stlb_index]; 249 - 250 - if (!ref->page) 251 - return; 252 - 253 - /* Discard from the TLB. */ 254 - /* Note: we could actually invalidate a host mapping, if the host overwrote 255 - * this TLB entry since we inserted a guest mapping. */ 256 - kvmppc_44x_tlbie(stlb_index); 257 - 258 - /* Now release the page. 
*/ 259 - if (ref->writeable) 260 - kvm_release_page_dirty(ref->page); 261 - else 262 - kvm_release_page_clean(ref->page); 263 - 264 - ref->page = NULL; 265 - 266 - /* XXX set tlb_44x_index to stlb_index? */ 267 - 268 - trace_kvm_stlb_inval(stlb_index); 269 - } 270 - 271 - void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu) 272 - { 273 - struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); 274 - int i; 275 - 276 - for (i = 0; i <= tlb_44x_hwater; i++) 277 - kvmppc_44x_shadow_release(vcpu_44x, i); 278 - } 279 - 280 - /** 281 - * kvmppc_mmu_map -- create a host mapping for guest memory 282 - * 283 - * If the guest wanted a larger page than the host supports, only the first 284 - * host page is mapped here and the rest are demand faulted. 285 - * 286 - * If the guest wanted a smaller page than the host page size, we map only the 287 - * guest-size page (i.e. not a full host page mapping). 288 - * 289 - * Caller must ensure that the specified guest TLB entry is safe to insert into 290 - * the shadow TLB. 291 - */ 292 - void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, 293 - unsigned int gtlb_index) 294 - { 295 - struct kvmppc_44x_tlbe stlbe; 296 - struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); 297 - struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index]; 298 - struct kvmppc_44x_shadow_ref *ref; 299 - struct page *new_page; 300 - hpa_t hpaddr; 301 - gfn_t gfn; 302 - u32 asid = gtlbe->tid; 303 - u32 flags = gtlbe->word2; 304 - u32 max_bytes = get_tlb_bytes(gtlbe); 305 - unsigned int victim; 306 - 307 - /* Select TLB entry to clobber. Indirectly guard against races with the TLB 308 - * miss handler by disabling interrupts. */ 309 - local_irq_disable(); 310 - victim = ++tlb_44x_index; 311 - if (victim > tlb_44x_hwater) 312 - victim = 0; 313 - tlb_44x_index = victim; 314 - local_irq_enable(); 315 - 316 - /* Get reference to new page. 
*/ 317 - gfn = gpaddr >> PAGE_SHIFT; 318 - new_page = gfn_to_page(vcpu->kvm, gfn); 319 - if (is_error_page(new_page)) { 320 - printk(KERN_ERR "Couldn't get guest page for gfn %llx!\n", 321 - (unsigned long long)gfn); 322 - return; 323 - } 324 - hpaddr = page_to_phys(new_page); 325 - 326 - /* Invalidate any previous shadow mappings. */ 327 - kvmppc_44x_shadow_release(vcpu_44x, victim); 328 - 329 - /* XXX Make sure (va, size) doesn't overlap any other 330 - * entries. 440x6 user manual says the result would be 331 - * "undefined." */ 332 - 333 - /* XXX what about AS? */ 334 - 335 - /* Force TS=1 for all guest mappings. */ 336 - stlbe.word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS; 337 - 338 - if (max_bytes >= PAGE_SIZE) { 339 - /* Guest mapping is larger than or equal to host page size. We can use 340 - * a "native" host mapping. */ 341 - stlbe.word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE; 342 - } else { 343 - /* Guest mapping is smaller than host page size. We must restrict the 344 - * size of the mapping to be at most the smaller of the two, but for 345 - * simplicity we fall back to a 4K mapping (this is probably what the 346 - * guest is using anyways). */ 347 - stlbe.word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K; 348 - 349 - /* 'hpaddr' is a host page, which is larger than the mapping we're 350 - * inserting here. To compensate, we must add the in-page offset to the 351 - * sub-page. */ 352 - hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K); 353 - } 354 - 355 - stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf); 356 - stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags, 357 - vcpu->arch.shared->msr & MSR_PR); 358 - stlbe.tid = !(asid & 0xff); 359 - 360 - /* Keep track of the reference so we can properly release it later. 
*/ 361 - ref = &vcpu_44x->shadow_refs[victim]; 362 - ref->page = new_page; 363 - ref->gtlb_index = gtlb_index; 364 - ref->writeable = !!(stlbe.word2 & PPC44x_TLB_UW); 365 - ref->tid = stlbe.tid; 366 - 367 - /* Insert shadow mapping into hardware TLB. */ 368 - kvmppc_44x_tlbe_set_modified(vcpu_44x, victim); 369 - kvmppc_44x_tlbwe(victim, &stlbe); 370 - trace_kvm_stlb_write(victim, stlbe.tid, stlbe.word0, stlbe.word1, 371 - stlbe.word2); 372 - } 373 - 374 - /* For a particular guest TLB entry, invalidate the corresponding host TLB 375 - * mappings and release the host pages. */ 376 - static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu, 377 - unsigned int gtlb_index) 378 - { 379 - struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); 380 - int i; 381 - 382 - for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) { 383 - struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i]; 384 - if (ref->gtlb_index == gtlb_index) 385 - kvmppc_44x_shadow_release(vcpu_44x, i); 386 - } 387 - } 388 - 389 - void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr) 390 - { 391 - int usermode = vcpu->arch.shared->msr & MSR_PR; 392 - 393 - vcpu->arch.shadow_pid = !usermode; 394 - } 395 - 396 - void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid) 397 - { 398 - struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); 399 - int i; 400 - 401 - if (unlikely(vcpu->arch.pid == new_pid)) 402 - return; 403 - 404 - vcpu->arch.pid = new_pid; 405 - 406 - /* Guest userspace runs with TID=0 mappings and PID=0, to make sure it 407 - * can't access guest kernel mappings (TID=1). When we switch to a new 408 - * guest PID, which will also use host PID=0, we must discard the old guest 409 - * userspace mappings. 
*/ 410 - for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) { 411 - struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i]; 412 - 413 - if (ref->tid == 0) 414 - kvmppc_44x_shadow_release(vcpu_44x, i); 415 - } 416 - } 417 - 418 - static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu, 419 - const struct kvmppc_44x_tlbe *tlbe) 420 - { 421 - gpa_t gpa; 422 - 423 - if (!get_tlb_v(tlbe)) 424 - return 0; 425 - 426 - /* Does it match current guest AS? */ 427 - /* XXX what about IS != DS? */ 428 - if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS)) 429 - return 0; 430 - 431 - gpa = get_tlb_raddr(tlbe); 432 - if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT)) 433 - /* Mapping is not for RAM. */ 434 - return 0; 435 - 436 - return 1; 437 - } 438 - 439 - int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws) 440 - { 441 - struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); 442 - struct kvmppc_44x_tlbe *tlbe; 443 - unsigned int gtlb_index; 444 - int idx; 445 - 446 - gtlb_index = kvmppc_get_gpr(vcpu, ra); 447 - if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) { 448 - printk("%s: index %d\n", __func__, gtlb_index); 449 - kvmppc_dump_vcpu(vcpu); 450 - return EMULATE_FAIL; 451 - } 452 - 453 - tlbe = &vcpu_44x->guest_tlb[gtlb_index]; 454 - 455 - /* Invalidate shadow mappings for the about-to-be-clobbered TLB entry. 
*/ 456 - if (tlbe->word0 & PPC44x_TLB_VALID) 457 - kvmppc_44x_invalidate(vcpu, gtlb_index); 458 - 459 - switch (ws) { 460 - case PPC44x_TLB_PAGEID: 461 - tlbe->tid = get_mmucr_stid(vcpu); 462 - tlbe->word0 = kvmppc_get_gpr(vcpu, rs); 463 - break; 464 - 465 - case PPC44x_TLB_XLAT: 466 - tlbe->word1 = kvmppc_get_gpr(vcpu, rs); 467 - break; 468 - 469 - case PPC44x_TLB_ATTRIB: 470 - tlbe->word2 = kvmppc_get_gpr(vcpu, rs); 471 - break; 472 - 473 - default: 474 - return EMULATE_FAIL; 475 - } 476 - 477 - idx = srcu_read_lock(&vcpu->kvm->srcu); 478 - 479 - if (tlbe_is_host_safe(vcpu, tlbe)) { 480 - gva_t eaddr; 481 - gpa_t gpaddr; 482 - u32 bytes; 483 - 484 - eaddr = get_tlb_eaddr(tlbe); 485 - gpaddr = get_tlb_raddr(tlbe); 486 - 487 - /* Use the advertised page size to mask effective and real addrs. */ 488 - bytes = get_tlb_bytes(tlbe); 489 - eaddr &= ~(bytes - 1); 490 - gpaddr &= ~(bytes - 1); 491 - 492 - kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index); 493 - } 494 - 495 - srcu_read_unlock(&vcpu->kvm->srcu, idx); 496 - 497 - trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1, 498 - tlbe->word2); 499 - 500 - kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS); 501 - return EMULATE_DONE; 502 - } 503 - 504 - int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc) 505 - { 506 - u32 ea; 507 - int gtlb_index; 508 - unsigned int as = get_mmucr_sts(vcpu); 509 - unsigned int pid = get_mmucr_stid(vcpu); 510 - 511 - ea = kvmppc_get_gpr(vcpu, rb); 512 - if (ra) 513 - ea += kvmppc_get_gpr(vcpu, ra); 514 - 515 - gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as); 516 - if (rc) { 517 - u32 cr = kvmppc_get_cr(vcpu); 518 - 519 - if (gtlb_index < 0) 520 - kvmppc_set_cr(vcpu, cr & ~0x20000000); 521 - else 522 - kvmppc_set_cr(vcpu, cr | 0x20000000); 523 - } 524 - kvmppc_set_gpr(vcpu, rt, gtlb_index); 525 - 526 - kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS); 527 - return EMULATE_DONE; 528 - }
-86
arch/powerpc/kvm/44x_tlb.h
··· 1 - /* 2 - * This program is free software; you can redistribute it and/or modify 3 - * it under the terms of the GNU General Public License, version 2, as 4 - * published by the Free Software Foundation. 5 - * 6 - * This program is distributed in the hope that it will be useful, 7 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 8 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 9 - * GNU General Public License for more details. 10 - * 11 - * You should have received a copy of the GNU General Public License 12 - * along with this program; if not, write to the Free Software 13 - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 14 - * 15 - * Copyright IBM Corp. 2007 16 - * 17 - * Authors: Hollis Blanchard <hollisb@us.ibm.com> 18 - */ 19 - 20 - #ifndef __KVM_POWERPC_TLB_H__ 21 - #define __KVM_POWERPC_TLB_H__ 22 - 23 - #include <linux/kvm_host.h> 24 - #include <asm/mmu-44x.h> 25 - 26 - extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, 27 - unsigned int pid, unsigned int as); 28 - 29 - extern int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, 30 - u8 rc); 31 - extern int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws); 32 - 33 - /* TLB helper functions */ 34 - static inline unsigned int get_tlb_size(const struct kvmppc_44x_tlbe *tlbe) 35 - { 36 - return (tlbe->word0 >> 4) & 0xf; 37 - } 38 - 39 - static inline gva_t get_tlb_eaddr(const struct kvmppc_44x_tlbe *tlbe) 40 - { 41 - return tlbe->word0 & 0xfffffc00; 42 - } 43 - 44 - static inline gva_t get_tlb_bytes(const struct kvmppc_44x_tlbe *tlbe) 45 - { 46 - unsigned int pgsize = get_tlb_size(tlbe); 47 - return 1 << 10 << (pgsize << 1); 48 - } 49 - 50 - static inline gva_t get_tlb_end(const struct kvmppc_44x_tlbe *tlbe) 51 - { 52 - return get_tlb_eaddr(tlbe) + get_tlb_bytes(tlbe) - 1; 53 - } 54 - 55 - static inline u64 get_tlb_raddr(const struct kvmppc_44x_tlbe *tlbe) 56 - { 57 - u64 word1 = tlbe->word1; 
58 - return ((word1 & 0xf) << 32) | (word1 & 0xfffffc00); 59 - } 60 - 61 - static inline unsigned int get_tlb_tid(const struct kvmppc_44x_tlbe *tlbe) 62 - { 63 - return tlbe->tid & 0xff; 64 - } 65 - 66 - static inline unsigned int get_tlb_ts(const struct kvmppc_44x_tlbe *tlbe) 67 - { 68 - return (tlbe->word0 >> 8) & 0x1; 69 - } 70 - 71 - static inline unsigned int get_tlb_v(const struct kvmppc_44x_tlbe *tlbe) 72 - { 73 - return (tlbe->word0 >> 9) & 0x1; 74 - } 75 - 76 - static inline unsigned int get_mmucr_stid(const struct kvm_vcpu *vcpu) 77 - { 78 - return vcpu->arch.mmucr & 0xff; 79 - } 80 - 81 - static inline unsigned int get_mmucr_sts(const struct kvm_vcpu *vcpu) 82 - { 83 - return (vcpu->arch.mmucr >> 16) & 0x1; 84 - } 85 - 86 - #endif /* __KVM_POWERPC_TLB_H__ */
+1 -15
arch/powerpc/kvm/Kconfig
··· 112 112 config KVM_BOOKE_HV 113 113 bool 114 114 115 - config KVM_440 116 - bool "KVM support for PowerPC 440 processors" 117 - depends on 44x 118 - select KVM 119 - select KVM_MMIO 120 - ---help--- 121 - Support running unmodified 440 guest kernels in virtual machines on 122 - 440 host processors. 123 - 124 - This module provides access to the hardware capabilities through 125 - a character device node named /dev/kvm. 126 - 127 - If unsure, say N. 128 - 129 115 config KVM_EXIT_TIMING 130 116 bool "Detailed exit timing" 131 - depends on KVM_440 || KVM_E500V2 || KVM_E500MC 117 + depends on KVM_E500V2 || KVM_E500MC 132 118 ---help--- 133 119 Calculate elapsed time for every exit/enter cycle. A per-vcpu 134 120 report is available in debugfs kvm/vm#_vcpu#_timing.
-12
arch/powerpc/kvm/Makefile
··· 10 10 common-objs-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \ 11 11 $(KVM)/eventfd.o 12 12 13 - CFLAGS_44x_tlb.o := -I. 14 13 CFLAGS_e500_mmu.o := -I. 15 14 CFLAGS_e500_mmu_host.o := -I. 16 15 CFLAGS_emulate.o := -I. ··· 19 20 obj-$(CONFIG_KVM_BOOK3S_HANDLER) += book3s_exports.o 20 21 21 22 AFLAGS_booke_interrupts.o := -I$(obj) 22 - 23 - kvm-440-objs := \ 24 - $(common-objs-y) \ 25 - booke.o \ 26 - booke_emulate.o \ 27 - booke_interrupts.o \ 28 - 44x.o \ 29 - 44x_tlb.o \ 30 - 44x_emulate.o 31 - kvm-objs-$(CONFIG_KVM_440) := $(kvm-440-objs) 32 23 33 24 kvm-e500-objs := \ 34 25 $(common-objs-y) \ ··· 116 127 117 128 kvm-objs := $(kvm-objs-m) $(kvm-objs-y) 118 129 119 - obj-$(CONFIG_KVM_440) += kvm.o 120 130 obj-$(CONFIG_KVM_E500V2) += kvm.o 121 131 obj-$(CONFIG_KVM_E500MC) += kvm.o 122 132 obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
-7
arch/powerpc/kvm/booke.h
··· 99 99 100 100 void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type); 101 101 102 - extern void kvmppc_mmu_destroy_44x(struct kvm_vcpu *vcpu); 103 - extern int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu, 104 - unsigned int inst, int *advance); 105 - extern int kvmppc_core_emulate_mtspr_44x(struct kvm_vcpu *vcpu, int sprn, 106 - ulong spr_val); 107 - extern int kvmppc_core_emulate_mfspr_44x(struct kvm_vcpu *vcpu, int sprn, 108 - ulong *spr_val); 109 102 extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu); 110 103 extern int kvmppc_core_emulate_op_e500(struct kvm_run *run, 111 104 struct kvm_vcpu *vcpu,
-5
arch/powerpc/kvm/booke_interrupts.S
··· 21 21 #include <asm/ppc_asm.h> 22 22 #include <asm/kvm_asm.h> 23 23 #include <asm/reg.h> 24 - #include <asm/mmu-44x.h> 25 24 #include <asm/page.h> 26 25 #include <asm/asm-offsets.h> 27 26 ··· 421 422 #ifdef CONFIG_FSL_BOOKE 422 423 lwz r3, VCPU_SHADOW_PID1(r4) 423 424 mtspr SPRN_PID1, r3 424 - #endif 425 - 426 - #ifdef CONFIG_44x 427 - iccci 0, 0 /* XXX hack */ 428 425 #endif 429 426 430 427 /* Load some guest volatiles. */
-1
arch/powerpc/kvm/bookehv_interrupts.S
··· 24 24 #include <asm/ppc_asm.h> 25 25 #include <asm/kvm_asm.h> 26 26 #include <asm/reg.h> 27 - #include <asm/mmu-44x.h> 28 27 #include <asm/page.h> 29 28 #include <asm/asm-compat.h> 30 29 #include <asm/asm-offsets.h>
-1
arch/powerpc/kvm/powerpc.c
··· 217 217 case KVM_HCALL_TOKEN(KVM_HC_FEATURES): 218 218 r = EV_SUCCESS; 219 219 #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2) 220 - /* XXX Missing magic page on 44x */ 221 220 r2 |= (1 << KVM_FEATURE_MAGIC_PAGE); 222 221 #endif 223 222