Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v3.16-rc7, 1227 lines, 28 kB
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

#include <linux/kvm_host.h>

#include "kvm_mips_int.h"
#include "kvm_mips_comm.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifndef VECTORSPACING
#define VECTORSPACING 0x100	/* for EI/VI mode */
#endif

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "wait", VCPU_STAT(wait_exits) },
	{ "cache", VCPU_STAT(cache_exits) },
	{ "signal", VCPU_STAT(signal_exits) },
	{ "interrupt", VCPU_STAT(int_exits) },
	{ "cop_unsuable", VCPU_STAT(cop_unusable_exits) },
	{ "tlbmod", VCPU_STAT(tlbmod_exits) },
	{ "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) },
	{ "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) },
	{ "addrerr_st", VCPU_STAT(addrerr_st_exits) },
	{ "addrerr_ld", VCPU_STAT(addrerr_ld_exits) },
	{ "syscall", VCPU_STAT(syscall_exits) },
	{ "resvd_inst", VCPU_STAT(resvd_inst_exits) },
	{ "break_inst", VCPU_STAT(break_inst_exits) },
	{ "flush_dcache", VCPU_STAT(flush_dcache_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{NULL}
};

static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
{
	int i;
	for_each_possible_cpu(i) {
		vcpu->arch.guest_kernel_asid[i] = 0;
		vcpu->arch.guest_user_asid[i] = 0;
	}
	return 0;
}

/* XXXKYMA: We are simulating a processor that has the WII bit set in Config7, so we
 * are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.pending_exceptions);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	int *r = (int *)rtn;
	*r = 0;
	return;
}

static void kvm_mips_init_tlbs(struct kvm *kvm)
{
	unsigned long wired;

	/* Add a wired entry to the TLB, it is used to map the commpage to the Guest kernel */
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	mtc0_tlbw_hazard();
	kvm->arch.commpage_tlb = wired;

	kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
		  kvm->arch.commpage_tlb);
}

static void kvm_mips_init_vm_percpu(void *arg)
{
	struct kvm *kvm = (struct kvm *)arg;

	kvm_mips_init_tlbs(kvm);
	kvm_mips_callbacks->vm_init(kvm);

}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (atomic_inc_return(&kvm_mips_instance) == 1) {
		kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n",
			  __func__);
		on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
	}


	return 0;
}

void kvm_mips_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	/* Put the pages we reserved for the guest pmap */
	for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
		if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
			kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
	}
	kfree(kvm->arch.guest_pmap);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_arch_vcpu_free(vcpu);
	}

	mutex_lock(&kvm->lock);

	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

static void kvm_mips_uninit_tlbs(void *arg)
{
	/* Restore wired count */
	write_c0_wired(0);
	mtc0_tlbw_hazard();
	/* Clear out all the TLBs */
	kvm_local_flush_tlb_all();
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_mips_free_vcpus(kvm);

	/* If this is the last instance, restore wired count */
	if (atomic_dec_return(&kvm_mips_instance) == 0) {
		kvm_debug("%s: last KVM instance, restoring TLB parameters\n",
			  __func__);
		on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
	}
}

long
kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	unsigned long npages = 0;
	int i, err = 0;

	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
		  __func__, kvm, mem->slot, mem->guest_phys_addr,
		  mem->memory_size, mem->userspace_addr);

	/* Setup Guest PMAP table */
	if (!kvm->arch.guest_pmap) {
		if (mem->slot == 0)
			npages = mem->memory_size >> PAGE_SHIFT;

		if (npages) {
			kvm->arch.guest_pmap_npages = npages;
			kvm->arch.guest_pmap =
			    kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);

			if (!kvm->arch.guest_pmap) {
				kvm_err("Failed to allocate guest PMAP");
				err = -ENOMEM;
				goto out;
			}

			kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
				  npages, kvm->arch.guest_pmap);

			/* Now setup the page table */
			for (i = 0; i < npages; i++) {
				kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
			}
		}
	}
out:
	return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	extern char mips32_exception[], mips32_exceptionEnd[];
	extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
	int err, size, offset;
	void *gebase;
	int i;

	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);

	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(vcpu, kvm, id);

	if (err)
		goto out_free_cpu;

	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);

	/* Allocate space for host mode exception handlers that handle
	 * guest mode exits
	 */
	if (cpu_has_veic || cpu_has_vint) {
		size = 0x200 + VECTORSPACING * 64;
	} else {
		size = 0x4000;
	}

	/* Save Linux EBASE */
	vcpu->arch.host_ebase = (void *)read_c0_ebase();

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	if (!gebase) {
		err = -ENOMEM;
		goto out_free_cpu;
	}
	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		  ALIGN(size, PAGE_SIZE), gebase);

	/* Save new ebase */
	vcpu->arch.guest_ebase = gebase;

	/* Copy L1 Guest Exception handler to correct offset */

	/* TLB Refill, EXL = 0 */
	memcpy(gebase, mips32_exception,
	       mips32_exceptionEnd - mips32_exception);

	/* General Exception Entry point */
	memcpy(gebase + 0x180, mips32_exception,
	       mips32_exceptionEnd - mips32_exception);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
		       mips32_exceptionEnd - mips32_exception);
	}

	/* General handler, relocate to unmapped space for sanity's sake */
	offset = 0x2000;
	kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n",
		  gebase + offset,
		  mips32_GuestExceptionEnd - mips32_GuestException);

	memcpy(gebase + offset, mips32_GuestException,
	       mips32_GuestExceptionEnd - mips32_GuestException);

	/* Invalidate the icache for these ranges */
	local_flush_icache_range((unsigned long)gebase,
				 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

	/* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */
	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);

	if (!vcpu->arch.kseg0_commpage) {
		err = -ENOMEM;
		goto out_free_gebase;
	}

	kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
	kvm_mips_commpage_init(vcpu);

	/* Init */
	vcpu->arch.last_sched_cpu = -1;

	/* Start off the timer */
	kvm_mips_init_count(vcpu);

	return vcpu;

out_free_gebase:
	kfree(gebase);

out_free_cpu:
	kfree(vcpu);

out:
	return ERR_PTR(err);
}
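/*
 * Summary of the guest exception base (gebase) layout established by
 * kvm_arch_vcpu_create() above, as implied by the memcpy() offsets it uses:
 *
 *   gebase + 0x000                       TLB refill entry (EXL = 0)
 *   gebase + 0x180                       general exception entry point
 *   gebase + 0x200 + i * VECTORSPACING   vectored interrupt entries, i = 0..7
 *   gebase + 0x2000                      KVM guest exception handlers
 *                                        (mips32_GuestException)
 */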
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_vcpu_uninit(vcpu);

	kvm_mips_dump_stats(vcpu);

	kfree(vcpu->arch.guest_ebase);
	kfree(vcpu->arch.kseg0_commpage);
	kfree(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int
kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
				    struct kvm_guest_debug *dbg)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r = 0;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	local_irq_disable();
	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

	kvm_guest_enter();

	r = __kvm_mips_vcpu_run(run, vcpu);

	kvm_guest_exit();
	local_irq_enable();

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int
kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
			  (int)intr);

	if (irq->cpu == -1)
		dvcpu = vcpu;
	else
		dvcpu = vcpu->kvm->vcpus[irq->cpu];

	if (intr == 2 || intr == 3 || intr == 4) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);

	} else if (intr == -2 || intr == -3 || intr == -4) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
	} else {
		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
			irq->cpu, irq->irq);
		return -EINVAL;
	}

	dvcpu->arch.wait = 0;

	if (waitqueue_active(&dvcpu->wq)) {
		wake_up_interruptible(&dvcpu->wq);
	}

	return 0;
}

int
kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

int
kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

static u64 kvm_mips_get_one_regs[] = {
	KVM_REG_MIPS_R0,
	KVM_REG_MIPS_R1,
	KVM_REG_MIPS_R2,
	KVM_REG_MIPS_R3,
	KVM_REG_MIPS_R4,
	KVM_REG_MIPS_R5,
	KVM_REG_MIPS_R6,
	KVM_REG_MIPS_R7,
	KVM_REG_MIPS_R8,
	KVM_REG_MIPS_R9,
	KVM_REG_MIPS_R10,
	KVM_REG_MIPS_R11,
	KVM_REG_MIPS_R12,
	KVM_REG_MIPS_R13,
	KVM_REG_MIPS_R14,
	KVM_REG_MIPS_R15,
	KVM_REG_MIPS_R16,
	KVM_REG_MIPS_R17,
	KVM_REG_MIPS_R18,
	KVM_REG_MIPS_R19,
	KVM_REG_MIPS_R20,
	KVM_REG_MIPS_R21,
	KVM_REG_MIPS_R22,
	KVM_REG_MIPS_R23,
	KVM_REG_MIPS_R24,
	KVM_REG_MIPS_R25,
	KVM_REG_MIPS_R26,
	KVM_REG_MIPS_R27,
	KVM_REG_MIPS_R28,
	KVM_REG_MIPS_R29,
	KVM_REG_MIPS_R30,
	KVM_REG_MIPS_R31,

	KVM_REG_MIPS_HI,
	KVM_REG_MIPS_LO,
	KVM_REG_MIPS_PC,

	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_USERLOCAL,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG7,
	KVM_REG_MIPS_CP0_ERROREPC,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};

static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int ret;
	s64 v;

	switch (reg->id) {
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
		break;
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
		break;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
		break;
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;
		break;

	case KVM_REG_MIPS_CP0_INDEX:
		v = (long)kvm_read_c0_guest_index(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		v = (long)kvm_read_c0_guest_context(cop0);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		v = (long)kvm_read_c0_guest_userlocal(cop0);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		v = (long)kvm_read_c0_guest_pagemask(cop0);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		v = (long)kvm_read_c0_guest_wired(cop0);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		v = (long)kvm_read_c0_guest_hwrena(cop0);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		v = (long)kvm_read_c0_guest_badvaddr(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		v = (long)kvm_read_c0_guest_entryhi(cop0);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		v = (long)kvm_read_c0_guest_compare(cop0);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		v = (long)kvm_read_c0_guest_status(cop0);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		v = (long)kvm_read_c0_guest_cause(cop0);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		v = (long)kvm_read_c0_guest_epc(cop0);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		v = (long)kvm_read_c0_guest_errorepc(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		v = (long)kvm_read_c0_guest_config(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		v = (long)kvm_read_c0_guest_config1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		v = (long)kvm_read_c0_guest_config2(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		v = (long)kvm_read_c0_guest_config3(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		v = (long)kvm_read_c0_guest_config7(cop0);
		break;
	/* registers to be handled specially */
	case KVM_REG_MIPS_CP0_COUNT:
	case KVM_REG_MIPS_COUNT_CTL:
	case KVM_REG_MIPS_COUNT_RESUME:
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 v32 = (u32)v;
		return put_user(v32, uaddr32);
	} else {
		return -EINVAL;
	}
}

static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u64 v;

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)
			return -EFAULT;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		s32 v32;

		if (get_user(v32, uaddr32) != 0)
			return -EFAULT;
		v = (s64)v32;
	} else {
		return -EINVAL;
	}

	switch (reg->id) {
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */
		break;
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
		break;
	case KVM_REG_MIPS_HI:
		vcpu->arch.hi = v;
		break;
	case KVM_REG_MIPS_LO:
		vcpu->arch.lo = v;
		break;
	case KVM_REG_MIPS_PC:
		vcpu->arch.pc = v;
		break;

	case KVM_REG_MIPS_CP0_INDEX:
		kvm_write_c0_guest_index(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		kvm_write_c0_guest_context(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		kvm_write_c0_guest_userlocal(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		kvm_write_c0_guest_pagemask(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		kvm_write_c0_guest_wired(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		kvm_write_c0_guest_hwrena(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		kvm_write_c0_guest_badvaddr(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		kvm_write_c0_guest_entryhi(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		kvm_write_c0_guest_status(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		kvm_write_c0_guest_epc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		kvm_write_c0_guest_errorepc(cop0, v);
		break;
	/* registers to be handled specially */
	case KVM_REG_MIPS_CP0_COUNT:
	case KVM_REG_MIPS_CP0_COMPARE:
	case KVM_REG_MIPS_CP0_CAUSE:
	case KVM_REG_MIPS_COUNT_CTL:
	case KVM_REG_MIPS_COUNT_RESUME:
	case KVM_REG_MIPS_COUNT_HZ:
		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
	default:
		return -EINVAL;
	}
	return 0;
}

long
kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_mips_set_reg(vcpu, &reg);
		else
			return kvm_mips_get_reg(vcpu, &reg);
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		u64 __user *reg_dest;
		struct kvm_reg_list reg_list;
		unsigned n;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		n = reg_list.n;
		reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			return -EFAULT;
		if (n < reg_list.n)
			return -E2BIG;
		reg_dest = user_list->reg;
		if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
				 sizeof(kvm_mips_get_one_regs)))
			return -EFAULT;
		return 0;
	}
	case KVM_NMI:
		/* Treat the NMI as a CPU reset */
		r = kvm_mips_reset_vcpu(vcpu);
		break;
	case KVM_INTERRUPT:
		{
			struct kvm_mips_interrupt irq;
			r = -EFAULT;
			if (copy_from_user(&irq, argp, sizeof(irq)))
				goto out;

			kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
				  irq.irq);

			r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
			break;
		}
	default:
		r = -ENOIOCTLCMD;
	}

out:
	return r;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	unsigned long ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = &kvm->memslots->memslots[log->slot];

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
		       ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;

}

long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -ENOIOCTLCMD;
	}

	return r;
}

int kvm_arch_init(void *opaque)
{
	int ret;

	if (kvm_mips_callbacks) {
		kvm_err("kvm: module already exists\n");
		return -EEXIST;
	}

	ret = kvm_mips_emulation_init(&kvm_mips_callbacks);

	return ret;
}

void kvm_arch_exit(void)
{
	kvm_mips_callbacks = NULL;
}

int
kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int
kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu);
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;
	struct mips_coproc *cop0;

	if (!vcpu)
		return -1;

	printk("VCPU Register Dump:\n");
	printk("\tpc = 0x%08lx\n", vcpu->arch.pc);
	printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       vcpu->arch.gprs[i],
		       vcpu->arch.gprs[i + 1],
		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}
	printk("\thi: 0x%08lx\n", vcpu->arch.hi);
	printk("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
	       kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0));

	printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;

	return 0;
}

static void kvm_mips_comparecount_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
	}
}

/*
 * low level hrtimer wake routine.
 */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
	kvm_mips_comparecount_func((unsigned long) vcpu);
	return kvm_mips_count_timeout(vcpu);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	kvm_mips_callbacks->vcpu_init(vcpu);
	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	return;
}

int
kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)
{
	return 0;
}

/* Initial guest state */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return kvm_mips_callbacks->vcpu_setup(vcpu);
}

static
void kvm_mips_set_c0_status(void)
{
	uint32_t status = read_c0_status();

	if (cpu_has_fpu)
		status |= (ST0_CU1);

	if (cpu_has_dsp)
		status |= (ST0_MX);

	write_c0_status(status);
	ehb();
}

/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	uint32_t cause = vcpu->arch.host_cp0_cause;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* Set the appropriate status bits based on host CPU features, before we hit the scheduler */
	kvm_mips_set_c0_status();

	local_irq_enable();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
		  cause, opc, run, vcpu);

	/* Do a privilege check, if in UM most of these exit conditions end up
	 * causing an exception to be delivered to the Guest Kernel
	 */
	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
	if (er == EMULATE_PRIV_FAIL) {
		goto skip_emul;
	} else if (er == EMULATE_FAIL) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		goto skip_emul;
	}

	switch (exccode) {
	case T_INT:
		kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;
		trace_kvm_exit(vcpu, INT_EXITS);

		if (need_resched()) {
			cond_resched();
		}

		ret = RESUME_GUEST;
		break;

	case T_COP_UNUSABLE:
		kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) {
			ret = RESUME_HOST;
		}
		break;

	case T_TLB_MOD:
		++vcpu->stat.tlbmod_exits;
		trace_kvm_exit(vcpu, TLBMOD_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
		break;

	case T_TLB_ST_MISS:
		kvm_debug
		    ("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
		     cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
		     badvaddr);

		++vcpu->stat.tlbmiss_st_exits;
		trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
		break;

	case T_TLB_LD_MISS:
		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
		break;

	case T_ADDR_ERR_ST:
		++vcpu->stat.addrerr_st_exits;
		trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
		break;

	case T_ADDR_ERR_LD:
		++vcpu->stat.addrerr_ld_exits;
		trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
		break;

	case T_SYSCALL:
		++vcpu->stat.syscall_exits;
		trace_kvm_exit(vcpu, SYSCALL_EXITS);
		ret = kvm_mips_callbacks->handle_syscall(vcpu);
		break;

	case T_RES_INST:
		++vcpu->stat.resvd_inst_exits;
		trace_kvm_exit(vcpu, RESVD_INST_EXITS);
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
		break;

	case T_BREAK:
		++vcpu->stat.break_inst_exits;
		trace_kvm_exit(vcpu, BREAK_INST_EXITS);
		ret = kvm_mips_callbacks->handle_break(vcpu);
		break;

	default:
		kvm_err
		    ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
		     exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
		     kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	}

skip_emul:
	local_irq_disable();

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, SIGNAL_EXITS);
		}
	}

	return ret;
}

int __init kvm_mips_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (ret)
		return ret;

	/* On MIPS, kernel modules are executed from "mapped space", which requires TLBs.
	 * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c)
	 * to avoid the possibility of double faulting. The issue is that the TLB code
	 * references routines that are part of the KVM module,
	 * which are only available once the module is loaded.
	 */
	kvm_mips_gfn_to_pfn = gfn_to_pfn;
	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
	kvm_mips_is_error_pfn = is_error_pfn;

	pr_info("KVM/MIPS Initialized\n");
	return 0;
}

void __exit kvm_mips_exit(void)
{
	kvm_exit();

	kvm_mips_gfn_to_pfn = NULL;
	kvm_mips_release_pfn_clean = NULL;
	kvm_mips_is_error_pfn = NULL;

	pr_info("KVM/MIPS unloaded\n");
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
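For reference, the ONE_REG and interrupt ioctls implemented above are driven from userspace roughly as follows. This is a minimal sketch, not taken from the kernel tree: it assumes a MIPS host with /dev/kvm, an already-created VCPU file descriptor (vcpu_fd is a placeholder name), and the UAPI headers that define KVM_REG_MIPS_PC and struct kvm_mips_interrupt.

/* Hypothetical userspace snippet: read the guest PC via KVM_GET_ONE_REG,
 * then queue IRQ 2 on the same VCPU via KVM_INTERRUPT.  VM/VCPU creation
 * and error reporting are omitted for brevity. */
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int dump_pc_and_inject_irq2(int vcpu_fd)
{
	uint64_t pc;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_MIPS_PC,	/* served by kvm_mips_get_reg() above */
		.addr = (uintptr_t)&pc,		/* userspace buffer the kernel put_user()s into */
	};
	struct kvm_mips_interrupt irq = {
		.cpu = -1,	/* -1: target this VCPU, see kvm_vcpu_ioctl_interrupt() */
		.irq = 2,	/* 2/3/4 queue an IRQ; -2/-3/-4 dequeue it */
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;
	printf("guest pc = 0x%llx\n", (unsigned long long)pc);

	return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
}

A VMM would typically call something like this between KVM_RUN iterations; with the MIPS implementation above, the interrupt is queued through kvm_mips_callbacks->queue_io_int() and delivered on the next guest entry.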