/*
 * Kernel-based Virtual Machine - device assignment support
 *
 * Copyright (C) 2010 Red Hat, Inc. and/or its affiliates.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include "irq.h"

static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
						      int assigned_dev_id)
{
	struct list_head *ptr;
	struct kvm_assigned_dev_kernel *match;

	list_for_each(ptr, head) {
		match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
		if (match->assigned_dev_id == assigned_dev_id)
			return match;
	}
	return NULL;
}

static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
				    *assigned_dev, int irq)
{
	int i, index;
	struct msix_entry *host_msix_entries;

	host_msix_entries = assigned_dev->host_msix_entries;

	index = -1;
	for (i = 0; i < assigned_dev->entries_nr; i++)
		if (irq == host_msix_entries[i].vector) {
			index = i;
			break;
		}
	/* Return -1 on failure; the caller checks for index >= 0.
	 * (Returning 0 here would spuriously inject entry 0's vector.)
	 */
	if (index < 0)
		printk(KERN_WARNING "Failed to find correlated MSI-X entry!\n");

	return index;
}
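
/*
 * Threaded handler for host interrupts of an assigned device.  For
 * level-triggered INTx the host line is masked here and stays masked
 * until the guest acks the interrupt (see kvm_assigned_dev_ack_irq());
 * otherwise the still-asserted line would storm the host.
 */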
static irqreturn_t kvm_assigned_dev_thread(int irq, void *dev_id)
{
	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;

	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_INTX) {
		spin_lock(&assigned_dev->intx_lock);
		disable_irq_nosync(irq);
		assigned_dev->host_irq_disabled = true;
		spin_unlock(&assigned_dev->intx_lock);
	}

	kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
		    assigned_dev->guest_irq, 1);

	return IRQ_HANDLED;
}

#ifdef __KVM_HAVE_MSIX
static irqreturn_t kvm_assigned_dev_thread_msix(int irq, void *dev_id)
{
	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
	int index = find_index_from_host_irq(assigned_dev, irq);
	u32 vector;

	if (index >= 0) {
		vector = assigned_dev->guest_msix_entries[index].vector;
		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
			    vector, 1);
	}

	return IRQ_HANDLED;
}
#endif

/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_assigned_dev_kernel *dev =
		container_of(kian, struct kvm_assigned_dev_kernel,
			     ack_notifier);

	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);

	/* The guest irq may be shared, so this ack may have been
	 * triggered by another device.
	 */
	spin_lock(&dev->intx_lock);
	if (dev->host_irq_disabled) {
		enable_irq(dev->host_irq);
		dev->host_irq_disabled = false;
	}
	spin_unlock(&dev->intx_lock);
}

static void deassign_guest_irq(struct kvm *kvm,
			       struct kvm_assigned_dev_kernel *assigned_dev)
{
	if (assigned_dev->ack_notifier.gsi != -1)
		kvm_unregister_irq_ack_notifier(kvm,
						&assigned_dev->ack_notifier);

	kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
		    assigned_dev->guest_irq, 0);

	if (assigned_dev->irq_source_id != -1)
		kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
	assigned_dev->irq_source_id = -1;
	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
}

/* This function implicitly holds the kvm->lock mutex due to cancel_work_sync() */
static void deassign_host_irq(struct kvm *kvm,
			      struct kvm_assigned_dev_kernel *assigned_dev)
{
	/*
	 * We disable the irq here to prevent further events.
	 *
	 * Note that this may result in a nested disable if the interrupt
	 * type is INTx, but that is fine since we are going to free it.
	 *
	 * If this function is part of VM destruction, make sure the kvm
	 * state is still valid at this point, since we may have to wait
	 * for a currently running IRQ handler.
	 */
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		int i;
		for (i = 0; i < assigned_dev->entries_nr; i++)
			disable_irq(assigned_dev->host_msix_entries[i].vector);

		for (i = 0; i < assigned_dev->entries_nr; i++)
			free_irq(assigned_dev->host_msix_entries[i].vector,
				 assigned_dev);

		assigned_dev->entries_nr = 0;
		kfree(assigned_dev->host_msix_entries);
		kfree(assigned_dev->guest_msix_entries);
		pci_disable_msix(assigned_dev->dev);
	} else {
		/* Deal with MSI and INTx */
		disable_irq(assigned_dev->host_irq);

		free_irq(assigned_dev->host_irq, assigned_dev);

		if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
			pci_disable_msi(assigned_dev->dev);
	}

	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
}

static int kvm_deassign_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *assigned_dev,
			    unsigned long irq_requested_type)
{
	unsigned long guest_irq_type, host_irq_type;

	if (!irqchip_in_kernel(kvm))
		return -EINVAL;
	/* no irq assignment to deassign */
	if (!assigned_dev->irq_requested_type)
		return -ENXIO;

	host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
	guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;

	if (host_irq_type)
		deassign_host_irq(kvm, assigned_dev);
	if (guest_irq_type)
		deassign_guest_irq(kvm, assigned_dev);

	return 0;
}

static void kvm_free_assigned_irq(struct kvm *kvm,
				  struct kvm_assigned_dev_kernel *assigned_dev)
{
	kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
}

static void kvm_free_assigned_device(struct kvm *kvm,
				     struct kvm_assigned_dev_kernel
				     *assigned_dev)
{
	kvm_free_assigned_irq(kvm, assigned_dev);

	pci_reset_function(assigned_dev->dev);
	if (pci_load_and_free_saved_state(assigned_dev->dev,
					  &assigned_dev->pci_saved_state))
		printk(KERN_INFO "%s: Couldn't reload %s saved state\n",
		       __func__, dev_name(&assigned_dev->dev->dev));
	else
		pci_restore_state(assigned_dev->dev);

	assigned_dev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;

	pci_release_regions(assigned_dev->dev);
	pci_disable_device(assigned_dev->dev);
	pci_dev_put(assigned_dev->dev);

	list_del(&assigned_dev->list);
	kfree(assigned_dev);
}

void kvm_free_all_assigned_devices(struct kvm *kvm)
{
	struct list_head *ptr, *ptr2;
	struct kvm_assigned_dev_kernel *assigned_dev;

	list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
		assigned_dev = list_entry(ptr,
					  struct kvm_assigned_dev_kernel,
					  list);

		kvm_free_assigned_device(kvm, assigned_dev);
	}
}
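
/*
 * Host-side IRQ setup.  A NULL primary handler is passed to
 * request_threaded_irq(), so all injection work happens in the
 * threaded handler.  For INTx, IRQF_ONESHOT keeps the level-triggered
 * line masked until the threaded handler has run.
 */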
static int assigned_device_enable_host_intx(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	dev->host_irq = dev->dev->irq;
	/* Even though this is PCI, we don't want to use shared
	 * interrupts. Sharing host devices with guest-assigned devices
	 * on the same interrupt line is not a happy situation: there
	 * are going to be long delays in accepting, acking, etc.
	 */
	if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
				 IRQF_ONESHOT, dev->irq_name, dev))
		return -EIO;
	return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_host_msi(struct kvm *kvm,
					   struct kvm_assigned_dev_kernel *dev)
{
	int r;

	if (!dev->dev->msi_enabled) {
		r = pci_enable_msi(dev->dev);
		if (r)
			return r;
	}

	dev->host_irq = dev->dev->irq;
	if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
				 0, dev->irq_name, dev)) {
		pci_disable_msi(dev->dev);
		return -EIO;
	}

	return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_host_msix(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	int i, r = -EINVAL;

	/* host_msix_entries and guest_msix_entries should have been
	 * initialized */
	if (dev->entries_nr == 0)
		return r;

	r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
	if (r)
		return r;

	for (i = 0; i < dev->entries_nr; i++) {
		r = request_threaded_irq(dev->host_msix_entries[i].vector,
					 NULL, kvm_assigned_dev_thread_msix,
					 0, dev->irq_name, dev);
		if (r)
			goto err;
	}

	return 0;
err:
	for (i -= 1; i >= 0; i--)
		free_irq(dev->host_msix_entries[i].vector, dev);
	pci_disable_msix(dev->dev);
	return r;
}

#endif

static int assigned_device_enable_guest_intx(struct kvm *kvm,
				struct kvm_assigned_dev_kernel *dev,
				struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = irq->guest_irq;
	return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_guest_msi(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *dev,
			struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	dev->host_irq_disabled = false;
	return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_guest_msix(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *dev,
			struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	dev->host_irq_disabled = false;
	return 0;
}
#endif
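
/*
 * Request the host-side interrupt for an assigned device.  Only one
 * host IRQ type (INTx, MSI or MSI-X) may be active at a time; a second
 * request without a prior deassign fails with -EEXIST.
 */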
static int assign_host_irq(struct kvm *kvm,
			   struct kvm_assigned_dev_kernel *dev,
			   __u32 host_irq_type)
{
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
		return r;

	snprintf(dev->irq_name, sizeof(dev->irq_name), "kvm:%s",
		 pci_name(dev->dev));

	switch (host_irq_type) {
	case KVM_DEV_IRQ_HOST_INTX:
		r = assigned_device_enable_host_intx(kvm, dev);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_HOST_MSI:
		r = assigned_device_enable_host_msi(kvm, dev);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_HOST_MSIX:
		r = assigned_device_enable_host_msix(kvm, dev);
		break;
#endif
	default:
		r = -EINVAL;
	}

	if (!r)
		dev->irq_requested_type |= host_irq_type;

	return r;
}

static int assign_guest_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *dev,
			    struct kvm_assigned_irq *irq,
			    unsigned long guest_irq_type)
{
	int id;
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
		return r;

	id = kvm_request_irq_source_id(kvm);
	if (id < 0)
		return id;

	dev->irq_source_id = id;

	switch (guest_irq_type) {
	case KVM_DEV_IRQ_GUEST_INTX:
		r = assigned_device_enable_guest_intx(kvm, dev, irq);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_GUEST_MSI:
		r = assigned_device_enable_guest_msi(kvm, dev, irq);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_GUEST_MSIX:
		r = assigned_device_enable_guest_msix(kvm, dev, irq);
		break;
#endif
	default:
		r = -EINVAL;
	}

	if (!r) {
		dev->irq_requested_type |= guest_irq_type;
		if (dev->ack_notifier.gsi != -1)
			kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
	} else
		kvm_free_irq_source_id(kvm, dev->irq_source_id);

	return r;
}

/* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
				   struct kvm_assigned_irq *assigned_irq)
{
	int r = -EINVAL;
	struct kvm_assigned_dev_kernel *match;
	unsigned long host_irq_type, guest_irq_type;

	if (!irqchip_in_kernel(kvm))
		return r;

	mutex_lock(&kvm->lock);
	r = -ENODEV;
	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
	guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);

	r = -EINVAL;
	/* can only assign one type at a time */
	if (hweight_long(host_irq_type) > 1)
		goto out;
	if (hweight_long(guest_irq_type) > 1)
		goto out;
	if (host_irq_type == 0 && guest_irq_type == 0)
		goto out;

	r = 0;
	if (host_irq_type)
		r = assign_host_irq(kvm, match, host_irq_type);
	if (r)
		goto out;

	if (guest_irq_type)
		r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
out:
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
					 struct kvm_assigned_irq
					 *assigned_irq)
{
	int r = -ENODEV;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	r = kvm_deassign_irq(kvm, match, assigned_irq->flags);
out:
	mutex_unlock(&kvm->lock);
	return r;
}
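
/*
 * KVM_ASSIGN_PCI_DEVICE: claim the host PCI device, reset it and save
 * its config state (restored on deassignment), then optionally map it
 * into the VM's IOMMU domain.
 */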
static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
				      struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0, idx;
	struct kvm_assigned_dev_kernel *match;
	struct pci_dev *dev;

	mutex_lock(&kvm->lock);
	idx = srcu_read_lock(&kvm->srcu);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (match) {
		/* device already assigned */
		r = -EEXIST;
		goto out;
	}

	match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
	if (match == NULL) {
		printk(KERN_INFO "%s: Couldn't allocate memory\n",
		       __func__);
		r = -ENOMEM;
		goto out;
	}
	dev = pci_get_domain_bus_and_slot(assigned_dev->segnr,
					  assigned_dev->busnr,
					  assigned_dev->devfn);
	if (!dev) {
		printk(KERN_INFO "%s: host device not found\n", __func__);
		r = -EINVAL;
		goto out_free;
	}
	if (pci_enable_device(dev)) {
		printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
		r = -EBUSY;
		goto out_put;
	}
	r = pci_request_regions(dev, "kvm_assigned_device");
	if (r) {
		printk(KERN_INFO "%s: Could not get access to device regions\n",
		       __func__);
		goto out_disable;
	}

	pci_reset_function(dev);
	pci_save_state(dev);
	match->pci_saved_state = pci_store_saved_state(dev);
	if (!match->pci_saved_state)
		printk(KERN_DEBUG "%s: Couldn't store %s saved state\n",
		       __func__, dev_name(&dev->dev));
	match->assigned_dev_id = assigned_dev->assigned_dev_id;
	match->host_segnr = assigned_dev->segnr;
	match->host_busnr = assigned_dev->busnr;
	match->host_devfn = assigned_dev->devfn;
	match->flags = assigned_dev->flags;
	match->dev = dev;
	spin_lock_init(&match->intx_lock);
	match->irq_source_id = -1;
	match->kvm = kvm;
	match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;

	list_add(&match->list, &kvm->arch.assigned_dev_head);

	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
		if (!kvm->arch.iommu_domain) {
			r = kvm_iommu_map_guest(kvm);
			if (r)
				goto out_list_del;
		}
		r = kvm_assign_device(kvm, match);
		if (r)
			goto out_list_del;
	}

out:
	srcu_read_unlock(&kvm->srcu, idx);
	mutex_unlock(&kvm->lock);
	return r;
out_list_del:
	if (pci_load_and_free_saved_state(dev, &match->pci_saved_state))
		printk(KERN_INFO "%s: Couldn't reload %s saved state\n",
		       __func__, dev_name(&dev->dev));
	list_del(&match->list);
	pci_release_regions(dev);
out_disable:
	pci_disable_device(dev);
out_put:
	pci_dev_put(dev);
out_free:
	kfree(match);
	srcu_read_unlock(&kvm->srcu, idx);
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
					struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (!match) {
		printk(KERN_INFO "%s: device hasn't been assigned before, "
		       "so cannot be deassigned\n", __func__);
		r = -EINVAL;
		goto out;
	}

	if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
		kvm_deassign_device(kvm, match);

	kvm_free_assigned_device(kvm, match);

out:
	mutex_unlock(&kvm->lock);
	return r;
}
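
/*
 * KVM_ASSIGN_SET_MSIX_NR: fix the number of MSI-X entries for the
 * device.  The count may only be set once, must be non-zero and is
 * capped at KVM_MAX_MSIX_PER_DEV; the host and guest entry arrays are
 * allocated here and filled in via KVM_ASSIGN_SET_MSIX_ENTRY.
 */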
#ifdef __KVM_HAVE_MSIX
static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
				    struct kvm_assigned_msix_nr *entry_nr)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *adev;

	mutex_lock(&kvm->lock);

	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				     entry_nr->assigned_dev_id);
	if (!adev) {
		r = -EINVAL;
		goto msix_nr_out;
	}

	if (adev->entries_nr == 0) {
		adev->entries_nr = entry_nr->entry_nr;
		if (adev->entries_nr == 0 ||
		    adev->entries_nr > KVM_MAX_MSIX_PER_DEV) {
			r = -EINVAL;
			goto msix_nr_out;
		}

		adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
						  entry_nr->entry_nr,
						  GFP_KERNEL);
		if (!adev->host_msix_entries) {
			r = -ENOMEM;
			goto msix_nr_out;
		}
		adev->guest_msix_entries =
			kzalloc(sizeof(struct msix_entry) * entry_nr->entry_nr,
				GFP_KERNEL);
		if (!adev->guest_msix_entries) {
			kfree(adev->host_msix_entries);
			r = -ENOMEM;
			goto msix_nr_out;
		}
	} else /* Setting the MSI-X entry count twice is not allowed */
		r = -EINVAL;
msix_nr_out:
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
				       struct kvm_assigned_msix_entry *entry)
{
	int r = 0, i;
	struct kvm_assigned_dev_kernel *adev;

	mutex_lock(&kvm->lock);

	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				     entry->assigned_dev_id);

	if (!adev) {
		r = -EINVAL;
		goto msix_entry_out;
	}

	for (i = 0; i < adev->entries_nr; i++)
		if (adev->guest_msix_entries[i].vector == 0 ||
		    adev->guest_msix_entries[i].entry == entry->entry) {
			adev->guest_msix_entries[i].entry = entry->entry;
			adev->guest_msix_entries[i].vector = entry->gsi;
			adev->host_msix_entries[i].entry = entry->entry;
			break;
		}
	if (i == adev->entries_nr) {
		r = -ENOSPC;
		goto msix_entry_out;
	}

msix_entry_out:
	mutex_unlock(&kvm->lock);

	return r;
}
#endif
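
/*
 * Dispatcher for the device assignment ioctls issued on the VM file
 * descriptor.  Each case copies its argument struct in from user space
 * before calling the handler; unknown ioctls return -ENOTTY.
 */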
long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
				  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_ASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_IRQ: {
		r = -EOPNOTSUPP;
		break;
	}
	case KVM_ASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
	case KVM_DEASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
	case KVM_DEASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_CAP_IRQ_ROUTING
	case KVM_SET_GSI_ROUTING: {
		struct kvm_irq_routing routing;
		struct kvm_irq_routing __user *urouting;
		struct kvm_irq_routing_entry *entries;

		r = -EFAULT;
		if (copy_from_user(&routing, argp, sizeof(routing)))
			goto out;
		r = -EINVAL;
		if (routing.nr >= KVM_MAX_IRQ_ROUTES)
			goto out;
		if (routing.flags)
			goto out;
		r = -ENOMEM;
		entries = vmalloc(routing.nr * sizeof(*entries));
		if (!entries)
			goto out;
		r = -EFAULT;
		urouting = argp;
		if (copy_from_user(entries, urouting->entries,
				   routing.nr * sizeof(*entries)))
			goto out_free_irq_routing;
		r = kvm_set_irq_routing(kvm, entries, routing.nr,
					routing.flags);
out_free_irq_routing:
		vfree(entries);
		break;
	}
#endif /* KVM_CAP_IRQ_ROUTING */
#ifdef __KVM_HAVE_MSIX
	case KVM_ASSIGN_SET_MSIX_NR: {
		struct kvm_assigned_msix_nr entry_nr;
		r = -EFAULT;
		if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
			goto out;
		r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_SET_MSIX_ENTRY: {
		struct kvm_assigned_msix_entry entry;
		r = -EFAULT;
		if (copy_from_user(&entry, argp, sizeof entry))
			goto out;
		r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
		if (r)
			goto out;
		break;
	}
#endif
	default:
		r = -ENOTTY;
		break;
	}
out:
	return r;
}