/*
 * Kernel-based Virtual Machine - device assignment support
 *
 * Copyright (C) 2006-9 Red Hat, Inc
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include "irq.h"

static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
						      int assigned_dev_id)
{
	struct list_head *ptr;
	struct kvm_assigned_dev_kernel *match;

	list_for_each(ptr, head) {
		match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
		if (match->assigned_dev_id == assigned_dev_id)
			return match;
	}
	return NULL;
}

static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
				    *assigned_dev, int irq)
{
	int i, index;
	struct msix_entry *host_msix_entries;

	host_msix_entries = assigned_dev->host_msix_entries;

	index = -1;
	for (i = 0; i < assigned_dev->entries_nr; i++)
		if (irq == host_msix_entries[i].vector) {
			index = i;
			break;
		}
	if (index < 0) {
		printk(KERN_WARNING "Fail to find correlated MSI-X entry!\n");
		return 0;
	}

	return index;
}

static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
{
	struct kvm_assigned_dev_kernel *assigned_dev;
	struct kvm *kvm;
	int i;

	assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
				    interrupt_work);
	kvm = assigned_dev->kvm;

	spin_lock_irq(&assigned_dev->assigned_dev_lock);
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		struct kvm_guest_msix_entry *guest_entries =
			assigned_dev->guest_msix_entries;
		for (i = 0; i < assigned_dev->entries_nr; i++) {
			if (!(guest_entries[i].flags &
					KVM_ASSIGNED_MSIX_PENDING))
				continue;
			guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING;
			kvm_set_irq(assigned_dev->kvm,
				    assigned_dev->irq_source_id,
				    guest_entries[i].vector, 1);
		}
	} else
		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
			    assigned_dev->guest_irq, 1);

	spin_unlock_irq(&assigned_dev->assigned_dev_lock);
}

static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
{
	unsigned long flags;
	struct kvm_assigned_dev_kernel *assigned_dev =
		(struct kvm_assigned_dev_kernel *) dev_id;

	spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags);
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		int index = find_index_from_host_irq(assigned_dev, irq);
		if (index < 0)
			goto out;
		assigned_dev->guest_msix_entries[index].flags |=
			KVM_ASSIGNED_MSIX_PENDING;
	}

	schedule_work(&assigned_dev->interrupt_work);

	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
		disable_irq_nosync(irq);
		assigned_dev->host_irq_disabled = true;
	}

out:
	spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags);
	return IRQ_HANDLED;
}

/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_assigned_dev_kernel *dev;
	unsigned long flags;

	if (kian->gsi == -1)
		return;

	dev = container_of(kian, struct kvm_assigned_dev_kernel,
			   ack_notifier);

	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);

	/* The guest irq may be shared so this ack may be
	 * from another device.
	 */
	spin_lock_irqsave(&dev->assigned_dev_lock, flags);
	if (dev->host_irq_disabled) {
		enable_irq(dev->host_irq);
		dev->host_irq_disabled = false;
	}
	spin_unlock_irqrestore(&dev->assigned_dev_lock, flags);
}

static void deassign_guest_irq(struct kvm *kvm,
			       struct kvm_assigned_dev_kernel *assigned_dev)
{
	kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
	assigned_dev->ack_notifier.gsi = -1;

	if (assigned_dev->irq_source_id != -1)
		kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
	assigned_dev->irq_source_id = -1;
	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
}

/* The function implicitly holds the kvm->lock mutex due to cancel_work_sync() */
static void deassign_host_irq(struct kvm *kvm,
			      struct kvm_assigned_dev_kernel *assigned_dev)
{
	/*
	 * In kvm_free_device_irq, cancel_work_sync() returns true if:
	 * 1. the work was scheduled and has been cancelled, or
	 * 2. the work callback has already been executed.
	 *
	 * In the first case the irq is disabled and no further events can
	 * arrive.  In the second case the irq may have been re-enabled
	 * (e.g. for MSI), so we disable it here to prevent further events.
	 *
	 * Note that this may result in a nested disable if the interrupt
	 * type is INTx, but that is fine since we are about to free it.
	 *
	 * If this function is called as part of VM destruction, make sure
	 * the kvm state is still valid at this point, since we may also
	 * have to wait for interrupt_work to complete.
	 */
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		int i;
		for (i = 0; i < assigned_dev->entries_nr; i++)
			disable_irq_nosync(assigned_dev->
					   host_msix_entries[i].vector);

		cancel_work_sync(&assigned_dev->interrupt_work);

		for (i = 0; i < assigned_dev->entries_nr; i++)
			free_irq(assigned_dev->host_msix_entries[i].vector,
				 (void *)assigned_dev);

		assigned_dev->entries_nr = 0;
		kfree(assigned_dev->host_msix_entries);
		kfree(assigned_dev->guest_msix_entries);
		pci_disable_msix(assigned_dev->dev);
	} else {
		/* Deal with MSI and INTx */
		disable_irq_nosync(assigned_dev->host_irq);
		cancel_work_sync(&assigned_dev->interrupt_work);

		free_irq(assigned_dev->host_irq, (void *)assigned_dev);

		if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
			pci_disable_msi(assigned_dev->dev);
	}

	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
}

static int kvm_deassign_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *assigned_dev,
			    unsigned long irq_requested_type)
{
	unsigned long guest_irq_type, host_irq_type;

	if (!irqchip_in_kernel(kvm))
		return -EINVAL;
	/* no irq assignment to deassign */
	if (!assigned_dev->irq_requested_type)
		return -ENXIO;

	host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
	guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;

	if (host_irq_type)
		deassign_host_irq(kvm, assigned_dev);
	if (guest_irq_type)
		deassign_guest_irq(kvm, assigned_dev);

	return 0;
}

static void kvm_free_assigned_irq(struct kvm *kvm,
				  struct kvm_assigned_dev_kernel *assigned_dev)
{
	kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
}

static void kvm_free_assigned_device(struct kvm *kvm,
				     struct kvm_assigned_dev_kernel
				     *assigned_dev)
{
	kvm_free_assigned_irq(kvm, assigned_dev);

	pci_reset_function(assigned_dev->dev);

	pci_release_regions(assigned_dev->dev);
	pci_disable_device(assigned_dev->dev);
	pci_dev_put(assigned_dev->dev);

	list_del(&assigned_dev->list);
	kfree(assigned_dev);
}

void kvm_free_all_assigned_devices(struct kvm *kvm)
{
	struct list_head *ptr, *ptr2;
	struct kvm_assigned_dev_kernel *assigned_dev;

	list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
		assigned_dev = list_entry(ptr,
					  struct kvm_assigned_dev_kernel,
					  list);

		kvm_free_assigned_device(kvm, assigned_dev);
	}
}

static int assigned_device_enable_host_intx(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	dev->host_irq = dev->dev->irq;
	/* Even though this is PCI, we don't want to use shared
	 * interrupts. Sharing host devices with guest-assigned devices
	 * on the same interrupt line is not a happy situation: there
	 * are going to be long delays in accepting, acking, etc.
	 */
	if (request_irq(dev->host_irq, kvm_assigned_dev_intr,
			0, "kvm_assigned_intx_device", (void *)dev))
		return -EIO;
	return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_host_msi(struct kvm *kvm,
					   struct kvm_assigned_dev_kernel *dev)
{
	int r;

	if (!dev->dev->msi_enabled) {
		r = pci_enable_msi(dev->dev);
		if (r)
			return r;
	}

	dev->host_irq = dev->dev->irq;
	if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0,
			"kvm_assigned_msi_device", (void *)dev)) {
		pci_disable_msi(dev->dev);
		return -EIO;
	}

	return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_host_msix(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	int i, r = -EINVAL;

	/* host_msix_entries and guest_msix_entries should have been
	 * initialized */
	if (dev->entries_nr == 0)
		return r;

	r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
	if (r)
		return r;

	for (i = 0; i < dev->entries_nr; i++) {
		r = request_irq(dev->host_msix_entries[i].vector,
				kvm_assigned_dev_intr, 0,
				"kvm_assigned_msix_device",
				(void *)dev);
		/* FIXME: free requested_irq's on failure */
		if (r)
			return r;
	}

	return 0;
}

#endif

static int assigned_device_enable_guest_intx(struct kvm *kvm,
				struct kvm_assigned_dev_kernel *dev,
				struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = irq->guest_irq;
	return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_guest_msi(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *dev,
			struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	dev->host_irq_disabled = false;
	return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_guest_msix(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *dev,
			struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	dev->host_irq_disabled = false;
	return 0;
}
#endif

static int assign_host_irq(struct kvm *kvm,
			   struct kvm_assigned_dev_kernel *dev,
			   __u32 host_irq_type)
{
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
		return r;

	switch (host_irq_type) {
	case KVM_DEV_IRQ_HOST_INTX:
		r = assigned_device_enable_host_intx(kvm, dev);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_HOST_MSI:
		r = assigned_device_enable_host_msi(kvm, dev);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_HOST_MSIX:
		r = assigned_device_enable_host_msix(kvm, dev);
		break;
#endif
	default:
		r = -EINVAL;
	}

	if (!r)
		dev->irq_requested_type |= host_irq_type;

	return r;
}

static int assign_guest_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *dev,
			    struct kvm_assigned_irq *irq,
			    unsigned long guest_irq_type)
{
	int id;
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
		return r;

	id = kvm_request_irq_source_id(kvm);
	if (id < 0)
		return id;

	dev->irq_source_id = id;

	switch (guest_irq_type) {
	case KVM_DEV_IRQ_GUEST_INTX:
		r = assigned_device_enable_guest_intx(kvm, dev, irq);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_GUEST_MSI:
		r = assigned_device_enable_guest_msi(kvm, dev, irq);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_GUEST_MSIX:
		r = assigned_device_enable_guest_msix(kvm, dev, irq);
		break;
#endif
	default:
		r = -EINVAL;
	}

	if (!r) {
		dev->irq_requested_type |= guest_irq_type;
		kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
	} else
		kvm_free_irq_source_id(kvm, dev->irq_source_id);

	return r;
}

/* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
				   struct kvm_assigned_irq *assigned_irq)
{
	int r = -EINVAL;
	struct kvm_assigned_dev_kernel *match;
	unsigned long host_irq_type, guest_irq_type;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (!irqchip_in_kernel(kvm))
		return r;

	mutex_lock(&kvm->lock);
	r = -ENODEV;
	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
	guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);

	r = -EINVAL;
	/* can only assign one type at a time */
	if (hweight_long(host_irq_type) > 1)
		goto out;
	if (hweight_long(guest_irq_type) > 1)
		goto out;
	if (host_irq_type == 0 && guest_irq_type == 0)
		goto out;

	r = 0;
	if (host_irq_type)
		r = assign_host_irq(kvm, match, host_irq_type);
	if (r)
		goto out;

	if (guest_irq_type)
		r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
out:
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
					 struct kvm_assigned_irq
					 *assigned_irq)
{
	int r = -ENODEV;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	r = kvm_deassign_irq(kvm, match, assigned_irq->flags);
out:
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
				      struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0, idx;
	struct kvm_assigned_dev_kernel *match;
	struct pci_dev *dev;

	mutex_lock(&kvm->lock);
	idx = srcu_read_lock(&kvm->srcu);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (match) {
		/* device already assigned */
		r = -EEXIST;
		goto out;
	}

	match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
	if (match == NULL) {
		printk(KERN_INFO "%s: Couldn't allocate memory\n",
		       __func__);
		r = -ENOMEM;
		goto out;
	}
	dev = pci_get_domain_bus_and_slot(assigned_dev->segnr,
					  assigned_dev->busnr,
					  assigned_dev->devfn);
	if (!dev) {
		printk(KERN_INFO "%s: host device not found\n", __func__);
		r = -EINVAL;
		goto out_free;
	}
	if (pci_enable_device(dev)) {
		printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
		r = -EBUSY;
		goto out_put;
	}
	r = pci_request_regions(dev, "kvm_assigned_device");
	if (r) {
		printk(KERN_INFO "%s: Could not get access to device regions\n",
		       __func__);
		goto out_disable;
	}

	pci_reset_function(dev);

	match->assigned_dev_id = assigned_dev->assigned_dev_id;
	match->host_segnr = assigned_dev->segnr;
	match->host_busnr = assigned_dev->busnr;
	match->host_devfn = assigned_dev->devfn;
	match->flags = assigned_dev->flags;
	match->dev = dev;
	spin_lock_init(&match->assigned_dev_lock);
	match->irq_source_id = -1;
	match->kvm = kvm;
	match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
	INIT_WORK(&match->interrupt_work,
		  kvm_assigned_dev_interrupt_work_handler);

	list_add(&match->list, &kvm->arch.assigned_dev_head);

	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
		if (!kvm->arch.iommu_domain) {
			r = kvm_iommu_map_guest(kvm);
			if (r)
				goto out_list_del;
		}
		r = kvm_assign_device(kvm, match);
		if (r)
			goto out_list_del;
	}

out:
	srcu_read_unlock(&kvm->srcu, idx);
	mutex_unlock(&kvm->lock);
	return r;
out_list_del:
	list_del(&match->list);
	pci_release_regions(dev);
out_disable:
	pci_disable_device(dev);
out_put:
	pci_dev_put(dev);
out_free:
	kfree(match);
	srcu_read_unlock(&kvm->srcu, idx);
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
		struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (!match) {
		printk(KERN_INFO "%s: device hasn't been assigned before, "
		       "so cannot be deassigned\n", __func__);
		r = -EINVAL;
		goto out;
	}

	if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
		kvm_deassign_device(kvm, match);

	kvm_free_assigned_device(kvm, match);

out:
	mutex_unlock(&kvm->lock);
	return r;
}


#ifdef __KVM_HAVE_MSIX
static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
				    struct kvm_assigned_msix_nr *entry_nr)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *adev;

	mutex_lock(&kvm->lock);

	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				     entry_nr->assigned_dev_id);
	if (!adev) {
		r = -EINVAL;
		goto msix_nr_out;
	}

	if (adev->entries_nr == 0) {
		adev->entries_nr = entry_nr->entry_nr;
		if (adev->entries_nr == 0 ||
		    adev->entries_nr >= KVM_MAX_MSIX_PER_DEV) {
			r = -EINVAL;
			goto msix_nr_out;
		}

		adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
						  entry_nr->entry_nr,
						  GFP_KERNEL);
		if (!adev->host_msix_entries) {
			r = -ENOMEM;
			goto msix_nr_out;
		}
		adev->guest_msix_entries = kzalloc(
				sizeof(struct kvm_guest_msix_entry) *
				entry_nr->entry_nr, GFP_KERNEL);
		if (!adev->guest_msix_entries) {
			kfree(adev->host_msix_entries);
			r = -ENOMEM;
			goto msix_nr_out;
		}
	} else /* Not allowed to set the MSI-X entry count twice */
		r = -EINVAL;
msix_nr_out:
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
				       struct kvm_assigned_msix_entry *entry)
{
	int r = 0, i;
	struct kvm_assigned_dev_kernel *adev;

	mutex_lock(&kvm->lock);

	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				     entry->assigned_dev_id);

	if (!adev) {
		r = -EINVAL;
		goto msix_entry_out;
	}

	for (i = 0; i < adev->entries_nr; i++)
		if (adev->guest_msix_entries[i].vector == 0 ||
		    adev->guest_msix_entries[i].entry == entry->entry) {
			adev->guest_msix_entries[i].entry = entry->entry;
			adev->guest_msix_entries[i].vector = entry->gsi;
			adev->host_msix_entries[i].entry = entry->entry;
			break;
		}
	if (i == adev->entries_nr) {
		r = -ENOSPC;
		goto msix_entry_out;
	}

msix_entry_out:
	mutex_unlock(&kvm->lock);

	return r;
}
#endif

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
				  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int r = -ENOTTY;

	switch (ioctl) {
	case KVM_ASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_IRQ: {
		r = -EOPNOTSUPP;
		break;
	}
#ifdef KVM_CAP_ASSIGN_DEV_IRQ
	case KVM_ASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
	case KVM_DEASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
#endif
#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
	case KVM_DEASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
#endif
#ifdef KVM_CAP_IRQ_ROUTING
	case KVM_SET_GSI_ROUTING: {
		struct kvm_irq_routing routing;
		struct kvm_irq_routing __user *urouting;
		struct kvm_irq_routing_entry *entries;

		r = -EFAULT;
		if (copy_from_user(&routing, argp, sizeof(routing)))
			goto out;
		r = -EINVAL;
		if (routing.nr >= KVM_MAX_IRQ_ROUTES)
			goto out;
		if (routing.flags)
			goto out;
		r = -ENOMEM;
		entries = vmalloc(routing.nr * sizeof(*entries));
		if (!entries)
			goto out;
		r = -EFAULT;
		urouting = argp;
		if (copy_from_user(entries, urouting->entries,
				   routing.nr * sizeof(*entries)))
			goto out_free_irq_routing;
		r = kvm_set_irq_routing(kvm, entries, routing.nr,
					routing.flags);
	out_free_irq_routing:
		vfree(entries);
		break;
	}
#endif /* KVM_CAP_IRQ_ROUTING */
#ifdef __KVM_HAVE_MSIX
	case KVM_ASSIGN_SET_MSIX_NR: {
		struct kvm_assigned_msix_nr entry_nr;
		r = -EFAULT;
		if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
			goto out;
		r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_SET_MSIX_ENTRY: {
		struct kvm_assigned_msix_entry entry;
		r = -EFAULT;
		if (copy_from_user(&entry, argp, sizeof entry))
			goto out;
		r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
		if (r)
			goto out;
		break;
	}
#endif
	}
out:
	return r;
}
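
The ioctl handlers above are reached through VM ioctls on a KVM VM file descriptor. Below is a minimal user-space sketch of the typical call sequence; it is illustrative only, assuming an in-kernel irqchip, a host device at 0000:01:00.0, a user-chosen assigned_dev_id encoding of (bus << 8) | devfn, and guest GSI 10, with all error handling omitted.

/*
 * Illustrative sketch -- not part of this file.  Drives the
 * KVM_ASSIGN_PCI_DEVICE and KVM_ASSIGN_DEV_IRQ paths above from user space.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);

	/* An in-kernel irqchip is required before IRQ assignment. */
	ioctl(vm_fd, KVM_CREATE_IRQCHIP, 0);

	struct kvm_assigned_pci_dev dev;
	memset(&dev, 0, sizeof(dev));
	dev.segnr = 0;				/* assumed host device 0000:01:00.0 */
	dev.busnr = 1;
	dev.devfn = 0;
	dev.assigned_dev_id = (dev.busnr << 8) | dev.devfn;	/* user-chosen id */
	dev.flags = KVM_DEV_ASSIGN_ENABLE_IOMMU;
	ioctl(vm_fd, KVM_ASSIGN_PCI_DEVICE, &dev);	/* -> kvm_vm_ioctl_assign_device() */

	struct kvm_assigned_irq irq;
	memset(&irq, 0, sizeof(irq));
	irq.assigned_dev_id = dev.assigned_dev_id;
	irq.guest_irq = 10;				/* assumed guest GSI */
	irq.flags = KVM_DEV_IRQ_HOST_INTX | KVM_DEV_IRQ_GUEST_INTX;
	ioctl(vm_fd, KVM_ASSIGN_DEV_IRQ, &irq);		/* -> kvm_vm_ioctl_assign_irq() */

	return 0;
}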