/* at v3.18-rc2 */
/*
 * Kernel-based Virtual Machine - device assignment support
 *
 * Copyright (C) 2010 Red Hat, Inc. and/or its affiliates.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include "irq.h"

static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
							     int assigned_dev_id)
{
	struct list_head *ptr;
	struct kvm_assigned_dev_kernel *match;

	list_for_each(ptr, head) {
		match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
		if (match->assigned_dev_id == assigned_dev_id)
			return match;
	}
	return NULL;
}

static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
				    *assigned_dev, int irq)
{
	int i, index;
	struct msix_entry *host_msix_entries;

	host_msix_entries = assigned_dev->host_msix_entries;

	index = -1;
	for (i = 0; i < assigned_dev->entries_nr; i++)
		if (irq == host_msix_entries[i].vector) {
			index = i;
			break;
		}
	if (index < 0)
		printk(KERN_WARNING "Failed to find correlated MSI-X entry!\n");

	return index;
}

static irqreturn_t kvm_assigned_dev_intx(int irq, void *dev_id)
{
	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
	int ret;

	spin_lock(&assigned_dev->intx_lock);
	if (pci_check_and_mask_intx(assigned_dev->dev)) {
		assigned_dev->host_irq_disabled = true;
		ret = IRQ_WAKE_THREAD;
	} else
		ret = IRQ_NONE;
	spin_unlock(&assigned_dev->intx_lock);

	return ret;
}

static void
kvm_assigned_dev_raise_guest_irq(struct kvm_assigned_dev_kernel *assigned_dev,
				 int vector)
{
	if (unlikely(assigned_dev->irq_requested_type &
		     KVM_DEV_IRQ_GUEST_INTX)) {
		spin_lock(&assigned_dev->intx_mask_lock);
		if (!(assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX))
			kvm_set_irq(assigned_dev->kvm,
				    assigned_dev->irq_source_id, vector, 1,
				    false);
		spin_unlock(&assigned_dev->intx_mask_lock);
	} else
		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
			    vector, 1, false);
}

static irqreturn_t kvm_assigned_dev_thread_intx(int irq, void *dev_id)
{
	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;

	if (!(assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
		spin_lock_irq(&assigned_dev->intx_lock);
		disable_irq_nosync(irq);
		assigned_dev->host_irq_disabled = true;
		spin_unlock_irq(&assigned_dev->intx_lock);
	}

	kvm_assigned_dev_raise_guest_irq(assigned_dev,
					 assigned_dev->guest_irq);

	return IRQ_HANDLED;
}

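/*
 * For MSI (and MSI-X below), the primary hard handler first attempts a
 * lock-free injection via kvm_set_irq_inatomic(); only when that cannot
 * complete atomically (-EWOULDBLOCK) is the threaded handler woken to
 * perform the injection from process context.
 */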
#ifdef __KVM_HAVE_MSI
static irqreturn_t kvm_assigned_dev_msi(int irq, void *dev_id)
{
	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
	int ret = kvm_set_irq_inatomic(assigned_dev->kvm,
				       assigned_dev->irq_source_id,
				       assigned_dev->guest_irq, 1);
	return unlikely(ret == -EWOULDBLOCK) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}

static irqreturn_t kvm_assigned_dev_thread_msi(int irq, void *dev_id)
{
	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;

	kvm_assigned_dev_raise_guest_irq(assigned_dev,
					 assigned_dev->guest_irq);

	return IRQ_HANDLED;
}
#endif

#ifdef __KVM_HAVE_MSIX
static irqreturn_t kvm_assigned_dev_msix(int irq, void *dev_id)
{
	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
	int index = find_index_from_host_irq(assigned_dev, irq);
	u32 vector;
	int ret = 0;

	if (index >= 0) {
		vector = assigned_dev->guest_msix_entries[index].vector;
		ret = kvm_set_irq_inatomic(assigned_dev->kvm,
					   assigned_dev->irq_source_id,
					   vector, 1);
	}

	return unlikely(ret == -EWOULDBLOCK) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}

static irqreturn_t kvm_assigned_dev_thread_msix(int irq, void *dev_id)
{
	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
	int index = find_index_from_host_irq(assigned_dev, irq);
	u32 vector;

	if (index >= 0) {
		vector = assigned_dev->guest_msix_entries[index].vector;
		kvm_assigned_dev_raise_guest_irq(assigned_dev, vector);
	}

	return IRQ_HANDLED;
}
#endif

/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_assigned_dev_kernel *dev =
		container_of(kian, struct kvm_assigned_dev_kernel,
			     ack_notifier);

	kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0, false);

	spin_lock(&dev->intx_mask_lock);

	if (!(dev->flags & KVM_DEV_ASSIGN_MASK_INTX)) {
		bool reassert = false;

		spin_lock_irq(&dev->intx_lock);
		/*
		 * The guest IRQ may be shared so this ack can come from an
		 * IRQ for another guest device.
		 */
		if (dev->host_irq_disabled) {
			if (!(dev->flags & KVM_DEV_ASSIGN_PCI_2_3))
				enable_irq(dev->host_irq);
			else if (!pci_check_and_unmask_intx(dev->dev))
				reassert = true;
			dev->host_irq_disabled = reassert;
		}
		spin_unlock_irq(&dev->intx_lock);

		if (reassert)
			kvm_set_irq(dev->kvm, dev->irq_source_id,
				    dev->guest_irq, 1, false);
	}

	spin_unlock(&dev->intx_mask_lock);
}

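/*
 * Tear down the guest side of an assignment: unregister the ack notifier
 * (INTx only), deassert the guest IRQ line and release the IRQ source id.
 */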
static void deassign_guest_irq(struct kvm *kvm,
			       struct kvm_assigned_dev_kernel *assigned_dev)
{
	if (assigned_dev->ack_notifier.gsi != -1)
		kvm_unregister_irq_ack_notifier(kvm,
						&assigned_dev->ack_notifier);

	kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
		    assigned_dev->guest_irq, 0, false);

	if (assigned_dev->irq_source_id != -1)
		kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
	assigned_dev->irq_source_id = -1;
	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
}

/* The function implicitly holds the kvm->lock mutex due to cancel_work_sync() */
static void deassign_host_irq(struct kvm *kvm,
			      struct kvm_assigned_dev_kernel *assigned_dev)
{
	/*
	 * We disable the irq here to prevent further events.
	 *
	 * Note that this may result in a nested disable if the interrupt
	 * type is INTx, but that is OK since we are going to free it.
	 *
	 * If this function is part of VM destruction, ensure that the kvm
	 * state is still valid at this point, since we may also have to
	 * wait on a currently running IRQ handler.
	 */
	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
		int i;
		for (i = 0; i < assigned_dev->entries_nr; i++)
			disable_irq(assigned_dev->host_msix_entries[i].vector);

		for (i = 0; i < assigned_dev->entries_nr; i++)
			free_irq(assigned_dev->host_msix_entries[i].vector,
				 assigned_dev);

		assigned_dev->entries_nr = 0;
		kfree(assigned_dev->host_msix_entries);
		kfree(assigned_dev->guest_msix_entries);
		pci_disable_msix(assigned_dev->dev);
	} else {
		/* Deal with MSI and INTx */
		if ((assigned_dev->irq_requested_type &
		     KVM_DEV_IRQ_HOST_INTX) &&
		    (assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
			spin_lock_irq(&assigned_dev->intx_lock);
			pci_intx(assigned_dev->dev, false);
			spin_unlock_irq(&assigned_dev->intx_lock);
			synchronize_irq(assigned_dev->host_irq);
		} else
			disable_irq(assigned_dev->host_irq);

		free_irq(assigned_dev->host_irq, assigned_dev);

		if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
			pci_disable_msi(assigned_dev->dev);
	}

	assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
}

static int kvm_deassign_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *assigned_dev,
			    unsigned long irq_requested_type)
{
	unsigned long guest_irq_type, host_irq_type;

	if (!irqchip_in_kernel(kvm))
		return -EINVAL;
	/* no irq assignment to deassign */
	if (!assigned_dev->irq_requested_type)
		return -ENXIO;

	host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
	guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;

	if (host_irq_type)
		deassign_host_irq(kvm, assigned_dev);
	if (guest_irq_type)
		deassign_guest_irq(kvm, assigned_dev);

	return 0;
}

static void kvm_free_assigned_irq(struct kvm *kvm,
				  struct kvm_assigned_dev_kernel *assigned_dev)
{
	kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
}

static void kvm_free_assigned_device(struct kvm *kvm,
				     struct kvm_assigned_dev_kernel
				     *assigned_dev)
{
	kvm_free_assigned_irq(kvm, assigned_dev);

	pci_reset_function(assigned_dev->dev);
	if (pci_load_and_free_saved_state(assigned_dev->dev,
					  &assigned_dev->pci_saved_state))
		printk(KERN_INFO "%s: Couldn't reload %s saved state\n",
		       __func__, dev_name(&assigned_dev->dev->dev));
	else
		pci_restore_state(assigned_dev->dev);

	pci_clear_dev_assigned(assigned_dev->dev);

	pci_release_regions(assigned_dev->dev);
	pci_disable_device(assigned_dev->dev);
	pci_dev_put(assigned_dev->dev);

	list_del(&assigned_dev->list);
	kfree(assigned_dev);
}

void kvm_free_all_assigned_devices(struct kvm *kvm)
{
	struct list_head *ptr, *ptr2;
	struct kvm_assigned_dev_kernel *assigned_dev;

	list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
		assigned_dev = list_entry(ptr,
					  struct kvm_assigned_dev_kernel,
					  list);

		kvm_free_assigned_device(kvm, assigned_dev);
	}
}

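/*
 * Request the host INTx line.  If PCI 2.3 interrupt masking is usable,
 * the line may be shared (IRQF_SHARED) because kvm_assigned_dev_intx()
 * masks the device itself; otherwise the IRQ stays exclusive and the
 * line is kept disabled until the guest acks it (IRQF_ONESHOT plus the
 * disable_irq_nosync() in the threaded handler).
 */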
static int assigned_device_enable_host_intx(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	irq_handler_t irq_handler;
	unsigned long flags;

	dev->host_irq = dev->dev->irq;

	/*
	 * We can only share the IRQ line with other host devices if we are
	 * able to disable the IRQ source at device-level - independently of
	 * the guest driver.  Otherwise host devices may suffer from unbounded
	 * IRQ latencies when the guest keeps the line asserted.
	 */
	if (dev->flags & KVM_DEV_ASSIGN_PCI_2_3) {
		irq_handler = kvm_assigned_dev_intx;
		flags = IRQF_SHARED;
	} else {
		irq_handler = NULL;
		flags = IRQF_ONESHOT;
	}
	if (request_threaded_irq(dev->host_irq, irq_handler,
				 kvm_assigned_dev_thread_intx, flags,
				 dev->irq_name, dev))
		return -EIO;

	if (dev->flags & KVM_DEV_ASSIGN_PCI_2_3) {
		spin_lock_irq(&dev->intx_lock);
		pci_intx(dev->dev, true);
		spin_unlock_irq(&dev->intx_lock);
	}
	return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_host_msi(struct kvm *kvm,
					   struct kvm_assigned_dev_kernel *dev)
{
	int r;

	if (!dev->dev->msi_enabled) {
		r = pci_enable_msi(dev->dev);
		if (r)
			return r;
	}

	dev->host_irq = dev->dev->irq;
	if (request_threaded_irq(dev->host_irq, kvm_assigned_dev_msi,
				 kvm_assigned_dev_thread_msi, 0,
				 dev->irq_name, dev)) {
		pci_disable_msi(dev->dev);
		return -EIO;
	}

	return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_host_msix(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev)
{
	int i, r = -EINVAL;

	/* host_msix_entries and guest_msix_entries should have been
	 * initialized */
	if (dev->entries_nr == 0)
		return r;

	r = pci_enable_msix_exact(dev->dev,
				  dev->host_msix_entries, dev->entries_nr);
	if (r)
		return r;

	for (i = 0; i < dev->entries_nr; i++) {
		r = request_threaded_irq(dev->host_msix_entries[i].vector,
					 kvm_assigned_dev_msix,
					 kvm_assigned_dev_thread_msix,
					 0, dev->irq_name, dev);
		if (r)
			goto err;
	}

	return 0;
err:
	for (i -= 1; i >= 0; i--)
		free_irq(dev->host_msix_entries[i].vector, dev);
	pci_disable_msix(dev->dev);
	return r;
}

#endif

static int assigned_device_enable_guest_intx(struct kvm *kvm,
					     struct kvm_assigned_dev_kernel *dev,
					     struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = irq->guest_irq;
	return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_guest_msi(struct kvm *kvm,
					    struct kvm_assigned_dev_kernel *dev,
					    struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_guest_msix(struct kvm *kvm,
					     struct kvm_assigned_dev_kernel *dev,
					     struct kvm_assigned_irq *irq)
{
	dev->guest_irq = irq->guest_irq;
	dev->ack_notifier.gsi = -1;
	return 0;
}
#endif

static int assign_host_irq(struct kvm *kvm,
			   struct kvm_assigned_dev_kernel *dev,
			   __u32 host_irq_type)
{
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
		return r;

	snprintf(dev->irq_name, sizeof(dev->irq_name), "kvm:%s",
		 pci_name(dev->dev));

	switch (host_irq_type) {
	case KVM_DEV_IRQ_HOST_INTX:
		r = assigned_device_enable_host_intx(kvm, dev);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_HOST_MSI:
		r = assigned_device_enable_host_msi(kvm, dev);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_HOST_MSIX:
		r = assigned_device_enable_host_msix(kvm, dev);
		break;
#endif
	default:
		r = -EINVAL;
	}
	dev->host_irq_disabled = false;

	if (!r)
		dev->irq_requested_type |= host_irq_type;

	return r;
}

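/*
 * Wire up the guest side of an assignment: reserve an IRQ source id and
 * record the guest GSI.  Only INTx registers an ack notifier, since a
 * level-triggered line must be deasserted and the host IRQ re-enabled
 * when the guest acknowledges the interrupt.
 */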
static int assign_guest_irq(struct kvm *kvm,
			    struct kvm_assigned_dev_kernel *dev,
			    struct kvm_assigned_irq *irq,
			    unsigned long guest_irq_type)
{
	int id;
	int r = -EEXIST;

	if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
		return r;

	id = kvm_request_irq_source_id(kvm);
	if (id < 0)
		return id;

	dev->irq_source_id = id;

	switch (guest_irq_type) {
	case KVM_DEV_IRQ_GUEST_INTX:
		r = assigned_device_enable_guest_intx(kvm, dev, irq);
		break;
#ifdef __KVM_HAVE_MSI
	case KVM_DEV_IRQ_GUEST_MSI:
		r = assigned_device_enable_guest_msi(kvm, dev, irq);
		break;
#endif
#ifdef __KVM_HAVE_MSIX
	case KVM_DEV_IRQ_GUEST_MSIX:
		r = assigned_device_enable_guest_msix(kvm, dev, irq);
		break;
#endif
	default:
		r = -EINVAL;
	}

	if (!r) {
		dev->irq_requested_type |= guest_irq_type;
		if (dev->ack_notifier.gsi != -1)
			kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
	} else {
		kvm_free_irq_source_id(kvm, dev->irq_source_id);
		dev->irq_source_id = -1;
	}

	return r;
}

/* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
				   struct kvm_assigned_irq *assigned_irq)
{
	int r = -EINVAL;
	struct kvm_assigned_dev_kernel *match;
	unsigned long host_irq_type, guest_irq_type;

	if (!irqchip_in_kernel(kvm))
		return r;

	mutex_lock(&kvm->lock);
	r = -ENODEV;
	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
	guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);

	r = -EINVAL;
	/* can only assign one type at a time */
	if (hweight_long(host_irq_type) > 1)
		goto out;
	if (hweight_long(guest_irq_type) > 1)
		goto out;
	if (host_irq_type == 0 && guest_irq_type == 0)
		goto out;

	r = 0;
	if (host_irq_type)
		r = assign_host_irq(kvm, match, host_irq_type);
	if (r)
		goto out;

	if (guest_irq_type)
		r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
out:
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
					 struct kvm_assigned_irq
					 *assigned_irq)
{
	int r = -ENODEV;
	struct kvm_assigned_dev_kernel *match;
	unsigned long irq_type;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_irq->assigned_dev_id);
	if (!match)
		goto out;

	irq_type = assigned_irq->flags & (KVM_DEV_IRQ_HOST_MASK |
					  KVM_DEV_IRQ_GUEST_MASK);
	r = kvm_deassign_irq(kvm, match, irq_type);
out:
	mutex_unlock(&kvm->lock);
	return r;
}

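/*
 * A minimal userspace sketch of wiring up MSI through KVM_ASSIGN_DEV_IRQ
 * (illustrative only; vm_fd, dev_id and gsi are assumed to come from the
 * caller, and error handling is omitted):
 *
 *	struct kvm_assigned_irq irq = {
 *		.assigned_dev_id = dev_id,
 *		.guest_irq = gsi,
 *		.flags = KVM_DEV_IRQ_HOST_MSI | KVM_DEV_IRQ_GUEST_MSI,
 *	};
 *	ioctl(vm_fd, KVM_ASSIGN_DEV_IRQ, &irq);
 */
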
/*
 * We want to test whether the caller has been granted permissions to
 * use this device.  To be able to configure and control the device,
 * the user needs access to PCI configuration space and BAR resources.
 * These are accessed through PCI sysfs.  PCI config space is often
 * passed to the process calling this ioctl via file descriptor, so we
 * can't rely on access to that file.  We can check for permissions
 * on each of the BAR resource files, which is a pretty clear
 * indicator that the user has been granted access to the device.
 */
static int probe_sysfs_permissions(struct pci_dev *dev)
{
#ifdef CONFIG_SYSFS
	int i;
	bool bar_found = false;

	for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++) {
		char *kpath, *syspath;
		struct path path;
		struct inode *inode;
		int r;

		if (!pci_resource_len(dev, i))
			continue;

		kpath = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
		if (!kpath)
			return -ENOMEM;

		/* Per sysfs-rules, sysfs is always at /sys */
		syspath = kasprintf(GFP_KERNEL, "/sys%s/resource%d", kpath, i);
		kfree(kpath);
		if (!syspath)
			return -ENOMEM;

		r = kern_path(syspath, LOOKUP_FOLLOW, &path);
		kfree(syspath);
		if (r)
			return r;

		inode = path.dentry->d_inode;

		r = inode_permission(inode, MAY_READ | MAY_WRITE | MAY_ACCESS);
		path_put(&path);
		if (r)
			return r;

		bar_found = true;
	}

	/* If no resources, probably something special */
	if (!bar_found)
		return -EPERM;

	return 0;
#else
	return -EINVAL; /* No way to control the device without sysfs */
#endif
}

static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
				      struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0, idx;
	struct kvm_assigned_dev_kernel *match;
	struct pci_dev *dev;

	if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	idx = srcu_read_lock(&kvm->srcu);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (match) {
		/* device already assigned */
		r = -EEXIST;
		goto out;
	}

	match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
	if (match == NULL) {
		printk(KERN_INFO "%s: Couldn't allocate memory\n",
		       __func__);
		r = -ENOMEM;
		goto out;
	}
	dev = pci_get_domain_bus_and_slot(assigned_dev->segnr,
					  assigned_dev->busnr,
					  assigned_dev->devfn);
	if (!dev) {
		printk(KERN_INFO "%s: host device not found\n", __func__);
		r = -EINVAL;
		goto out_free;
	}

	/* Don't allow bridges to be assigned */
	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL) {
		r = -EPERM;
		goto out_put;
	}

	r = probe_sysfs_permissions(dev);
	if (r)
		goto out_put;

	if (pci_enable_device(dev)) {
		printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
		r = -EBUSY;
		goto out_put;
	}
	r = pci_request_regions(dev, "kvm_assigned_device");
	if (r) {
		printk(KERN_INFO "%s: Could not get access to device regions\n",
		       __func__);
		goto out_disable;
	}

	pci_reset_function(dev);
	pci_save_state(dev);
	match->pci_saved_state = pci_store_saved_state(dev);
	if (!match->pci_saved_state)
		printk(KERN_DEBUG "%s: Couldn't store %s saved state\n",
		       __func__, dev_name(&dev->dev));

	if (!pci_intx_mask_supported(dev))
		assigned_dev->flags &= ~KVM_DEV_ASSIGN_PCI_2_3;

	match->assigned_dev_id = assigned_dev->assigned_dev_id;
	match->host_segnr = assigned_dev->segnr;
	match->host_busnr = assigned_dev->busnr;
	match->host_devfn = assigned_dev->devfn;
	match->flags = assigned_dev->flags;
	match->dev = dev;
	spin_lock_init(&match->intx_lock);
	spin_lock_init(&match->intx_mask_lock);
	match->irq_source_id = -1;
	match->kvm = kvm;
	match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;

	list_add(&match->list, &kvm->arch.assigned_dev_head);

	if (!kvm->arch.iommu_domain) {
		r = kvm_iommu_map_guest(kvm);
		if (r)
			goto out_list_del;
	}
	r = kvm_assign_device(kvm, match);
	if (r)
		goto out_list_del;

out:
	srcu_read_unlock(&kvm->srcu, idx);
	mutex_unlock(&kvm->lock);
	return r;
out_list_del:
	if (pci_load_and_free_saved_state(dev, &match->pci_saved_state))
		printk(KERN_INFO "%s: Couldn't reload %s saved state\n",
		       __func__, dev_name(&dev->dev));
	list_del(&match->list);
	pci_release_regions(dev);
out_disable:
	pci_disable_device(dev);
out_put:
	pci_dev_put(dev);
out_free:
	kfree(match);
	srcu_read_unlock(&kvm->srcu, idx);
	mutex_unlock(&kvm->lock);
	return r;
}

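/*
 * A minimal userspace sketch of the corresponding assignment call
 * (illustrative only; vm_fd is an assumed VM file descriptor, the
 * segment/bus/devfn values are examples, and error handling is omitted):
 *
 *	struct kvm_assigned_pci_dev adev = {
 *		.assigned_dev_id = dev_id,
 *		.segnr = 0,
 *		.busnr = 1,
 *		.devfn = PCI_DEVFN(0, 0),
 *		.flags = KVM_DEV_ASSIGN_ENABLE_IOMMU,
 *	};
 *	ioctl(vm_fd, KVM_ASSIGN_PCI_DEVICE, &adev);
 */
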
static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
					struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (!match) {
		printk(KERN_INFO "%s: device hasn't been assigned before, "
		       "so cannot be deassigned\n", __func__);
		r = -EINVAL;
		goto out;
	}

	kvm_deassign_device(kvm, match);

	kvm_free_assigned_device(kvm, match);

out:
	mutex_unlock(&kvm->lock);
	return r;
}

#ifdef __KVM_HAVE_MSIX
static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
				    struct kvm_assigned_msix_nr *entry_nr)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *adev;

	mutex_lock(&kvm->lock);

	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				     entry_nr->assigned_dev_id);
	if (!adev) {
		r = -EINVAL;
		goto msix_nr_out;
	}

	if (adev->entries_nr == 0) {
		adev->entries_nr = entry_nr->entry_nr;
		if (adev->entries_nr == 0 ||
		    adev->entries_nr > KVM_MAX_MSIX_PER_DEV) {
			r = -EINVAL;
			goto msix_nr_out;
		}

		adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
						  entry_nr->entry_nr,
						  GFP_KERNEL);
		if (!adev->host_msix_entries) {
			r = -ENOMEM;
			goto msix_nr_out;
		}
		adev->guest_msix_entries =
			kzalloc(sizeof(struct msix_entry) * entry_nr->entry_nr,
				GFP_KERNEL);
		if (!adev->guest_msix_entries) {
			kfree(adev->host_msix_entries);
			r = -ENOMEM;
			goto msix_nr_out;
		}
	} else /* Setting the MSI-X entry count twice is not allowed */
		r = -EINVAL;
msix_nr_out:
	mutex_unlock(&kvm->lock);
	return r;
}

static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
				       struct kvm_assigned_msix_entry *entry)
{
	int r = 0, i;
	struct kvm_assigned_dev_kernel *adev;

	mutex_lock(&kvm->lock);

	adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				     entry->assigned_dev_id);

	if (!adev) {
		r = -EINVAL;
		goto msix_entry_out;
	}

	for (i = 0; i < adev->entries_nr; i++)
		if (adev->guest_msix_entries[i].vector == 0 ||
		    adev->guest_msix_entries[i].entry == entry->entry) {
			adev->guest_msix_entries[i].entry = entry->entry;
			adev->guest_msix_entries[i].vector = entry->gsi;
			adev->host_msix_entries[i].entry = entry->entry;
			break;
		}
	if (i == adev->entries_nr) {
		r = -ENOSPC;
		goto msix_entry_out;
	}

msix_entry_out:
	mutex_unlock(&kvm->lock);

	return r;
}
#endif

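/*
 * For reference, MSI-X is configured from userspace in two steps before
 * the vectors are enabled (the sequencing is the point being illustrated):
 * KVM_ASSIGN_SET_MSIX_NR fixes the number of vectors once, then one
 * KVM_ASSIGN_SET_MSIX_ENTRY call per vector maps an MSI-X table entry to
 * a guest GSI.  A final KVM_ASSIGN_DEV_IRQ with HOST_MSIX | GUEST_MSIX
 * allocates and requests the host vectors.
 */
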
static int kvm_vm_ioctl_set_pci_irq_mask(struct kvm *kvm,
					 struct kvm_assigned_pci_dev *assigned_dev)
{
	int r = 0;
	struct kvm_assigned_dev_kernel *match;

	mutex_lock(&kvm->lock);

	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
				      assigned_dev->assigned_dev_id);
	if (!match) {
		r = -ENODEV;
		goto out;
	}

	spin_lock(&match->intx_mask_lock);

	match->flags &= ~KVM_DEV_ASSIGN_MASK_INTX;
	match->flags |= assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX;

	if (match->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
		if (assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX) {
			kvm_set_irq(match->kvm, match->irq_source_id,
				    match->guest_irq, 0, false);
			/*
			 * Masking at hardware-level is performed on demand,
			 * i.e. when an IRQ actually arrives at the host.
			 */
		} else if (!(assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
			/*
			 * Unmask the IRQ line if required. Unmasking at
			 * device level will be performed by user space.
			 */
			spin_lock_irq(&match->intx_lock);
			if (match->host_irq_disabled) {
				enable_irq(match->host_irq);
				match->host_irq_disabled = false;
			}
			spin_unlock_irq(&match->intx_lock);
		}
	}

	spin_unlock(&match->intx_mask_lock);

out:
	mutex_unlock(&kvm->lock);
	return r;
}

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
				  unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_ASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_IRQ: {
		r = -EOPNOTSUPP;
		break;
	}
	case KVM_ASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
	case KVM_DEASSIGN_DEV_IRQ: {
		struct kvm_assigned_irq assigned_irq;

		r = -EFAULT;
		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
			goto out;
		r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
		if (r)
			goto out;
		break;
	}
	case KVM_DEASSIGN_PCI_DEVICE: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
		if (r)
			goto out;
		break;
	}
#ifdef __KVM_HAVE_MSIX
	case KVM_ASSIGN_SET_MSIX_NR: {
		struct kvm_assigned_msix_nr entry_nr;
		r = -EFAULT;
		if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
			goto out;
		r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
		if (r)
			goto out;
		break;
	}
	case KVM_ASSIGN_SET_MSIX_ENTRY: {
		struct kvm_assigned_msix_entry entry;
		r = -EFAULT;
		if (copy_from_user(&entry, argp, sizeof entry))
			goto out;
		r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
		if (r)
			goto out;
		break;
	}
#endif
	case KVM_ASSIGN_SET_INTX_MASK: {
		struct kvm_assigned_pci_dev assigned_dev;

		r = -EFAULT;
		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
			goto out;
		r = kvm_vm_ioctl_set_pci_irq_mask(kvm, &assigned_dev);
		break;
	}
	default:
		r = -ENOTTY;
		break;
	}
out:
	return r;
}