Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'vfio-v3.16-rc1' of git://github.com/awilliam/linux-vfio into next

Pull VFIO updates from Alex Williamson:
"A handful of VFIO bug fixes for v3.16"

* tag 'vfio-v3.16-rc1' of git://github.com/awilliam/linux-vfio:
drivers/vfio/pci: Fix wrong MSI interrupt count
drivers/vfio: Rework offsetofend()
vfio/iommu_type1: Avoid overflow
vfio/pci: Fix unchecked return value
vfio/pci: Fix sizing of DPA and TPH express capabilities

+26 -37
+3 -3
drivers/vfio/pci/vfio_pci.c
··· 57 57 58 58 ret = vfio_config_init(vdev); 59 59 if (ret) { 60 - pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state); 60 + kfree(vdev->pci_saved_state); 61 + vdev->pci_saved_state = NULL; 61 62 pci_disable_device(pdev); 62 63 return ret; 63 64 } ··· 197 196 if (pos) { 198 197 pci_read_config_word(vdev->pdev, 199 198 pos + PCI_MSI_FLAGS, &flags); 200 - 201 - return 1 << (flags & PCI_MSI_FLAGS_QMASK); 199 + return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1); 202 200 } 203 201 } else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) { 204 202 u8 pos;
+3 -4
drivers/vfio/pci/vfio_pci_config.c
··· 1126 1126 return pcibios_err_to_errno(ret); 1127 1127 1128 1128 byte &= PCI_DPA_CAP_SUBSTATE_MASK; 1129 - byte = round_up(byte + 1, 4); 1130 - return PCI_DPA_BASE_SIZEOF + byte; 1129 + return PCI_DPA_BASE_SIZEOF + byte + 1; 1131 1130 case PCI_EXT_CAP_ID_TPH: 1132 1131 ret = pci_read_config_dword(pdev, epos + PCI_TPH_CAP, &dword); 1133 1132 if (ret) ··· 1135 1136 if ((dword & PCI_TPH_CAP_LOC_MASK) == PCI_TPH_LOC_CAP) { 1136 1137 int sts; 1137 1138 1138 - sts = byte & PCI_TPH_CAP_ST_MASK; 1139 + sts = dword & PCI_TPH_CAP_ST_MASK; 1139 1140 sts >>= PCI_TPH_CAP_ST_SHIFT; 1140 - return PCI_TPH_BASE_SIZEOF + round_up(sts * 2, 4); 1141 + return PCI_TPH_BASE_SIZEOF + (sts * 2) + 2; 1141 1142 } 1142 1143 return PCI_TPH_BASE_SIZEOF; 1143 1144 default:
+18 -27
drivers/vfio/vfio_iommu_type1.c
··· 524 524 static int vfio_dma_do_map(struct vfio_iommu *iommu, 525 525 struct vfio_iommu_type1_dma_map *map) 526 526 { 527 - dma_addr_t end, iova; 527 + dma_addr_t iova = map->iova; 528 528 unsigned long vaddr = map->vaddr; 529 529 size_t size = map->size; 530 530 long npage; ··· 533 533 struct vfio_dma *dma; 534 534 unsigned long pfn; 535 535 536 - end = map->iova + map->size; 536 + /* Verify that none of our __u64 fields overflow */ 537 + if (map->size != size || map->vaddr != vaddr || map->iova != iova) 538 + return -EINVAL; 537 539 538 540 mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1; 541 + 542 + WARN_ON(mask & PAGE_MASK); 539 543 540 544 /* READ/WRITE from device perspective */ 541 545 if (map->flags & VFIO_DMA_MAP_FLAG_WRITE) ··· 547 543 if (map->flags & VFIO_DMA_MAP_FLAG_READ) 548 544 prot |= IOMMU_READ; 549 545 550 - if (!prot) 551 - return -EINVAL; /* No READ/WRITE? */ 552 - 553 - if (vaddr & mask) 554 - return -EINVAL; 555 - if (map->iova & mask) 556 - return -EINVAL; 557 - if (!map->size || map->size & mask) 546 + if (!prot || !size || (size | iova | vaddr) & mask) 558 547 return -EINVAL; 559 548 560 - WARN_ON(mask & PAGE_MASK); 561 - 562 - /* Don't allow IOVA wrap */ 563 - if (end && end < map->iova) 564 - return -EINVAL; 565 - 566 - /* Don't allow virtual address wrap */ 567 - if (vaddr + map->size && vaddr + map->size < vaddr) 549 + /* Don't allow IOVA or virtual address wrap */ 550 + if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) 568 551 return -EINVAL; 569 552 570 553 mutex_lock(&iommu->lock); 571 554 572 - if (vfio_find_dma(iommu, map->iova, map->size)) { 555 + if (vfio_find_dma(iommu, iova, size)) { 573 556 mutex_unlock(&iommu->lock); 574 557 return -EEXIST; 575 558 } ··· 567 576 return -ENOMEM; 568 577 } 569 578 570 - dma->iova = map->iova; 571 - dma->vaddr = map->vaddr; 579 + dma->iova = iova; 580 + dma->vaddr = vaddr; 572 581 dma->prot = prot; 573 582 574 583 /* Insert zero-sized and grow as we map chunks of it */ 575 
584 vfio_link_dma(iommu, dma); 576 585 577 - for (iova = map->iova; iova < end; iova += size, vaddr += size) { 586 + while (size) { 578 587 /* Pin a contiguous chunk of memory */ 579 - npage = vfio_pin_pages(vaddr, (end - iova) >> PAGE_SHIFT, 580 - prot, &pfn); 588 + npage = vfio_pin_pages(vaddr + dma->size, 589 + size >> PAGE_SHIFT, prot, &pfn); 581 590 if (npage <= 0) { 582 591 WARN_ON(!npage); 583 592 ret = (int)npage; ··· 585 594 } 586 595 587 596 /* Map it! */ 588 - ret = vfio_iommu_map(iommu, iova, pfn, npage, prot); 597 + ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage, prot); 589 598 if (ret) { 590 599 vfio_unpin_pages(pfn, npage, prot, true); 591 600 break; 592 601 } 593 602 594 - size = npage << PAGE_SHIFT; 595 - dma->size += size; 603 + size -= npage << PAGE_SHIFT; 604 + dma->size += npage << PAGE_SHIFT; 596 605 } 597 606 598 607 if (ret)
+2 -3
include/linux/vfio.h
··· 86 86 * from user space. This allows us to easily determine if the provided 87 87 * structure is sized to include various fields. 88 88 */ 89 - #define offsetofend(TYPE, MEMBER) ({ \ 90 - TYPE tmp; \ 91 - offsetof(TYPE, MEMBER) + sizeof(tmp.MEMBER); }) \ 89 + #define offsetofend(TYPE, MEMBER) \ 90 + (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER)) 92 91 93 92 /* 94 93 * External user API