Merge git://git.infradead.org/~dwmw2/iommu-2.6.31

* git://git.infradead.org/~dwmw2/iommu-2.6.31:
  intel-iommu: Fix enabling snooping feature by mistake
  intel-iommu: Mask physical address to correct page size in intel_map_single()
  intel-iommu: Correct sglist size calculation.

 drivers/pci/intel-iommu.c | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)
···
 			}
 
 			set_bit(num, iommu->domain_ids);
-			set_bit(iommu->seq_id, &domain->iommu_bmp);
 			iommu->domains[num] = domain;
 			id = num;
 		}
···
 					     tmp->devfn);
 }
 
+/* Returns a number of VTD pages, but aligned to MM page size */
+static inline unsigned long aligned_nrpages(unsigned long host_addr,
+					    size_t size)
+{
+	host_addr &= ~PAGE_MASK;
+	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
+}
+
 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			    struct scatterlist *sg, unsigned long phys_pfn,
 			    unsigned long nr_pages, int prot)
···
 		uint64_t tmp;
 
 		if (!sg_res) {
-			sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
+			sg_res = aligned_nrpages(sg->offset, sg->length);
 			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
 			sg->dma_length = sg->length;
 			pteval = page_to_phys(sg_page(sg)) | prot;
···
 	return ret;
 }
 
-/* Returns a number of VTD pages, but aligned to MM page size */
-static inline unsigned long aligned_nrpages(unsigned long host_addr,
-					    size_t size)
-{
-	host_addr &= ~PAGE_MASK;
-	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
-}
-
 /* This takes a number of _MM_ pages, not VTD pages */
 static struct iova *intel_alloc_iova(struct device *dev,
 				     struct dmar_domain *domain,
···
 	int prot = 0;
 	int ret;
 	struct intel_iommu *iommu;
+	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
 
 	BUG_ON(dir == DMA_NONE);
 
···
 	 * is not a big problem
 	 */
 	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
-				 paddr >> VTD_PAGE_SHIFT, size, prot);
+				 mm_to_dma_pfn(paddr_pfn), size, prot);
 	if (ret)
 		goto error;
 
···
 
 	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
 
-	ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
+	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
 	if (unlikely(ret)) {
 		/* clear the page */
 		dma_pte_clear_range(domain, start_vpfn,
···
 
 	domain->iommu_count = 0;
 	domain->iommu_coherency = 0;
+	domain->iommu_snooping = 0;
 	domain->max_addr = 0;
 
 	/* always allocate the top pgd */
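
For context on the last two fixes, a minimal user-space sketch follows (not kernel code; the demo_* names and the 16KiB MM page / 4KiB VT-d page constants are assumptions, an IA-64-like configuration chosen so the difference is visible). It mirrors aligned_nrpages() against the open-coded VT-d round-up that the sglist fix removes, and mirrors the mm_to_dma_pfn(paddr >> PAGE_SHIFT) masking that intel_map_single() now uses instead of paddr >> VTD_PAGE_SHIFT.

#include <stdio.h>

/*
 * Illustrative constants only (assumption): 16KiB MM pages, as on IA-64,
 * and 4KiB VT-d pages.  The kernel takes PAGE_* from the architecture
 * and VTD_PAGE_* from the IOMMU driver.
 */
#define DEMO_PAGE_SHIFT		14UL
#define DEMO_PAGE_SIZE		(1UL << DEMO_PAGE_SHIFT)
#define DEMO_PAGE_MASK		(~(DEMO_PAGE_SIZE - 1))
#define DEMO_PAGE_ALIGN(x)	(((x) + DEMO_PAGE_SIZE - 1) & DEMO_PAGE_MASK)

#define DEMO_VTD_PAGE_SHIFT	12UL
#define DEMO_VTD_PAGE_SIZE	(1UL << DEMO_VTD_PAGE_SHIFT)

/* Mirrors aligned_nrpages(): VT-d pages, rounded up to the MM page size. */
static unsigned long demo_aligned_nrpages(unsigned long host_addr, unsigned long size)
{
	host_addr &= ~DEMO_PAGE_MASK;	/* keep only the in-page offset */
	return DEMO_PAGE_ALIGN(host_addr + size) >> DEMO_VTD_PAGE_SHIFT;
}

/* The open-coded calculation the sglist fix removes: VT-d round-up only. */
static unsigned long demo_old_sg_res(unsigned long offset, unsigned long length)
{
	return (offset + length + DEMO_VTD_PAGE_SIZE - 1) >> DEMO_VTD_PAGE_SHIFT;
}

/* Mirrors mm_to_dma_pfn(): converts an MM pfn to a VT-d pfn. */
static unsigned long demo_mm_to_dma_pfn(unsigned long mm_pfn)
{
	return mm_pfn << (DEMO_PAGE_SHIFT - DEMO_VTD_PAGE_SHIFT);
}

int main(void)
{
	/* An sg element starting 0x800 bytes into a page, 0x1000 bytes long. */
	unsigned long offset = 0x800, length = 0x1000;
	/* An arbitrary physical address for the intel_map_single() case. */
	unsigned long paddr = 0x12345678UL;

	printf("old sg round-up:  %lu VT-d pages\n", demo_old_sg_res(offset, length));
	printf("aligned_nrpages:  %lu VT-d pages\n", demo_aligned_nrpages(offset, length));

	printf("old phys pfn:     0x%lx\n", paddr >> DEMO_VTD_PAGE_SHIFT);
	printf("masked phys pfn:  0x%lx\n", demo_mm_to_dma_pfn(paddr >> DEMO_PAGE_SHIFT));
	return 0;
}

With these assumed page sizes the sg element is counted as 4 VT-d pages (one full 16KiB MM page) rather than 2, matching the MM-page-aligned IOVA allocation, and the physical pfn is truncated to the 16KiB boundary (0x12344 instead of 0x12345) before mapping.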