Merge git://git.infradead.org/~dwmw2/iommu-2.6.31

* git://git.infradead.org/~dwmw2/iommu-2.6.31:
intel-iommu: Fix enabling snooping feature by mistake
intel-iommu: Mask physical address to correct page size in intel_map_single()
intel-iommu: Correct sglist size calculation.

+13 -12
drivers/pci/intel-iommu.c
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1505,7 +1505,6 @@
 		}
 
 		set_bit(num, iommu->domain_ids);
-		set_bit(iommu->seq_id, &domain->iommu_bmp);
 		iommu->domains[num] = domain;
 		id = num;
 	}
@@ -1647,6 +1648,14 @@
 					tmp->devfn);
 }
 
+/* Returns a number of VTD pages, but aligned to MM page size */
+static inline unsigned long aligned_nrpages(unsigned long host_addr,
+					    size_t size)
+{
+	host_addr &= ~PAGE_MASK;
+	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
+}
+
 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			    struct scatterlist *sg, unsigned long phys_pfn,
 			    unsigned long nr_pages, int prot)
@@ -1682,7 +1675,7 @@
 	uint64_t tmp;
 
 	if (!sg_res) {
-		sg_res = (sg->offset + sg->length + VTD_PAGE_SIZE - 1) >> VTD_PAGE_SHIFT;
+		sg_res = aligned_nrpages(sg->offset, sg->length);
 		sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
 		sg->dma_length = sg->length;
 		pteval = page_to_phys(sg_page(sg)) | prot;
@@ -2422,14 +2415,6 @@
 	return ret;
 }
 
-/* Returns a number of VTD pages, but aligned to MM page size */
-static inline unsigned long aligned_nrpages(unsigned long host_addr,
-					    size_t size)
-{
-	host_addr &= ~PAGE_MASK;
-	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
-}
-
 /* This takes a number of _MM_ pages, not VTD pages */
 static struct iova *intel_alloc_iova(struct device *dev,
 				     struct dmar_domain *domain,
@@ -2550,6 +2551,7 @@
 	int prot = 0;
 	int ret;
 	struct intel_iommu *iommu;
+	unsigned long paddr_pfn = paddr >> PAGE_SHIFT;
 
 	BUG_ON(dir == DMA_NONE);
 
@@ -2585,7 +2585,8 @@
 	 * is not a big problem
 	 */
 	ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
-				 paddr >> VTD_PAGE_SHIFT, size, prot);
+				 mm_to_dma_pfn(paddr_pfn), size,
+				 prot);
 	if (ret)
 		goto error;
 
@@ -2875,7 +2875,7 @@
 
 	start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
 
-	ret = domain_sg_mapping(domain, start_vpfn, sglist, mm_to_dma_pfn(size), prot);
+	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
 	if (unlikely(ret)) {
 		/* clear the page */
 		dma_pte_clear_range(domain, start_vpfn,
@@ -3408,6 +3409,7 @@
 
 	domain->iommu_count = 0;
 	domain->iommu_coherency = 0;
+	domain->iommu_snooping = 0;
 	domain->max_addr = 0;
 
 	/* always allocate the top pgd */