x86/amd-iommu: Fix rounding-bug in __unmap_single

In the __unmap_single function the dma_addr is rounded down
to a page boundary before the DMA pages are unmapped. The
rounded address is later also used to flush the TLB entries
for that mapping. But without the offset into the DMA page
the number of pages to flush may be miscalculated in the TLB
flushing path. Fix this bug by remembering the original
address and using it for the TLB flush.
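
To make the miscalculation concrete, here is a small userspace
sketch (not part of the patch). num_pages() is a simplified
stand-in for the kernel's iommu_num_pages() helper and the
addresses are made up: an unaligned mapping that crosses a page
boundary spans two pages, but computing the flush size from the
rounded-down address only accounts for one of them.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Simplified stand-in for iommu_num_pages(): number of IO pages
 * touched by the range [addr, addr + len). */
static unsigned long num_pages(unsigned long addr, unsigned long len)
{
	unsigned long span = (addr & (PAGE_SIZE - 1)) + len;

	return (span + PAGE_SIZE - 1) / PAGE_SIZE;
}

int main(void)
{
	unsigned long dma_addr = 0x10ff0;	/* unaligned address ...           */
	unsigned long size     = 0x40;		/* ... crossing into the next page */

	/* The unmap path counts pages from the original address. */
	unsigned long unmapped = num_pages(dma_addr, size);

	/* Flushing from the rounded-down address sees fewer pages. */
	unsigned long flushed = num_pages(dma_addr & PAGE_MASK, size);

	printf("pages unmapped: %lu, pages flushed: %lu\n",
	       unmapped, flushed);	/* prints 2 vs. 1 */

	return 0;
}

Keeping the original address in flush_addr (as the hunks below do)
lets the flush path see the same two pages as the unmap path, so no
stale IOTLB entry survives the unmap.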

Cc: stable@kernel.org
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>

+3 -1
arch/x86/kernel/amd_iommu.c
@@ -1953,6 +1953,7 @@
 			   size_t size,
 			   int dir)
 {
+	dma_addr_t flush_addr;
 	dma_addr_t i, start;
 	unsigned int pages;
 
@@ -1961,6 +1962,7 @@
 	    (dma_addr + size > dma_dom->aperture_size))
 		return;
 
+	flush_addr = dma_addr;
 	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
 	dma_addr &= PAGE_MASK;
 	start = dma_addr;
@@ -1976,7 +1978,7 @@
 	dma_ops_free_addresses(dma_dom, dma_addr, pages);
 
 	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
-		iommu_flush_pages(&dma_dom->domain, dma_addr, size);
+		iommu_flush_pages(&dma_dom->domain, flush_addr, size);
 		dma_dom->need_flush = false;
 	}
 }