VT-d: adapt domain map and unmap functions for IOMMU API

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>

2 files changed: +20 -17

drivers/pci/intel-iommu.c (+20 -13)
···
 	vm_domain_remove_one_dev_info(dmar_domain, pdev);
 }

-int intel_iommu_map_address(struct dmar_domain *domain, dma_addr_t iova,
-			    u64 hpa, size_t size, int prot)
+static int intel_iommu_map_range(struct iommu_domain *domain,
+				 unsigned long iova, phys_addr_t hpa,
+				 size_t size, int iommu_prot)
 {
+	struct dmar_domain *dmar_domain = domain->priv;
 	u64 max_addr;
 	int addr_width;
+	int prot = 0;
 	int ret;

+	if (iommu_prot & IOMMU_READ)
+		prot |= DMA_PTE_READ;
+	if (iommu_prot & IOMMU_WRITE)
+		prot |= DMA_PTE_WRITE;
+
 	max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
-	if (domain->max_addr < max_addr) {
+	if (dmar_domain->max_addr < max_addr) {
 		int min_agaw;
 		u64 end;

 		/* check if minimum agaw is sufficient for mapped address */
-		min_agaw = vm_domain_min_agaw(domain);
+		min_agaw = vm_domain_min_agaw(dmar_domain);
 		addr_width = agaw_to_width(min_agaw);
 		end = DOMAIN_MAX_ADDR(addr_width);
 		end = end & VTD_PAGE_MASK;
···
 			       __func__, min_agaw, max_addr);
 			return -EFAULT;
 		}
-		domain->max_addr = max_addr;
+		dmar_domain->max_addr = max_addr;
 	}

-	ret = domain_page_mapping(domain, iova, hpa, size, prot);
+	ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_map_address);

-void intel_iommu_unmap_address(struct dmar_domain *domain,
-			       dma_addr_t iova, size_t size)
+static void intel_iommu_unmap_range(struct iommu_domain *domain,
+				    unsigned long iova, size_t size)
 {
+	struct dmar_domain *dmar_domain = domain->priv;
 	dma_addr_t base;

 	/* The address might not be aligned */
 	base = iova & VTD_PAGE_MASK;
 	size = VTD_PAGE_ALIGN(size);
-	dma_pte_clear_range(domain, base, base + size);
+	dma_pte_clear_range(dmar_domain, base, base + size);

-	if (domain->max_addr == base + size)
-		domain->max_addr = base;
+	if (dmar_domain->max_addr == base + size)
+		dmar_domain->max_addr = base;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_unmap_address);

 int intel_iommu_found(void)
 {
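The two helpers become static and lose their EXPORT_SYMBOL_GPL lines, so they are no longer callable by name from other modules; instead they take a struct iommu_domain and are meant to be reached through the generic IOMMU API. The registration itself is not part of this diff. The sketch below shows one plausible wiring into a struct iommu_ops; only the .map and .unmap entries come from this patch, while the other callback names and the intel_iommu_ops/register_iommu usage are assumptions about a companion change in the same series.

/*
 * Sketch only: how the new static functions could be exposed through the
 * generic IOMMU API. The surrounding callbacks and the registration call
 * are assumed, not taken from this diff.
 */
static struct iommu_ops intel_iommu_ops = {
	.domain_init	= intel_iommu_domain_init,
	.domain_destroy	= intel_iommu_domain_destroy,
	.attach_dev	= intel_iommu_attach_device,
	.detach_dev	= intel_iommu_detach_device,
	.map		= intel_iommu_map_range,	/* added above */
	.unmap		= intel_iommu_unmap_range,	/* added above */
	.iova_to_phys	= intel_iommu_iova_to_phys,
};

/* somewhere in driver init, presumably: register_iommu(&intel_iommu_ops); */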
include/linux/intel-iommu.h (+0 -4)
···

 extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);

-int intel_iommu_map_address(struct dmar_domain *domain, dma_addr_t iova,
-			    u64 hpa, size_t size, int prot);
-void intel_iommu_unmap_address(struct dmar_domain *domain,
-			       dma_addr_t iova, size_t size);
 u64 intel_iommu_iova_to_phys(struct dmar_domain *domain, u64 iova);

 #ifdef CONFIG_DMAR
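With the prototypes removed from intel-iommu.h, out-of-driver users (KVM device assignment was the primary one) map and unmap through the generic API in include/linux/iommu.h rather than through VT-d-specific exports. Below is a minimal, hypothetical caller written against the generic entry points of this era, iommu_map_range()/iommu_unmap_range(); the function names example_map_one_page/example_unmap_one_page are illustrative only and error handling is reduced to a bare return.

#include <linux/iommu.h>
#include <linux/mm.h>

/*
 * Hypothetical consumer of the generic IOMMU API that replaces the removed
 * intel_iommu_map_address()/intel_iommu_unmap_address() exports.
 */
static int example_map_one_page(struct iommu_domain *domain,
				unsigned long iova, phys_addr_t paddr)
{
	/* IOMMU_READ/IOMMU_WRITE are translated to DMA_PTE_* by the driver */
	return iommu_map_range(domain, iova, paddr, PAGE_SIZE,
			       IOMMU_READ | IOMMU_WRITE);
}

static void example_unmap_one_page(struct iommu_domain *domain,
				   unsigned long iova)
{
	iommu_unmap_range(domain, iova, PAGE_SIZE);
}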