VT-d: adapt domain map and unmap functions for IOMMU API

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>

+20 -17
+20 -13
drivers/pci/intel-iommu.c
@@ -3047,20 +3047,28 @@
 	vm_domain_remove_one_dev_info(dmar_domain, pdev);
 }
 
-int intel_iommu_map_address(struct dmar_domain *domain, dma_addr_t iova,
-			    u64 hpa, size_t size, int prot)
+static int intel_iommu_map_range(struct iommu_domain *domain,
+				 unsigned long iova, phys_addr_t hpa,
+				 size_t size, int iommu_prot)
 {
+	struct dmar_domain *dmar_domain = domain->priv;
 	u64 max_addr;
 	int addr_width;
+	int prot = 0;
 	int ret;
 
+	if (iommu_prot & IOMMU_READ)
+		prot |= DMA_PTE_READ;
+	if (iommu_prot & IOMMU_WRITE)
+		prot |= DMA_PTE_WRITE;
+
 	max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
-	if (domain->max_addr < max_addr) {
+	if (dmar_domain->max_addr < max_addr) {
 		int min_agaw;
 		u64 end;
 
 		/* check if minimum agaw is sufficient for mapped address */
-		min_agaw = vm_domain_min_agaw(domain);
+		min_agaw = vm_domain_min_agaw(dmar_domain);
 		addr_width = agaw_to_width(min_agaw);
 		end = DOMAIN_MAX_ADDR(addr_width);
 		end = end & VTD_PAGE_MASK;
@@ -3070,28 +3078,27 @@
 			       __func__, min_agaw, max_addr);
 			return -EFAULT;
 		}
-		domain->max_addr = max_addr;
+		dmar_domain->max_addr = max_addr;
 	}
 
-	ret = domain_page_mapping(domain, iova, hpa, size, prot);
+	ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_map_address);
 
-void intel_iommu_unmap_address(struct dmar_domain *domain,
-			       dma_addr_t iova, size_t size)
+static void intel_iommu_unmap_range(struct iommu_domain *domain,
+				    unsigned long iova, size_t size)
 {
+	struct dmar_domain *dmar_domain = domain->priv;
 	dma_addr_t base;
 
 	/* The address might not be aligned */
 	base = iova & VTD_PAGE_MASK;
 	size = VTD_PAGE_ALIGN(size);
-	dma_pte_clear_range(domain, base, base + size);
+	dma_pte_clear_range(dmar_domain, base, base + size);
 
-	if (domain->max_addr == base + size)
-		domain->max_addr = base;
+	if (dmar_domain->max_addr == base + size)
+		dmar_domain->max_addr = base;
 }
-EXPORT_SYMBOL_GPL(intel_iommu_unmap_address);
 
 int intel_iommu_found(void)
 {
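Note: with the two range functions made static and their EXPORT_SYMBOL_GPL lines dropped, callers can only reach them through the generic IOMMU layer. A minimal sketch of how that wiring would presumably look, assuming the struct iommu_ops callback names (.map/.unmap) and the register_iommu() helper introduced elsewhere in this IOMMU-API series; the wrapper function name is illustrative and the remaining VT-d callbacks are omitted because they are not part of this patch:

#include <linux/iommu.h>

/* Sketch only: expose the static range functions through the generic
 * IOMMU API.  The other callbacks (domain init/destroy, attach/detach,
 * iova_to_phys) would be filled in by the rest of this series. */
static struct iommu_ops intel_iommu_ops = {
	.map	= intel_iommu_map_range,
	.unmap	= intel_iommu_unmap_range,
};

static void __init intel_iommu_register_ops(void)
{
	register_iommu(&intel_iommu_ops);
}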
-4
include/linux/intel-iommu.h
@@ -330,10 +330,6 @@
 
 extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 
-int intel_iommu_map_address(struct dmar_domain *domain, dma_addr_t iova,
-			    u64 hpa, size_t size, int prot);
-void intel_iommu_unmap_address(struct dmar_domain *domain,
-			       dma_addr_t iova, size_t size);
 u64 intel_iommu_iova_to_phys(struct dmar_domain *domain, u64 iova);
 
 #ifdef CONFIG_DMAR
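With the intel_iommu_map_address()/intel_iommu_unmap_address() prototypes removed from the header, users such as KVM device assignment would be expected to go through the generic wrappers instead. A caller-side sketch, assuming the iommu_map_range()/iommu_unmap_range() prototypes and the IOMMU_READ/IOMMU_WRITE flags from include/linux/iommu.h in this series, and an iommu_domain that has already been allocated and attached; the example_* names are purely illustrative:

#include <linux/iommu.h>

/* Illustrative only: map and unmap one page through the generic API
 * rather than the removed intel_iommu_* exports. */
static int example_map_one_page(struct iommu_domain *domain,
				unsigned long iova, phys_addr_t paddr)
{
	int prot = IOMMU_READ | IOMMU_WRITE;

	/* Maps PAGE_SIZE bytes at iova to paddr; the VT-d backend above
	 * translates prot into DMA_PTE_READ/DMA_PTE_WRITE. */
	return iommu_map_range(domain, iova, paddr, PAGE_SIZE, prot);
}

static void example_unmap_one_page(struct iommu_domain *domain,
				   unsigned long iova)
{
	iommu_unmap_range(domain, iova, PAGE_SIZE);
}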