Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

iommu: Make IOVA domain page size explicit

Systems may contain heterogeneous IOMMUs supporting differing minimum
page sizes, which may also not be common with the CPU page size.
Thus it is practical to have an explicit notion of IOVA granularity
to simplify handling of mapping and allocation constraints.

As an initial step, move the IOVA page granularity from an implicit
compile-time constant to a per-domain property so we can make use
of it in IOVA domain context at runtime. To keep the abstraction tidy,
extend the little API of inline iova_* helpers to parallel some of the
equivalent PAGE_* macros.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>

Authored by Robin Murphy; committed by Joerg Roedel.
0fb5fe87 1b722500

+49 -7
+6 -3
drivers/iommu/intel-iommu.c
··· 1635 1635 struct iova *iova; 1636 1636 int i; 1637 1637 1638 - init_iova_domain(&reserved_iova_list, IOVA_START_PFN, DMA_32BIT_PFN); 1638 + init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN, 1639 + DMA_32BIT_PFN); 1639 1640 1640 1641 lockdep_set_class(&reserved_iova_list.iova_rbtree_lock, 1641 1642 &reserved_rbtree_key); ··· 1694 1693 int adjust_width, agaw; 1695 1694 unsigned long sagaw; 1696 1695 1697 - init_iova_domain(&domain->iovad, IOVA_START_PFN, DMA_32BIT_PFN); 1696 + init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN, 1697 + DMA_32BIT_PFN); 1698 1698 domain_reserve_special_ranges(domain); 1699 1699 1700 1700 /* calculate AGAW */ ··· 4318 4316 { 4319 4317 int adjust_width; 4320 4318 4321 - init_iova_domain(&domain->iovad, IOVA_START_PFN, DMA_32BIT_PFN); 4319 + init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN, 4320 + DMA_32BIT_PFN); 4322 4321 domain_reserve_special_ranges(domain); 4323 4322 4324 4323 /* calculate AGAW */
+10 -2
drivers/iommu/iova.c
··· 55 55 } 56 56 57 57 void 58 - init_iova_domain(struct iova_domain *iovad, unsigned long start_pfn, 59 - unsigned long pfn_32bit) 58 + init_iova_domain(struct iova_domain *iovad, unsigned long granule, 59 + unsigned long start_pfn, unsigned long pfn_32bit) 60 60 { 61 + /* 62 + * IOVA granularity will normally be equal to the smallest 63 + * supported IOMMU page size; both *must* be capable of 64 + * representing individual CPU pages exactly. 65 + */ 66 + BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule)); 67 + 61 68 spin_lock_init(&iovad->iova_rbtree_lock); 62 69 iovad->rbroot = RB_ROOT; 63 70 iovad->cached32_node = NULL; 71 + iovad->granule = granule; 64 72 iovad->start_pfn = start_pfn; 65 73 iovad->dma_32bit_pfn = pfn_32bit; 66 74 }
+33 -2
include/linux/iova.h
··· 28 28 spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */ 29 29 struct rb_root rbroot; /* iova domain rbtree root */ 30 30 struct rb_node *cached32_node; /* Save last alloced node */ 31 + unsigned long granule; /* pfn granularity for this domain */ 31 32 unsigned long start_pfn; /* Lower limit for this domain */ 32 33 unsigned long dma_32bit_pfn; 33 34 }; ··· 36 35 static inline unsigned long iova_size(struct iova *iova) 37 36 { 38 37 return iova->pfn_hi - iova->pfn_lo + 1; 38 + } 39 + 40 + static inline unsigned long iova_shift(struct iova_domain *iovad) 41 + { 42 + return __ffs(iovad->granule); 43 + } 44 + 45 + static inline unsigned long iova_mask(struct iova_domain *iovad) 46 + { 47 + return iovad->granule - 1; 48 + } 49 + 50 + static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova) 51 + { 52 + return iova & iova_mask(iovad); 53 + } 54 + 55 + static inline size_t iova_align(struct iova_domain *iovad, size_t size) 56 + { 57 + return ALIGN(size, iovad->granule); 58 + } 59 + 60 + static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova) 61 + { 62 + return (dma_addr_t)iova->pfn_lo << iova_shift(iovad); 63 + } 64 + 65 + static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova) 66 + { 67 + return iova >> iova_shift(iovad); 39 68 } 40 69 41 70 int iommu_iova_cache_init(void); ··· 81 50 struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, 82 51 unsigned long pfn_hi); 83 52 void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to); 84 - void init_iova_domain(struct iova_domain *iovad, unsigned long start_pfn, 85 - unsigned long pfn_32bit); 53 + void init_iova_domain(struct iova_domain *iovad, unsigned long granule, 54 + unsigned long start_pfn, unsigned long pfn_32bit); 86 55 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); 87 56 void put_iova_domain(struct iova_domain *iovad); 88 57 struct iova 
*split_and_remove_iova(struct iova_domain *iovad,