Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

iommu: Add calls for IOMMU_DEBUG_PAGEALLOC

Add calls for the new iommu debug config IOMMU_DEBUG_PAGEALLOC:
- iommu_debug_init: Enable the debug mode if configured by the user.
- iommu_debug_map: Track iommu pages mapped, using physical address.
- iommu_debug_unmap_begin: Track start of iommu unmap operation, with
IOVA and size.
- iommu_debug_unmap_end: Track the end of unmap operation, passing the
actual unmapped size versus the tracked one at unmap_begin.

We have to split the tracking into unmap_begin/unmap_end because once
pages are unmapped we lose the physical address information.
This is racy, but the API is racy by construction: it uses refcounts
and doesn't attempt to lock/synchronize with the IOMMU API, as that
would be costly. This means false negatives are possible.

Reviewed-by: Samiullah Khawaja <skhawaja@google.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Pranjal Shrivastava <praan@google.com>
Signed-off-by: Mostafa Saleh <smostafa@google.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>

authored by

Mostafa Saleh and committed by
Joerg Roedel
ccc21213 466ae697

+96 -2
+28
drivers/iommu/iommu-debug-pagealloc.c
··· 5 5 * IOMMU API debug page alloc sanitizer 6 6 */ 7 7 #include <linux/atomic.h> 8 + #include <linux/iommu.h> 8 9 #include <linux/iommu-debug-pagealloc.h> 9 10 #include <linux/kernel.h> 10 11 #include <linux/page_ext.h> 11 12 13 + #include "iommu-priv.h" 14 + 12 15 static bool needed; 16 + DEFINE_STATIC_KEY_FALSE(iommu_debug_initialized); 13 17 14 18 struct iommu_debug_metadata { 15 19 atomic_t ref; ··· 28 24 .size = sizeof(struct iommu_debug_metadata), 29 25 .need = need_iommu_debug, 30 26 }; 27 + 28 + void __iommu_debug_map(struct iommu_domain *domain, phys_addr_t phys, size_t size) 29 + { 30 + } 31 + 32 + void __iommu_debug_unmap_begin(struct iommu_domain *domain, 33 + unsigned long iova, size_t size) 34 + { 35 + } 36 + 37 + void __iommu_debug_unmap_end(struct iommu_domain *domain, 38 + unsigned long iova, size_t size, 39 + size_t unmapped) 40 + { 41 + } 42 + 43 + void iommu_debug_init(void) 44 + { 45 + if (!needed) 46 + return; 47 + 48 + pr_info("iommu: Debugging page allocations, expect overhead or disable iommu.debug_pagealloc"); 49 + static_branch_enable(&iommu_debug_initialized); 50 + } 31 51 32 52 static int __init iommu_debug_pagealloc(char *str) 33 53 {
+58
drivers/iommu/iommu-priv.h
··· 5 5 #define __LINUX_IOMMU_PRIV_H 6 6 7 7 #include <linux/iommu.h> 8 + #include <linux/iommu-debug-pagealloc.h> 8 9 #include <linux/msi.h> 9 10 10 11 static inline const struct iommu_ops *dev_iommu_ops(struct device *dev) ··· 66 65 int iommu_replace_device_pasid(struct iommu_domain *domain, 67 66 struct device *dev, ioasid_t pasid, 68 67 struct iommu_attach_handle *handle); 68 + 69 + #ifdef CONFIG_IOMMU_DEBUG_PAGEALLOC 70 + 71 + void __iommu_debug_map(struct iommu_domain *domain, phys_addr_t phys, 72 + size_t size); 73 + void __iommu_debug_unmap_begin(struct iommu_domain *domain, 74 + unsigned long iova, size_t size); 75 + void __iommu_debug_unmap_end(struct iommu_domain *domain, 76 + unsigned long iova, size_t size, size_t unmapped); 77 + 78 + static inline void iommu_debug_map(struct iommu_domain *domain, 79 + phys_addr_t phys, size_t size) 80 + { 81 + if (static_branch_unlikely(&iommu_debug_initialized)) 82 + __iommu_debug_map(domain, phys, size); 83 + } 84 + 85 + static inline void iommu_debug_unmap_begin(struct iommu_domain *domain, 86 + unsigned long iova, size_t size) 87 + { 88 + if (static_branch_unlikely(&iommu_debug_initialized)) 89 + __iommu_debug_unmap_begin(domain, iova, size); 90 + } 91 + 92 + static inline void iommu_debug_unmap_end(struct iommu_domain *domain, 93 + unsigned long iova, size_t size, 94 + size_t unmapped) 95 + { 96 + if (static_branch_unlikely(&iommu_debug_initialized)) 97 + __iommu_debug_unmap_end(domain, iova, size, unmapped); 98 + } 99 + 100 + void iommu_debug_init(void); 101 + 102 + #else 103 + static inline void iommu_debug_map(struct iommu_domain *domain, 104 + phys_addr_t phys, size_t size) 105 + { 106 + } 107 + 108 + static inline void iommu_debug_unmap_begin(struct iommu_domain *domain, 109 + unsigned long iova, size_t size) 110 + { 111 + } 112 + 113 + static inline void iommu_debug_unmap_end(struct iommu_domain *domain, 114 + unsigned long iova, size_t size, 115 + size_t unmapped) 116 + { 117 + } 118 + 119 + static inline 
void iommu_debug_init(void) 120 + { 121 + } 122 + 123 + #endif /* CONFIG_IOMMU_DEBUG_PAGEALLOC */ 124 + 69 125 #endif /* __LINUX_IOMMU_PRIV_H */
+9 -2
drivers/iommu/iommu.c
··· 237 237 if (!nb) 238 238 return -ENOMEM; 239 239 240 + iommu_debug_init(); 241 + 240 242 for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) { 241 243 nb[i].notifier_call = iommu_bus_notifier; 242 244 bus_register_notifier(iommu_buses[i], &nb[i]); ··· 2631 2629 } 2632 2630 2633 2631 /* unroll mapping in case something went wrong */ 2634 - if (ret) 2632 + if (ret) { 2635 2633 iommu_unmap(domain, orig_iova, orig_size - size); 2636 - else 2634 + } else { 2637 2635 trace_map(orig_iova, orig_paddr, orig_size); 2636 + iommu_debug_map(domain, orig_paddr, orig_size); 2637 + } 2638 2638 2639 2639 return ret; 2640 2640 } ··· 2698 2694 2699 2695 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size); 2700 2696 2697 + iommu_debug_unmap_begin(domain, iova, size); 2698 + 2701 2699 /* 2702 2700 * Keep iterating until we either unmap 'size' bytes (or more) 2703 2701 * or we hit an area that isn't mapped. ··· 2720 2714 } 2721 2715 2722 2716 trace_unmap(orig_iova, size, unmapped); 2717 + iommu_debug_unmap_end(domain, orig_iova, size, unmapped); 2723 2718 return unmapped; 2724 2719 } 2725 2720
+1
include/linux/iommu-debug-pagealloc.h
··· 9 9 #define __LINUX_IOMMU_DEBUG_PAGEALLOC_H 10 10 11 11 #ifdef CONFIG_IOMMU_DEBUG_PAGEALLOC 12 + DECLARE_STATIC_KEY_FALSE(iommu_debug_initialized); 12 13 13 14 extern struct page_ext_operations page_iommu_debug_ops; 14 15