Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2025 - Google Inc
4 * Author: Mostafa Saleh <smostafa@google.com>
5 * IOMMU API debug page alloc sanitizer
6 */
7#include <linux/atomic.h>
8#include <linux/iommu.h>
9#include <linux/iommu-debug-pagealloc.h>
10#include <linux/kernel.h>
11#include <linux/page_ext.h>
12#include <linux/page_owner.h>
13
14#include "iommu-priv.h"
15
/* Set at early boot by the "iommu.debug_pagealloc" param parser below. */
static bool needed;
/* Flipped on in iommu_debug_init(); gates the debug hooks at runtime. */
DEFINE_STATIC_KEY_FALSE(iommu_debug_initialized);

/* Per-page metadata kept in page_ext: count of live IOMMU mappings. */
struct iommu_debug_metadata {
	atomic_t ref;
};
22
/*
 * Callback for the page_ext core: report whether our per-page metadata
 * slot should be allocated (i.e. whether the early param enabled us).
 */
static __init bool need_iommu_debug(void)
{
	return needed;
}
27
/* Registers our metadata size and enablement check with page_ext. */
struct page_ext_operations page_iommu_debug_ops = {
	.size = sizeof(struct iommu_debug_metadata),
	.need = need_iommu_debug,
};
32
/* Return our metadata slot inside an already-acquired page_ext. */
static struct iommu_debug_metadata *get_iommu_data(struct page_ext *page_ext)
{
	return page_ext_data(page_ext, &page_iommu_debug_ops);
}
37
38static void iommu_debug_inc_page(phys_addr_t phys)
39{
40 struct page_ext *page_ext = page_ext_from_phys(phys);
41 struct iommu_debug_metadata *d;
42
43 if (!page_ext)
44 return;
45
46 d = get_iommu_data(page_ext);
47 WARN_ON(atomic_inc_return_relaxed(&d->ref) <= 0);
48 page_ext_put(page_ext);
49}
50
51static void iommu_debug_dec_page(phys_addr_t phys)
52{
53 struct page_ext *page_ext = page_ext_from_phys(phys);
54 struct iommu_debug_metadata *d;
55
56 if (!page_ext)
57 return;
58
59 d = get_iommu_data(page_ext);
60 WARN_ON(atomic_dec_return_relaxed(&d->ref) < 0);
61 page_ext_put(page_ext);
62}
63
/*
 * IOMMU page size doesn't have to match the CPU page size. So, we use
 * the smallest IOMMU page size to refcount the pages in the vmemmap.
 * That is important as both map and unmap have to use the same page size
 * to update the refcount to avoid double counting the same page.
 * And as we can't know from iommu_unmap() what was the original page size
 * used for map, we just use the minimum supported one for both.
 */
static size_t iommu_debug_page_size(struct iommu_domain *domain)
{
	/* Lowest set bit of the bitmap == smallest supported page size. */
	return 1UL << __ffs(domain->pgsize_bitmap);
}
76
77static bool iommu_debug_page_count(const struct page *page)
78{
79 unsigned int ref;
80 struct page_ext *page_ext = page_ext_get(page);
81 struct iommu_debug_metadata *d = get_iommu_data(page_ext);
82
83 ref = atomic_read(&d->ref);
84 page_ext_put(page_ext);
85 return ref != 0;
86}
87
88void __iommu_debug_check_unmapped(const struct page *page, int numpages)
89{
90 while (numpages--) {
91 if (WARN_ON(iommu_debug_page_count(page))) {
92 pr_warn("iommu: Detected page leak!\n");
93 dump_page_owner(page);
94 }
95 page++;
96 }
97}
98
99void __iommu_debug_map(struct iommu_domain *domain, phys_addr_t phys, size_t size)
100{
101 size_t off, end;
102 size_t page_size = iommu_debug_page_size(domain);
103
104 if (WARN_ON(!phys || check_add_overflow(phys, size, &end)))
105 return;
106
107 for (off = 0 ; off < size ; off += page_size)
108 iommu_debug_inc_page(phys + off);
109}
110
111static void __iommu_debug_update_iova(struct iommu_domain *domain,
112 unsigned long iova, size_t size, bool inc)
113{
114 size_t off, end;
115 size_t page_size = iommu_debug_page_size(domain);
116
117 if (WARN_ON(check_add_overflow(iova, size, &end)))
118 return;
119
120 for (off = 0 ; off < size ; off += page_size) {
121 phys_addr_t phys = iommu_iova_to_phys(domain, iova + off);
122
123 if (!phys)
124 continue;
125
126 if (inc)
127 iommu_debug_inc_page(phys);
128 else
129 iommu_debug_dec_page(phys);
130 }
131}
132
/*
 * Hook called before iommu_unmap(): drop the reference of every page
 * currently mapped in [@iova, @iova + @size), while the translation is
 * still in place and iommu_iova_to_phys() can resolve it.
 */
void __iommu_debug_unmap_begin(struct iommu_domain *domain,
			       unsigned long iova, size_t size)
{
	__iommu_debug_update_iova(domain, iova, size, false);
}
138
/*
 * Hook called after iommu_unmap() returns: if the driver unmapped less
 * than requested, re-take the references that unmap_begin dropped for
 * the tail of the range that is in fact still mapped.
 */
void __iommu_debug_unmap_end(struct iommu_domain *domain,
			     unsigned long iova, size_t size,
			     size_t unmapped)
{
	/* Full unmap (common case), or bogus over-unmap: nothing to fix up. */
	if ((unmapped == size) || WARN_ON_ONCE(unmapped > size))
		return;

	/* If unmap failed, re-increment the refcount. */
	__iommu_debug_update_iova(domain, iova + unmapped,
				  size - unmapped, true);
}
150
151void iommu_debug_init(void)
152{
153 if (!needed)
154 return;
155
156 pr_info("iommu: Debugging page allocations, expect overhead or disable iommu.debug_pagealloc");
157 static_branch_enable(&iommu_debug_initialized);
158}
159
/* Parse the "iommu.debug_pagealloc" boolean early param into @needed. */
static int __init iommu_debug_pagealloc(char *str)
{
	return kstrtobool(str, &needed);
}
early_param("iommu.debug_pagealloc", iommu_debug_pagealloc);