// SPDX-License-Identifier: GPL-2.0
/*
 * Helpers for IOMMU drivers implementing SVA
 */
#include <linux/mmu_context.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include <linux/iommu.h>

#include "iommu-priv.h"

static DEFINE_MUTEX(iommu_sva_lock);
static bool iommu_sva_present;
static LIST_HEAD(iommu_sva_mms);
static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
						   struct mm_struct *mm);

/* Allocate a PASID for the mm, unless one is already assigned */
static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct device *dev)
{
	struct iommu_mm_data *iommu_mm;
	ioasid_t pasid;

	lockdep_assert_held(&iommu_sva_lock);

	if (!arch_pgtable_dma_compat(mm))
		return ERR_PTR(-EBUSY);

	iommu_mm = mm->iommu_mm;
	/* Is a PASID already associated with this mm? */
	if (iommu_mm) {
		if (iommu_mm->pasid >= dev->iommu->max_pasids)
			return ERR_PTR(-EOVERFLOW);
		return iommu_mm;
	}

	iommu_mm = kzalloc(sizeof(struct iommu_mm_data), GFP_KERNEL);
	if (!iommu_mm)
		return ERR_PTR(-ENOMEM);

	pasid = iommu_alloc_global_pasid(dev);
	if (pasid == IOMMU_PASID_INVALID) {
		kfree(iommu_mm);
		return ERR_PTR(-ENOSPC);
	}
	iommu_mm->pasid = pasid;
	iommu_mm->mm = mm;
	INIT_LIST_HEAD(&iommu_mm->sva_domains);
	/*
	 * Make sure the write to mm->iommu_mm is not reordered in front of
	 * the initialization of the iommu_mm fields. If it were, readers
	 * could see a valid iommu_mm with uninitialized values.
	 */
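	/*
	 * Lockless readers of mm->iommu_mm must pair this release store with
	 * an acquire or dependency-ordered load to observe those fields as
	 * initialized (the reader-side helpers live outside this file).
	 */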
	smp_store_release(&mm->iommu_mm, iommu_mm);
	return iommu_mm;
}

/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to mm_users
 *
 * Create a bond between device and address space, allowing the device to
 * access the mm using the PASID returned by iommu_sva_get_pasid(). If a
 * bond already exists between @dev and @mm, an additional internal
 * reference is taken. Caller must call iommu_sva_unbind_device()
 * to release each reference.
 *
 * On error, returns an ERR_PTR value.
 */
struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	struct iommu_group *group = dev->iommu_group;
	struct iommu_attach_handle *attach_handle;
	struct iommu_mm_data *iommu_mm;
	struct iommu_domain *domain;
	struct iommu_sva *handle;
	int ret;

	if (!group)
		return ERR_PTR(-ENODEV);

	mutex_lock(&iommu_sva_lock);

	/* Allocate mm->pasid if necessary. */
	iommu_mm = iommu_alloc_mm_data(mm, dev);
	if (IS_ERR(iommu_mm)) {
		ret = PTR_ERR(iommu_mm);
		goto out_unlock;
	}

	/* A bond already exists, just take a reference. */
	attach_handle = iommu_attach_handle_get(group, iommu_mm->pasid, IOMMU_DOMAIN_SVA);
	if (!IS_ERR(attach_handle)) {
		handle = container_of(attach_handle, struct iommu_sva, handle);
		if (attach_handle->domain->mm != mm) {
			ret = -EBUSY;
			goto out_unlock;
		}
		refcount_inc(&handle->users);
		mutex_unlock(&iommu_sva_lock);
		return handle;
	}

	if (PTR_ERR(attach_handle) != -ENOENT) {
		ret = PTR_ERR(attach_handle);
		goto out_unlock;
	}

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* Search for an existing domain. */
	list_for_each_entry(domain, &mm->iommu_mm->sva_domains, next) {
		ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid,
						&handle->handle);
		if (!ret) {
			domain->users++;
			goto out;
		}
	}

	/* Allocate a new domain and set it on device pasid. */
	domain = iommu_sva_domain_alloc(dev, mm);
	if (IS_ERR(domain)) {
		ret = PTR_ERR(domain);
		goto out_free_handle;
	}

	ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid,
					&handle->handle);
	if (ret)
		goto out_free_domain;
	domain->users = 1;

	if (list_empty(&iommu_mm->sva_domains)) {
		if (list_empty(&iommu_sva_mms))
			iommu_sva_present = true;
		list_add(&iommu_mm->mm_list_elm, &iommu_sva_mms);
	}
	list_add(&domain->next, &iommu_mm->sva_domains);
out:
	refcount_set(&handle->users, 1);
	mutex_unlock(&iommu_sva_lock);
	handle->dev = dev;
	return handle;

out_free_domain:
	iommu_domain_free(domain);
out_free_handle:
	kfree(handle);
out_unlock:
	mutex_unlock(&iommu_sva_lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
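
/*
 * Example usage (illustrative sketch only, not part of this file): a driver
 * binding the caller's own address space (whose mm_users reference is held
 * implicitly while running in process context) and programming the returned
 * PASID into its hardware. my_device_set_pasid() is a hypothetical helper.
 *
 *	struct iommu_sva *handle;
 *	u32 pasid;
 *
 *	handle = iommu_sva_bind_device(dev, current->mm);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *
 *	pasid = iommu_sva_get_pasid(handle);
 *	my_device_set_pasid(dev, pasid);
 *	...
 *	iommu_sva_unbind_device(handle);
 */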

/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put a reference to a bond between device and address space. The device
 * should not be issuing any more transactions for this PASID. All outstanding
 * page requests for this PASID must have been flushed to the IOMMU.
 */
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->handle.domain;
	struct iommu_mm_data *iommu_mm = domain->mm->iommu_mm;
	struct device *dev = handle->dev;

	mutex_lock(&iommu_sva_lock);
	if (!refcount_dec_and_test(&handle->users)) {
		mutex_unlock(&iommu_sva_lock);
		return;
	}

	iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
	if (--domain->users == 0) {
		list_del(&domain->next);
		iommu_domain_free(domain);
	}

	if (list_empty(&iommu_mm->sva_domains)) {
		list_del(&iommu_mm->mm_list_elm);
		if (list_empty(&iommu_sva_mms))
			iommu_sva_present = false;
	}

	mutex_unlock(&iommu_sva_lock);
	kfree(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);

u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	struct iommu_domain *domain = handle->handle.domain;

	return mm_get_enqcmd_pasid(domain->mm);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);

/* Free the mm's global PASID and iommu_mm data, if any were allocated */
void mm_pasid_drop(struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm = mm->iommu_mm;

	if (!iommu_mm)
		return;

	iommu_free_global_pasid(iommu_mm->pasid);
	kfree(iommu_mm);
}

/*
 * I/O page fault handler for SVA
 */
static enum iommu_page_response_code
iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm)
{
	vm_fault_t ret;
	struct vm_area_struct *vma;
	unsigned int access_flags = 0;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;
	struct iommu_fault_page_request *prm = &fault->prm;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;

	if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
		return status;

	if (!mmget_not_zero(mm))
		return status;

	mmap_read_lock(mm);

	vma = vma_lookup(mm, prm->addr);
	if (!vma)
		/* Unmapped area */
		goto out_put_mm;

	if (prm->perm & IOMMU_FAULT_PERM_READ)
		access_flags |= VM_READ;

	if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
		access_flags |= VM_WRITE;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
		access_flags |= VM_EXEC;
		fault_flags |= FAULT_FLAG_INSTRUCTION;
	}

	if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
		fault_flags |= FAULT_FLAG_USER;

	if (access_flags & ~vma->vm_flags)
		/* Access fault */
		goto out_put_mm;

	ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
	status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
		IOMMU_PAGE_RESP_SUCCESS;

out_put_mm:
	mmap_read_unlock(mm);
	mmput(mm);

	return status;
}

static void iommu_sva_handle_iopf(struct work_struct *work)
{
	struct iopf_fault *iopf;
	struct iopf_group *group;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;

	group = container_of(work, struct iopf_group, work);
	list_for_each_entry(iopf, &group->faults, list) {
		/*
		 * For the moment, errors are sticky: don't handle subsequent
		 * faults in the group if there is an error.
		 */
		if (status != IOMMU_PAGE_RESP_SUCCESS)
			break;

		status = iommu_sva_handle_mm(&iopf->fault,
					     group->attach_handle->domain->mm);
	}

	iopf_group_response(group, status);
	iopf_free_group(group);
}

static int iommu_sva_iopf_handler(struct iopf_group *group)
{
	struct iommu_fault_param *fault_param = group->fault_param;

	INIT_WORK(&group->work, iommu_sva_handle_iopf);
	if (!queue_work(fault_param->queue->wq, &group->work))
		return -EBUSY;

	return 0;
}

static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
						   struct mm_struct *mm)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_domain *domain;

	if (!ops->domain_alloc_sva)
		return ERR_PTR(-EOPNOTSUPP);

	domain = ops->domain_alloc_sva(dev, mm);
	if (IS_ERR(domain))
		return domain;

	domain->type = IOMMU_DOMAIN_SVA;
	domain->cookie_type = IOMMU_COOKIE_SVA;
	mmgrab(mm);
	domain->mm = mm;
	domain->owner = ops;
	domain->iopf_handler = iommu_sva_iopf_handler;

	return domain;
}
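
/*
 * Illustrative sketch (not part of this file) of what a driver-side
 * ->domain_alloc_sva() implementation typically looks like; my_sva_domain
 * and my_sva_domain_ops are hypothetical:
 *
 *	static struct iommu_domain *my_domain_alloc_sva(struct device *dev,
 *							struct mm_struct *mm)
 *	{
 *		struct my_sva_domain *sva = kzalloc(sizeof(*sva), GFP_KERNEL);
 *
 *		if (!sva)
 *			return ERR_PTR(-ENOMEM);
 *		sva->domain.ops = &my_sva_domain_ops;
 *		return &sva->domain;
 *	}
 *
 * The core above then fills in domain->type, domain->mm, domain->owner and
 * the iopf handler, so the driver only provides the domain and its ops.
 */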

/*
 * Propagate an invalidation of the given kernel VA range to the secondary
 * TLBs of every mm that currently has SVA domains.
 */
void iommu_sva_invalidate_kva_range(unsigned long start, unsigned long end)
{
	struct iommu_mm_data *iommu_mm;

	guard(mutex)(&iommu_sva_lock);
	if (!iommu_sva_present)
		return;

	list_for_each_entry(iommu_mm, &iommu_sva_mms, mm_list_elm)
		mmu_notifier_arch_invalidate_secondary_tlbs(iommu_mm->mm, start, end);
}