// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/adreno-smmu-priv.h>
#include <linux/io-pgtable.h>
#include "msm_drv.h"
#include "msm_mmu.h"

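/*
 * A msm_iommu wraps an iommu_domain attached through the core IOMMU API.
 * On Adreno SMMUs it can additionally act as the parent of per-process
 * msm_iommu_pagetable objects, which own their own io-pgtable and are
 * installed in the SMMU's TTBR0 while the parent domain stays in TTBR1.
 * "pagetables" counts the attached children so that TTBR0 can be torn
 * down again when the last one is destroyed.
 */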
struct msm_iommu {
	struct msm_mmu base;
	struct iommu_domain *domain;
	atomic_t pagetables;
};

#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)

struct msm_iommu_pagetable {
	struct msm_mmu base;
	struct msm_mmu *parent;
	struct io_pgtable_ops *pgtbl_ops;
	phys_addr_t ttbr;
	u32 asid;
};

static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
{
	return container_of(mmu, struct msm_iommu_pagetable, base);
}

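/*
 * Because the child pagetable's io-pgtable is created with no-op TLB ops
 * (see null_tlb_ops below), nothing is invalidated as individual entries
 * are removed; instead the parent domain's TLB is flushed once, in full,
 * after the whole range has been unmapped.
 */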
static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
		size_t size)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
	size_t unmapped = 0;
	size_t len = size;

	/* Unmap the block one page at a time */
	while (size) {
		unmapped += ops->unmap(ops, iova, 4096, NULL);
		iova += 4096;
		size -= 4096;
	}

	iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);

	/*
	 * "size" has been decremented to zero by the loop above, so compare
	 * against the saved length rather than the (now zero) size.
	 */
	return (unmapped == len) ? 0 : -EINVAL;
}

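/*
 * Walk the scatterlist and install each physically contiguous chunk one
 * 4K page at a time.  On failure, everything mapped so far is unmapped
 * before returning, so the operation is all-or-nothing.
 */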
static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
		struct sg_table *sgt, size_t len, int prot)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
	struct scatterlist *sg;
	size_t mapped = 0;
	u64 addr = iova;
	unsigned int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t size = sg->length;
		phys_addr_t phys = sg_phys(sg);

		/* Map the block one page at a time */
		while (size) {
			if (ops->map(ops, addr, phys, 4096, prot, GFP_KERNEL)) {
				msm_iommu_pagetable_unmap(mmu, iova, mapped);
				return -EINVAL;
			}

			phys += 4096;
			addr += 4096;
			size -= 4096;
			mapped += 4096;
		}
	}

	return 0;
}

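/*
 * Dropping a child pagetable decrements the parent's count; when the
 * count hits zero, the arm-smmu driver is told to switch TTBR0 back off
 * by passing a NULL config.
 */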
static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
	struct adreno_smmu_priv *adreno_smmu =
		dev_get_drvdata(pagetable->parent->dev);

	/*
	 * If this is the last attached pagetable for the parent,
	 * disable TTBR0 in the arm-smmu driver
	 */
	if (atomic_dec_return(&iommu->pagetables) == 0)
		adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);

	free_io_pgtable_ops(pagetable->pgtbl_ops);
	kfree(pagetable);
}

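/*
 * Report the TTBR and ASID of a child pagetable, presumably so GPU-side
 * code can program the hardware pagetable switch.  Both out parameters
 * are optional.
 */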
int msm_iommu_pagetable_params(struct msm_mmu *mmu,
		phys_addr_t *ttbr, int *asid)
{
	struct msm_iommu_pagetable *pagetable;

	if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
		return -EINVAL;

	pagetable = to_pagetable(mmu);

	if (ttbr)
		*ttbr = pagetable->ttbr;

	if (asid)
		*asid = pagetable->asid;

	return 0;
}

static const struct msm_mmu_funcs pagetable_funcs = {
	.map = msm_iommu_pagetable_map,
	.unmap = msm_iommu_pagetable_unmap,
	.destroy = msm_iommu_pagetable_destroy,
};

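/*
 * io-pgtable requires TLB ops, but the child pagetables do their own
 * coarse invalidation (a full flush of the parent domain on unmap), so
 * these are deliberately empty stubs.
 */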
static void msm_iommu_tlb_flush_all(void *cookie)
{
}

static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
		size_t granule, void *cookie)
{
}

static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
		unsigned long iova, size_t granule, void *cookie)
{
}

static const struct iommu_flush_ops null_tlb_ops = {
	.tlb_flush_all = msm_iommu_tlb_flush_all,
	.tlb_flush_walk = msm_iommu_tlb_flush_walk,
	.tlb_add_page = msm_iommu_tlb_add_page,
};

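/*
 * Create a child pagetable against an Adreno-SMMU-backed parent.  A
 * hypothetical caller (names illustrative, not from this file) would
 * look roughly like:
 *
 *	struct msm_mmu *mmu = msm_iommu_new(dev, domain);
 *	struct msm_mmu *pt = msm_iommu_pagetable_create(mmu);
 *	phys_addr_t ttbr;
 *	int asid;
 *
 *	if (!IS_ERR(pt))
 *		msm_iommu_pagetable_params(pt, &ttbr, &asid);
 */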
struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
	struct msm_iommu *iommu = to_msm_iommu(parent);
	struct msm_iommu_pagetable *pagetable;
	const struct io_pgtable_cfg *ttbr1_cfg = NULL;
	struct io_pgtable_cfg ttbr0_cfg;
	int ret;

	/* Get the pagetable configuration from the domain */
	if (adreno_smmu->cookie)
		ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
	if (!ttbr1_cfg)
		return ERR_PTR(-ENODEV);

	pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
	if (!pagetable)
		return ERR_PTR(-ENOMEM);

	msm_mmu_init(&pagetable->base, parent->dev, &pagetable_funcs,
		MSM_MMU_IOMMU_PAGETABLE);

	/* Clone the TTBR1 cfg as starting point for TTBR0 cfg: */
	ttbr0_cfg = *ttbr1_cfg;

	/* The incoming cfg will have the TTBR1 quirk enabled */
	ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
	ttbr0_cfg.tlb = &null_tlb_ops;

	pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
		&ttbr0_cfg, iommu->domain);

	if (!pagetable->pgtbl_ops) {
		kfree(pagetable);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * If this is the first pagetable that we've allocated, send it back to
	 * the arm-smmu driver as a trigger to set up TTBR0
	 */
	if (atomic_inc_return(&iommu->pagetables) == 1) {
		ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
		if (ret) {
			/*
			 * Undo the increment so a later create retries the
			 * TTBR0 setup instead of silently skipping it.
			 */
			atomic_dec(&iommu->pagetables);
			free_io_pgtable_ops(pagetable->pgtbl_ops);
			kfree(pagetable);
			return ERR_PTR(ret);
		}
	}

	/* Needed later for TLB flush */
	pagetable->parent = parent;
	pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;

	/*
	 * TODO we would like each set of page tables to have a unique ASID
	 * to optimize TLB invalidation.  But iommu_flush_iotlb_all() will
	 * end up flushing the ASID used for TTBR1 pagetables, which is not
	 * what we want.  So for now just use the same ASID as TTBR1.
	 */
	pagetable->asid = 0;

	return &pagetable->base;
}

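/*
 * Dispatch an SMMU fault to the handler installed on the base MMU, if
 * any; otherwise just log it rate-limited and report it handled.
 */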
static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	struct msm_iommu *iommu = arg;

	if (iommu->base.handler)
		return iommu->base.handler(iommu->base.arg, iova, flags);

	pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags);
	return 0;
}

static void msm_iommu_detach(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	iommu_detach_device(iommu->domain, mmu->dev);
}

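/*
 * The arm-smmu driver treats these as 49-bit sign-extended addresses:
 * if bit 48 (the top VA bit) is set, bits 63:49 must be set to match.
 * For example, an iova of 0x0001000000001000 (bit 48 set) becomes
 * 0xffff000000001000 after the extension below.
 */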
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt, size_t len, int prot)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	size_t ret;

	/* The arm-smmu driver expects the addresses to be sign extended */
	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);

	ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);
	WARN_ON(!ret);

	return (ret == len) ? 0 : -EINVAL;
}

static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);

	iommu_unmap(iommu->domain, iova, len);

	return 0;
}

static void msm_iommu_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	iommu_domain_free(iommu->domain);
	kfree(iommu);
}

static const struct msm_mmu_funcs funcs = {
	.detach = msm_iommu_detach,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.destroy = msm_iommu_destroy,
};

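/*
 * Wrap an already-allocated iommu_domain and attach it to "dev".  A
 * hypothetical caller (domain setup not shown, names illustrative):
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *	struct msm_mmu *mmu = msm_iommu_new(dev, domain);
 *
 *	if (IS_ERR(mmu))
 *		iommu_domain_free(domain);
 *
 * On failure the caller keeps ownership of the domain; on success it is
 * freed later by msm_iommu_destroy().
 */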
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
{
	struct msm_iommu *iommu;
	int ret;

	if (!domain)
		return ERR_PTR(-ENODEV);

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return ERR_PTR(-ENOMEM);

	iommu->domain = domain;
	msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);
	iommu_set_fault_handler(domain, msm_fault_handler, iommu);

	atomic_set(&iommu->pagetables, 0);

	ret = iommu_attach_device(iommu->domain, dev);
	if (ret) {
		kfree(iommu);
		return ERR_PTR(ret);
	}

	return &iommu->base;
}