Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU API for Renesas VMSA-compatible IPMMU
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * Copyright (C) 2014 Renesas Electronics Corporation
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
#include <asm/dma-iommu.h>
#include <asm/pgalloc.h>
#else
#define arm_iommu_create_mapping(...)	NULL
#define arm_iommu_attach_device(...)	-ENODEV
#define arm_iommu_release_mapping(...)	do {} while (0)
#define arm_iommu_detach_device(...)	do {} while (0)
#endif
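
/*
 * The stubs above let the driver build when the 32-bit ARM DMA mapping API
 * is not available: arm_iommu_create_mapping() and friends collapse to
 * no-ops or error returns, and the code paths that call them are only taken
 * when CONFIG_ARM is set without CONFIG_IOMMU_DMA (see
 * ipmmu_probe_finalize() below).
 */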

#define IPMMU_CTX_MAX		8U
#define IPMMU_CTX_INVALID	-1

#define IPMMU_UTLB_MAX		48U

struct ipmmu_features {
	bool use_ns_alias_offset;
	bool has_cache_leaf_nodes;
	unsigned int number_of_contexts;
	unsigned int num_utlbs;
	bool setup_imbuscr;
	bool twobit_imttbcr_sl0;
	bool reserved_context;
	bool cache_snoop;
	unsigned int ctx_offset_base;
	unsigned int ctx_offset_stride;
	unsigned int utlb_offset_base;
};

struct ipmmu_vmsa_device {
	struct device *dev;
	void __iomem *base;
	struct iommu_device iommu;
	struct ipmmu_vmsa_device *root;
	const struct ipmmu_features *features;
	unsigned int num_ctx;
	spinlock_t lock;			/* Protects ctx and domains[] */
	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
	s8 utlb_ctx[IPMMU_UTLB_MAX];

	struct iommu_group *group;
	struct dma_iommu_mapping *mapping;
};

struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;
	struct iommu_domain io_domain;

	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;

	unsigned int context_id;
	struct mutex mutex;			/* Protects mappings */
};

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}

static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
{
	return dev_iommu_priv_get(dev);
}

#define TLB_LOOP_TIMEOUT		100	/* 100us */
/* -----------------------------------------------------------------------------
 * Register Definitions
 */

#define IM_NS_ALIAS_OFFSET		0x800

/* MMU "context" registers */
#define IMCTR				0x0000		/* R-Car Gen2/3 */
#define IMCTR_INTEN			(1 << 2)	/* R-Car Gen2/3 */
#define IMCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
#define IMCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */

#define IMTTBCR				0x0008		/* R-Car Gen2/3 */
#define IMTTBCR_EAE			(1 << 31)	/* R-Car Gen2/3 */
#define IMTTBCR_SH0_INNER_SHAREABLE	(3 << 12)	/* R-Car Gen2 only */
#define IMTTBCR_ORGN0_WB_WA		(1 << 10)	/* R-Car Gen2 only */
#define IMTTBCR_IRGN0_WB_WA		(1 << 8)	/* R-Car Gen2 only */
#define IMTTBCR_SL0_TWOBIT_LVL_1	(2 << 6)	/* R-Car Gen3 only */
#define IMTTBCR_SL0_LVL_1		(1 << 4)	/* R-Car Gen2 only */

#define IMBUSCR				0x000c		/* R-Car Gen2 only */
#define IMBUSCR_DVM			(1 << 2)	/* R-Car Gen2 only */
#define IMBUSCR_BUSSEL_MASK		(3 << 0)	/* R-Car Gen2 only */

#define IMTTLBR0			0x0010		/* R-Car Gen2/3 */
#define IMTTUBR0			0x0014		/* R-Car Gen2/3 */

#define IMSTR				0x0020		/* R-Car Gen2/3 */
#define IMSTR_MHIT			(1 << 4)	/* R-Car Gen2/3 */
#define IMSTR_ABORT			(1 << 2)	/* R-Car Gen2/3 */
#define IMSTR_PF			(1 << 1)	/* R-Car Gen2/3 */
#define IMSTR_TF			(1 << 0)	/* R-Car Gen2/3 */

#define IMMAIR0				0x0028		/* R-Car Gen2/3 */

#define IMELAR				0x0030		/* R-Car Gen2/3, IMEAR on R-Car Gen2 */
#define IMEUAR				0x0034		/* R-Car Gen3 only */

/* uTLB registers */
#define IMUCTR(n)			((n) < 32 ? IMUCTR0(n) : IMUCTR32(n))
#define IMUCTR0(n)			(0x0300 + ((n) * 16))		/* R-Car Gen2/3 */
#define IMUCTR32(n)			(0x0600 + (((n) - 32) * 16))	/* R-Car Gen3 only */
#define IMUCTR_TTSEL_MMU(n)		((n) << 4)	/* R-Car Gen2/3 */
#define IMUCTR_FLUSH			(1 << 1)	/* R-Car Gen2/3 */
#define IMUCTR_MMUEN			(1 << 0)	/* R-Car Gen2/3 */

#define IMUASID(n)			((n) < 32 ? IMUASID0(n) : IMUASID32(n))
#define IMUASID0(n)			(0x0308 + ((n) * 16))		/* R-Car Gen2/3 */
#define IMUASID32(n)			(0x0608 + (((n) - 32) * 16))	/* R-Car Gen3 only */
/* -----------------------------------------------------------------------------
 * Root device handling
 */
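
/*
 * On R-Car Gen3 the IPMMU is split into a root instance (IPMMU-MM) and
 * several cache (leaf) instances that front the bus masters and point to
 * their root through the "renesas,ipmmu-main" DT property. Translation
 * contexts are allocated and programmed on the root (see
 * ipmmu_domain_init_context()), and IMCTR writes are mirrored to the cache
 * instance as well (see ipmmu_ctx_write_all()). On R-Car Gen2 every
 * instance is its own root.
 */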

static struct platform_driver ipmmu_driver;

static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu)
{
	return mmu->root == mmu;
}

static int __ipmmu_check_device(struct device *dev, void *data)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	struct ipmmu_vmsa_device **rootp = data;

	if (ipmmu_is_root(mmu))
		*rootp = mmu;

	return 0;
}

static struct ipmmu_vmsa_device *ipmmu_find_root(void)
{
	struct ipmmu_vmsa_device *root = NULL;

	return driver_for_each_device(&ipmmu_driver.driver, NULL, &root,
				      __ipmmu_check_device) == 0 ? root : NULL;
}

/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
	return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
			u32 data)
{
	iowrite32(data, mmu->base + offset);
}

static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu,
				  unsigned int context_id, unsigned int reg)
{
	return mmu->features->ctx_offset_base +
	       context_id * mmu->features->ctx_offset_stride + reg;
}
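
/*
 * Example: with ctx_offset_base == 0 and ctx_offset_stride == 0x40 (the
 * values used by both feature tables below), the IMTTBCR register (0x0008)
 * of context 2 is at 0 + 2 * 0x40 + 0x08 == 0x88.
 */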

static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu,
			  unsigned int context_id, unsigned int reg)
{
	return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg));
}

static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu,
			    unsigned int context_id, unsigned int reg, u32 data)
{
	ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data);
}

static u32 ipmmu_ctx_read_root(struct ipmmu_vmsa_domain *domain,
			       unsigned int reg)
{
	return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg);
}

static void ipmmu_ctx_write_root(struct ipmmu_vmsa_domain *domain,
				 unsigned int reg, u32 data)
{
	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

static void ipmmu_ctx_write_all(struct ipmmu_vmsa_domain *domain,
				unsigned int reg, u32 data)
{
	if (domain->mmu != domain->mmu->root)
		ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data);

	ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data);
}

static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg)
{
	return mmu->features->utlb_offset_base + reg;
}

static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu,
				unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data);
}

static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu,
			       unsigned int utlb, u32 data)
{
	ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data);
}

/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
	unsigned int count = 0;

	while (ipmmu_ctx_read_root(domain, IMCTR) & IMCTR_FLUSH) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(domain->mmu->dev,
					    "TLB sync timed out -- MMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}
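
/*
 * Worst case the sync above polls for TLB_LOOP_TIMEOUT iterations with a
 * 1us delay each, i.e. roughly 100us, before giving up; the flush is then
 * left pending and only an error is logged.
 */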

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
	u32 reg;

	reg = ipmmu_ctx_read_root(domain, IMCTR);
	reg |= IMCTR_FLUSH;
	ipmmu_ctx_write_all(domain, IMCTR, reg);

	ipmmu_tlb_sync(domain);
}

/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
			      unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/*
	 * TODO: Reference-count the microTLB as several bus masters can be
	 * connected to the same microTLB.
	 */

	/* TODO: What should we set the ASID to? */
	ipmmu_imuasid_write(mmu, utlb, 0);
	/* TODO: Do we need to flush the microTLB? */
	ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
				      IMUCTR_FLUSH | IMUCTR_MMUEN);
	mmu->utlb_ctx[utlb] = domain->context_id;
}

/*
 * Disable MMU translation for the microTLB.
 */
static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
			       unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	ipmmu_imuctr_write(mmu, utlb, 0);
	mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
}

static void ipmmu_tlb_flush_all(void *cookie)
{
	struct ipmmu_vmsa_domain *domain = cookie;

	ipmmu_tlb_invalidate(domain);
}

static void ipmmu_tlb_flush(unsigned long iova, size_t size,
			    size_t granule, void *cookie)
{
	ipmmu_tlb_flush_all(cookie);
}

static const struct iommu_flush_ops ipmmu_flush_ops = {
	.tlb_flush_all = ipmmu_tlb_flush_all,
	.tlb_flush_walk = ipmmu_tlb_flush,
	.tlb_flush_leaf = ipmmu_tlb_flush,
};
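
/*
 * The hardware only provides a whole-context flush (IMCTR_FLUSH), so the
 * range-based tlb_flush_walk and tlb_flush_leaf callbacks above fall back
 * to a full invalidation regardless of iova, size and granule.
 */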

/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
					 struct ipmmu_vmsa_domain *domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mmu->lock, flags);

	ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx);
	if (ret != mmu->num_ctx) {
		mmu->domains[ret] = domain;
		set_bit(ret, mmu->ctx);
	} else {
		ret = -EBUSY;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return ret;
}

static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
				      unsigned int context_id)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	clear_bit(context_id, mmu->ctx);
	mmu->domains[context_id] = NULL;

	spin_unlock_irqrestore(&mmu->lock, flags);
}

static void ipmmu_domain_setup_context(struct ipmmu_vmsa_domain *domain)
{
	u64 ttbr;
	u32 tmp;

	/* TTBR0 */
	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr;
	ipmmu_ctx_write_root(domain, IMTTLBR0, ttbr);
	ipmmu_ctx_write_root(domain, IMTTUBR0, ttbr >> 32);

	/*
	 * TTBCR
	 * We use long descriptors and allocate the whole 32-bit VA space to
	 * TTBR0.
	 */
	if (domain->mmu->features->twobit_imttbcr_sl0)
		tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
	else
		tmp = IMTTBCR_SL0_LVL_1;

	if (domain->mmu->features->cache_snoop)
		tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
		       IMTTBCR_IRGN0_WB_WA;

	ipmmu_ctx_write_root(domain, IMTTBCR, IMTTBCR_EAE | tmp);

	/* MAIR0 */
	ipmmu_ctx_write_root(domain, IMMAIR0,
			     domain->cfg.arm_lpae_s1_cfg.mair);

	/* IMBUSCR */
	if (domain->mmu->features->setup_imbuscr)
		ipmmu_ctx_write_root(domain, IMBUSCR,
				     ipmmu_ctx_read_root(domain, IMBUSCR) &
				     ~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

	/*
	 * IMSTR
	 * Clear all interrupt flags.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, ipmmu_ctx_read_root(domain, IMSTR));

	/*
	 * IMCTR
	 * Enable the MMU and interrupt generation. The long-descriptor
	 * translation table format doesn't use TEX remapping. Don't enable AF
	 * software management as we have no use for it. Flush the TLB as
	 * required when modifying the context registers.
	 */
	ipmmu_ctx_write_all(domain, IMCTR,
			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
}

static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
	int ret;

	/*
	 * Allocate the page table operations.
	 *
	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
	 * access, Long-descriptor format" that the NStable bit being set in a
	 * table descriptor will result in the NStable and NS bits of all child
	 * entries being ignored and considered as being set. The IPMMU seems
	 * not to comply with this, as it generates a secure access page fault
	 * if any of the NStable and NS bits isn't set when running in
	 * non-secure mode.
	 */
	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
	domain->cfg.ias = 32;
	domain->cfg.oas = 40;
	domain->cfg.tlb = &ipmmu_flush_ops;
	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
	domain->io_domain.geometry.force_aperture = true;
	/*
	 * TODO: Add support for coherent walk through CCI with DVM and remove
	 * cache handling. For now, delegate it to the io-pgtable code.
	 */
	domain->cfg.coherent_walk = false;
	domain->cfg.iommu_dev = domain->mmu->root->dev;

	/*
	 * Find an unused context.
	 */
	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
	if (ret < 0)
		return ret;

	domain->context_id = ret;

	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
					   domain);
	if (!domain->iop) {
		ipmmu_domain_free_context(domain->mmu->root,
					  domain->context_id);
		return -EINVAL;
	}

	ipmmu_domain_setup_context(domain);
	return 0;
}

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
	if (!domain->mmu)
		return;

	/*
	 * Disable the context. Flush the TLB as required when modifying the
	 * context registers.
	 *
	 * TODO: Is a TLB flush really needed?
	 */
	ipmmu_ctx_write_all(domain, IMCTR, IMCTR_FLUSH);
	ipmmu_tlb_sync(domain);
	ipmmu_domain_free_context(domain->mmu->root, domain->context_id);
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
	struct ipmmu_vmsa_device *mmu = domain->mmu;
	unsigned long iova;
	u32 status;

	status = ipmmu_ctx_read_root(domain, IMSTR);
	if (!(status & err_mask))
		return IRQ_NONE;

	iova = ipmmu_ctx_read_root(domain, IMELAR);
	if (IS_ENABLED(CONFIG_64BIT))
		iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;

	/*
	 * Clear the error status flags. Unlike traditional interrupt flag
	 * registers that must be cleared by writing 1, this status register
	 * seems to require 0. The error address register must be read before,
	 * otherwise its value will be 0.
	 */
	ipmmu_ctx_write_root(domain, IMSTR, 0);

	/* Log fatal errors. */
	if (status & IMSTR_MHIT)
		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
				    iova);
	if (status & IMSTR_ABORT)
		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
				    iova);

	if (!(status & (IMSTR_PF | IMSTR_TF)))
		return IRQ_NONE;

	/*
	 * Try to handle page faults and translation faults.
	 *
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;

	dev_err_ratelimited(mmu->dev,
			    "Unhandled fault: status 0x%08x iova 0x%lx\n",
			    status, iova);

	return IRQ_HANDLED;
}

static irqreturn_t ipmmu_irq(int irq, void *dev)
{
	struct ipmmu_vmsa_device *mmu = dev;
	irqreturn_t status = IRQ_NONE;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	/*
	 * Check interrupts for all active contexts.
	 */
	for (i = 0; i < mmu->num_ctx; i++) {
		if (!mmu->domains[i])
			continue;
		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
			status = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return status;
}

/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
{
	struct ipmmu_vmsa_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	mutex_init(&domain->mutex);

	return &domain->io_domain;
}

static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
	struct iommu_domain *io_domain = NULL;

	switch (type) {
	case IOMMU_DOMAIN_UNMANAGED:
		io_domain = __ipmmu_domain_alloc(type);
		break;

	case IOMMU_DOMAIN_DMA:
		io_domain = __ipmmu_domain_alloc(type);
		if (io_domain && iommu_get_dma_cookie(io_domain)) {
			kfree(io_domain);
			io_domain = NULL;
		}
		break;
	}

	return io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/*
	 * Free the domain resources. We assume that all devices have already
	 * been detached.
	 */
	iommu_put_dma_cookie(io_domain);
	ipmmu_domain_destroy_context(domain);
	free_io_pgtable_ops(domain->iop);
	kfree(domain);
}

static int ipmmu_attach_device(struct iommu_domain *io_domain,
			       struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;
	int ret = 0;

	if (!mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	mutex_lock(&domain->mutex);

	if (!domain->mmu) {
		/* The domain hasn't been used yet, initialize it. */
		domain->mmu = mmu;
		ret = ipmmu_domain_init_context(domain);
		if (ret < 0) {
			dev_err(dev, "Unable to initialize IPMMU context\n");
			domain->mmu = NULL;
		} else {
			dev_info(dev, "Using IPMMU context %u\n",
				 domain->context_id);
		}
	} else if (domain->mmu != mmu) {
		/*
		 * Something is wrong, we can't attach two devices using
		 * different IOMMUs to the same domain.
		 */
		dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
			dev_name(mmu->dev), dev_name(domain->mmu->dev));
		ret = -EINVAL;
	} else {
		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
	}

	mutex_unlock(&domain->mutex);

	if (ret < 0)
		return ret;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_enable(domain, fwspec->ids[i]);

	return 0;
}

static void ipmmu_detach_device(struct iommu_domain *io_domain,
				struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;

	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_disable(domain, fwspec->ids[i]);

	/*
	 * TODO: Optimize by disabling the context when no device is attached.
	 */
}

static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (!domain)
		return -ENODEV;

	return domain->iop->map(domain->iop, iova, paddr, size, prot);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
			  size_t size, struct iommu_iotlb_gather *gather)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->unmap(domain->iop, iova, size, gather);
}

static void ipmmu_flush_iotlb_all(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (domain->mmu)
		ipmmu_tlb_flush_all(domain);
}

static void ipmmu_iotlb_sync(struct iommu_domain *io_domain,
			     struct iommu_iotlb_gather *gather)
{
	ipmmu_flush_iotlb_all(io_domain);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
				      dma_addr_t iova)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/* TODO: Is locking needed? */

	return domain->iop->iova_to_phys(domain->iop, iova);
}

static int ipmmu_init_platform_device(struct device *dev,
				      struct of_phandle_args *args)
{
	struct platform_device *ipmmu_pdev;

	ipmmu_pdev = of_find_device_by_node(args->np);
	if (!ipmmu_pdev)
		return -ENODEV;

	dev_iommu_priv_set(dev, platform_get_drvdata(ipmmu_pdev));

	return 0;
}

static const struct soc_device_attribute soc_rcar_gen3[] = {
	{ .soc_id = "r8a774a1", },
	{ .soc_id = "r8a774b1", },
	{ .soc_id = "r8a774c0", },
	{ .soc_id = "r8a7795", },
	{ .soc_id = "r8a7796", },
	{ .soc_id = "r8a77965", },
	{ .soc_id = "r8a77970", },
	{ .soc_id = "r8a77990", },
	{ .soc_id = "r8a77995", },
	{ /* sentinel */ }
};

static const struct soc_device_attribute soc_rcar_gen3_whitelist[] = {
	{ .soc_id = "r8a774b1", },
	{ .soc_id = "r8a774c0", },
	{ .soc_id = "r8a7795", .revision = "ES3.*" },
	{ .soc_id = "r8a77965", },
	{ .soc_id = "r8a77990", },
	{ .soc_id = "r8a77995", },
	{ /* sentinel */ }
};

static const char * const rcar_gen3_slave_whitelist[] = {
};
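
/*
 * Entries in rcar_gen3_slave_whitelist are matched against dev_name() in
 * ipmmu_slave_whitelist() below, so they take the form of platform device
 * names such as "e6700000.dma-controller" (an illustrative, hypothetical
 * entry; the list is currently empty).
 */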

static bool ipmmu_slave_whitelist(struct device *dev)
{
	unsigned int i;

	/*
	 * On R-Car Gen3, use a whitelist to opt in slave devices.
	 * On other SoCs, allow all devices unconditionally.
	 */
	if (!soc_device_match(soc_rcar_gen3))
		return true;

	/* Check whether this R-Car Gen3 SoC can use the IPMMU correctly or not. */
	if (!soc_device_match(soc_rcar_gen3_whitelist))
		return false;

	/* Check whether this slave device can work with the IPMMU. */
	for (i = 0; i < ARRAY_SIZE(rcar_gen3_slave_whitelist); i++) {
		if (!strcmp(dev_name(dev), rcar_gen3_slave_whitelist[i]))
			return true;
	}

	/* Otherwise, do not allow use of the IPMMU. */
	return false;
}

static int ipmmu_of_xlate(struct device *dev,
			  struct of_phandle_args *spec)
{
	if (!ipmmu_slave_whitelist(dev))
		return -ENODEV;

	iommu_fwspec_add_ids(dev, spec->args, 1);

	/* Initialize once - xlate() may be called multiple times. */
	if (to_ipmmu(dev))
		return 0;

	return ipmmu_init_platform_device(dev, spec);
}
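
/*
 * For reference, a bus master selects its micro-TLB with a single cell in
 * its "iommus" DT property, which arrives here as spec->args[0]. A
 * hypothetical consumer node (illustrative values only):
 *
 *	dma0: dma-controller@e6700000 {
 *		...
 *		iommus = <&ipmmu_ds0 16>;
 *	};
 */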

static int ipmmu_init_arm_mapping(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	int ret;

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 *
	 * TODO:
	 * - Create one mapping per context (TLB).
	 * - Make the mapping size configurable? We currently use a 2GB mapping
	 *   at a 1GB offset to ensure that NULL VAs will fault.
	 */
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	if (mmu->mapping)
		arm_iommu_release_mapping(mmu->mapping);

	return ret;
}

static struct iommu_device *ipmmu_probe_device(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);

	/*
	 * Only let through devices that have been verified in xlate()
	 */
	if (!mmu)
		return ERR_PTR(-ENODEV);

	return &mmu->iommu;
}

static void ipmmu_probe_finalize(struct device *dev)
{
	int ret = 0;

	if (IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA))
		ret = ipmmu_init_arm_mapping(dev);

	if (ret)
		dev_err(dev, "Can't create IOMMU mapping - DMA-OPS will not work\n");
}

static void ipmmu_release_device(struct device *dev)
{
	arm_iommu_detach_device(dev);
}

static struct iommu_group *ipmmu_find_group(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
	struct iommu_group *group;

	if (mmu->group)
		return iommu_group_ref_get(mmu->group);

	group = iommu_group_alloc();
	if (!IS_ERR(group))
		mmu->group = group;

	return group;
}

static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc,
	.domain_free = ipmmu_domain_free,
	.attach_dev = ipmmu_attach_device,
	.detach_dev = ipmmu_detach_device,
	.map = ipmmu_map,
	.unmap = ipmmu_unmap,
	.flush_iotlb_all = ipmmu_flush_iotlb_all,
	.iotlb_sync = ipmmu_iotlb_sync,
	.iova_to_phys = ipmmu_iova_to_phys,
	.probe_device = ipmmu_probe_device,
	.release_device = ipmmu_release_device,
	.probe_finalize = ipmmu_probe_finalize,
	.device_group = IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)
			? generic_device_group : ipmmu_find_group,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
	.of_xlate = ipmmu_of_xlate,
};

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
	unsigned int i;

	/* Disable all contexts. */
	for (i = 0; i < mmu->num_ctx; ++i)
		ipmmu_ctx_write(mmu, i, IMCTR, 0);
}

static const struct ipmmu_features ipmmu_features_default = {
	.use_ns_alias_offset = true,
	.has_cache_leaf_nodes = false,
	.number_of_contexts = 1, /* software only tested with one context */
	.num_utlbs = 32,
	.setup_imbuscr = true,
	.twobit_imttbcr_sl0 = false,
	.reserved_context = false,
	.cache_snoop = true,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};

static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
	.use_ns_alias_offset = false,
	.has_cache_leaf_nodes = true,
	.number_of_contexts = 8,
	.num_utlbs = 48,
	.setup_imbuscr = false,
	.twobit_imttbcr_sl0 = true,
	.reserved_context = true,
	.cache_snoop = false,
	.ctx_offset_base = 0,
	.ctx_offset_stride = 0x40,
	.utlb_offset_base = 0,
};
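
/*
 * Compared to the default (R-Car Gen2) features above, R-Car Gen3 drops the
 * non-secure alias offset and IMBUSCR setup, uses the two-bit IMTTBCR SL0
 * encoding, marks context 0 as reserved (see ipmmu_probe()), and scales up
 * to 8 contexts and 48 micro-TLBs, with translation split between root and
 * cache instances.
 */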

static const struct of_device_id ipmmu_of_ids[] = {
	{
		.compatible = "renesas,ipmmu-vmsa",
		.data = &ipmmu_features_default,
	}, {
		.compatible = "renesas,ipmmu-r8a774a1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774b1",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a774c0",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7795",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a7796",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77965",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77970",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77990",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		.compatible = "renesas,ipmmu-r8a77995",
		.data = &ipmmu_features_rcar_gen3,
	}, {
		/* Terminator */
	},
};

static int ipmmu_probe(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu;
	struct resource *res;
	int irq;
	int ret;

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	mmu->dev = &pdev->dev;
	spin_lock_init(&mmu->lock);
	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
	mmu->features = of_device_get_match_data(&pdev->dev);
	memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));

	/* Map I/O memory and request IRQ. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is thus currently
	 * not implemented in the driver. Furthermore, preliminary tests of
	 * non-secure operation with the main register bank were not successful.
	 * Offset the register base unconditionally to point to the non-secure
	 * alias space for now.
	 */
	if (mmu->features->use_ns_alias_offset)
		mmu->base += IM_NS_ALIAS_OFFSET;

	mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);

	/*
	 * Determine whether this IPMMU instance is a root device: either its
	 * features lack the has_cache_leaf_nodes flag, or its DT node has no
	 * renesas,ipmmu-main property.
	 */
	if (!mmu->features->has_cache_leaf_nodes ||
	    !of_find_property(pdev->dev.of_node, "renesas,ipmmu-main", NULL))
		mmu->root = mmu;
	else
		mmu->root = ipmmu_find_root();

	/*
	 * Defer probing until the root device has been registered.
	 */
	if (!mmu->root)
		return -EPROBE_DEFER;

	/* Root devices have mandatory IRQs. */
	if (ipmmu_is_root(mmu)) {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
				       dev_name(&pdev->dev), mmu);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
			return ret;
		}

		ipmmu_device_reset(mmu);

		if (mmu->features->reserved_context) {
			dev_info(&pdev->dev, "IPMMU context 0 is reserved\n");
			set_bit(0, mmu->ctx);
		}
	}

	/*
	 * Register the IPMMU to the IOMMU subsystem in the following cases:
	 * - R-Car Gen2 IPMMU (all devices registered)
	 * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
	 */
	if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) {
		ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL,
					     dev_name(&pdev->dev));
		if (ret)
			return ret;

		iommu_device_set_ops(&mmu->iommu, &ipmmu_ops);
		iommu_device_set_fwnode(&mmu->iommu,
					&pdev->dev.of_node->fwnode);

		ret = iommu_device_register(&mmu->iommu);
		if (ret)
			return ret;

#if defined(CONFIG_IOMMU_DMA)
		if (!iommu_present(&platform_bus_type))
			bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif
	}

	/*
	 * We can't create the ARM mapping here as it requires the bus to have
	 * an IOMMU, which only happens when bus_set_iommu() is called in
	 * ipmmu_init() after the probe function returns.
	 */

	platform_set_drvdata(pdev, mmu);

	return 0;
}

static int ipmmu_remove(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&mmu->iommu);
	iommu_device_unregister(&mmu->iommu);

	arm_iommu_release_mapping(mmu->mapping);

	ipmmu_device_reset(mmu);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int ipmmu_resume_noirq(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev);
	unsigned int i;

	/* Reset the root MMU and restore its contexts. */
	if (ipmmu_is_root(mmu)) {
		ipmmu_device_reset(mmu);

		for (i = 0; i < mmu->num_ctx; i++) {
			if (!mmu->domains[i])
				continue;

			ipmmu_domain_setup_context(mmu->domains[i]);
		}
	}

	/* Re-enable active micro-TLBs. */
	for (i = 0; i < mmu->features->num_utlbs; i++) {
		if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID)
			continue;

		ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i);
	}

	return 0;
}

static const struct dev_pm_ops ipmmu_pm = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, ipmmu_resume_noirq)
};
#define DEV_PM_OPS	&ipmmu_pm
#else
#define DEV_PM_OPS	NULL
#endif /* CONFIG_PM_SLEEP */

static struct platform_driver ipmmu_driver = {
	.driver = {
		.name = "ipmmu-vmsa",
		.of_match_table = of_match_ptr(ipmmu_of_ids),
		.pm = DEV_PM_OPS,
	},
	.probe = ipmmu_probe,
	.remove = ipmmu_remove,
};

static int __init ipmmu_init(void)
{
	struct device_node *np;
	static bool setup_done;
	int ret;

	if (setup_done)
		return 0;

	np = of_find_matching_node(NULL, ipmmu_of_ids);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&ipmmu_driver);
	if (ret < 0)
		return ret;

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &ipmmu_ops);
#endif

	setup_done = true;
	return 0;
}
subsys_initcall(ipmmu_init);