Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

iommu/shmobile: Add iommu driver for Renesas IPMMU modules

This is the Renesas IPMMU driver and IOMMU API implementation.

The IPMMU module supports the MMU function and the PMB function. The
MMU function provides address translation using page tables compatible with
the ARMv6 format. The PMB function provides address translation including
tile-linear translation. This patch implements the MMU function.

The iommu driver does not register a platform driver directly because:
- the register spaces of the MMU function and the PMB function
share a common register (used for flush settings), so the two functions
should ideally have a way to share this register appropriately.
- the MMU function uses the IOMMU API while the PMB function does not.
- the two functions may be used independently.

Signed-off-by: Hideki EIRAKU <hdk@igel.co.jp>
Signed-off-by: Joerg Roedel <joro@8bytes.org>

authored by

Hideki EIRAKU and committed by
Joerg Roedel
c2c460f7 88b62b91

+659
+74
drivers/iommu/Kconfig
··· 187 187 188 188 Say N unless you need kernel log message for IOMMU debugging 189 189 190 + config SHMOBILE_IPMMU 191 + bool 192 + 193 + config SHMOBILE_IPMMU_TLB 194 + bool 195 + 196 + config SHMOBILE_IOMMU 197 + bool "IOMMU for Renesas IPMMU/IPMMUI" 198 + default n 199 + depends on (ARM && ARCH_SHMOBILE) 200 + select IOMMU_API 201 + select ARM_DMA_USE_IOMMU 202 + select SHMOBILE_IPMMU 203 + select SHMOBILE_IPMMU_TLB 204 + help 205 + Support for Renesas IPMMU/IPMMUI. This option enables 206 + remapping of DMA memory accesses from all of the IP blocks 207 + on the ICB. 208 + 209 + Warning: Drivers (including userspace drivers of UIO 210 + devices) of the IP blocks on the ICB *must* use addresses 211 + allocated from the IPMMU (iova) for DMA with this option 212 + enabled. 213 + 214 + If unsure, say N. 215 + 216 + choice 217 + prompt "IPMMU/IPMMUI address space size" 218 + default SHMOBILE_IOMMU_ADDRSIZE_2048MB 219 + depends on SHMOBILE_IOMMU 220 + help 221 + This option sets IPMMU/IPMMUI address space size by 222 + adjusting the 1st level page table size. The page table size 223 + is calculated as follows: 224 + 225 + page table size = number of page table entries * 4 bytes 226 + number of page table entries = address space size / 1 MiB 227 + 228 + For example, when the address space size is 2048 MiB, the 229 + 1st level page table size is 8192 bytes. 
230 + 231 + config SHMOBILE_IOMMU_ADDRSIZE_2048MB 232 + bool "2 GiB" 233 + 234 + config SHMOBILE_IOMMU_ADDRSIZE_1024MB 235 + bool "1 GiB" 236 + 237 + config SHMOBILE_IOMMU_ADDRSIZE_512MB 238 + bool "512 MiB" 239 + 240 + config SHMOBILE_IOMMU_ADDRSIZE_256MB 241 + bool "256 MiB" 242 + 243 + config SHMOBILE_IOMMU_ADDRSIZE_128MB 244 + bool "128 MiB" 245 + 246 + config SHMOBILE_IOMMU_ADDRSIZE_64MB 247 + bool "64 MiB" 248 + 249 + config SHMOBILE_IOMMU_ADDRSIZE_32MB 250 + bool "32 MiB" 251 + 252 + endchoice 253 + 254 + config SHMOBILE_IOMMU_L1SIZE 255 + int 256 + default 8192 if SHMOBILE_IOMMU_ADDRSIZE_2048MB 257 + default 4096 if SHMOBILE_IOMMU_ADDRSIZE_1024MB 258 + default 2048 if SHMOBILE_IOMMU_ADDRSIZE_512MB 259 + default 1024 if SHMOBILE_IOMMU_ADDRSIZE_256MB 260 + default 512 if SHMOBILE_IOMMU_ADDRSIZE_128MB 261 + default 256 if SHMOBILE_IOMMU_ADDRSIZE_64MB 262 + default 128 if SHMOBILE_IOMMU_ADDRSIZE_32MB 263 + 190 264 endif # IOMMU_SUPPORT
+2
drivers/iommu/Makefile
··· 13 13 obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o 14 14 obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o 15 15 obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o 16 + obj-$(CONFIG_SHMOBILE_IOMMU) += shmobile-iommu.o 17 + obj-$(CONFIG_SHMOBILE_IPMMU) += shmobile-ipmmu.o
+395
drivers/iommu/shmobile-iommu.c
··· 1 + /* 2 + * IOMMU for IPMMU/IPMMUI 3 + * Copyright (C) 2012 Hideki EIRAKU 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License as published by 7 + * the Free Software Foundation; version 2 of the License. 8 + */ 9 + 10 + #include <linux/dma-mapping.h> 11 + #include <linux/io.h> 12 + #include <linux/iommu.h> 13 + #include <linux/platform_device.h> 14 + #include <linux/sizes.h> 15 + #include <linux/slab.h> 16 + #include <asm/dma-iommu.h> 17 + #include "shmobile-ipmmu.h" 18 + 19 + #define L1_SIZE CONFIG_SHMOBILE_IOMMU_L1SIZE 20 + #define L1_LEN (L1_SIZE / 4) 21 + #define L1_ALIGN L1_SIZE 22 + #define L2_SIZE SZ_1K 23 + #define L2_LEN (L2_SIZE / 4) 24 + #define L2_ALIGN L2_SIZE 25 + 26 + struct shmobile_iommu_domain_pgtable { 27 + uint32_t *pgtable; 28 + dma_addr_t handle; 29 + }; 30 + 31 + struct shmobile_iommu_archdata { 32 + struct list_head attached_list; 33 + struct dma_iommu_mapping *iommu_mapping; 34 + spinlock_t attach_lock; 35 + struct shmobile_iommu_domain *attached; 36 + int num_attached_devices; 37 + struct shmobile_ipmmu *ipmmu; 38 + }; 39 + 40 + struct shmobile_iommu_domain { 41 + struct shmobile_iommu_domain_pgtable l1, l2[L1_LEN]; 42 + spinlock_t map_lock; 43 + spinlock_t attached_list_lock; 44 + struct list_head attached_list; 45 + }; 46 + 47 + static struct shmobile_iommu_archdata *ipmmu_archdata; 48 + static struct kmem_cache *l1cache, *l2cache; 49 + 50 + static int pgtable_alloc(struct shmobile_iommu_domain_pgtable *pgtable, 51 + struct kmem_cache *cache, size_t size) 52 + { 53 + pgtable->pgtable = kmem_cache_zalloc(cache, GFP_ATOMIC); 54 + if (!pgtable->pgtable) 55 + return -ENOMEM; 56 + pgtable->handle = dma_map_single(NULL, pgtable->pgtable, size, 57 + DMA_TO_DEVICE); 58 + return 0; 59 + } 60 + 61 + static void pgtable_free(struct shmobile_iommu_domain_pgtable *pgtable, 62 + struct kmem_cache *cache, size_t size) 63 + { 64 + dma_unmap_single(NULL, 
pgtable->handle, size, DMA_TO_DEVICE); 65 + kmem_cache_free(cache, pgtable->pgtable); 66 + } 67 + 68 + static uint32_t pgtable_read(struct shmobile_iommu_domain_pgtable *pgtable, 69 + unsigned int index) 70 + { 71 + return pgtable->pgtable[index]; 72 + } 73 + 74 + static void pgtable_write(struct shmobile_iommu_domain_pgtable *pgtable, 75 + unsigned int index, unsigned int count, uint32_t val) 76 + { 77 + unsigned int i; 78 + 79 + for (i = 0; i < count; i++) 80 + pgtable->pgtable[index + i] = val; 81 + dma_sync_single_for_device(NULL, pgtable->handle + index * sizeof(val), 82 + sizeof(val) * count, DMA_TO_DEVICE); 83 + } 84 + 85 + static int shmobile_iommu_domain_init(struct iommu_domain *domain) 86 + { 87 + struct shmobile_iommu_domain *sh_domain; 88 + int i, ret; 89 + 90 + sh_domain = kmalloc(sizeof(*sh_domain), GFP_KERNEL); 91 + if (!sh_domain) 92 + return -ENOMEM; 93 + ret = pgtable_alloc(&sh_domain->l1, l1cache, L1_SIZE); 94 + if (ret < 0) { 95 + kfree(sh_domain); 96 + return ret; 97 + } 98 + for (i = 0; i < L1_LEN; i++) 99 + sh_domain->l2[i].pgtable = NULL; 100 + spin_lock_init(&sh_domain->map_lock); 101 + spin_lock_init(&sh_domain->attached_list_lock); 102 + INIT_LIST_HEAD(&sh_domain->attached_list); 103 + domain->priv = sh_domain; 104 + return 0; 105 + } 106 + 107 + static void shmobile_iommu_domain_destroy(struct iommu_domain *domain) 108 + { 109 + struct shmobile_iommu_domain *sh_domain = domain->priv; 110 + int i; 111 + 112 + for (i = 0; i < L1_LEN; i++) { 113 + if (sh_domain->l2[i].pgtable) 114 + pgtable_free(&sh_domain->l2[i], l2cache, L2_SIZE); 115 + } 116 + pgtable_free(&sh_domain->l1, l1cache, L1_SIZE); 117 + kfree(sh_domain); 118 + domain->priv = NULL; 119 + } 120 + 121 + static int shmobile_iommu_attach_device(struct iommu_domain *domain, 122 + struct device *dev) 123 + { 124 + struct shmobile_iommu_archdata *archdata = dev->archdata.iommu; 125 + struct shmobile_iommu_domain *sh_domain = domain->priv; 126 + int ret = -EBUSY; 127 + 128 + if 
(!archdata) 129 + return -ENODEV; 130 + spin_lock(&sh_domain->attached_list_lock); 131 + spin_lock(&archdata->attach_lock); 132 + if (archdata->attached != sh_domain) { 133 + if (archdata->attached) 134 + goto err; 135 + ipmmu_tlb_set(archdata->ipmmu, sh_domain->l1.handle, L1_SIZE, 136 + 0); 137 + ipmmu_tlb_flush(archdata->ipmmu); 138 + archdata->attached = sh_domain; 139 + archdata->num_attached_devices = 0; 140 + list_add(&archdata->attached_list, &sh_domain->attached_list); 141 + } 142 + archdata->num_attached_devices++; 143 + ret = 0; 144 + err: 145 + spin_unlock(&archdata->attach_lock); 146 + spin_unlock(&sh_domain->attached_list_lock); 147 + return ret; 148 + } 149 + 150 + static void shmobile_iommu_detach_device(struct iommu_domain *domain, 151 + struct device *dev) 152 + { 153 + struct shmobile_iommu_archdata *archdata = dev->archdata.iommu; 154 + struct shmobile_iommu_domain *sh_domain = domain->priv; 155 + 156 + if (!archdata) 157 + return; 158 + spin_lock(&sh_domain->attached_list_lock); 159 + spin_lock(&archdata->attach_lock); 160 + archdata->num_attached_devices--; 161 + if (!archdata->num_attached_devices) { 162 + ipmmu_tlb_set(archdata->ipmmu, 0, 0, 0); 163 + ipmmu_tlb_flush(archdata->ipmmu); 164 + archdata->attached = NULL; 165 + list_del(&archdata->attached_list); 166 + } 167 + spin_unlock(&archdata->attach_lock); 168 + spin_unlock(&sh_domain->attached_list_lock); 169 + } 170 + 171 + static void domain_tlb_flush(struct shmobile_iommu_domain *sh_domain) 172 + { 173 + struct shmobile_iommu_archdata *archdata; 174 + 175 + spin_lock(&sh_domain->attached_list_lock); 176 + list_for_each_entry(archdata, &sh_domain->attached_list, attached_list) 177 + ipmmu_tlb_flush(archdata->ipmmu); 178 + spin_unlock(&sh_domain->attached_list_lock); 179 + } 180 + 181 + static int l2alloc(struct shmobile_iommu_domain *sh_domain, 182 + unsigned int l1index) 183 + { 184 + int ret; 185 + 186 + if (!sh_domain->l2[l1index].pgtable) { 187 + ret = 
pgtable_alloc(&sh_domain->l2[l1index], l2cache, L2_SIZE); 188 + if (ret < 0) 189 + return ret; 190 + } 191 + pgtable_write(&sh_domain->l1, l1index, 1, 192 + sh_domain->l2[l1index].handle | 0x1); 193 + return 0; 194 + } 195 + 196 + static void l2realfree(struct shmobile_iommu_domain_pgtable *l2) 197 + { 198 + if (l2->pgtable) 199 + pgtable_free(l2, l2cache, L2_SIZE); 200 + } 201 + 202 + static void l2free(struct shmobile_iommu_domain *sh_domain, 203 + unsigned int l1index, 204 + struct shmobile_iommu_domain_pgtable *l2) 205 + { 206 + pgtable_write(&sh_domain->l1, l1index, 1, 0); 207 + if (sh_domain->l2[l1index].pgtable) { 208 + *l2 = sh_domain->l2[l1index]; 209 + sh_domain->l2[l1index].pgtable = NULL; 210 + } 211 + } 212 + 213 + static int shmobile_iommu_map(struct iommu_domain *domain, unsigned long iova, 214 + phys_addr_t paddr, size_t size, int prot) 215 + { 216 + struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL }; 217 + struct shmobile_iommu_domain *sh_domain = domain->priv; 218 + unsigned int l1index, l2index; 219 + int ret; 220 + 221 + l1index = iova >> 20; 222 + switch (size) { 223 + case SZ_4K: 224 + l2index = (iova >> 12) & 0xff; 225 + spin_lock(&sh_domain->map_lock); 226 + ret = l2alloc(sh_domain, l1index); 227 + if (!ret) 228 + pgtable_write(&sh_domain->l2[l1index], l2index, 1, 229 + paddr | 0xff2); 230 + spin_unlock(&sh_domain->map_lock); 231 + break; 232 + case SZ_64K: 233 + l2index = (iova >> 12) & 0xf0; 234 + spin_lock(&sh_domain->map_lock); 235 + ret = l2alloc(sh_domain, l1index); 236 + if (!ret) 237 + pgtable_write(&sh_domain->l2[l1index], l2index, 0x10, 238 + paddr | 0xff1); 239 + spin_unlock(&sh_domain->map_lock); 240 + break; 241 + case SZ_1M: 242 + spin_lock(&sh_domain->map_lock); 243 + l2free(sh_domain, l1index, &l2); 244 + pgtable_write(&sh_domain->l1, l1index, 1, paddr | 0xc02); 245 + spin_unlock(&sh_domain->map_lock); 246 + ret = 0; 247 + break; 248 + default: 249 + ret = -EINVAL; 250 + } 251 + if (!ret) 252 + 
domain_tlb_flush(sh_domain); 253 + l2realfree(&l2); 254 + return ret; 255 + } 256 + 257 + static size_t shmobile_iommu_unmap(struct iommu_domain *domain, 258 + unsigned long iova, size_t size) 259 + { 260 + struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL }; 261 + struct shmobile_iommu_domain *sh_domain = domain->priv; 262 + unsigned int l1index, l2index; 263 + uint32_t l2entry = 0; 264 + size_t ret = 0; 265 + 266 + l1index = iova >> 20; 267 + if (!(iova & 0xfffff) && size >= SZ_1M) { 268 + spin_lock(&sh_domain->map_lock); 269 + l2free(sh_domain, l1index, &l2); 270 + spin_unlock(&sh_domain->map_lock); 271 + ret = SZ_1M; 272 + goto done; 273 + } 274 + l2index = (iova >> 12) & 0xff; 275 + spin_lock(&sh_domain->map_lock); 276 + if (sh_domain->l2[l1index].pgtable) 277 + l2entry = pgtable_read(&sh_domain->l2[l1index], l2index); 278 + switch (l2entry & 3) { 279 + case 1: 280 + if (l2index & 0xf) 281 + break; 282 + pgtable_write(&sh_domain->l2[l1index], l2index, 0x10, 0); 283 + ret = SZ_64K; 284 + break; 285 + case 2: 286 + pgtable_write(&sh_domain->l2[l1index], l2index, 1, 0); 287 + ret = SZ_4K; 288 + break; 289 + } 290 + spin_unlock(&sh_domain->map_lock); 291 + done: 292 + if (ret) 293 + domain_tlb_flush(sh_domain); 294 + l2realfree(&l2); 295 + return ret; 296 + } 297 + 298 + static phys_addr_t shmobile_iommu_iova_to_phys(struct iommu_domain *domain, 299 + unsigned long iova) 300 + { 301 + struct shmobile_iommu_domain *sh_domain = domain->priv; 302 + uint32_t l1entry = 0, l2entry = 0; 303 + unsigned int l1index, l2index; 304 + 305 + l1index = iova >> 20; 306 + l2index = (iova >> 12) & 0xff; 307 + spin_lock(&sh_domain->map_lock); 308 + if (sh_domain->l2[l1index].pgtable) 309 + l2entry = pgtable_read(&sh_domain->l2[l1index], l2index); 310 + else 311 + l1entry = pgtable_read(&sh_domain->l1, l1index); 312 + spin_unlock(&sh_domain->map_lock); 313 + switch (l2entry & 3) { 314 + case 1: 315 + return (l2entry & ~0xffff) | (iova & 0xffff); 316 + case 2: 317 + return 
(l2entry & ~0xfff) | (iova & 0xfff); 318 + default: 319 + if ((l1entry & 3) == 2) 320 + return (l1entry & ~0xfffff) | (iova & 0xfffff); 321 + return 0; 322 + } 323 + } 324 + 325 + static int find_dev_name(struct shmobile_ipmmu *ipmmu, const char *dev_name) 326 + { 327 + unsigned int i, n = ipmmu->num_dev_names; 328 + 329 + for (i = 0; i < n; i++) { 330 + if (strcmp(ipmmu->dev_names[i], dev_name) == 0) 331 + return 1; 332 + } 333 + return 0; 334 + } 335 + 336 + static int shmobile_iommu_add_device(struct device *dev) 337 + { 338 + struct shmobile_iommu_archdata *archdata = ipmmu_archdata; 339 + struct dma_iommu_mapping *mapping; 340 + 341 + if (!find_dev_name(archdata->ipmmu, dev_name(dev))) 342 + return 0; 343 + mapping = archdata->iommu_mapping; 344 + if (!mapping) { 345 + mapping = arm_iommu_create_mapping(&platform_bus_type, 0, 346 + L1_LEN << 20, 0); 347 + if (IS_ERR(mapping)) 348 + return PTR_ERR(mapping); 349 + archdata->iommu_mapping = mapping; 350 + } 351 + dev->archdata.iommu = archdata; 352 + if (arm_iommu_attach_device(dev, mapping)) 353 + pr_err("arm_iommu_attach_device failed\n"); 354 + return 0; 355 + } 356 + 357 + static struct iommu_ops shmobile_iommu_ops = { 358 + .domain_init = shmobile_iommu_domain_init, 359 + .domain_destroy = shmobile_iommu_domain_destroy, 360 + .attach_dev = shmobile_iommu_attach_device, 361 + .detach_dev = shmobile_iommu_detach_device, 362 + .map = shmobile_iommu_map, 363 + .unmap = shmobile_iommu_unmap, 364 + .iova_to_phys = shmobile_iommu_iova_to_phys, 365 + .add_device = shmobile_iommu_add_device, 366 + .pgsize_bitmap = SZ_1M | SZ_64K | SZ_4K, 367 + }; 368 + 369 + int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu) 370 + { 371 + static struct shmobile_iommu_archdata *archdata; 372 + 373 + l1cache = kmem_cache_create("shmobile-iommu-pgtable1", L1_SIZE, 374 + L1_ALIGN, SLAB_HWCACHE_ALIGN, NULL); 375 + if (!l1cache) 376 + return -ENOMEM; 377 + l2cache = kmem_cache_create("shmobile-iommu-pgtable2", L2_SIZE, 378 + L2_ALIGN, 
SLAB_HWCACHE_ALIGN, NULL); 379 + if (!l2cache) { 380 + kmem_cache_destroy(l1cache); 381 + return -ENOMEM; 382 + } 383 + archdata = kmalloc(sizeof(*archdata), GFP_KERNEL); 384 + if (!archdata) { 385 + kmem_cache_destroy(l1cache); 386 + kmem_cache_destroy(l2cache); 387 + return -ENOMEM; 388 + } 389 + spin_lock_init(&archdata->attach_lock); 390 + archdata->attached = NULL; 391 + archdata->ipmmu = ipmmu; 392 + ipmmu_archdata = archdata; 393 + bus_set_iommu(&platform_bus_type, &shmobile_iommu_ops); 394 + return 0; 395 + }
+136
drivers/iommu/shmobile-ipmmu.c
··· 1 + /* 2 + * IPMMU/IPMMUI 3 + * Copyright (C) 2012 Hideki EIRAKU 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License as published by 7 + * the Free Software Foundation; version 2 of the License. 8 + */ 9 + 10 + #include <linux/err.h> 11 + #include <linux/export.h> 12 + #include <linux/io.h> 13 + #include <linux/platform_device.h> 14 + #include <linux/slab.h> 15 + #include <linux/platform_data/sh_ipmmu.h> 16 + #include "shmobile-ipmmu.h" 17 + 18 + #define IMCTR1 0x000 19 + #define IMCTR2 0x004 20 + #define IMASID 0x010 21 + #define IMTTBR 0x014 22 + #define IMTTBCR 0x018 23 + 24 + #define IMCTR1_TLBEN (1 << 0) 25 + #define IMCTR1_FLUSH (1 << 1) 26 + 27 + static void ipmmu_reg_write(struct shmobile_ipmmu *ipmmu, unsigned long reg_off, 28 + unsigned long data) 29 + { 30 + iowrite32(data, ipmmu->ipmmu_base + reg_off); 31 + } 32 + 33 + void ipmmu_tlb_flush(struct shmobile_ipmmu *ipmmu) 34 + { 35 + if (!ipmmu) 36 + return; 37 + 38 + mutex_lock(&ipmmu->flush_lock); 39 + if (ipmmu->tlb_enabled) 40 + ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH | IMCTR1_TLBEN); 41 + else 42 + ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH); 43 + mutex_unlock(&ipmmu->flush_lock); 44 + } 45 + 46 + void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size, 47 + int asid) 48 + { 49 + if (!ipmmu) 50 + return; 51 + 52 + mutex_lock(&ipmmu->flush_lock); 53 + switch (size) { 54 + default: 55 + ipmmu->tlb_enabled = 0; 56 + break; 57 + case 0x2000: 58 + ipmmu_reg_write(ipmmu, IMTTBCR, 1); 59 + ipmmu->tlb_enabled = 1; 60 + break; 61 + case 0x1000: 62 + ipmmu_reg_write(ipmmu, IMTTBCR, 2); 63 + ipmmu->tlb_enabled = 1; 64 + break; 65 + case 0x800: 66 + ipmmu_reg_write(ipmmu, IMTTBCR, 3); 67 + ipmmu->tlb_enabled = 1; 68 + break; 69 + case 0x400: 70 + ipmmu_reg_write(ipmmu, IMTTBCR, 4); 71 + ipmmu->tlb_enabled = 1; 72 + break; 73 + case 0x200: 74 + ipmmu_reg_write(ipmmu, IMTTBCR, 5); 75 + 
ipmmu->tlb_enabled = 1; 76 + break; 77 + case 0x100: 78 + ipmmu_reg_write(ipmmu, IMTTBCR, 6); 79 + ipmmu->tlb_enabled = 1; 80 + break; 81 + case 0x80: 82 + ipmmu_reg_write(ipmmu, IMTTBCR, 7); 83 + ipmmu->tlb_enabled = 1; 84 + break; 85 + } 86 + ipmmu_reg_write(ipmmu, IMTTBR, phys); 87 + ipmmu_reg_write(ipmmu, IMASID, asid); 88 + mutex_unlock(&ipmmu->flush_lock); 89 + } 90 + 91 + static int ipmmu_probe(struct platform_device *pdev) 92 + { 93 + struct shmobile_ipmmu *ipmmu; 94 + struct resource *res; 95 + struct shmobile_ipmmu_platform_data *pdata = pdev->dev.platform_data; 96 + 97 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 98 + if (!res) { 99 + dev_err(&pdev->dev, "cannot get platform resources\n"); 100 + return -ENOENT; 101 + } 102 + ipmmu = devm_kzalloc(&pdev->dev, sizeof(*ipmmu), GFP_KERNEL); 103 + if (!ipmmu) { 104 + dev_err(&pdev->dev, "cannot allocate device data\n"); 105 + return -ENOMEM; 106 + } 107 + mutex_init(&ipmmu->flush_lock); 108 + ipmmu->dev = &pdev->dev; 109 + ipmmu->ipmmu_base = devm_ioremap_nocache(&pdev->dev, res->start, 110 + resource_size(res)); 111 + if (!ipmmu->ipmmu_base) { 112 + dev_err(&pdev->dev, "ioremap_nocache failed\n"); 113 + return -ENOMEM; 114 + } 115 + ipmmu->dev_names = pdata->dev_names; 116 + ipmmu->num_dev_names = pdata->num_dev_names; 117 + platform_set_drvdata(pdev, ipmmu); 118 + ipmmu_reg_write(ipmmu, IMCTR1, 0x0); /* disable TLB */ 119 + ipmmu_reg_write(ipmmu, IMCTR2, 0x0); /* disable PMB */ 120 + ipmmu_iommu_init(ipmmu); 121 + return 0; 122 + } 123 + 124 + static struct platform_driver ipmmu_driver = { 125 + .probe = ipmmu_probe, 126 + .driver = { 127 + .owner = THIS_MODULE, 128 + .name = "ipmmu", 129 + }, 130 + }; 131 + 132 + static int __init ipmmu_init(void) 133 + { 134 + return platform_driver_register(&ipmmu_driver); 135 + } 136 + subsys_initcall(ipmmu_init);
+34
drivers/iommu/shmobile-ipmmu.h
··· 1 + /* shmobile-ipmmu.h 2 + * 3 + * Copyright (C) 2012 Hideki EIRAKU 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License as published by 7 + * the Free Software Foundation; version 2 of the License. 8 + */ 9 + 10 + #ifndef __SHMOBILE_IPMMU_H__ 11 + #define __SHMOBILE_IPMMU_H__ 12 + 13 + struct shmobile_ipmmu { 14 + struct device *dev; 15 + void __iomem *ipmmu_base; 16 + int tlb_enabled; 17 + struct mutex flush_lock; 18 + const char * const *dev_names; 19 + unsigned int num_dev_names; 20 + }; 21 + 22 + #ifdef CONFIG_SHMOBILE_IPMMU_TLB 23 + void ipmmu_tlb_flush(struct shmobile_ipmmu *ipmmu); 24 + void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size, 25 + int asid); 26 + int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu); 27 + #else 28 + static inline int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu) 29 + { 30 + return -EINVAL; 31 + } 32 + #endif 33 + 34 + #endif /* __SHMOBILE_IPMMU_H__ */
+18
include/linux/platform_data/sh_ipmmu.h
··· 1 + /* sh_ipmmu.h 2 + * 3 + * Copyright (C) 2012 Hideki EIRAKU 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License as published by 7 + * the Free Software Foundation; version 2 of the License. 8 + */ 9 + 10 + #ifndef __SH_IPMMU_H__ 11 + #define __SH_IPMMU_H__ 12 + 13 + struct shmobile_ipmmu_platform_data { 14 + const char * const *dev_names; 15 + unsigned int num_dev_names; 16 + }; 17 + 18 + #endif /* __SH_IPMMU_H__ */