Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/etnaviv: rework MMU handling

This reworks the MMU handling to make it possible to have multiple MMU contexts.
A context is basically one instance of GPU page tables. Currently we have one
set of page tables per GPU, which isn't all that clever, as it has the
following two consequences:

1. All GPU clients (aka processes) share the same pagetables, which means
there is no isolation between clients, only between the GPU-assigned memory
spaces and the rest of the system. Better than nothing, but also not great.

2. Clients operating on the same set of buffers with different etnaviv GPU
cores, e.g. a workload using both the 2D and 3D GPU, need to map the used
buffers into the pagetable sets of each used GPU.

This patch reworks all the MMU handling to introduce the abstraction of the
MMU context. A context can be shared across different GPU cores, as long as
they have compatible MMU implementations, which is the case for all systems
with Vivante GPUs seen in the wild.

As MMUv1 is not able to change pagetables on the fly without a
"stop the world" operation (stop the GPU, change the pagetables via CPU
interaction, restart the GPU), the implementation introduces a single shared
context on MMUv1, which is returned whenever a new context is requested.

This patch assigns an MMU context to each GPU, so on MMUv2 systems there is
still one set of pagetables per GPU, but due to the shared context MMUv1
systems see a change in behavior as now a single pagetable set is used
across all GPU cores.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
Reviewed-by: Guido Günther <agx@sigxcpu.org>

+464 -433
+5 -5
drivers/gpu/drm/etnaviv/etnaviv_buffer.c
··· 207 207 return buffer->user_size / 8; 208 208 } 209 209 210 - u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu) 210 + u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id) 211 211 { 212 212 struct etnaviv_cmdbuf *buffer = &gpu->buffer; 213 213 ··· 216 216 buffer->user_size = 0; 217 217 218 218 CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG, 219 - VIVS_MMUv2_PTA_CONFIG_INDEX(0)); 219 + VIVS_MMUv2_PTA_CONFIG_INDEX(id)); 220 220 221 221 CMD_END(buffer); 222 222 ··· 315 315 u32 return_target, return_dwords; 316 316 u32 link_target, link_dwords; 317 317 bool switch_context = gpu->exec_state != exec_state; 318 - unsigned int new_flush_seq = READ_ONCE(gpu->mmu->flush_seq); 318 + unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq); 319 319 bool need_flush = gpu->flush_seq != new_flush_seq; 320 320 321 321 lockdep_assert_held(&gpu->lock); ··· 339 339 340 340 /* flush command */ 341 341 if (need_flush) { 342 - if (gpu->mmu->version == ETNAVIV_IOMMU_V1) 342 + if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1) 343 343 extra_dwords += 1; 344 344 else 345 345 extra_dwords += 3; ··· 353 353 354 354 if (need_flush) { 355 355 /* Add the MMU flush */ 356 - if (gpu->mmu->version == ETNAVIV_IOMMU_V1) { 356 + if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1) { 357 357 CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU, 358 358 VIVS_GL_FLUSH_MMU_FLUSH_FEMMU | 359 359 VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
+4 -4
drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
··· 60 60 } 61 61 62 62 int etnaviv_cmdbuf_suballoc_map(struct etnaviv_cmdbuf_suballoc *suballoc, 63 - struct etnaviv_iommu *mmu, 63 + struct etnaviv_iommu_context *context, 64 64 struct etnaviv_vram_mapping *mapping, 65 65 u32 memory_base) 66 66 { 67 - return etnaviv_iommu_get_suballoc_va(mmu, mapping, memory_base, 67 + return etnaviv_iommu_get_suballoc_va(context, mapping, memory_base, 68 68 suballoc->paddr, SUBALLOC_SIZE); 69 69 } 70 70 71 - void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu *mmu, 71 + void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu_context *context, 72 72 struct etnaviv_vram_mapping *mapping) 73 73 { 74 - etnaviv_iommu_put_suballoc_va(mmu, mapping); 74 + etnaviv_iommu_put_suballoc_va(context, mapping); 75 75 } 76 76 77 77 void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc)
+3 -3
drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
··· 9 9 #include <linux/types.h> 10 10 11 11 struct device; 12 - struct etnaviv_iommu; 12 + struct etnaviv_iommu_context; 13 13 struct etnaviv_vram_mapping; 14 14 struct etnaviv_cmdbuf_suballoc; 15 15 struct etnaviv_perfmon_request; ··· 28 28 etnaviv_cmdbuf_suballoc_new(struct device *dev); 29 29 void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc); 30 30 int etnaviv_cmdbuf_suballoc_map(struct etnaviv_cmdbuf_suballoc *suballoc, 31 - struct etnaviv_iommu *mmu, 31 + struct etnaviv_iommu_context *context, 32 32 struct etnaviv_vram_mapping *mapping, 33 33 u32 memory_base); 34 - void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu *mmu, 34 + void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu_context *context, 35 35 struct etnaviv_vram_mapping *mapping); 36 36 37 37
+3 -3
drivers/gpu/drm/etnaviv/etnaviv_drv.c
··· 119 119 120 120 seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev)); 121 121 122 - mutex_lock(&gpu->mmu->lock); 123 - drm_mm_print(&gpu->mmu->mm, &p); 124 - mutex_unlock(&gpu->mmu->lock); 122 + mutex_lock(&gpu->mmu_context->lock); 123 + drm_mm_print(&gpu->mmu_context->mm, &p); 124 + mutex_unlock(&gpu->mmu_context->lock); 125 125 126 126 return 0; 127 127 }
+3 -1
drivers/gpu/drm/etnaviv/etnaviv_drv.h
··· 22 22 struct etnaviv_mmu; 23 23 struct etnaviv_gem_object; 24 24 struct etnaviv_gem_submit; 25 + struct etnaviv_iommu_global; 25 26 26 27 struct etnaviv_file_private { 27 28 /* ··· 38 37 struct etnaviv_gpu *gpu[ETNA_MAX_PIPES]; 39 38 40 39 struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc; 40 + struct etnaviv_iommu_global *mmu_global; 41 41 42 42 /* list of GEM objects: */ 43 43 struct mutex gem_lock; ··· 71 69 uintptr_t ptr, u32 size, u32 flags, u32 *handle); 72 70 u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu); 73 71 u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr); 74 - u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu); 72 + u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id); 75 73 void etnaviv_buffer_end(struct etnaviv_gpu *gpu); 76 74 void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event); 77 75 void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
+6 -6
drivers/gpu/drm/etnaviv/etnaviv_dump.c
··· 93 93 } 94 94 95 95 static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter, 96 - struct etnaviv_iommu *mmu, size_t mmu_size) 96 + struct etnaviv_iommu_context *mmu, size_t mmu_size) 97 97 { 98 98 etnaviv_iommu_dump(mmu, iter->data); 99 99 ··· 125 125 return; 126 126 etnaviv_dump_core = false; 127 127 128 - mutex_lock(&gpu->mmu->lock); 128 + mutex_lock(&gpu->mmu_context->lock); 129 129 130 - mmu_size = etnaviv_iommu_dump_size(gpu->mmu); 130 + mmu_size = etnaviv_iommu_dump_size(gpu->mmu_context); 131 131 132 132 /* We always dump registers, mmu, ring, hanging cmdbuf and end marker */ 133 133 n_obj = 5; ··· 157 157 iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, 158 158 PAGE_KERNEL); 159 159 if (!iter.start) { 160 - mutex_unlock(&gpu->mmu->lock); 160 + mutex_unlock(&gpu->mmu_context->lock); 161 161 dev_warn(gpu->dev, "failed to allocate devcoredump file\n"); 162 162 return; 163 163 } ··· 169 169 memset(iter.hdr, 0, iter.data - iter.start); 170 170 171 171 etnaviv_core_dump_registers(&iter, gpu); 172 - etnaviv_core_dump_mmu(&iter, gpu->mmu, mmu_size); 172 + etnaviv_core_dump_mmu(&iter, gpu->mmu_context, mmu_size); 173 173 etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr, 174 174 gpu->buffer.size, 175 175 etnaviv_cmdbuf_get_va(&gpu->buffer, ··· 221 221 obj->base.size); 222 222 } 223 223 224 - mutex_unlock(&gpu->mmu->lock); 224 + mutex_unlock(&gpu->mmu_context->lock); 225 225 226 226 etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data); 227 227
+12 -12
drivers/gpu/drm/etnaviv/etnaviv_gem.c
··· 223 223 224 224 static struct etnaviv_vram_mapping * 225 225 etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj, 226 - struct etnaviv_iommu *mmu) 226 + struct etnaviv_iommu_context *context) 227 227 { 228 228 struct etnaviv_vram_mapping *mapping; 229 229 230 230 list_for_each_entry(mapping, &obj->vram_list, obj_node) { 231 - if (mapping->mmu == mmu) 231 + if (mapping->context == context) 232 232 return mapping; 233 233 } 234 234 ··· 256 256 int ret = 0; 257 257 258 258 mutex_lock(&etnaviv_obj->lock); 259 - mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu); 259 + mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu_context); 260 260 if (mapping) { 261 261 /* 262 262 * Holding the object lock prevents the use count changing ··· 265 265 * the MMU owns this mapping to close this race. 266 266 */ 267 267 if (mapping->use == 0) { 268 - mutex_lock(&gpu->mmu->lock); 269 - if (mapping->mmu == gpu->mmu) 268 + mutex_lock(&gpu->mmu_context->lock); 269 + if (mapping->context == gpu->mmu_context) 270 270 mapping->use += 1; 271 271 else 272 272 mapping = NULL; 273 - mutex_unlock(&gpu->mmu->lock); 273 + mutex_unlock(&gpu->mmu_context->lock); 274 274 if (mapping) 275 275 goto out; 276 276 } else { ··· 303 303 list_del(&mapping->obj_node); 304 304 } 305 305 306 - mapping->mmu = gpu->mmu; 306 + mapping->context = gpu->mmu_context; 307 307 mapping->use = 1; 308 308 309 - ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base, 310 - mapping); 309 + ret = etnaviv_iommu_map_gem(gpu->mmu_context, etnaviv_obj, 310 + gpu->memory_base, mapping); 311 311 if (ret < 0) 312 312 kfree(mapping); 313 313 else ··· 525 525 526 526 list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list, 527 527 obj_node) { 528 - struct etnaviv_iommu *mmu = mapping->mmu; 528 + struct etnaviv_iommu_context *context = mapping->context; 529 529 530 530 WARN_ON(mapping->use); 531 531 532 - if (mmu) 533 - etnaviv_iommu_unmap_gem(mmu, mapping); 532 + if (context) 533 + 
etnaviv_iommu_unmap_gem(context, mapping); 534 534 535 535 list_del(&mapping->obj_node); 536 536 kfree(mapping);
+1 -1
drivers/gpu/drm/etnaviv/etnaviv_gem.h
··· 25 25 struct list_head scan_node; 26 26 struct list_head mmu_node; 27 27 struct etnaviv_gem_object *object; 28 - struct etnaviv_iommu *mmu; 28 + struct etnaviv_iommu_context *context; 29 29 struct drm_mm_node vram_node; 30 30 unsigned int use; 31 31 u32 iova;
+20 -11
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
··· 681 681 etnaviv_gpu_setup_pulse_eater(gpu); 682 682 683 683 /* setup the MMU */ 684 - etnaviv_iommu_restore(gpu); 684 + etnaviv_iommu_restore(gpu, gpu->mmu_context); 685 685 686 686 /* Start command processor */ 687 687 prefetch = etnaviv_buffer_init(gpu); ··· 754 754 goto fail; 755 755 } 756 756 757 - gpu->mmu = etnaviv_iommu_new(gpu); 758 - if (IS_ERR(gpu->mmu)) { 759 - dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n"); 760 - ret = PTR_ERR(gpu->mmu); 757 + ret = etnaviv_iommu_global_init(gpu); 758 + if (ret) 761 759 goto fail; 760 + 761 + gpu->mmu_context = etnaviv_iommu_context_init(priv->mmu_global); 762 + if (IS_ERR(gpu->mmu_context)) { 763 + dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n"); 764 + ret = PTR_ERR(gpu->mmu_context); 765 + goto iommu_global_fini; 762 766 } 763 767 764 - ret = etnaviv_cmdbuf_suballoc_map(priv->cmdbuf_suballoc, gpu->mmu, 768 + ret = etnaviv_cmdbuf_suballoc_map(priv->cmdbuf_suballoc, 769 + gpu->mmu_context, 765 770 &gpu->cmdbuf_mapping, 766 771 gpu->memory_base); 767 772 if (ret) { ··· 782 777 goto unmap_suballoc; 783 778 } 784 779 785 - if (gpu->mmu->version == ETNAVIV_IOMMU_V1 && 780 + if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION) && 786 781 etnaviv_cmdbuf_get_va(&gpu->buffer, &gpu->cmdbuf_mapping) > 0x80000000) { 787 782 ret = -EINVAL; 788 783 dev_err(gpu->dev, ··· 813 808 free_buffer: 814 809 etnaviv_cmdbuf_free(&gpu->buffer); 815 810 unmap_suballoc: 816 - etnaviv_cmdbuf_suballoc_unmap(gpu->mmu, &gpu->cmdbuf_mapping); 811 + etnaviv_cmdbuf_suballoc_unmap(gpu->mmu_context, &gpu->cmdbuf_mapping); 817 812 destroy_iommu: 818 - etnaviv_iommu_destroy(gpu->mmu); 813 + etnaviv_iommu_context_put(gpu->mmu_context); 814 + iommu_global_fini: 815 + etnaviv_iommu_global_fini(gpu); 819 816 fail: 820 817 pm_runtime_mark_last_busy(gpu->dev); 821 818 pm_runtime_put_autosuspend(gpu->dev); ··· 1690 1683 1691 1684 if (gpu->initialized) { 1692 1685 etnaviv_cmdbuf_free(&gpu->buffer); 1693 - 
etnaviv_cmdbuf_suballoc_unmap(gpu->mmu, &gpu->cmdbuf_mapping); 1694 - etnaviv_iommu_destroy(gpu->mmu); 1686 + etnaviv_cmdbuf_suballoc_unmap(gpu->mmu_context, 1687 + &gpu->cmdbuf_mapping); 1688 + etnaviv_iommu_context_put(gpu->mmu_context); 1689 + etnaviv_iommu_global_fini(gpu); 1695 1690 gpu->initialized = false; 1696 1691 } 1697 1692
+2 -1
drivers/gpu/drm/etnaviv/etnaviv_gpu.h
··· 8 8 9 9 #include "etnaviv_cmdbuf.h" 10 10 #include "etnaviv_gem.h" 11 + #include "etnaviv_mmu.h" 11 12 #include "etnaviv_drv.h" 12 13 13 14 struct etnaviv_gem_submit; ··· 137 136 void __iomem *mmio; 138 137 int irq; 139 138 140 - struct etnaviv_iommu *mmu; 139 + struct etnaviv_iommu_context *mmu_context; 141 140 unsigned int flush_seq; 142 141 143 142 /* Power Control: */
+70 -83
drivers/gpu/drm/etnaviv/etnaviv_iommu.c
··· 11 11 12 12 #include "etnaviv_gpu.h" 13 13 #include "etnaviv_mmu.h" 14 - #include "etnaviv_iommu.h" 15 14 #include "state_hi.xml.h" 16 15 17 16 #define PT_SIZE SZ_2M ··· 18 19 19 20 #define GPU_MEM_START 0x80000000 20 21 21 - struct etnaviv_iommuv1_domain { 22 - struct etnaviv_iommu_domain base; 22 + struct etnaviv_iommuv1_context { 23 + struct etnaviv_iommu_context base; 23 24 u32 *pgtable_cpu; 24 25 dma_addr_t pgtable_dma; 25 26 }; 26 27 27 - static struct etnaviv_iommuv1_domain * 28 - to_etnaviv_domain(struct etnaviv_iommu_domain *domain) 28 + static struct etnaviv_iommuv1_context * 29 + to_v1_context(struct etnaviv_iommu_context *context) 29 30 { 30 - return container_of(domain, struct etnaviv_iommuv1_domain, base); 31 + return container_of(context, struct etnaviv_iommuv1_context, base); 31 32 } 32 33 33 - static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain) 34 + static void etnaviv_iommuv1_free(struct etnaviv_iommu_context *context) 34 35 { 35 - u32 *p; 36 - int i; 36 + struct etnaviv_iommuv1_context *v1_context = to_v1_context(context); 37 37 38 - etnaviv_domain->base.bad_page_cpu = 39 - dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K, 40 - &etnaviv_domain->base.bad_page_dma, 41 - GFP_KERNEL); 42 - if (!etnaviv_domain->base.bad_page_cpu) 43 - return -ENOMEM; 38 + drm_mm_takedown(&context->mm); 44 39 45 - p = etnaviv_domain->base.bad_page_cpu; 46 - for (i = 0; i < SZ_4K / 4; i++) 47 - *p++ = 0xdead55aa; 40 + dma_free_wc(context->global->dev, PT_SIZE, v1_context->pgtable_cpu, 41 + v1_context->pgtable_dma); 48 42 49 - etnaviv_domain->pgtable_cpu = dma_alloc_wc(etnaviv_domain->base.dev, 50 - PT_SIZE, 51 - &etnaviv_domain->pgtable_dma, 52 - GFP_KERNEL); 53 - if (!etnaviv_domain->pgtable_cpu) { 54 - dma_free_wc(etnaviv_domain->base.dev, SZ_4K, 55 - etnaviv_domain->base.bad_page_cpu, 56 - etnaviv_domain->base.bad_page_dma); 57 - return -ENOMEM; 58 - } 43 + context->global->v1.shared_context = NULL; 59 44 60 - 
memset32(etnaviv_domain->pgtable_cpu, etnaviv_domain->base.bad_page_dma, 61 - PT_ENTRIES); 62 - 63 - return 0; 45 + kfree(v1_context); 64 46 } 65 47 66 - static void etnaviv_iommuv1_domain_free(struct etnaviv_iommu_domain *domain) 67 - { 68 - struct etnaviv_iommuv1_domain *etnaviv_domain = 69 - to_etnaviv_domain(domain); 70 - 71 - dma_free_wc(etnaviv_domain->base.dev, PT_SIZE, 72 - etnaviv_domain->pgtable_cpu, etnaviv_domain->pgtable_dma); 73 - 74 - dma_free_wc(etnaviv_domain->base.dev, SZ_4K, 75 - etnaviv_domain->base.bad_page_cpu, 76 - etnaviv_domain->base.bad_page_dma); 77 - 78 - kfree(etnaviv_domain); 79 - } 80 - 81 - static int etnaviv_iommuv1_map(struct etnaviv_iommu_domain *domain, 48 + static int etnaviv_iommuv1_map(struct etnaviv_iommu_context *context, 82 49 unsigned long iova, phys_addr_t paddr, 83 50 size_t size, int prot) 84 51 { 85 - struct etnaviv_iommuv1_domain *etnaviv_domain = to_etnaviv_domain(domain); 52 + struct etnaviv_iommuv1_context *v1_context = to_v1_context(context); 86 53 unsigned int index = (iova - GPU_MEM_START) / SZ_4K; 87 54 88 55 if (size != SZ_4K) 89 56 return -EINVAL; 90 57 91 - etnaviv_domain->pgtable_cpu[index] = paddr; 58 + v1_context->pgtable_cpu[index] = paddr; 92 59 93 60 return 0; 94 61 } 95 62 96 - static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_domain *domain, 63 + static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_context *context, 97 64 unsigned long iova, size_t size) 98 65 { 99 - struct etnaviv_iommuv1_domain *etnaviv_domain = 100 - to_etnaviv_domain(domain); 66 + struct etnaviv_iommuv1_context *v1_context = to_v1_context(context); 101 67 unsigned int index = (iova - GPU_MEM_START) / SZ_4K; 102 68 103 69 if (size != SZ_4K) 104 70 return -EINVAL; 105 71 106 - etnaviv_domain->pgtable_cpu[index] = etnaviv_domain->base.bad_page_dma; 72 + v1_context->pgtable_cpu[index] = context->global->bad_page_dma; 107 73 108 74 return SZ_4K; 109 75 } 110 76 111 - static size_t etnaviv_iommuv1_dump_size(struct 
etnaviv_iommu_domain *domain) 77 + static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_context *context) 112 78 { 113 79 return PT_SIZE; 114 80 } 115 81 116 - static void etnaviv_iommuv1_dump(struct etnaviv_iommu_domain *domain, void *buf) 82 + static void etnaviv_iommuv1_dump(struct etnaviv_iommu_context *context, 83 + void *buf) 117 84 { 118 - struct etnaviv_iommuv1_domain *etnaviv_domain = 119 - to_etnaviv_domain(domain); 85 + struct etnaviv_iommuv1_context *v1_context = to_v1_context(context); 120 86 121 - memcpy(buf, etnaviv_domain->pgtable_cpu, PT_SIZE); 87 + memcpy(buf, v1_context->pgtable_cpu, PT_SIZE); 122 88 } 123 89 124 - void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu) 90 + static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu, 91 + struct etnaviv_iommu_context *context) 125 92 { 126 - struct etnaviv_iommuv1_domain *etnaviv_domain = 127 - to_etnaviv_domain(gpu->mmu->domain); 93 + struct etnaviv_iommuv1_context *v1_context = to_v1_context(context); 128 94 u32 pgtable; 129 95 130 96 /* set base addresses */ ··· 100 136 gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base); 101 137 102 138 /* set page table address in MC */ 103 - pgtable = (u32)etnaviv_domain->pgtable_dma; 139 + pgtable = (u32)v1_context->pgtable_dma; 104 140 105 141 gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable); 106 142 gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable); ··· 109 145 gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable); 110 146 } 111 147 112 - static const struct etnaviv_iommu_domain_ops etnaviv_iommuv1_ops = { 113 - .free = etnaviv_iommuv1_domain_free, 148 + 149 + const struct etnaviv_iommu_ops etnaviv_iommuv1_ops = { 150 + .free = etnaviv_iommuv1_free, 114 151 .map = etnaviv_iommuv1_map, 115 152 .unmap = etnaviv_iommuv1_unmap, 116 153 .dump_size = etnaviv_iommuv1_dump_size, 117 154 .dump = etnaviv_iommuv1_dump, 155 + .restore = etnaviv_iommuv1_restore, 118 156 }; 119 157 120 - struct etnaviv_iommu_domain * 121 - 
etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu) 158 + struct etnaviv_iommu_context * 159 + etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global *global) 122 160 { 123 - struct etnaviv_iommuv1_domain *etnaviv_domain; 124 - struct etnaviv_iommu_domain *domain; 125 - int ret; 161 + struct etnaviv_iommuv1_context *v1_context; 162 + struct etnaviv_iommu_context *context; 126 163 127 - etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL); 128 - if (!etnaviv_domain) 164 + mutex_lock(&global->lock); 165 + 166 + /* 167 + * MMUv1 does not support switching between different contexts without 168 + * a stop the world operation, so we only support a single shared 169 + * context with this version. 170 + */ 171 + if (global->v1.shared_context) { 172 + context = global->v1.shared_context; 173 + etnaviv_iommu_context_get(context); 174 + mutex_unlock(&global->lock); 175 + return context; 176 + } 177 + 178 + v1_context = kzalloc(sizeof(*v1_context), GFP_KERNEL); 179 + if (!v1_context) 129 180 return NULL; 130 181 131 - domain = &etnaviv_domain->base; 132 - 133 - domain->dev = gpu->dev; 134 - domain->base = GPU_MEM_START; 135 - domain->size = PT_ENTRIES * SZ_4K; 136 - domain->ops = &etnaviv_iommuv1_ops; 137 - 138 - ret = __etnaviv_iommu_init(etnaviv_domain); 139 - if (ret) 182 + v1_context->pgtable_cpu = dma_alloc_wc(global->dev, PT_SIZE, 183 + &v1_context->pgtable_dma, 184 + GFP_KERNEL); 185 + if (!v1_context->pgtable_cpu) 140 186 goto out_free; 141 187 142 - return &etnaviv_domain->base; 188 + memset32(v1_context->pgtable_cpu, global->bad_page_dma, PT_ENTRIES); 189 + 190 + context = &v1_context->base; 191 + context->global = global; 192 + kref_init(&context->refcount); 193 + mutex_init(&context->lock); 194 + INIT_LIST_HEAD(&context->mappings); 195 + drm_mm_init(&context->mm, GPU_MEM_START, PT_ENTRIES * SZ_4K); 196 + context->global->v1.shared_context = context; 197 + 198 + mutex_unlock(&global->lock); 199 + 200 + return context; 143 201 144 202 out_free: 145 - 
kfree(etnaviv_domain); 203 + mutex_unlock(&global->lock); 204 + kfree(v1_context); 146 205 return NULL; 147 206 }
-20
drivers/gpu/drm/etnaviv/etnaviv_iommu.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - /* 3 - * Copyright (C) 2014-2018 Etnaviv Project 4 - */ 5 - 6 - #ifndef __ETNAVIV_IOMMU_H__ 7 - #define __ETNAVIV_IOMMU_H__ 8 - 9 - struct etnaviv_gpu; 10 - struct etnaviv_iommu_domain; 11 - 12 - struct etnaviv_iommu_domain * 13 - etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu); 14 - void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu); 15 - 16 - struct etnaviv_iommu_domain * 17 - etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu); 18 - void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu); 19 - 20 - #endif /* __ETNAVIV_IOMMU_H__ */
+109 -157
drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
··· 13 13 #include "etnaviv_cmdbuf.h" 14 14 #include "etnaviv_gpu.h" 15 15 #include "etnaviv_mmu.h" 16 - #include "etnaviv_iommu.h" 17 16 #include "state.xml.h" 18 17 #include "state_hi.xml.h" 19 18 ··· 27 28 28 29 #define MMUv2_MAX_STLB_ENTRIES 1024 29 30 30 - struct etnaviv_iommuv2_domain { 31 - struct etnaviv_iommu_domain base; 32 - /* P(age) T(able) A(rray) */ 33 - u64 *pta_cpu; 34 - dma_addr_t pta_dma; 31 + struct etnaviv_iommuv2_context { 32 + struct etnaviv_iommu_context base; 33 + unsigned short id; 35 34 /* M(aster) TLB aka first level pagetable */ 36 35 u32 *mtlb_cpu; 37 36 dma_addr_t mtlb_dma; ··· 38 41 dma_addr_t stlb_dma[MMUv2_MAX_STLB_ENTRIES]; 39 42 }; 40 43 41 - static struct etnaviv_iommuv2_domain * 42 - to_etnaviv_domain(struct etnaviv_iommu_domain *domain) 44 + static struct etnaviv_iommuv2_context * 45 + to_v2_context(struct etnaviv_iommu_context *context) 43 46 { 44 - return container_of(domain, struct etnaviv_iommuv2_domain, base); 47 + return container_of(context, struct etnaviv_iommuv2_context, base); 45 48 } 46 49 50 + static void etnaviv_iommuv2_free(struct etnaviv_iommu_context *context) 51 + { 52 + struct etnaviv_iommuv2_context *v2_context = to_v2_context(context); 53 + int i; 54 + 55 + drm_mm_takedown(&context->mm); 56 + 57 + for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) { 58 + if (v2_context->stlb_cpu[i]) 59 + dma_free_wc(context->global->dev, SZ_4K, 60 + v2_context->stlb_cpu[i], 61 + v2_context->stlb_dma[i]); 62 + } 63 + 64 + dma_free_wc(context->global->dev, SZ_4K, v2_context->mtlb_cpu, 65 + v2_context->mtlb_dma); 66 + 67 + clear_bit(v2_context->id, context->global->v2.pta_alloc); 68 + 69 + vfree(v2_context); 70 + } 47 71 static int 48 - etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_domain *etnaviv_domain, 72 + etnaviv_iommuv2_ensure_stlb(struct etnaviv_iommuv2_context *v2_context, 49 73 int stlb) 50 74 { 51 - if (etnaviv_domain->stlb_cpu[stlb]) 75 + if (v2_context->stlb_cpu[stlb]) 52 76 return 0; 53 77 54 - 
etnaviv_domain->stlb_cpu[stlb] = 55 - dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K, 56 - &etnaviv_domain->stlb_dma[stlb], 78 + v2_context->stlb_cpu[stlb] = 79 + dma_alloc_wc(v2_context->base.global->dev, SZ_4K, 80 + &v2_context->stlb_dma[stlb], 57 81 GFP_KERNEL); 58 82 59 - if (!etnaviv_domain->stlb_cpu[stlb]) 83 + if (!v2_context->stlb_cpu[stlb]) 60 84 return -ENOMEM; 61 85 62 - memset32(etnaviv_domain->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION, 86 + memset32(v2_context->stlb_cpu[stlb], MMUv2_PTE_EXCEPTION, 63 87 SZ_4K / sizeof(u32)); 64 88 65 - etnaviv_domain->mtlb_cpu[stlb] = etnaviv_domain->stlb_dma[stlb] | 66 - MMUv2_PTE_PRESENT; 89 + v2_context->mtlb_cpu[stlb] = 90 + v2_context->stlb_dma[stlb] | MMUv2_PTE_PRESENT; 91 + 67 92 return 0; 68 93 } 69 94 70 - static int etnaviv_iommuv2_map(struct etnaviv_iommu_domain *domain, 95 + static int etnaviv_iommuv2_map(struct etnaviv_iommu_context *context, 71 96 unsigned long iova, phys_addr_t paddr, 72 97 size_t size, int prot) 73 98 { 74 - struct etnaviv_iommuv2_domain *etnaviv_domain = 75 - to_etnaviv_domain(domain); 99 + struct etnaviv_iommuv2_context *v2_context = to_v2_context(context); 76 100 int mtlb_entry, stlb_entry, ret; 77 101 u32 entry = lower_32_bits(paddr) | MMUv2_PTE_PRESENT; 78 102 ··· 109 91 mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT; 110 92 stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT; 111 93 112 - ret = etnaviv_iommuv2_ensure_stlb(etnaviv_domain, mtlb_entry); 94 + ret = etnaviv_iommuv2_ensure_stlb(v2_context, mtlb_entry); 113 95 if (ret) 114 96 return ret; 115 97 116 - etnaviv_domain->stlb_cpu[mtlb_entry][stlb_entry] = entry; 98 + v2_context->stlb_cpu[mtlb_entry][stlb_entry] = entry; 117 99 118 100 return 0; 119 101 } 120 102 121 - static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain, 103 + static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_context *context, 122 104 unsigned long iova, size_t size) 123 105 { 124 - struct etnaviv_iommuv2_domain 
*etnaviv_domain = 125 - to_etnaviv_domain(domain); 106 + struct etnaviv_iommuv2_context *etnaviv_domain = to_v2_context(context); 126 107 int mtlb_entry, stlb_entry; 127 108 128 109 if (size != SZ_4K) ··· 135 118 return SZ_4K; 136 119 } 137 120 138 - static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain) 121 + static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_context *context) 139 122 { 140 - int ret; 141 - 142 - /* allocate scratch page */ 143 - etnaviv_domain->base.bad_page_cpu = 144 - dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K, 145 - &etnaviv_domain->base.bad_page_dma, 146 - GFP_KERNEL); 147 - if (!etnaviv_domain->base.bad_page_cpu) { 148 - ret = -ENOMEM; 149 - goto fail_mem; 150 - } 151 - 152 - memset32(etnaviv_domain->base.bad_page_cpu, 0xdead55aa, 153 - SZ_4K / sizeof(u32)); 154 - 155 - etnaviv_domain->pta_cpu = dma_alloc_wc(etnaviv_domain->base.dev, 156 - SZ_4K, &etnaviv_domain->pta_dma, 157 - GFP_KERNEL); 158 - if (!etnaviv_domain->pta_cpu) { 159 - ret = -ENOMEM; 160 - goto fail_mem; 161 - } 162 - 163 - etnaviv_domain->mtlb_cpu = dma_alloc_wc(etnaviv_domain->base.dev, 164 - SZ_4K, &etnaviv_domain->mtlb_dma, 165 - GFP_KERNEL); 166 - if (!etnaviv_domain->mtlb_cpu) { 167 - ret = -ENOMEM; 168 - goto fail_mem; 169 - } 170 - 171 - memset32(etnaviv_domain->mtlb_cpu, MMUv2_PTE_EXCEPTION, 172 - MMUv2_MAX_STLB_ENTRIES); 173 - 174 - return 0; 175 - 176 - fail_mem: 177 - if (etnaviv_domain->base.bad_page_cpu) 178 - dma_free_wc(etnaviv_domain->base.dev, SZ_4K, 179 - etnaviv_domain->base.bad_page_cpu, 180 - etnaviv_domain->base.bad_page_dma); 181 - 182 - if (etnaviv_domain->pta_cpu) 183 - dma_free_wc(etnaviv_domain->base.dev, SZ_4K, 184 - etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma); 185 - 186 - if (etnaviv_domain->mtlb_cpu) 187 - dma_free_wc(etnaviv_domain->base.dev, SZ_4K, 188 - etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma); 189 - 190 - return ret; 191 - } 192 - 193 - static void etnaviv_iommuv2_domain_free(struct 
etnaviv_iommu_domain *domain) 194 - { 195 - struct etnaviv_iommuv2_domain *etnaviv_domain = 196 - to_etnaviv_domain(domain); 197 - int i; 198 - 199 - dma_free_wc(etnaviv_domain->base.dev, SZ_4K, 200 - etnaviv_domain->base.bad_page_cpu, 201 - etnaviv_domain->base.bad_page_dma); 202 - 203 - dma_free_wc(etnaviv_domain->base.dev, SZ_4K, 204 - etnaviv_domain->pta_cpu, etnaviv_domain->pta_dma); 205 - 206 - dma_free_wc(etnaviv_domain->base.dev, SZ_4K, 207 - etnaviv_domain->mtlb_cpu, etnaviv_domain->mtlb_dma); 208 - 209 - for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) { 210 - if (etnaviv_domain->stlb_cpu[i]) 211 - dma_free_wc(etnaviv_domain->base.dev, SZ_4K, 212 - etnaviv_domain->stlb_cpu[i], 213 - etnaviv_domain->stlb_dma[i]); 214 - } 215 - 216 - vfree(etnaviv_domain); 217 - } 218 - 219 - static size_t etnaviv_iommuv2_dump_size(struct etnaviv_iommu_domain *domain) 220 - { 221 - struct etnaviv_iommuv2_domain *etnaviv_domain = 222 - to_etnaviv_domain(domain); 123 + struct etnaviv_iommuv2_context *v2_context = to_v2_context(context); 223 124 size_t dump_size = SZ_4K; 224 125 int i; 225 126 226 127 for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++) 227 - if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT) 128 + if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT) 228 129 dump_size += SZ_4K; 229 130 230 131 return dump_size; 231 132 } 232 133 233 - static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf) 134 + static void etnaviv_iommuv2_dump(struct etnaviv_iommu_context *context, void *buf) 234 135 { 235 - struct etnaviv_iommuv2_domain *etnaviv_domain = 236 - to_etnaviv_domain(domain); 136 + struct etnaviv_iommuv2_context *v2_context = to_v2_context(context); 237 137 int i; 238 138 239 - memcpy(buf, etnaviv_domain->mtlb_cpu, SZ_4K); 139 + memcpy(buf, v2_context->mtlb_cpu, SZ_4K); 240 140 buf += SZ_4K; 241 141 for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++, buf += SZ_4K) 242 - if (etnaviv_domain->mtlb_cpu[i] & MMUv2_PTE_PRESENT) 243 - memcpy(buf, 
etnaviv_domain->stlb_cpu[i], SZ_4K); 142 + if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT) 143 + memcpy(buf, v2_context->stlb_cpu[i], SZ_4K); 244 144 } 245 145 246 - static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu) 146 + static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu, 147 + struct etnaviv_iommu_context *context) 247 148 { 248 - struct etnaviv_iommuv2_domain *etnaviv_domain = 249 - to_etnaviv_domain(gpu->mmu->domain); 149 + struct etnaviv_iommuv2_context *v2_context = to_v2_context(context); 250 150 u16 prefetch; 251 151 252 152 /* If the MMU is already enabled the state is still there. */ ··· 171 237 return; 172 238 173 239 prefetch = etnaviv_buffer_config_mmuv2(gpu, 174 - (u32)etnaviv_domain->mtlb_dma, 175 - (u32)etnaviv_domain->base.bad_page_dma); 240 + (u32)v2_context->mtlb_dma, 241 + (u32)context->global->bad_page_dma); 176 242 etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer), 177 243 prefetch); 178 244 etnaviv_gpu_wait_idle(gpu, 100); ··· 180 246 gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE); 181 247 } 182 248 183 - static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu) 249 + static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu, 250 + struct etnaviv_iommu_context *context) 184 251 { 185 - struct etnaviv_iommuv2_domain *etnaviv_domain = 186 - to_etnaviv_domain(gpu->mmu->domain); 252 + struct etnaviv_iommuv2_context *v2_context = to_v2_context(context); 187 253 u16 prefetch; 188 254 189 255 /* If the MMU is already enabled the state is still there. 
*/ ··· 191 257 return; 192 258 193 259 gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW, 194 - lower_32_bits(etnaviv_domain->pta_dma)); 260 + lower_32_bits(context->global->v2.pta_dma)); 195 261 gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH, 196 - upper_32_bits(etnaviv_domain->pta_dma)); 262 + upper_32_bits(context->global->v2.pta_dma)); 197 263 gpu_write(gpu, VIVS_MMUv2_PTA_CONTROL, VIVS_MMUv2_PTA_CONTROL_ENABLE); 198 264 199 265 gpu_write(gpu, VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW, 200 - lower_32_bits(etnaviv_domain->base.bad_page_dma)); 266 + lower_32_bits(context->global->bad_page_dma)); 201 267 gpu_write(gpu, VIVS_MMUv2_SEC_SAFE_ADDR_LOW, 202 - lower_32_bits(etnaviv_domain->base.bad_page_dma)); 268 + lower_32_bits(context->global->bad_page_dma)); 203 269 gpu_write(gpu, VIVS_MMUv2_SAFE_ADDRESS_CONFIG, 204 270 VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH( 205 - upper_32_bits(etnaviv_domain->base.bad_page_dma)) | 271 + upper_32_bits(context->global->bad_page_dma)) | 206 272 VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH( 207 - upper_32_bits(etnaviv_domain->base.bad_page_dma))); 273 + upper_32_bits(context->global->bad_page_dma))); 208 274 209 - etnaviv_domain->pta_cpu[0] = etnaviv_domain->mtlb_dma | 210 - VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K; 275 + context->global->v2.pta_cpu[0] = v2_context->mtlb_dma | 276 + VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K; 211 277 212 278 /* trigger a PTA load through the FE */ 213 - prefetch = etnaviv_buffer_config_pta(gpu); 279 + prefetch = etnaviv_buffer_config_pta(gpu, v2_context->id); 214 280 etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer), 215 281 prefetch); 216 282 etnaviv_gpu_wait_idle(gpu, 100); ··· 218 284 gpu_write(gpu, VIVS_MMUv2_SEC_CONTROL, VIVS_MMUv2_SEC_CONTROL_ENABLE); 219 285 } 220 286 221 - void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu) 287 + static void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu, 288 + struct etnaviv_iommu_context *context) 222 289 { 223 290 switch (gpu->sec_mode) { 224 291 
case ETNA_SEC_NONE: 225 - etnaviv_iommuv2_restore_nonsec(gpu); 292 + etnaviv_iommuv2_restore_nonsec(gpu, context); 226 293 break; 227 294 case ETNA_SEC_KERNEL: 228 - etnaviv_iommuv2_restore_sec(gpu); 295 + etnaviv_iommuv2_restore_sec(gpu, context); 229 296 break; 230 297 default: 231 298 WARN(1, "unhandled GPU security mode\n"); ··· 234 299 } 235 300 } 236 301 237 - static const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = { 238 - .free = etnaviv_iommuv2_domain_free, 302 + const struct etnaviv_iommu_ops etnaviv_iommuv2_ops = { 303 + .free = etnaviv_iommuv2_free, 239 304 .map = etnaviv_iommuv2_map, 240 305 .unmap = etnaviv_iommuv2_unmap, 241 306 .dump_size = etnaviv_iommuv2_dump_size, 242 307 .dump = etnaviv_iommuv2_dump, 308 + .restore = etnaviv_iommuv2_restore, 243 309 }; 244 310 245 - struct etnaviv_iommu_domain * 246 - etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu) 311 + struct etnaviv_iommu_context * 312 + etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global) 247 313 { 248 - struct etnaviv_iommuv2_domain *etnaviv_domain; 249 - struct etnaviv_iommu_domain *domain; 250 - int ret; 314 + struct etnaviv_iommuv2_context *v2_context; 315 + struct etnaviv_iommu_context *context; 251 316 252 - etnaviv_domain = vzalloc(sizeof(*etnaviv_domain)); 253 - if (!etnaviv_domain) 317 + v2_context = vzalloc(sizeof(*v2_context)); 318 + if (!v2_context) 254 319 return NULL; 255 320 256 - domain = &etnaviv_domain->base; 257 - 258 - domain->dev = gpu->dev; 259 - domain->base = SZ_4K; 260 - domain->size = (u64)SZ_1G * 4 - SZ_4K; 261 - domain->ops = &etnaviv_iommuv2_ops; 262 - 263 - ret = etnaviv_iommuv2_init(etnaviv_domain); 264 - if (ret) 321 + mutex_lock(&global->lock); 322 + v2_context->id = find_first_zero_bit(global->v2.pta_alloc, 323 + ETNAVIV_PTA_ENTRIES); 324 + if (v2_context->id < ETNAVIV_PTA_ENTRIES) { 325 + set_bit(v2_context->id, global->v2.pta_alloc); 326 + } else { 327 + mutex_unlock(&global->lock); 265 328 goto out_free; 329 + } 330 + 
mutex_unlock(&global->lock); 266 331 267 - return &etnaviv_domain->base; 332 + v2_context->mtlb_cpu = dma_alloc_wc(global->dev, SZ_4K, 333 + &v2_context->mtlb_dma, GFP_KERNEL); 334 + if (!v2_context->mtlb_cpu) 335 + goto out_free_id; 268 336 337 + memset32(v2_context->mtlb_cpu, MMUv2_PTE_EXCEPTION, 338 + MMUv2_MAX_STLB_ENTRIES); 339 + 340 + context = &v2_context->base; 341 + context->global = global; 342 + kref_init(&context->refcount); 343 + mutex_init(&context->lock); 344 + INIT_LIST_HEAD(&context->mappings); 345 + drm_mm_init(&context->mm, SZ_4K, (u64)SZ_1G * 4 - SZ_4K); 346 + 347 + return context; 348 + 349 + out_free_id: 350 + clear_bit(v2_context->id, global->v2.pta_alloc); 269 351 out_free: 270 - vfree(etnaviv_domain); 352 + vfree(v2_context); 271 353 return NULL; 272 354 }
+162 -101
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
··· 3 3 * Copyright (C) 2015-2018 Etnaviv Project 4 4 */ 5 5 6 + #include <linux/dma-mapping.h> 6 7 #include <linux/scatterlist.h> 7 8 8 9 #include "common.xml.h" ··· 11 10 #include "etnaviv_drv.h" 12 11 #include "etnaviv_gem.h" 13 12 #include "etnaviv_gpu.h" 14 - #include "etnaviv_iommu.h" 15 13 #include "etnaviv_mmu.h" 16 14 17 - static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain, 15 + static void etnaviv_context_unmap(struct etnaviv_iommu_context *context, 18 16 unsigned long iova, size_t size) 19 17 { 20 18 size_t unmapped_page, unmapped = 0; ··· 26 26 } 27 27 28 28 while (unmapped < size) { 29 - unmapped_page = domain->ops->unmap(domain, iova, pgsize); 29 + unmapped_page = context->global->ops->unmap(context, iova, 30 + pgsize); 30 31 if (!unmapped_page) 31 32 break; 32 33 ··· 36 35 } 37 36 } 38 37 39 - static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain, 38 + static int etnaviv_context_map(struct etnaviv_iommu_context *context, 40 39 unsigned long iova, phys_addr_t paddr, 41 40 size_t size, int prot) 42 41 { ··· 52 51 } 53 52 54 53 while (size) { 55 - ret = domain->ops->map(domain, iova, paddr, pgsize, prot); 54 + ret = context->global->ops->map(context, iova, paddr, pgsize, 55 + prot); 56 56 if (ret) 57 57 break; 58 58 ··· 64 62 65 63 /* unroll mapping in case something went wrong */ 66 64 if (ret) 67 - etnaviv_domain_unmap(domain, orig_iova, orig_size - size); 65 + etnaviv_context_unmap(context, orig_iova, orig_size - size); 68 66 69 67 return ret; 70 68 } 71 69 72 - static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova, 70 + static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova, 73 71 struct sg_table *sgt, unsigned len, int prot) 74 - { 75 - struct etnaviv_iommu_domain *domain = iommu->domain; 76 - struct scatterlist *sg; 72 + { struct scatterlist *sg; 77 73 unsigned int da = iova; 78 74 unsigned int i, j; 79 75 int ret; 80 76 81 - if (!domain || !sgt) 77 + if (!context || !sgt) 82 78 
return -EINVAL; 83 79 84 80 for_each_sg(sgt->sgl, sg, sgt->nents, i) { ··· 85 85 86 86 VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes); 87 87 88 - ret = etnaviv_domain_map(domain, da, pa, bytes, prot); 88 + ret = etnaviv_context_map(context, da, pa, bytes, prot); 89 89 if (ret) 90 90 goto fail; 91 91 ··· 100 100 for_each_sg(sgt->sgl, sg, i, j) { 101 101 size_t bytes = sg_dma_len(sg) + sg->offset; 102 102 103 - etnaviv_domain_unmap(domain, da, bytes); 103 + etnaviv_context_unmap(context, da, bytes); 104 104 da += bytes; 105 105 } 106 106 return ret; 107 107 } 108 108 109 - static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova, 109 + static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova, 110 110 struct sg_table *sgt, unsigned len) 111 111 { 112 - struct etnaviv_iommu_domain *domain = iommu->domain; 113 112 struct scatterlist *sg; 114 113 unsigned int da = iova; 115 114 int i; ··· 116 117 for_each_sg(sgt->sgl, sg, sgt->nents, i) { 117 118 size_t bytes = sg_dma_len(sg) + sg->offset; 118 119 119 - etnaviv_domain_unmap(domain, da, bytes); 120 + etnaviv_context_unmap(context, da, bytes); 120 121 121 122 VERB("unmap[%d]: %08x(%zx)", i, iova, bytes); 122 123 ··· 126 127 } 127 128 } 128 129 129 - static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu, 130 + static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context, 130 131 struct etnaviv_vram_mapping *mapping) 131 132 { 132 133 struct etnaviv_gem_object *etnaviv_obj = mapping->object; 133 134 134 - etnaviv_iommu_unmap(mmu, mapping->vram_node.start, 135 + etnaviv_iommu_unmap(context, mapping->vram_node.start, 135 136 etnaviv_obj->sgt, etnaviv_obj->base.size); 136 137 drm_mm_remove_node(&mapping->vram_node); 137 138 } 138 139 139 - static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu, 140 + static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context, 140 141 struct drm_mm_node *node, size_t size) 141 142 { 142 143 struct 
etnaviv_vram_mapping *free = NULL; 143 144 enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW; 144 145 int ret; 145 146 146 - lockdep_assert_held(&mmu->lock); 147 + lockdep_assert_held(&context->lock); 147 148 148 149 while (1) { 149 150 struct etnaviv_vram_mapping *m, *n; ··· 151 152 struct list_head list; 152 153 bool found; 153 154 154 - ret = drm_mm_insert_node_in_range(&mmu->mm, node, 155 + ret = drm_mm_insert_node_in_range(&context->mm, node, 155 156 size, 0, 0, 0, U64_MAX, mode); 156 157 if (ret != -ENOSPC) 157 158 break; 158 159 159 160 /* Try to retire some entries */ 160 - drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode); 161 + drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode); 161 162 162 163 found = 0; 163 164 INIT_LIST_HEAD(&list); 164 - list_for_each_entry(free, &mmu->mappings, mmu_node) { 165 + list_for_each_entry(free, &context->mappings, mmu_node) { 165 166 /* If this vram node has not been used, skip this. */ 166 167 if (!free->vram_node.mm) 167 168 continue; ··· 203 204 * this mapping. 
204 205 */ 205 206 list_for_each_entry_safe(m, n, &list, scan_node) { 206 - etnaviv_iommu_remove_mapping(mmu, m); 207 - m->mmu = NULL; 207 + etnaviv_iommu_remove_mapping(context, m); 208 + m->context = NULL; 208 209 list_del_init(&m->mmu_node); 209 210 list_del_init(&m->scan_node); 210 211 } ··· 220 221 return ret; 221 222 } 222 223 223 - int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu, 224 + int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context, 224 225 struct etnaviv_gem_object *etnaviv_obj, u32 memory_base, 225 226 struct etnaviv_vram_mapping *mapping) 226 227 { ··· 230 231 231 232 lockdep_assert_held(&etnaviv_obj->lock); 232 233 233 - mutex_lock(&mmu->lock); 234 + mutex_lock(&context->lock); 234 235 235 236 /* v1 MMU can optimize single entry (contiguous) scatterlists */ 236 - if (mmu->version == ETNAVIV_IOMMU_V1 && 237 + if (context->global->version == ETNAVIV_IOMMU_V1 && 237 238 sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) { 238 239 u32 iova; 239 240 240 241 iova = sg_dma_address(sgt->sgl) - memory_base; 241 242 if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) { 242 243 mapping->iova = iova; 243 - list_add_tail(&mapping->mmu_node, &mmu->mappings); 244 + list_add_tail(&mapping->mmu_node, &context->mappings); 244 245 ret = 0; 245 246 goto unlock; 246 247 } ··· 248 249 249 250 node = &mapping->vram_node; 250 251 251 - ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size); 252 + ret = etnaviv_iommu_find_iova(context, node, etnaviv_obj->base.size); 252 253 if (ret < 0) 253 254 goto unlock; 254 255 255 256 mapping->iova = node->start; 256 - ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size, 257 + ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size, 257 258 ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE); 258 259 259 260 if (ret < 0) { ··· 261 262 goto unlock; 262 263 } 263 264 264 - list_add_tail(&mapping->mmu_node, &mmu->mappings); 265 - mmu->flush_seq++; 265 + 
list_add_tail(&mapping->mmu_node, &context->mappings); 266 + context->flush_seq++; 266 267 unlock: 267 - mutex_unlock(&mmu->lock); 268 + mutex_unlock(&context->lock); 268 269 269 270 return ret; 270 271 } 271 272 272 - void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu, 273 + void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context, 273 274 struct etnaviv_vram_mapping *mapping) 274 275 { 275 276 WARN_ON(mapping->use); 276 277 277 - mutex_lock(&mmu->lock); 278 + mutex_lock(&context->lock); 278 279 279 280 /* If the vram node is on the mm, unmap and remove the node */ 280 - if (mapping->vram_node.mm == &mmu->mm) 281 - etnaviv_iommu_remove_mapping(mmu, mapping); 281 + if (mapping->vram_node.mm == &context->mm) 282 + etnaviv_iommu_remove_mapping(context, mapping); 282 283 283 284 list_del(&mapping->mmu_node); 284 - mmu->flush_seq++; 285 - mutex_unlock(&mmu->lock); 285 + context->flush_seq++; 286 + mutex_unlock(&context->lock); 286 287 } 287 288 288 - void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu) 289 + static void etnaviv_iommu_context_free(struct kref *kref) 289 290 { 290 - drm_mm_takedown(&mmu->mm); 291 - mmu->domain->ops->free(mmu->domain); 292 - kfree(mmu); 291 + struct etnaviv_iommu_context *context = 292 + container_of(kref, struct etnaviv_iommu_context, refcount); 293 + 294 + context->global->ops->free(context); 295 + } 296 + void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context) 297 + { 298 + kref_put(&context->refcount, etnaviv_iommu_context_free); 293 299 } 294 300 295 - struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu) 301 + struct etnaviv_iommu_context * 302 + etnaviv_iommu_context_init(struct etnaviv_iommu_global *global) 296 303 { 297 - enum etnaviv_iommu_version version; 298 - struct etnaviv_iommu *mmu; 299 - 300 - mmu = kzalloc(sizeof(*mmu), GFP_KERNEL); 301 - if (!mmu) 302 - return ERR_PTR(-ENOMEM); 303 - 304 - if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) { 305 - mmu->domain = 
etnaviv_iommuv1_domain_alloc(gpu); 306 - version = ETNAVIV_IOMMU_V1; 307 - } else { 308 - mmu->domain = etnaviv_iommuv2_domain_alloc(gpu); 309 - version = ETNAVIV_IOMMU_V2; 310 - } 311 - 312 - if (!mmu->domain) { 313 - dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n"); 314 - kfree(mmu); 315 - return ERR_PTR(-ENOMEM); 316 - } 317 - 318 - mmu->gpu = gpu; 319 - mmu->version = version; 320 - mutex_init(&mmu->lock); 321 - INIT_LIST_HEAD(&mmu->mappings); 322 - 323 - drm_mm_init(&mmu->mm, mmu->domain->base, mmu->domain->size); 324 - 325 - return mmu; 326 - } 327 - 328 - void etnaviv_iommu_restore(struct etnaviv_gpu *gpu) 329 - { 330 - if (gpu->mmu->version == ETNAVIV_IOMMU_V1) 331 - etnaviv_iommuv1_restore(gpu); 304 + if (global->version == ETNAVIV_IOMMU_V1) 305 + return etnaviv_iommuv1_context_alloc(global); 332 306 else 333 - etnaviv_iommuv2_restore(gpu); 307 + return etnaviv_iommuv2_context_alloc(global); 334 308 } 335 309 336 - int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu *mmu, 310 + void etnaviv_iommu_restore(struct etnaviv_gpu *gpu, 311 + struct etnaviv_iommu_context *context) 312 + { 313 + context->global->ops->restore(gpu, context); 314 + } 315 + 316 + int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context, 337 317 struct etnaviv_vram_mapping *mapping, 338 318 u32 memory_base, dma_addr_t paddr, 339 319 size_t size) 340 320 { 341 - mutex_lock(&mmu->lock); 321 + mutex_lock(&context->lock); 342 322 343 323 /* 344 324 * For MMUv1 we don't add the suballoc region to the pagetables, as ··· 325 347 * window. Instead we manufacture a mapping to make it look uniform 326 348 * to the upper layers. 
327 349 */ 328 - if (mmu->version == ETNAVIV_IOMMU_V1) { 350 + if (context->global->version == ETNAVIV_IOMMU_V1) { 329 351 mapping->iova = paddr - memory_base; 330 352 } else { 331 353 struct drm_mm_node *node = &mapping->vram_node; 332 354 int ret; 333 355 334 - ret = etnaviv_iommu_find_iova(mmu, node, size); 356 + ret = etnaviv_iommu_find_iova(context, node, size); 335 357 if (ret < 0) { 336 - mutex_unlock(&mmu->lock); 358 + mutex_unlock(&context->lock); 337 359 return ret; 338 360 } 339 361 340 362 mapping->iova = node->start; 341 - ret = etnaviv_domain_map(mmu->domain, node->start, paddr, size, 342 - ETNAVIV_PROT_READ); 363 + ret = etnaviv_context_map(context, node->start, paddr, size, 364 + ETNAVIV_PROT_READ); 343 365 344 366 if (ret < 0) { 345 367 drm_mm_remove_node(node); 346 - mutex_unlock(&mmu->lock); 368 + mutex_unlock(&context->lock); 347 369 return ret; 348 370 } 349 371 350 - mmu->flush_seq++; 372 + context->flush_seq++; 351 373 } 352 374 353 - list_add_tail(&mapping->mmu_node, &mmu->mappings); 375 + list_add_tail(&mapping->mmu_node, &context->mappings); 354 376 mapping->use = 1; 355 377 356 - mutex_unlock(&mmu->lock); 378 + mutex_unlock(&context->lock); 357 379 358 380 return 0; 359 381 } 360 382 361 - void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu *mmu, 383 + void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context, 362 384 struct etnaviv_vram_mapping *mapping) 363 385 { 364 386 struct drm_mm_node *node = &mapping->vram_node; ··· 368 390 369 391 mapping->use = 0; 370 392 371 - if (mmu->version == ETNAVIV_IOMMU_V1) 393 + if (context->global->version == ETNAVIV_IOMMU_V1) 372 394 return; 373 395 374 - mutex_lock(&mmu->lock); 375 - etnaviv_domain_unmap(mmu->domain, node->start, node->size); 396 + mutex_lock(&context->lock); 397 + etnaviv_context_unmap(context, node->start, node->size); 376 398 drm_mm_remove_node(node); 377 - mutex_unlock(&mmu->lock); 399 + mutex_unlock(&context->lock); 378 400 } 379 401 380 - size_t 
etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu) 402 + size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context) 381 403 { 382 - return iommu->domain->ops->dump_size(iommu->domain); 404 + return context->global->ops->dump_size(context); 383 405 } 384 406 385 - void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf) 407 + void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf) 386 408 { 387 - iommu->domain->ops->dump(iommu->domain, buf); 409 + context->global->ops->dump(context, buf); 410 + } 411 + 412 + int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu) 413 + { 414 + enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1; 415 + struct etnaviv_drm_private *priv = gpu->drm->dev_private; 416 + struct etnaviv_iommu_global *global; 417 + struct device *dev = gpu->drm->dev; 418 + 419 + if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION) 420 + version = ETNAVIV_IOMMU_V2; 421 + 422 + if (priv->mmu_global) { 423 + if (priv->mmu_global->version != version) { 424 + dev_err(gpu->dev, 425 + "MMU version doesn't match global version\n"); 426 + return -ENXIO; 427 + } 428 + 429 + priv->mmu_global->use++; 430 + return 0; 431 + } 432 + 433 + global = kzalloc(sizeof(*global), GFP_KERNEL); 434 + if (!global) 435 + return -ENOMEM; 436 + 437 + global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma, 438 + GFP_KERNEL); 439 + if (!global->bad_page_cpu) 440 + goto free_global; 441 + 442 + memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32)); 443 + 444 + if (version == ETNAVIV_IOMMU_V2) { 445 + global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE, 446 + &global->v2.pta_dma, GFP_KERNEL); 447 + if (!global->v2.pta_cpu) 448 + goto free_bad_page; 449 + } 450 + 451 + global->dev = dev; 452 + global->version = version; 453 + global->use = 1; 454 + mutex_init(&global->lock); 455 + 456 + if (version == ETNAVIV_IOMMU_V1) 457 + global->ops = &etnaviv_iommuv1_ops; 458 + else 459 + global->ops = 
&etnaviv_iommuv2_ops; 460 + 461 + priv->mmu_global = global; 462 + 463 + return 0; 464 + 465 + free_bad_page: 466 + dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma); 467 + free_global: 468 + kfree(global); 469 + 470 + return -ENOMEM; 471 + } 472 + 473 + void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu) 474 + { 475 + struct etnaviv_drm_private *priv = gpu->drm->dev_private; 476 + struct etnaviv_iommu_global *global = priv->mmu_global; 477 + 478 + if (--global->use > 0) 479 + return; 480 + 481 + if (global->v2.pta_cpu) 482 + dma_free_wc(global->dev, ETNAVIV_PTA_SIZE, 483 + global->v2.pta_cpu, global->v2.pta_dma); 484 + 485 + if (global->bad_page_cpu) 486 + dma_free_wc(global->dev, SZ_4K, 487 + global->bad_page_cpu, global->bad_page_dma); 488 + 489 + mutex_destroy(&global->lock); 490 + kfree(global); 491 + 492 + priv->mmu_global = NULL; 388 493 }
+64 -25
drivers/gpu/drm/etnaviv/etnaviv_mmu.h
··· 16 16 17 17 struct etnaviv_gpu; 18 18 struct etnaviv_vram_mapping; 19 - struct etnaviv_iommu_domain; 19 + struct etnaviv_iommu_global; 20 + struct etnaviv_iommu_context; 20 21 21 - struct etnaviv_iommu_domain_ops { 22 - void (*free)(struct etnaviv_iommu_domain *); 23 - int (*map)(struct etnaviv_iommu_domain *domain, unsigned long iova, 22 + struct etnaviv_iommu_ops { 23 + struct etnaviv_iommu_context *(*init)(struct etnaviv_iommu_global *); 24 + void (*free)(struct etnaviv_iommu_context *); 25 + int (*map)(struct etnaviv_iommu_context *context, unsigned long iova, 24 26 phys_addr_t paddr, size_t size, int prot); 25 - size_t (*unmap)(struct etnaviv_iommu_domain *domain, unsigned long iova, 27 + size_t (*unmap)(struct etnaviv_iommu_context *context, unsigned long iova, 26 28 size_t size); 27 - size_t (*dump_size)(struct etnaviv_iommu_domain *); 28 - void (*dump)(struct etnaviv_iommu_domain *, void *); 29 + size_t (*dump_size)(struct etnaviv_iommu_context *); 30 + void (*dump)(struct etnaviv_iommu_context *, void *); 31 + void (*restore)(struct etnaviv_gpu *, struct etnaviv_iommu_context *); 29 32 }; 30 33 31 - struct etnaviv_iommu_domain { 34 + extern const struct etnaviv_iommu_ops etnaviv_iommuv1_ops; 35 + extern const struct etnaviv_iommu_ops etnaviv_iommuv2_ops; 36 + 37 + #define ETNAVIV_PTA_SIZE SZ_4K 38 + #define ETNAVIV_PTA_ENTRIES (ETNAVIV_PTA_SIZE / sizeof(u64)) 39 + 40 + struct etnaviv_iommu_global { 32 41 struct device *dev; 42 + enum etnaviv_iommu_version version; 43 + const struct etnaviv_iommu_ops *ops; 44 + unsigned int use; 45 + struct mutex lock; 46 + 33 47 void *bad_page_cpu; 34 48 dma_addr_t bad_page_dma; 35 - u64 base; 36 - u64 size; 37 49 38 - const struct etnaviv_iommu_domain_ops *ops; 50 + /* 51 + * This union holds members needed by either MMUv1 or MMUv2, which 52 + * can not exist at the same time. 
53 + */ 54 + union { 55 + struct { 56 + struct etnaviv_iommu_context *shared_context; 57 + } v1; 58 + struct { 59 + /* P(age) T(able) A(rray) */ 60 + u64 *pta_cpu; 61 + dma_addr_t pta_dma; 62 + struct spinlock pta_lock; 63 + DECLARE_BITMAP(pta_alloc, ETNAVIV_PTA_ENTRIES); 64 + } v2; 65 + }; 39 66 }; 40 67 41 - struct etnaviv_iommu { 42 - struct etnaviv_gpu *gpu; 43 - struct etnaviv_iommu_domain *domain; 44 - 45 - enum etnaviv_iommu_version version; 68 + struct etnaviv_iommu_context { 69 + struct kref refcount; 70 + struct etnaviv_iommu_global *global; 46 71 47 72 /* memory manager for GPU address area */ 48 73 struct mutex lock; ··· 76 51 unsigned int flush_seq; 77 52 }; 78 53 54 + int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu); 55 + void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu); 56 + 79 57 struct etnaviv_gem_object; 80 58 81 - int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu, 59 + int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context, 82 60 struct etnaviv_gem_object *etnaviv_obj, u32 memory_base, 83 61 struct etnaviv_vram_mapping *mapping); 84 - void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu, 62 + void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context, 85 63 struct etnaviv_vram_mapping *mapping); 86 64 87 - int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu *mmu, 65 + int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *ctx, 88 66 struct etnaviv_vram_mapping *mapping, 89 67 u32 memory_base, dma_addr_t paddr, 90 68 size_t size); 91 - void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu *mmu, 69 + void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *ctx, 92 70 struct etnaviv_vram_mapping *mapping); 93 71 94 - size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu); 95 - void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf); 72 + size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *ctx); 73 + void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf); 96 
74 97 - struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu); 98 - void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu); 99 - void etnaviv_iommu_restore(struct etnaviv_gpu *gpu); 75 + struct etnaviv_iommu_context * 76 + etnaviv_iommu_context_init(struct etnaviv_iommu_global *global); 77 + static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx) 78 + { 79 + kref_get(&ctx->refcount); 80 + } 81 + void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx); 82 + void etnaviv_iommu_restore(struct etnaviv_gpu *gpu, 83 + struct etnaviv_iommu_context *ctx); 84 + 85 + struct etnaviv_iommu_context * 86 + etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global *global); 87 + struct etnaviv_iommu_context * 88 + etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global); 100 89 101 90 #endif /* __ETNAVIV_MMU_H__ */