Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/etnaviv: split out cmdbuf mapping into address space

This allows decoupling the creation of the cmdbuf suballocator from the
mapping of the region into the GPU address space, allowing multiple
address spaces to share a single cmdbuf suballoc.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
Reviewed-by: Guido Günther <agx@sigxcpu.org>

+117 -69
+13 -10
drivers/gpu/drm/etnaviv/etnaviv_buffer.c
··· 118 118 u32 *ptr = buf->vaddr + off; 119 119 120 120 dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n", 121 - ptr, etnaviv_cmdbuf_get_va(buf) + off, size - len * 4 - off); 121 + ptr, etnaviv_cmdbuf_get_va(buf, &gpu->cmdbuf_mapping) + 122 + off, size - len * 4 - off); 122 123 123 124 print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4, 124 125 ptr, len * 4, 0); ··· 152 151 if (buffer->user_size + cmd_dwords * sizeof(u64) > buffer->size) 153 152 buffer->user_size = 0; 154 153 155 - return etnaviv_cmdbuf_get_va(buffer) + buffer->user_size; 154 + return etnaviv_cmdbuf_get_va(buffer, &gpu->cmdbuf_mapping) + 155 + buffer->user_size; 156 156 } 157 157 158 158 u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu) ··· 166 164 buffer->user_size = 0; 167 165 168 166 CMD_WAIT(buffer); 169 - CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) + 170 - buffer->user_size - 4); 167 + CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer, &gpu->cmdbuf_mapping) 168 + + buffer->user_size - 4); 171 169 172 170 return buffer->user_size / 8; 173 171 } ··· 293 291 294 292 /* Append waitlink */ 295 293 CMD_WAIT(buffer); 296 - CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) + 297 - buffer->user_size - 4); 294 + CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer, &gpu->cmdbuf_mapping) 295 + + buffer->user_size - 4); 298 296 299 297 /* 300 298 * Kick off the 'sync point' command by replacing the previous ··· 321 319 if (drm_debug & DRM_UT_DRIVER) 322 320 etnaviv_buffer_dump(gpu, buffer, 0, 0x50); 323 321 324 - link_target = etnaviv_cmdbuf_get_va(cmdbuf); 322 + link_target = etnaviv_cmdbuf_get_va(cmdbuf, &gpu->cmdbuf_mapping); 325 323 link_dwords = cmdbuf->size / 8; 326 324 327 325 /* ··· 414 412 CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) | 415 413 VIVS_GL_EVENT_FROM_PE); 416 414 CMD_WAIT(buffer); 417 - CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer) + 418 - buffer->user_size - 4); 415 + CMD_LINK(buffer, 2, etnaviv_cmdbuf_get_va(buffer, &gpu->cmdbuf_mapping) 416 
+ + buffer->user_size - 4); 419 417 420 418 if (drm_debug & DRM_UT_DRIVER) 421 419 pr_info("stream link to 0x%08x @ 0x%08x %p\n", 422 - return_target, etnaviv_cmdbuf_get_va(cmdbuf), 420 + return_target, 421 + etnaviv_cmdbuf_get_va(cmdbuf, &gpu->cmdbuf_mapping), 423 422 cmdbuf->vaddr); 424 423 425 424 if (drm_debug & DRM_UT_DRIVER) {
+19 -16
drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
··· 8 8 #include <drm/drm_mm.h> 9 9 10 10 #include "etnaviv_cmdbuf.h" 11 + #include "etnaviv_gem.h" 11 12 #include "etnaviv_gpu.h" 12 13 #include "etnaviv_mmu.h" 13 14 #include "etnaviv_perfmon.h" ··· 22 21 struct etnaviv_gpu *gpu; 23 22 void *vaddr; 24 23 dma_addr_t paddr; 25 - 26 - /* GPU mapping */ 27 - u32 iova; 28 - struct drm_mm_node vram_node; /* only used on MMUv2 */ 29 24 30 25 /* allocation management */ 31 26 struct mutex lock; ··· 51 54 goto free_suballoc; 52 55 } 53 56 54 - ret = etnaviv_iommu_get_suballoc_va(gpu, suballoc->paddr, 55 - &suballoc->vram_node, SUBALLOC_SIZE, 56 - &suballoc->iova); 57 - if (ret) 58 - goto free_dma; 59 - 60 57 return suballoc; 61 58 62 - free_dma: 63 - dma_free_wc(gpu->dev, SUBALLOC_SIZE, suballoc->vaddr, suballoc->paddr); 64 59 free_suballoc: 65 60 kfree(suballoc); 66 61 67 62 return ERR_PTR(ret); 68 63 } 69 64 65 + int etnaviv_cmdbuf_suballoc_map(struct etnaviv_cmdbuf_suballoc *suballoc, 66 + struct etnaviv_iommu *mmu, 67 + struct etnaviv_vram_mapping *mapping, 68 + u32 memory_base) 69 + { 70 + return etnaviv_iommu_get_suballoc_va(mmu, mapping, memory_base, 71 + suballoc->paddr, SUBALLOC_SIZE); 72 + } 73 + 74 + void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu *mmu, 75 + struct etnaviv_vram_mapping *mapping) 76 + { 77 + etnaviv_iommu_put_suballoc_va(mmu, mapping); 78 + } 79 + 70 80 void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc) 71 81 { 72 - etnaviv_iommu_put_suballoc_va(suballoc->gpu, &suballoc->vram_node, 73 - SUBALLOC_SIZE, suballoc->iova); 74 82 dma_free_wc(suballoc->gpu->dev, SUBALLOC_SIZE, suballoc->vaddr, 75 83 suballoc->paddr); 76 84 kfree(suballoc); ··· 129 127 wake_up_all(&suballoc->free_event); 130 128 } 131 129 132 - u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf) 130 + u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf, 131 + struct etnaviv_vram_mapping *mapping) 133 132 { 134 - return buf->suballoc->iova + buf->suballoc_offset; 133 + return mapping->iova + 
buf->suballoc_offset; 135 134 } 136 135 137 136 dma_addr_t etnaviv_cmdbuf_get_pa(struct etnaviv_cmdbuf *buf)
+10 -1
drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
··· 9 9 #include <linux/types.h> 10 10 11 11 struct etnaviv_gpu; 12 + struct etnaviv_iommu; 13 + struct etnaviv_vram_mapping; 12 14 struct etnaviv_cmdbuf_suballoc; 13 15 struct etnaviv_perfmon_request; 14 16 ··· 27 25 struct etnaviv_cmdbuf_suballoc * 28 26 etnaviv_cmdbuf_suballoc_new(struct etnaviv_gpu * gpu); 29 27 void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc); 28 + int etnaviv_cmdbuf_suballoc_map(struct etnaviv_cmdbuf_suballoc *suballoc, 29 + struct etnaviv_iommu *mmu, 30 + struct etnaviv_vram_mapping *mapping, 31 + u32 memory_base); 32 + void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu *mmu, 33 + struct etnaviv_vram_mapping *mapping); 30 34 31 35 32 36 int etnaviv_cmdbuf_init(struct etnaviv_cmdbuf_suballoc *suballoc, 33 37 struct etnaviv_cmdbuf *cmdbuf, u32 size); 34 38 void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf); 35 39 36 - u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf); 40 + u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf, 41 + struct etnaviv_vram_mapping *mapping); 37 42 dma_addr_t etnaviv_cmdbuf_get_pa(struct etnaviv_cmdbuf *buf); 38 43 39 44 #endif /* __ETNAVIV_CMDBUF_H__ */
+4 -2
drivers/gpu/drm/etnaviv/etnaviv_dump.c
··· 172 172 etnaviv_core_dump_mmu(&iter, gpu->mmu, mmu_size); 173 173 etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr, 174 174 gpu->buffer.size, 175 - etnaviv_cmdbuf_get_va(&gpu->buffer)); 175 + etnaviv_cmdbuf_get_va(&gpu->buffer, 176 + &gpu->cmdbuf_mapping)); 176 177 177 178 etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD, 178 179 submit->cmdbuf.vaddr, submit->cmdbuf.size, 179 - etnaviv_cmdbuf_get_va(&submit->cmdbuf)); 180 + etnaviv_cmdbuf_get_va(&submit->cmdbuf, 181 + &gpu->cmdbuf_mapping)); 180 182 181 183 /* Reserve space for the bomap */ 182 184 if (n_bomap_pages) {
+15 -4
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
··· 687 687 prefetch = etnaviv_buffer_init(gpu); 688 688 689 689 gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U); 690 - etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(&gpu->buffer), 691 - prefetch); 690 + etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(&gpu->buffer, 691 + &gpu->cmdbuf_mapping), prefetch); 692 692 } 693 693 694 694 int etnaviv_gpu_init(struct etnaviv_gpu *gpu) ··· 767 767 goto destroy_iommu; 768 768 } 769 769 770 + ret = etnaviv_cmdbuf_suballoc_map(gpu->cmdbuf_suballoc, gpu->mmu, 771 + &gpu->cmdbuf_mapping, 772 + gpu->memory_base); 773 + if (ret) { 774 + dev_err(gpu->dev, "failed to map cmdbuf suballoc\n"); 775 + goto destroy_suballoc; 776 + } 777 + 770 778 /* Create buffer: */ 771 779 ret = etnaviv_cmdbuf_init(gpu->cmdbuf_suballoc, &gpu->buffer, 772 780 PAGE_SIZE); 773 781 if (ret) { 774 782 dev_err(gpu->dev, "could not create command buffer\n"); 775 - goto destroy_suballoc; 783 + goto unmap_suballoc; 776 784 } 777 785 778 786 if (gpu->mmu->version == ETNAVIV_IOMMU_V1 && 779 - etnaviv_cmdbuf_get_va(&gpu->buffer) > 0x80000000) { 787 + etnaviv_cmdbuf_get_va(&gpu->buffer, &gpu->cmdbuf_mapping) > 0x80000000) { 780 788 ret = -EINVAL; 781 789 dev_err(gpu->dev, 782 790 "command buffer outside valid memory window\n"); ··· 813 805 814 806 free_buffer: 815 807 etnaviv_cmdbuf_free(&gpu->buffer); 808 + unmap_suballoc: 809 + etnaviv_cmdbuf_suballoc_unmap(gpu->mmu, &gpu->cmdbuf_mapping); 816 810 destroy_suballoc: 817 811 etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc); 818 812 destroy_iommu: ··· 1691 1681 1692 1682 if (gpu->initialized) { 1693 1683 etnaviv_cmdbuf_free(&gpu->buffer); 1684 + etnaviv_cmdbuf_suballoc_unmap(gpu->mmu, &gpu->cmdbuf_mapping); 1694 1685 etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc); 1695 1686 etnaviv_iommu_destroy(gpu->mmu); 1696 1687 gpu->initialized = false;
+2 -1
drivers/gpu/drm/etnaviv/etnaviv_gpu.h
··· 7 7 #define __ETNAVIV_GPU_H__ 8 8 9 9 #include "etnaviv_cmdbuf.h" 10 + #include "etnaviv_gem.h" 10 11 #include "etnaviv_drv.h" 11 12 12 13 struct etnaviv_gem_submit; ··· 85 84 }; 86 85 87 86 struct etnaviv_cmdbuf_suballoc; 88 - struct etnaviv_cmdbuf; 89 87 struct regulator; 90 88 struct clk; 91 89 ··· 102 102 bool initialized; 103 103 104 104 /* 'ring'-buffer: */ 105 + struct etnaviv_vram_mapping cmdbuf_mapping; 105 106 struct etnaviv_cmdbuf buffer; 106 107 int exec_state; 107 108
+48 -29
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
··· 334 334 etnaviv_iommuv2_restore(gpu); 335 335 } 336 336 337 - int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr, 338 - struct drm_mm_node *vram_node, size_t size, 339 - u32 *iova) 337 + int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu *mmu, 338 + struct etnaviv_vram_mapping *mapping, 339 + u32 memory_base, dma_addr_t paddr, 340 + size_t size) 340 341 { 341 - struct etnaviv_iommu *mmu = gpu->mmu; 342 + mutex_lock(&mmu->lock); 342 343 344 + /* 345 + * For MMUv1 we don't add the suballoc region to the pagetables, as 346 + * those GPUs can only work with cmdbufs accessed through the linear 347 + * window. Instead we manufacture a mapping to make it look uniform 348 + * to the upper layers. 349 + */ 343 350 if (mmu->version == ETNAVIV_IOMMU_V1) { 344 - *iova = paddr - gpu->memory_base; 345 - return 0; 351 + mapping->iova = paddr - memory_base; 346 352 } else { 353 + struct drm_mm_node *node = &mapping->vram_node; 347 354 int ret; 348 355 349 - mutex_lock(&mmu->lock); 350 - ret = etnaviv_iommu_find_iova(mmu, vram_node, size); 356 + ret = etnaviv_iommu_find_iova(mmu, node, size); 351 357 if (ret < 0) { 352 358 mutex_unlock(&mmu->lock); 353 359 return ret; 354 360 } 355 - ret = etnaviv_domain_map(mmu->domain, vram_node->start, paddr, 356 - size, ETNAVIV_PROT_READ); 357 - if (ret < 0) { 358 - drm_mm_remove_node(vram_node); 359 - mutex_unlock(&mmu->lock); 360 - return ret; 361 - } 362 - gpu->mmu->need_flush = true; 363 - mutex_unlock(&mmu->lock); 364 361 365 - *iova = (u32)vram_node->start; 366 - return 0; 362 + mapping->iova = node->start; 363 + ret = etnaviv_domain_map(mmu->domain, node->start, paddr, size, 364 + ETNAVIV_PROT_READ); 365 + 366 + if (ret < 0) { 367 + drm_mm_remove_node(node); 368 + mutex_unlock(&mmu->lock); 369 + return ret; 370 + } 371 + 372 + mmu->need_flush = true; 367 373 } 374 + 375 + list_add_tail(&mapping->mmu_node, &mmu->mappings); 376 + mapping->use = 1; 377 + 378 + mutex_unlock(&mmu->lock); 379 + 380 + return 0; 
368 381 } 369 382 370 - void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu, 371 - struct drm_mm_node *vram_node, size_t size, 372 - u32 iova) 383 + void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu *mmu, 384 + struct etnaviv_vram_mapping *mapping) 373 385 { 374 - struct etnaviv_iommu *mmu = gpu->mmu; 386 + struct drm_mm_node *node = &mapping->vram_node; 375 387 376 - if (mmu->version == ETNAVIV_IOMMU_V2) { 377 - mutex_lock(&mmu->lock); 378 - etnaviv_domain_unmap(mmu->domain, iova, size); 379 - drm_mm_remove_node(vram_node); 380 - mutex_unlock(&mmu->lock); 381 - } 388 + if (!mapping->use) 389 + return; 390 + 391 + mapping->use = 0; 392 + 393 + if (mmu->version == ETNAVIV_IOMMU_V1) 394 + return; 395 + 396 + mutex_lock(&mmu->lock); 397 + etnaviv_domain_unmap(mmu->domain, node->start, node->size); 398 + drm_mm_remove_node(node); 399 + mutex_unlock(&mmu->lock); 382 400 } 401 + 383 402 size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu) 384 403 { 385 404 return iommu->domain->ops->dump_size(iommu->domain);
+6 -6
drivers/gpu/drm/etnaviv/etnaviv_mmu.h
··· 59 59 void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu, 60 60 struct etnaviv_vram_mapping *mapping); 61 61 62 - int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr, 63 - struct drm_mm_node *vram_node, size_t size, 64 - u32 *iova); 65 - void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu, 66 - struct drm_mm_node *vram_node, size_t size, 67 - u32 iova); 62 + int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu *mmu, 63 + struct etnaviv_vram_mapping *mapping, 64 + u32 memory_base, dma_addr_t paddr, 65 + size_t size); 66 + void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu *mmu, 67 + struct etnaviv_vram_mapping *mapping); 68 68 69 69 size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu); 70 70 void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);