Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/etnaviv: move cmdbuf into submit object

Less dynamic allocations and slims down the cmdbuf object to only the
required information, as everything else is already available in the
submit object.

This also simplifies buffer and mappings lifetime management, as they
are now exclusively attached to the submit object and not additionally
to the cmdbuf.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>

+71 -100
+5 -5
drivers/gpu/drm/etnaviv/etnaviv_buffer.c
··· 166 166 167 167 u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu) 168 168 { 169 - struct etnaviv_cmdbuf *buffer = gpu->buffer; 169 + struct etnaviv_cmdbuf *buffer = &gpu->buffer; 170 170 171 171 lockdep_assert_held(&gpu->lock); 172 172 ··· 182 182 183 183 u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr) 184 184 { 185 - struct etnaviv_cmdbuf *buffer = gpu->buffer; 185 + struct etnaviv_cmdbuf *buffer = &gpu->buffer; 186 186 187 187 lockdep_assert_held(&gpu->lock); 188 188 ··· 217 217 218 218 void etnaviv_buffer_end(struct etnaviv_gpu *gpu) 219 219 { 220 - struct etnaviv_cmdbuf *buffer = gpu->buffer; 220 + struct etnaviv_cmdbuf *buffer = &gpu->buffer; 221 221 unsigned int waitlink_offset = buffer->user_size - 16; 222 222 u32 link_target, flush = 0; 223 223 ··· 261 261 /* Append a 'sync point' to the ring buffer. */ 262 262 void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event) 263 263 { 264 - struct etnaviv_cmdbuf *buffer = gpu->buffer; 264 + struct etnaviv_cmdbuf *buffer = &gpu->buffer; 265 265 unsigned int waitlink_offset = buffer->user_size - 16; 266 266 u32 dwords, target; 267 267 ··· 300 300 void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, 301 301 unsigned int event, struct etnaviv_cmdbuf *cmdbuf) 302 302 { 303 - struct etnaviv_cmdbuf *buffer = gpu->buffer; 303 + struct etnaviv_cmdbuf *buffer = &gpu->buffer; 304 304 unsigned int waitlink_offset = buffer->user_size - 16; 305 305 u32 return_target, return_dwords; 306 306 u32 link_target, link_dwords;
+4 -13
drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c
··· 86 86 kfree(suballoc); 87 87 } 88 88 89 - struct etnaviv_cmdbuf * 90 - etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size, 91 - size_t nr_bos) 89 + int etnaviv_cmdbuf_init(struct etnaviv_cmdbuf_suballoc *suballoc, 90 + struct etnaviv_cmdbuf *cmdbuf, u32 size) 92 91 { 93 - struct etnaviv_cmdbuf *cmdbuf; 94 - size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]), 95 - sizeof(*cmdbuf)); 96 92 int granule_offs, order, ret; 97 - 98 - cmdbuf = kzalloc(sz, GFP_KERNEL); 99 - if (!cmdbuf) 100 - return NULL; 101 93 102 94 cmdbuf->suballoc = suballoc; 103 95 cmdbuf->size = size; ··· 108 116 if (!ret) { 109 117 dev_err(suballoc->gpu->dev, 110 118 "Timeout waiting for cmdbuf space\n"); 111 - return NULL; 119 + return -ETIMEDOUT; 112 120 } 113 121 goto retry; 114 122 } ··· 116 124 cmdbuf->suballoc_offset = granule_offs * SUBALLOC_GRANULE; 117 125 cmdbuf->vaddr = suballoc->vaddr + cmdbuf->suballoc_offset; 118 126 119 - return cmdbuf; 127 + return 0; 120 128 } 121 129 122 130 void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf) ··· 132 140 suballoc->free_space = 1; 133 141 mutex_unlock(&suballoc->lock); 134 142 wake_up_all(&suballoc->free_event); 135 - kfree(cmdbuf); 136 143 } 137 144 138 145 u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf)
+3 -10
drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
··· 33 33 void *vaddr; 34 34 u32 size; 35 35 u32 user_size; 36 - /* fence after which this buffer is to be disposed */ 37 - struct dma_fence *fence; 38 - /* per GPU in-flight list */ 39 - struct list_head node; 40 - /* BOs attached to this command buffer */ 41 - unsigned int nr_bos; 42 - struct etnaviv_vram_mapping *bo_map[0]; 43 36 }; 44 37 45 38 struct etnaviv_cmdbuf_suballoc * 46 39 etnaviv_cmdbuf_suballoc_new(struct etnaviv_gpu * gpu); 47 40 void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc); 48 41 49 - struct etnaviv_cmdbuf * 50 - etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size, 51 - size_t nr_bos); 42 + 43 + int etnaviv_cmdbuf_init(struct etnaviv_cmdbuf_suballoc *suballoc, 44 + struct etnaviv_cmdbuf *cmdbuf, u32 size); 52 45 void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf); 53 46 54 47 u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf);
+1 -1
drivers/gpu/drm/etnaviv/etnaviv_drv.c
··· 172 172 173 173 static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m) 174 174 { 175 - struct etnaviv_cmdbuf *buf = gpu->buffer; 175 + struct etnaviv_cmdbuf *buf = &gpu->buffer; 176 176 u32 size = buf->size; 177 177 u32 *ptr = buf->vaddr; 178 178 u32 i;
+11 -10
drivers/gpu/drm/etnaviv/etnaviv_dump.c
··· 120 120 struct core_dump_iterator iter; 121 121 struct etnaviv_vram_mapping *vram; 122 122 struct etnaviv_gem_object *obj; 123 - struct etnaviv_cmdbuf *cmd; 123 + struct etnaviv_gem_submit *submit; 124 124 unsigned int n_obj, n_bomap_pages; 125 125 size_t file_size, mmu_size; 126 126 __le64 *bomap, *bomap_start; ··· 132 132 n_bomap_pages = 0; 133 133 file_size = ARRAY_SIZE(etnaviv_dump_registers) * 134 134 sizeof(struct etnaviv_dump_registers) + 135 - mmu_size + gpu->buffer->size; 135 + mmu_size + gpu->buffer.size; 136 136 137 137 /* Add in the active command buffers */ 138 - list_for_each_entry(cmd, &gpu->active_cmd_list, node) { 139 - file_size += cmd->size; 138 + list_for_each_entry(submit, &gpu->active_submit_list, node) { 139 + file_size += submit->cmdbuf.size; 140 140 n_obj++; 141 141 } 142 142 ··· 176 176 177 177 etnaviv_core_dump_registers(&iter, gpu); 178 178 etnaviv_core_dump_mmu(&iter, gpu, mmu_size); 179 - etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer->vaddr, 180 - gpu->buffer->size, 181 - etnaviv_cmdbuf_get_va(gpu->buffer)); 179 + etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr, 180 + gpu->buffer.size, 181 + etnaviv_cmdbuf_get_va(&gpu->buffer)); 182 182 183 - list_for_each_entry(cmd, &gpu->active_cmd_list, node) 184 - etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD, cmd->vaddr, 185 - cmd->size, etnaviv_cmdbuf_get_va(cmd)); 183 + list_for_each_entry(submit, &gpu->active_submit_list, node) 184 + etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD, 185 + submit->cmdbuf.vaddr, submit->cmdbuf.size, 186 + etnaviv_cmdbuf_get_va(&submit->cmdbuf)); 186 187 187 188 /* Reserve space for the bomap */ 188 189 if (n_bomap_pages) {
+3
drivers/gpu/drm/etnaviv/etnaviv_gem.h
··· 18 18 #define __ETNAVIV_GEM_H__ 19 19 20 20 #include <linux/reservation.h> 21 + #include "etnaviv_cmdbuf.h" 21 22 #include "etnaviv_drv.h" 22 23 23 24 struct dma_fence; ··· 104 103 struct kref refcount; 105 104 struct etnaviv_gpu *gpu; 106 105 struct dma_fence *out_fence, *in_fence; 106 + struct list_head node; /* GPU active submit list */ 107 + struct etnaviv_cmdbuf cmdbuf; 107 108 u32 exec_state; 108 109 u32 flags; 109 110 unsigned int nr_pmrs;
+13 -15
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
··· 354 354 container_of(kref, struct etnaviv_gem_submit, refcount); 355 355 unsigned i; 356 356 357 + if (submit->cmdbuf.suballoc) 358 + etnaviv_cmdbuf_free(&submit->cmdbuf); 359 + 357 360 for (i = 0; i < submit->nr_bos; i++) { 358 361 struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; 359 362 ··· 394 391 struct drm_etnaviv_gem_submit_pmr *pmrs; 395 392 struct drm_etnaviv_gem_submit_bo *bos; 396 393 struct etnaviv_gem_submit *submit; 397 - struct etnaviv_cmdbuf *cmdbuf; 398 394 struct etnaviv_gpu *gpu; 399 395 struct sync_file *sync_file = NULL; 400 396 struct ww_acquire_ctx ticket; ··· 434 432 relocs = kvmalloc_array(args->nr_relocs, sizeof(*relocs), GFP_KERNEL); 435 433 pmrs = kvmalloc_array(args->nr_pmrs, sizeof(*pmrs), GFP_KERNEL); 436 434 stream = kvmalloc_array(1, args->stream_size, GFP_KERNEL); 437 - cmdbuf = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, 438 - ALIGN(args->stream_size, 8) + 8, 439 - args->nr_bos); 440 - if (!bos || !relocs || !pmrs || !stream || !cmdbuf) { 435 + if (!bos || !relocs || !pmrs || !stream) { 441 436 ret = -ENOMEM; 442 437 goto err_submit_cmds; 443 438 } 444 - 445 - cmdbuf->ctx = file->driver_priv; 446 439 447 440 ret = copy_from_user(bos, u64_to_user_ptr(args->bos), 448 441 args->nr_bos * sizeof(*bos)); ··· 483 486 goto err_submit_ww_acquire; 484 487 } 485 488 489 + ret = etnaviv_cmdbuf_init(gpu->cmdbuf_suballoc, &submit->cmdbuf, 490 + ALIGN(args->stream_size, 8) + 8); 491 + if (ret) 492 + goto err_submit_objects; 493 + 494 + submit->cmdbuf.ctx = file->driver_priv; 486 495 submit->exec_state = args->exec_state; 487 496 submit->flags = args->flags; 488 497 ··· 531 528 if (ret) 532 529 goto err_submit_objects; 533 530 534 - memcpy(cmdbuf->vaddr, stream, args->stream_size); 535 - cmdbuf->user_size = ALIGN(args->stream_size, 8); 531 + memcpy(submit->cmdbuf.vaddr, stream, args->stream_size); 532 + submit->cmdbuf.user_size = ALIGN(args->stream_size, 8); 536 533 537 - ret = etnaviv_gpu_submit(gpu, submit, cmdbuf); 534 + ret = 
etnaviv_gpu_submit(gpu, submit); 538 535 if (ret) 539 536 goto err_submit_objects; 540 537 541 538 submit_attach_object_fences(submit); 542 - 543 - cmdbuf = NULL; 544 539 545 540 if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) { 546 541 /* ··· 567 566 err_submit_cmds: 568 567 if (ret && (out_fence_fd >= 0)) 569 568 put_unused_fd(out_fence_fd); 570 - /* if we still own the cmdbuf */ 571 - if (cmdbuf) 572 - etnaviv_cmdbuf_free(cmdbuf); 573 569 if (stream) 574 570 kvfree(stream); 575 571 if (bos)
+26 -42
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
··· 644 644 prefetch = etnaviv_buffer_init(gpu); 645 645 646 646 gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U); 647 - etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(gpu->buffer), 647 + etnaviv_gpu_start_fe(gpu, etnaviv_cmdbuf_get_va(&gpu->buffer), 648 648 prefetch); 649 649 } 650 650 ··· 717 717 } 718 718 719 719 /* Create buffer: */ 720 - gpu->buffer = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc, PAGE_SIZE, 0); 721 - if (!gpu->buffer) { 722 - ret = -ENOMEM; 720 + ret = etnaviv_cmdbuf_init(gpu->cmdbuf_suballoc, &gpu->buffer, 721 + PAGE_SIZE); 722 + if (ret) { 723 723 dev_err(gpu->dev, "could not create command buffer\n"); 724 724 goto destroy_iommu; 725 725 } 726 726 727 727 if (gpu->mmu->version == ETNAVIV_IOMMU_V1 && 728 - etnaviv_cmdbuf_get_va(gpu->buffer) > 0x80000000) { 728 + etnaviv_cmdbuf_get_va(&gpu->buffer) > 0x80000000) { 729 729 ret = -EINVAL; 730 730 dev_err(gpu->dev, 731 731 "command buffer outside valid memory window\n"); ··· 751 751 return 0; 752 752 753 753 free_buffer: 754 - etnaviv_cmdbuf_free(gpu->buffer); 755 - gpu->buffer = NULL; 754 + etnaviv_cmdbuf_free(&gpu->buffer); 756 755 destroy_iommu: 757 756 etnaviv_iommu_destroy(gpu->mmu); 758 757 gpu->mmu = NULL; ··· 1200 1201 struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu, 1201 1202 retire_work); 1202 1203 u32 fence = gpu->completed_fence; 1203 - struct etnaviv_cmdbuf *cmdbuf, *tmp; 1204 + struct etnaviv_gem_submit *submit, *tmp; 1204 1205 unsigned int i; 1205 1206 1206 1207 mutex_lock(&gpu->lock); 1207 - list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list, node) { 1208 - if (!dma_fence_is_signaled(cmdbuf->fence)) 1208 + list_for_each_entry_safe(submit, tmp, &gpu->active_submit_list, node) { 1209 + if (!dma_fence_is_signaled(submit->out_fence)) 1209 1210 break; 1210 1211 1211 - list_del(&cmdbuf->node); 1212 - dma_fence_put(cmdbuf->fence); 1212 + list_del(&submit->node); 1213 1213 1214 - for (i = 0; i < cmdbuf->nr_bos; i++) { 1215 - struct etnaviv_vram_mapping *mapping = 
cmdbuf->bo_map[i]; 1216 - struct etnaviv_gem_object *etnaviv_obj = mapping->object; 1214 + for (i = 0; i < submit->nr_bos; i++) 1215 + atomic_dec(&submit->bos[i].obj->gpu_active); 1217 1216 1218 - atomic_dec(&etnaviv_obj->gpu_active); 1219 - /* drop the refcount taken in etnaviv_gpu_submit */ 1220 - etnaviv_gem_mapping_unreference(mapping); 1221 - } 1222 - 1223 - etnaviv_cmdbuf_free(cmdbuf); 1217 + etnaviv_submit_put(submit); 1224 1218 /* 1225 1219 * We need to balance the runtime PM count caused by 1226 1220 * each submission. Upon submission, we increment ··· 1367 1375 1368 1376 /* add bo's to gpu's ring, and kick gpu: */ 1369 1377 int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, 1370 - struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf) 1378 + struct etnaviv_gem_submit *submit) 1371 1379 { 1372 - struct dma_fence *fence; 1373 1380 unsigned int i, nr_events = 1, event[3]; 1374 1381 int ret; 1375 1382 ··· 1394 1403 1395 1404 mutex_lock(&gpu->lock); 1396 1405 1397 - fence = etnaviv_gpu_fence_alloc(gpu); 1398 - if (!fence) { 1406 + submit->out_fence = etnaviv_gpu_fence_alloc(gpu); 1407 + if (!submit->out_fence) { 1399 1408 for (i = 0; i < nr_events; i++) 1400 1409 event_free(gpu, event[i]); 1401 1410 ··· 1403 1412 goto out_unlock; 1404 1413 } 1405 1414 1406 - gpu->event[event[0]].fence = fence; 1407 - submit->out_fence = dma_fence_get(fence); 1408 1415 gpu->active_fence = submit->out_fence->seqno; 1409 1416 1410 1417 if (submit->nr_pmrs) { ··· 1412 1423 etnaviv_sync_point_queue(gpu, event[1]); 1413 1424 } 1414 1425 1415 - etnaviv_buffer_queue(gpu, submit->exec_state, event[0], cmdbuf); 1426 + kref_get(&submit->refcount); 1427 + gpu->event[event[0]].fence = submit->out_fence; 1428 + etnaviv_buffer_queue(gpu, submit->exec_state, event[0], 1429 + &submit->cmdbuf); 1416 1430 1417 1431 if (submit->nr_pmrs) { 1418 1432 gpu->event[event[2]].sync_point = &sync_point_perfmon_sample_post; ··· 1424 1432 etnaviv_sync_point_queue(gpu, event[2]); 1425 1433 } 1426 
1434 1427 - cmdbuf->fence = fence; 1428 - list_add_tail(&cmdbuf->node, &gpu->active_cmd_list); 1435 + list_add_tail(&submit->node, &gpu->active_submit_list); 1429 1436 1430 1437 /* We're committed to adding this command buffer, hold a PM reference */ 1431 1438 pm_runtime_get_noresume(gpu->dev); 1432 1439 1433 1440 for (i = 0; i < submit->nr_bos; i++) { 1434 1441 struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; 1435 - 1436 - /* Each cmdbuf takes a refcount on the mapping */ 1437 - etnaviv_gem_mapping_reference(submit->bos[i].mapping); 1438 - cmdbuf->bo_map[i] = submit->bos[i].mapping; 1439 1442 atomic_inc(&etnaviv_obj->gpu_active); 1440 1443 } 1441 - cmdbuf->nr_bos = submit->nr_bos; 1442 1444 hangcheck_timer_reset(gpu); 1443 1445 ret = 0; 1444 1446 ··· 1611 1625 1612 1626 static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu) 1613 1627 { 1614 - if (gpu->buffer) { 1628 + if (gpu->buffer.suballoc) { 1615 1629 /* Replace the last WAIT with END */ 1616 1630 mutex_lock(&gpu->lock); 1617 1631 etnaviv_buffer_end(gpu); ··· 1728 1742 gpu->fence_context = dma_fence_context_alloc(1); 1729 1743 spin_lock_init(&gpu->fence_spinlock); 1730 1744 1731 - INIT_LIST_HEAD(&gpu->active_cmd_list); 1745 + INIT_LIST_HEAD(&gpu->active_submit_list); 1732 1746 INIT_WORK(&gpu->retire_work, retire_worker); 1733 1747 INIT_WORK(&gpu->sync_point_work, sync_point_worker); 1734 1748 INIT_WORK(&gpu->recover_work, recover_worker); ··· 1763 1777 etnaviv_gpu_hw_suspend(gpu); 1764 1778 #endif 1765 1779 1766 - if (gpu->buffer) { 1767 - etnaviv_cmdbuf_free(gpu->buffer); 1768 - gpu->buffer = NULL; 1769 - } 1780 + if (gpu->buffer.suballoc) 1781 + etnaviv_cmdbuf_free(&gpu->buffer); 1770 1782 1771 1783 if (gpu->cmdbuf_suballoc) { 1772 1784 etnaviv_cmdbuf_suballoc_destroy(gpu->cmdbuf_suballoc); ··· 1902 1918 return ret; 1903 1919 1904 1920 /* Re-initialise the basic hardware state */ 1905 - if (gpu->drm && gpu->buffer) { 1921 + if (gpu->drm && gpu->buffer.suballoc) { 1906 1922 ret = 
etnaviv_gpu_hw_resume(gpu); 1907 1923 if (ret) { 1908 1924 etnaviv_gpu_clk_disable(gpu);
+4 -3
drivers/gpu/drm/etnaviv/etnaviv_gpu.h
··· 20 20 #include <linux/clk.h> 21 21 #include <linux/regulator/consumer.h> 22 22 23 + #include "etnaviv_cmdbuf.h" 23 24 #include "etnaviv_drv.h" 24 25 25 26 struct etnaviv_gem_submit; ··· 110 109 struct workqueue_struct *wq; 111 110 112 111 /* 'ring'-buffer: */ 113 - struct etnaviv_cmdbuf *buffer; 112 + struct etnaviv_cmdbuf buffer; 114 113 int exec_state; 115 114 116 115 /* bus base address of memory */ ··· 123 122 spinlock_t event_spinlock; 124 123 125 124 /* list of currently in-flight command buffers */ 126 - struct list_head active_cmd_list; 125 + struct list_head active_submit_list; 127 126 128 127 u32 idle_mask; 129 128 ··· 203 202 int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu, 204 203 struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout); 205 204 int etnaviv_gpu_submit(struct etnaviv_gpu *gpu, 206 - struct etnaviv_gem_submit *submit, struct etnaviv_cmdbuf *cmdbuf); 205 + struct etnaviv_gem_submit *submit); 207 206 int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu); 208 207 void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu); 209 208 int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
+1 -1
drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
··· 229 229 prefetch = etnaviv_buffer_config_mmuv2(gpu, 230 230 (u32)etnaviv_domain->mtlb_dma, 231 231 (u32)etnaviv_domain->base.bad_page_dma); 232 - etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(gpu->buffer), 232 + etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer), 233 233 prefetch); 234 234 etnaviv_gpu_wait_idle(gpu, 100); 235 235