Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/etnaviv: reference MMU context when setting up hardware state

Move the refcount manipulation of the MMU context to the point where the
hardware state is programmed. At that point it is also known whether a
previous MMU state is still present, or whether the state needs to be
reprogrammed with a potentially different context.

Cc: stable@vger.kernel.org # 5.4
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Tested-by: Michael Walle <michael@walle.cc>
Tested-by: Marek Vasut <marex@denx.de>
Reviewed-by: Christian Gmeiner <christian.gmeiner@gmail.com>

+24 -12
+12 -12
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
··· 647 647 gpu->fe_running = true; 648 648 } 649 649 650 - static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu) 650 + static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu, 651 + struct etnaviv_iommu_context *context) 651 652 { 652 - u32 address = etnaviv_cmdbuf_get_va(&gpu->buffer, 653 - &gpu->mmu_context->cmdbuf_mapping); 654 653 u16 prefetch; 654 + u32 address; 655 655 656 656 /* setup the MMU */ 657 - etnaviv_iommu_restore(gpu, gpu->mmu_context); 657 + etnaviv_iommu_restore(gpu, context); 658 658 659 659 /* Start command processor */ 660 660 prefetch = etnaviv_buffer_init(gpu); 661 + address = etnaviv_cmdbuf_get_va(&gpu->buffer, 662 + &gpu->mmu_context->cmdbuf_mapping); 661 663 662 664 etnaviv_gpu_start_fe(gpu, address, prefetch); 663 665 } ··· 1377 1375 goto out_unlock; 1378 1376 } 1379 1377 1380 - if (!gpu->fe_running) { 1381 - gpu->mmu_context = etnaviv_iommu_context_get(submit->mmu_context); 1382 - etnaviv_gpu_start_fe_idleloop(gpu); 1383 - } else { 1384 - if (submit->prev_mmu_context) 1385 - etnaviv_iommu_context_put(submit->prev_mmu_context); 1386 - submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context); 1387 - } 1378 + if (!gpu->fe_running) 1379 + etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context); 1380 + 1381 + if (submit->prev_mmu_context) 1382 + etnaviv_iommu_context_put(submit->prev_mmu_context); 1383 + submit->prev_mmu_context = etnaviv_iommu_context_get(gpu->mmu_context); 1388 1384 1389 1385 if (submit->nr_pmrs) { 1390 1386 gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
+4
drivers/gpu/drm/etnaviv/etnaviv_iommu.c
··· 92 92 struct etnaviv_iommuv1_context *v1_context = to_v1_context(context); 93 93 u32 pgtable; 94 94 95 + if (gpu->mmu_context) 96 + etnaviv_iommu_context_put(gpu->mmu_context); 97 + gpu->mmu_context = etnaviv_iommu_context_get(context); 98 + 95 99 /* set base addresses */ 96 100 gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, context->global->memory_base); 97 101 gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, context->global->memory_base);
+8
drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
··· 172 172 if (gpu_read(gpu, VIVS_MMUv2_CONTROL) & VIVS_MMUv2_CONTROL_ENABLE) 173 173 return; 174 174 175 + if (gpu->mmu_context) 176 + etnaviv_iommu_context_put(gpu->mmu_context); 177 + gpu->mmu_context = etnaviv_iommu_context_get(context); 178 + 175 179 prefetch = etnaviv_buffer_config_mmuv2(gpu, 176 180 (u32)v2_context->mtlb_dma, 177 181 (u32)context->global->bad_page_dma); ··· 195 191 /* If the MMU is already enabled the state is still there. */ 196 192 if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE) 197 193 return; 194 + 195 + if (gpu->mmu_context) 196 + etnaviv_iommu_context_put(gpu->mmu_context); 197 + gpu->mmu_context = etnaviv_iommu_context_get(context); 198 198 199 199 gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW, 200 200 lower_32_bits(context->global->v2.pta_dma));