Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

drm/amdgpu: implement gmc_v6_0_emit_flush_gpu_tlb

Unify tlb flushing for gmc v6.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Christian König, committed by Alex Deucher
4fef88bd 7ef11047

+29 -33 total

drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c  +4 -21
···
 #include "dce/dce_6_0_sh_mask.h"
 #include "gca/gfx_7_2_enum.h"
 #include "si_enums.h"
+#include "si.h"

 static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
 static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
···
 {
 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);

-	/* write new base address */
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
-				 WRITE_DATA_DST_SEL(0)));
-	if (vmid < 8) {
-		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid ));
-	} else {
-		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8)));
-	}
-	amdgpu_ring_write(ring, 0);
-	amdgpu_ring_write(ring, pd_addr >> 12);
-
-	/* bits 0-15 are the VM contexts0-15 */
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
-				 WRITE_DATA_DST_SEL(0)));
-	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
-	amdgpu_ring_write(ring, 0);
-	amdgpu_ring_write(ring, 1 << vmid);
+	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pasid, pd_addr);

 	/* wait for the invalidate to complete */
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
···
 		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
 		14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
 		7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
-		17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
+		SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
 		3 + 2, /* gfx_v6_ring_emit_cntxcntl including vgt flush */
 	.emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
 	.emit_ib = gfx_v6_0_ring_emit_ib,
···
 		5 + /* gfx_v6_0_ring_emit_hdp_flush */
 		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
 		7 + /* gfx_v6_0_ring_emit_pipeline_sync */
-		17 + /* gfx_v6_0_ring_emit_vm_flush */
+		SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v6_0_ring_emit_vm_flush */
 		14 + 14 + 14, /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
 	.emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
 	.emit_ib = gfx_v6_0_ring_emit_ib,
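
The new frame-size estimate uses SI_FLUSH_GPU_TLB_NUM_WREG * 5 because each register write emitted on a gfx ring costs 5 dwords (one WRITE_DATA packet). The write itself is no longer open-coded here; it goes through the ring's emit_wreg callback. A minimal sketch of that callback, reconstructed from the WRITE_DATA sequence removed above (the function name and exact form are assumptions; the callback is not part of this diff):

/* Sketch only: reconstructed from the removed WRITE_DATA sequence above;
 * the real gfx v6 emit_wreg callback is not shown in this diff. */
static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	/* one WRITE_DATA packet = 5 dwords, hence the "* 5" in the
	 * emit_frame_size accounting above */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, WRITE_DATA_ENGINE_SEL(1) |
				WRITE_DATA_DST_SEL(0));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}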
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c  +20
···
 	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 }

+static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
+					    unsigned vmid, unsigned pasid,
+					    uint64_t pd_addr)
+{
+	uint32_t reg;
+
+	/* write new base address */
+	if (vmid < 8)
+		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
+	else
+		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8);
+	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
+
+	/* bits 0-15 are the VM contexts0-15 */
+	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
+
+	return pd_addr;
+}
+
 static int gmc_v6_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
 				uint32_t gpu_page_idx, uint64_t addr,
 				uint64_t flags)
···

 static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
 	.flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
+	.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
 	.set_pte_pde = gmc_v6_0_set_pte_pde,
 	.set_prt = gmc_v6_0_set_prt,
 	.get_vm_pde = gmc_v6_0_get_vm_pde,
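
Both ring backends now call amdgpu_gmc_emit_flush_gpu_tlb(), which dispatches to the emit_flush_gpu_tlb callback registered here. A minimal sketch of that dispatch, assuming the helper is a thin wrapper around adev->gmc.gmc_funcs->emit_flush_gpu_tlb (in the kernel it is defined as a macro; the inline function below is only illustrative and not part of this diff):

/* Illustrative only: shows the call chain behind amdgpu_gmc_emit_flush_gpu_tlb(). */
static inline uint64_t example_gmc_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
						       unsigned vmid,
						       unsigned pasid,
						       uint64_t pd_addr)
{
	/* per-generation callback, e.g. gmc_v6_0_emit_flush_gpu_tlb() above */
	return ring->adev->gmc.gmc_funcs->emit_flush_gpu_tlb(ring, vmid, pasid,
							     pd_addr);
}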
drivers/gpu/drm/amd/amdgpu/si.h  +2
···
 #ifndef __SI_H__
 #define __SI_H__

+#define SI_FLUSH_GPU_TLB_NUM_WREG	2
+
 void si_srbm_select(struct amdgpu_device *adev,
 		    u32 me, u32 pipe, u32 queue, u32 vmid);
 int si_set_ip_blocks(struct amdgpu_device *adev);
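
SI_FLUSH_GPU_TLB_NUM_WREG counts the two register writes a flush emits (the page table base address and VM_INVALIDATE_REQUEST). Multiplied by the per-write packet cost it reproduces the dword budgets that were previously hard-coded: on the gfx rings SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 = 2 * 5 + 7 = 17 dwords, matching the old constant 17, and on the SI DMA ring SI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 = 2 * 3 + 6 = 12 dwords, matching the old constant 12.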
drivers/gpu/drm/amd/amdgpu/si_dma.c  +3 -12
···
 #include <drm/drmP.h>
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
+#include "si.h"
 #include "sid.h"

 const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
···
 				   unsigned vmid, unsigned pasid,
 				   uint64_t pd_addr)
 {
-	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
-	if (vmid < 8)
-		amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
-	else
-		amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8)));
-	amdgpu_ring_write(ring, pd_addr >> 12);
-
-	/* bits 0-7 are the VM contexts0-7 */
-	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
-	amdgpu_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST));
-	amdgpu_ring_write(ring, 1 << vmid);
+	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pasid, pd_addr);

 	/* wait for invalidate to complete */
 	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
···
 		3 + /* si_dma_ring_emit_hdp_flush */
 		3 + /* si_dma_ring_emit_hdp_invalidate */
 		6 + /* si_dma_ring_emit_pipeline_sync */
-		12 + /* si_dma_ring_emit_vm_flush */
+		SI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* si_dma_ring_emit_vm_flush */
 		9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
 	.emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
 	.emit_ib = si_dma_ring_emit_ib,
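
On the DMA ring a register write is a 3-dword SRBM_WRITE packet, which is where the "* 3" in the frame-size estimate comes from. A sketch of the corresponding emit_wreg callback, reconstructed from the SRBM_WRITE sequence removed above (the function name and exact form are assumptions; the callback is not part of this diff):

/* Sketch only: reconstructed from the removed SRBM_WRITE sequence above. */
static void si_dma_ring_emit_wreg(struct amdgpu_ring *ring,
				  uint32_t reg, uint32_t val)
{
	/* one SRBM_WRITE packet = 3 dwords, hence the "* 3" above */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
	amdgpu_ring_write(ring, (0xf << 16) | reg);
	amdgpu_ring_write(ring, val);
}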