Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

drm/amdgpu: Add mem_sync implementation for all the ASICs.

Implement the .mem_sync hook defined earlier.
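
For context, the hook being implemented here was introduced by the preceding patch in this series. A minimal sketch of the callback and of how a caller consumes it, paraphrased rather than copied from amdgpu_ring.h and the IB-scheduling path:

/* Sketch only; the real declaration lives in amdgpu_ring.h. */
struct amdgpu_ring_funcs {
	/* ... existing callbacks ... */

	/* Flush/invalidate GPU caches from the ring, so that one job's
	 * writes are visible to the jobs that follow it. */
	void (*emit_mem_sync)(struct amdgpu_ring *ring);
};

/* The IB-scheduling path can then emit the sync ahead of a job's IBs,
 * guarded because not every ring type implements the hook: */
if (ring->funcs->emit_mem_sync)
	ring->funcs->emit_mem_sync(ring);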

v2: Rename functions

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Luben Tuikov <luben.tuikov@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Andrey Grodzovsky and committed by Alex Deucher
2f9ce2a3 22301177

5 files changed: +94 -5
+26 -1
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
···
 	return 0;
 }
 
+static void gfx_v10_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+	const unsigned int gcr_cntl =
+		PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
+		PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
+		PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) |
+		PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) |
+		PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) |
+		PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
+		PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
+		PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1);
+
+	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
+	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
+	amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
+	amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+	amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */
+	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
+	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
+	amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
+}
+
 static const struct amd_ip_funcs gfx_v10_0_ip_funcs = {
 	.name = "gfx_v10_0",
 	.early_init = gfx_v10_0_early_init,
···
 		3 + /* CNTX_CTRL */
 		5 + /* HDP_INVL */
 		8 + 8 + /* FENCE x2 */
-		2, /* SWITCH_BUFFER */
+		2 + /* SWITCH_BUFFER */
+		8, /* gfx_v10_0_emit_mem_sync */
 	.emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */
 	.emit_ib = gfx_v10_0_ring_emit_ib_gfx,
 	.emit_fence = gfx_v10_0_ring_emit_fence,
···
 	.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
 	.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
 	.soft_recovery = gfx_v10_0_ring_soft_recovery,
+	.emit_mem_sync = gfx_v10_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
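A note on the frame-size accounting in the second hunk: in a PM4 type-3 packet, the count argument of PACKET3() encodes the number of payload dwords minus one, so PACKET3(PACKET3_ACQUIRE_MEM, 6) is followed by seven payload dwords, eight dwords in total, which is exactly the "8" this hunk adds to .emit_frame_size. The macro, as defined in the amdgpu packet headers (reproduced here from memory, so treat it as a sketch), is roughly:

#define PACKET_TYPE3	3
#define PACKET3(op, n)	((PACKET_TYPE3 << 30) |	\
			 (((op) & 0xFF) << 8) |	\
			 (((n) & 0x3FFF) << 16))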
+15 -1
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
···
 	return 0;
 }
 
+static void gfx_v6_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+			  PACKET3_TC_ACTION_ENA |
+			  PACKET3_SH_KCACHE_ACTION_ENA |
+			  PACKET3_SH_ICACHE_ACTION_ENA); /* CP_COHER_CNTL */
+	amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+	amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
+
 static const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
 	.name = "gfx_v6_0",
 	.early_init = gfx_v6_0_early_init,
···
 		14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
 		7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
 		SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
-		3 + 2, /* gfx_v6_ring_emit_cntxcntl including vgt flush */
+		3 + 2 + /* gfx_v6_ring_emit_cntxcntl including vgt flush */
+		5, /* SURFACE_SYNC */
 	.emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
 	.emit_ib = gfx_v6_0_ring_emit_ib,
 	.emit_fence = gfx_v6_0_ring_emit_fence,
···
 	.insert_nop = amdgpu_ring_insert_nop,
 	.emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
 	.emit_wreg = gfx_v6_0_ring_emit_wreg,
+	.emit_mem_sync = gfx_v6_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
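The "5, /* SURFACE_SYNC */" budgeted in .emit_frame_size matches the packet dword for dword; the same five-dword layout is reused by the gfx v7 and v8 variants below:

/* SURFACE_SYNC dword budget (header count 3 = 4 payload dwords):
 *   1: PACKET3(PACKET3_SURFACE_SYNC, 3)   packet header
 *   2: CP_COHER_CNTL    which caches to flush/invalidate
 *   3: CP_COHER_SIZE    0xffffffff, cover the whole address range
 *   4: CP_COHER_BASE    0, starting from address zero
 *   5: poll interval    0x0000000A
 */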
+15 -1
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
···
 	return 0;
 }
 
+static void gfx_v7_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+			  PACKET3_TC_ACTION_ENA |
+			  PACKET3_SH_KCACHE_ACTION_ENA |
+			  PACKET3_SH_ICACHE_ACTION_ENA); /* CP_COHER_CNTL */
+	amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+	amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
+
 static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
 	.name = "gfx_v7_0",
 	.early_init = gfx_v7_0_early_init,
···
 		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
 		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
 		CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
-		3 + 4, /* gfx_v7_ring_emit_cntxcntl including vgt flush */
+		3 + 4 + /* gfx_v7_ring_emit_cntxcntl including vgt flush */
+		5, /* SURFACE_SYNC */
 	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
 	.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
 	.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
···
 	.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
 	.emit_wreg = gfx_v7_0_ring_emit_wreg,
 	.soft_recovery = gfx_v7_0_ring_soft_recovery,
+	.emit_mem_sync = gfx_v7_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
+16 -1
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
···
 	return 0;
 }
 
+static void gfx_v8_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+	amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+			  PACKET3_TC_ACTION_ENA |
+			  PACKET3_SH_KCACHE_ACTION_ENA |
+			  PACKET3_SH_ICACHE_ACTION_ENA |
+			  PACKET3_TC_WB_ACTION_ENA); /* CP_COHER_CNTL */
+	amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+	amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
+
 static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
 	.name = "gfx_v8_0",
 	.early_init = gfx_v8_0_early_init,
···
 		3 + /* CNTX_CTRL */
 		5 + /* HDP_INVL */
 		12 + 12 + /* FENCE x2 */
-		2, /* SWITCH_BUFFER */
+		2 + /* SWITCH_BUFFER */
+		5, /* SURFACE_SYNC */
 	.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
 	.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
 	.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
···
 	.patch_cond_exec = gfx_v8_0_ring_emit_patch_cond_exec,
 	.emit_wreg = gfx_v8_0_ring_emit_wreg,
 	.soft_recovery = gfx_v8_0_ring_soft_recovery,
+	.emit_mem_sync = gfx_v8_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
+22 -1
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
···
 	return 0;
 }
 
+static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+	const unsigned int cp_coher_cntl =
+		PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
+		PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
+		PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
+		PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
+		PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
+
+	/* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
+	amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
+	amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
+	amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+	amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */
+	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+	amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
+	amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
+}
+
 static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
 	.name = "gfx_v9_0",
 	.early_init = gfx_v9_0_early_init,
···
 		3 + /* CNTX_CTRL */
 		5 + /* HDP_INVL */
 		8 + 8 + /* FENCE x2 */
-		2, /* SWITCH_BUFFER */
+		2 + /* SWITCH_BUFFER */
+		7, /* gfx_v9_0_emit_mem_sync */
 	.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
 	.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
 	.emit_fence = gfx_v9_0_ring_emit_fence,
···
 	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
 	.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
 	.soft_recovery = gfx_v9_0_ring_soft_recovery,
+	.emit_mem_sync = gfx_v9_0_emit_mem_sync,
 };
 
 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
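As with the other ASICs, the frame-size delta matches the packet: gfx v9's ACQUIRE_MEM is a header plus six payload dwords, hence the "7" budgeted above. Unlike gfx v10 there is no trailing GCR_CNTL dword; the cache actions are requested through the CP_COHER_CNTL field macros instead. Summarizing the three packet shapes used by this patch (dword counts taken from the hunks above):

/* gfx v6-v8 SURFACE_SYNC: header + CNTL, SIZE, BASE, POLL                   = 5
 * gfx v9    ACQUIRE_MEM:  header + CNTL, SIZE, SIZE_HI, BASE, BASE_HI, POLL = 7
 * gfx v10   ACQUIRE_MEM:  header + CNTL, SIZE, SIZE_HI, BASE, BASE_HI,
 *                         POLL, GCR_CNTL                                    = 8
 */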