Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: add flags to emit_ib interface v2

Replace the last bool type parameter with a general flags parameter,
so that the last parameter can carry more information.

v2: drop setting need_ctx_switch = false

Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Jack Xiao <Jack.Xiao@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Jack Xiao and committed by Alex Deucher
c4c905ec a7cd9771

+34 -34
+5 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
··· 202 202 amdgpu_asic_flush_hdp(adev, ring); 203 203 } 204 204 205 + if (need_ctx_switch) 206 + status |= AMDGPU_HAVE_CTX_SWITCH; 207 + 205 208 skip_preamble = ring->current_ctx == fence_ctx; 206 209 if (job && ring->funcs->emit_cntxcntl) { 207 - if (need_ctx_switch) 208 - status |= AMDGPU_HAVE_CTX_SWITCH; 209 210 status |= job->preamble_status; 210 - 211 211 amdgpu_ring_emit_cntxcntl(ring, status); 212 212 } 213 213 ··· 221 221 !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */ 222 222 continue; 223 223 224 - amdgpu_ring_emit_ib(ring, job, ib, need_ctx_switch); 225 - need_ctx_switch = false; 224 + amdgpu_ring_emit_ib(ring, job, ib, status); 225 + status &= ~AMDGPU_HAVE_CTX_SWITCH; 226 226 } 227 227 228 228 if (ring->funcs->emit_tmz)
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
··· 131 131 void (*emit_ib)(struct amdgpu_ring *ring, 132 132 struct amdgpu_job *job, 133 133 struct amdgpu_ib *ib, 134 - bool ctx_switch); 134 + uint32_t flags); 135 135 void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr, 136 136 uint64_t seq, unsigned flags); 137 137 void (*emit_pipeline_sync)(struct amdgpu_ring *ring); ··· 229 229 #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r)) 230 230 #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) 231 231 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r)) 232 - #define amdgpu_ring_emit_ib(r, job, ib, c) ((r)->funcs->emit_ib((r), (job), (ib), (c))) 232 + #define amdgpu_ring_emit_ib(r, job, ib, flags) ((r)->funcs->emit_ib((r), (job), (ib), (flags))) 233 233 #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r)) 234 234 #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr)) 235 235 #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
··· 1035 1035 void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, 1036 1036 struct amdgpu_job *job, 1037 1037 struct amdgpu_ib *ib, 1038 - bool ctx_switch) 1038 + uint32_t flags) 1039 1039 { 1040 1040 amdgpu_ring_write(ring, VCE_CMD_IB); 1041 1041 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
··· 66 66 int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); 67 67 int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx); 68 68 void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, 69 - struct amdgpu_ib *ib, bool ctx_switch); 69 + struct amdgpu_ib *ib, uint32_t flags); 70 70 void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, 71 71 unsigned flags); 72 72 int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
+1 -1
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
··· 220 220 static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring, 221 221 struct amdgpu_job *job, 222 222 struct amdgpu_ib *ib, 223 - bool ctx_switch) 223 + uint32_t flags) 224 224 { 225 225 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 226 226 u32 extra_bits = vmid & 0xf;
+2 -2
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
··· 1842 1842 static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring, 1843 1843 struct amdgpu_job *job, 1844 1844 struct amdgpu_ib *ib, 1845 - bool ctx_switch) 1845 + uint32_t flags) 1846 1846 { 1847 1847 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 1848 1848 u32 header, control = 0; 1849 1849 1850 1850 /* insert SWITCH_BUFFER packet before first IB in the ring frame */ 1851 - if (ctx_switch) { 1851 + if (flags & AMDGPU_HAVE_CTX_SWITCH) { 1852 1852 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 1853 1853 amdgpu_ring_write(ring, 0); 1854 1854 }
+3 -3
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
··· 2228 2228 static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, 2229 2229 struct amdgpu_job *job, 2230 2230 struct amdgpu_ib *ib, 2231 - bool ctx_switch) 2231 + uint32_t flags) 2232 2232 { 2233 2233 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 2234 2234 u32 header, control = 0; 2235 2235 2236 2236 /* insert SWITCH_BUFFER packet before first IB in the ring frame */ 2237 - if (ctx_switch) { 2237 + if (flags & AMDGPU_HAVE_CTX_SWITCH) { 2238 2238 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 2239 2239 amdgpu_ring_write(ring, 0); 2240 2240 } ··· 2259 2259 static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring, 2260 2260 struct amdgpu_job *job, 2261 2261 struct amdgpu_ib *ib, 2262 - bool ctx_switch) 2262 + uint32_t flags) 2263 2263 { 2264 2264 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 2265 2265 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
+2 -2
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
··· 6047 6047 static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, 6048 6048 struct amdgpu_job *job, 6049 6049 struct amdgpu_ib *ib, 6050 - bool ctx_switch) 6050 + uint32_t flags) 6051 6051 { 6052 6052 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 6053 6053 u32 header, control = 0; ··· 6079 6079 static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring, 6080 6080 struct amdgpu_job *job, 6081 6081 struct amdgpu_ib *ib, 6082 - bool ctx_switch) 6082 + uint32_t flags) 6083 6083 { 6084 6084 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 6085 6085 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
+2 -2
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 3972 3972 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, 3973 3973 struct amdgpu_job *job, 3974 3974 struct amdgpu_ib *ib, 3975 - bool ctx_switch) 3975 + uint32_t flags) 3976 3976 { 3977 3977 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 3978 3978 u32 header, control = 0; ··· 4005 4005 static void gfx_v9_0_ring_emit_ib_compute(struct amdgpu_ring *ring, 4006 4006 struct amdgpu_job *job, 4007 4007 struct amdgpu_ib *ib, 4008 - bool ctx_switch) 4008 + uint32_t flags) 4009 4009 { 4010 4010 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 4011 4011 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);
+1 -1
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
··· 247 247 static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, 248 248 struct amdgpu_job *job, 249 249 struct amdgpu_ib *ib, 250 - bool ctx_switch) 250 + uint32_t flags) 251 251 { 252 252 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 253 253
+1 -1
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
··· 421 421 static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, 422 422 struct amdgpu_job *job, 423 423 struct amdgpu_ib *ib, 424 - bool ctx_switch) 424 + uint32_t flags) 425 425 { 426 426 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 427 427
+1 -1
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
··· 500 500 static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring, 501 501 struct amdgpu_job *job, 502 502 struct amdgpu_ib *ib, 503 - bool ctx_switch) 503 + uint32_t flags) 504 504 { 505 505 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 506 506
+1 -1
drivers/gpu/drm/amd/amdgpu/si_dma.c
··· 63 63 static void si_dma_ring_emit_ib(struct amdgpu_ring *ring, 64 64 struct amdgpu_job *job, 65 65 struct amdgpu_ib *ib, 66 - bool ctx_switch) 66 + uint32_t flags) 67 67 { 68 68 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 69 69 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+1 -1
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
··· 511 511 static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring, 512 512 struct amdgpu_job *job, 513 513 struct amdgpu_ib *ib, 514 - bool ctx_switch) 514 + uint32_t flags) 515 515 { 516 516 amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0)); 517 517 amdgpu_ring_write(ring, ib->gpu_addr);
+1 -1
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
··· 526 526 static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, 527 527 struct amdgpu_job *job, 528 528 struct amdgpu_ib *ib, 529 - bool ctx_switch) 529 + uint32_t flags) 530 530 { 531 531 amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); 532 532 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
+2 -2
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
··· 977 977 static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, 978 978 struct amdgpu_job *job, 979 979 struct amdgpu_ib *ib, 980 - bool ctx_switch) 980 + uint32_t flags) 981 981 { 982 982 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 983 983 ··· 1003 1003 static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring, 1004 1004 struct amdgpu_job *job, 1005 1005 struct amdgpu_ib *ib, 1006 - bool ctx_switch) 1006 + uint32_t flags) 1007 1007 { 1008 1008 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 1009 1009
+2 -2
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
··· 1272 1272 static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring, 1273 1273 struct amdgpu_job *job, 1274 1274 struct amdgpu_ib *ib, 1275 - bool ctx_switch) 1275 + uint32_t flags) 1276 1276 { 1277 1277 struct amdgpu_device *adev = ring->adev; 1278 1278 unsigned vmid = AMDGPU_JOB_GET_VMID(job); ··· 1303 1303 static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring, 1304 1304 struct amdgpu_job *job, 1305 1305 struct amdgpu_ib *ib, 1306 - bool ctx_switch) 1306 + uint32_t flags) 1307 1307 { 1308 1308 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 1309 1309
+1 -1
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
··· 834 834 static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring, 835 835 struct amdgpu_job *job, 836 836 struct amdgpu_ib *ib, 837 - bool ctx_switch) 837 + uint32_t flags) 838 838 { 839 839 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 840 840
+1 -1
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
··· 947 947 #endif 948 948 949 949 static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, 950 - struct amdgpu_ib *ib, bool ctx_switch) 950 + struct amdgpu_ib *ib, uint32_t flags) 951 951 { 952 952 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 953 953
+3 -3
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
··· 1371 1371 static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring, 1372 1372 struct amdgpu_job *job, 1373 1373 struct amdgpu_ib *ib, 1374 - bool ctx_switch) 1374 + uint32_t flags) 1375 1375 { 1376 1376 struct amdgpu_device *adev = ring->adev; 1377 1377 unsigned vmid = AMDGPU_JOB_GET_VMID(job); ··· 1531 1531 static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring, 1532 1532 struct amdgpu_job *job, 1533 1533 struct amdgpu_ib *ib, 1534 - bool ctx_switch) 1534 + uint32_t flags) 1535 1535 { 1536 1536 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 1537 1537 ··· 1736 1736 static void vcn_v1_0_jpeg_ring_emit_ib(struct amdgpu_ring *ring, 1737 1737 struct amdgpu_job *job, 1738 1738 struct amdgpu_ib *ib, 1739 - bool ctx_switch) 1739 + uint32_t flags) 1740 1740 { 1741 1741 struct amdgpu_device *adev = ring->adev; 1742 1742 unsigned vmid = AMDGPU_JOB_GET_VMID(job);