Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdgpu: Retire amdgpu_ring.ready flag v4

Start using drm_gpu_scheduler.ready instead.

v3:
Add helper function to run ring test and set
sched.ready flag status accordingly, clean explicit
sched.ready sets from the IP specific files.

v4: Add kerneldoc and rebase.

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Andrey Grodzovsky and committed by
Alex Deucher
c66ed765 faf6e1a8

+129 -187
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
··· 144 144 KGD_MAX_QUEUES); 145 145 146 146 /* remove the KIQ bit as well */ 147 - if (adev->gfx.kiq.ring.ready) 147 + if (adev->gfx.kiq.ring.sched.ready) 148 148 clear_bit(amdgpu_gfx_queue_to_bit(adev, 149 149 adev->gfx.kiq.ring.me - 1, 150 150 adev->gfx.kiq.ring.pipe,
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
··· 786 786 if (adev->in_gpu_reset) 787 787 return -EIO; 788 788 789 - if (ring->ready) 789 + if (ring->sched.ready) 790 790 return invalidate_tlbs_with_kiq(adev, pasid); 791 791 792 792 for (vmid = 0; vmid < 16; vmid++) {
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
··· 146 146 fence_ctx = 0; 147 147 } 148 148 149 - if (!ring->ready) { 149 + if (!ring->sched.ready) { 150 150 dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name); 151 151 return -EINVAL; 152 152 } ··· 351 351 struct amdgpu_ring *ring = adev->rings[i]; 352 352 long tmo; 353 353 354 - if (!ring || !ring->ready) 354 + if (!ring || !ring->sched.ready) 355 355 continue; 356 356 357 357 /* skip IB tests for KIQ in general for the below reasons: ··· 375 375 376 376 r = amdgpu_ring_test_ib(ring, tmo); 377 377 if (r) { 378 - ring->ready = false; 378 + ring->sched.ready = false; 379 379 380 380 if (ring == &adev->gfx.gfx_ring[0]) { 381 381 /* oh, oh, that's really bad */
+9 -9
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 336 336 case AMDGPU_HW_IP_GFX: 337 337 type = AMD_IP_BLOCK_TYPE_GFX; 338 338 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 339 - if (adev->gfx.gfx_ring[i].ready) 339 + if (adev->gfx.gfx_ring[i].sched.ready) 340 340 ++num_rings; 341 341 ib_start_alignment = 32; 342 342 ib_size_alignment = 32; ··· 344 344 case AMDGPU_HW_IP_COMPUTE: 345 345 type = AMD_IP_BLOCK_TYPE_GFX; 346 346 for (i = 0; i < adev->gfx.num_compute_rings; i++) 347 - if (adev->gfx.compute_ring[i].ready) 347 + if (adev->gfx.compute_ring[i].sched.ready) 348 348 ++num_rings; 349 349 ib_start_alignment = 32; 350 350 ib_size_alignment = 32; ··· 352 352 case AMDGPU_HW_IP_DMA: 353 353 type = AMD_IP_BLOCK_TYPE_SDMA; 354 354 for (i = 0; i < adev->sdma.num_instances; i++) 355 - if (adev->sdma.instance[i].ring.ready) 355 + if (adev->sdma.instance[i].ring.sched.ready) 356 356 ++num_rings; 357 357 ib_start_alignment = 256; 358 358 ib_size_alignment = 4; ··· 363 363 if (adev->uvd.harvest_config & (1 << i)) 364 364 continue; 365 365 366 - if (adev->uvd.inst[i].ring.ready) 366 + if (adev->uvd.inst[i].ring.sched.ready) 367 367 ++num_rings; 368 368 } 369 369 ib_start_alignment = 64; ··· 372 372 case AMDGPU_HW_IP_VCE: 373 373 type = AMD_IP_BLOCK_TYPE_VCE; 374 374 for (i = 0; i < adev->vce.num_rings; i++) 375 - if (adev->vce.ring[i].ready) 375 + if (adev->vce.ring[i].sched.ready) 376 376 ++num_rings; 377 377 ib_start_alignment = 4; 378 378 ib_size_alignment = 1; ··· 384 384 continue; 385 385 386 386 for (j = 0; j < adev->uvd.num_enc_rings; j++) 387 - if (adev->uvd.inst[i].ring_enc[j].ready) 387 + if (adev->uvd.inst[i].ring_enc[j].sched.ready) 388 388 ++num_rings; 389 389 } 390 390 ib_start_alignment = 64; ··· 392 392 break; 393 393 case AMDGPU_HW_IP_VCN_DEC: 394 394 type = AMD_IP_BLOCK_TYPE_VCN; 395 - if (adev->vcn.ring_dec.ready) 395 + if (adev->vcn.ring_dec.sched.ready) 396 396 ++num_rings; 397 397 ib_start_alignment = 16; 398 398 ib_size_alignment = 16; ··· 400 400 case AMDGPU_HW_IP_VCN_ENC: 401 401 type = 
AMD_IP_BLOCK_TYPE_VCN; 402 402 for (i = 0; i < adev->vcn.num_enc_rings; i++) 403 - if (adev->vcn.ring_enc[i].ready) 403 + if (adev->vcn.ring_enc[i].sched.ready) 404 404 ++num_rings; 405 405 ib_start_alignment = 64; 406 406 ib_size_alignment = 1; 407 407 break; 408 408 case AMDGPU_HW_IP_VCN_JPEG: 409 409 type = AMD_IP_BLOCK_TYPE_VCN; 410 - if (adev->vcn.ring_jpeg.ready) 410 + if (adev->vcn.ring_jpeg.sched.ready) 411 411 ++num_rings; 412 412 ib_start_alignment = 16; 413 413 ib_size_alignment = 16;
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
··· 2129 2129 2130 2130 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 2131 2131 struct amdgpu_ring *ring = adev->rings[i]; 2132 - if (ring && ring->ready) 2132 + if (ring && ring->sched.ready) 2133 2133 amdgpu_fence_wait_empty(ring); 2134 2134 } 2135 2135
+21 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
··· 338 338 */ 339 339 void amdgpu_ring_fini(struct amdgpu_ring *ring) 340 340 { 341 - ring->ready = false; 341 + ring->sched.ready = false; 342 342 343 343 /* Not to finish a ring which is not initialized */ 344 344 if (!(ring->adev) || !(ring->adev->rings[ring->idx])) ··· 499 499 #if defined(CONFIG_DEBUG_FS) 500 500 debugfs_remove(ring->ent); 501 501 #endif 502 + } 503 + 504 + /** 505 + * amdgpu_ring_test_helper - tests ring and set sched readiness status 506 + * 507 + * @ring: ring to try the recovery on 508 + * 509 + * Tests ring and set sched readiness status 510 + * 511 + * Returns 0 on success, error on failure. 512 + */ 513 + int amdgpu_ring_test_helper(struct amdgpu_ring *ring) 514 + { 515 + int r; 516 + 517 + r = amdgpu_ring_test_ring(ring); 518 + 519 + ring->sched.ready = !r; 520 + 521 + return r; 502 522 }
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
··· 189 189 uint64_t gpu_addr; 190 190 uint64_t ptr_mask; 191 191 uint32_t buf_mask; 192 - bool ready; 193 192 u32 idx; 194 193 u32 me; 195 194 u32 pipe; ··· 311 312 ring->wptr &= ring->ptr_mask; 312 313 ring->count_dw -= count_dw; 313 314 } 315 + 316 + int amdgpu_ring_test_helper(struct amdgpu_ring *ring); 314 317 315 318 #endif
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 1970 1970 unsigned i; 1971 1971 int r; 1972 1972 1973 - if (direct_submit && !ring->ready) { 1973 + if (direct_submit && !ring->sched.ready) { 1974 1974 DRM_ERROR("Trying to move memory with ring turned off.\n"); 1975 1975 return -EINVAL; 1976 1976 }
+5 -7
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
··· 316 316 WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl); 317 317 WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0); 318 318 } 319 - sdma0->ready = false; 320 - sdma1->ready = false; 319 + sdma0->sched.ready = false; 320 + sdma1->sched.ready = false; 321 321 } 322 322 323 323 /** ··· 494 494 /* enable DMA IBs */ 495 495 WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); 496 496 497 - ring->ready = true; 497 + ring->sched.ready = true; 498 498 } 499 499 500 500 cik_sdma_enable(adev, true); 501 501 502 502 for (i = 0; i < adev->sdma.num_instances; i++) { 503 503 ring = &adev->sdma.instance[i].ring; 504 - r = amdgpu_ring_test_ring(ring); 505 - if (r) { 506 - ring->ready = false; 504 + r = amdgpu_ring_test_helper(ring); 505 + if (r) 507 506 return r; 508 - } 509 507 510 508 if (adev->mman.buffer_funcs_ring == ring) 511 509 amdgpu_ttm_set_buffer_funcs_status(adev, true);
+5 -11
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
··· 1950 1950 CP_ME_CNTL__CE_HALT_MASK)); 1951 1951 WREG32(mmSCRATCH_UMSK, 0); 1952 1952 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 1953 - adev->gfx.gfx_ring[i].ready = false; 1953 + adev->gfx.gfx_ring[i].sched.ready = false; 1954 1954 for (i = 0; i < adev->gfx.num_compute_rings; i++) 1955 - adev->gfx.compute_ring[i].ready = false; 1955 + adev->gfx.compute_ring[i].sched.ready = false; 1956 1956 } 1957 1957 udelay(50); 1958 1958 } ··· 2124 2124 2125 2125 /* start the rings */ 2126 2126 gfx_v6_0_cp_gfx_start(adev); 2127 - ring->ready = true; 2128 - r = amdgpu_ring_test_ring(ring); 2129 - if (r) { 2130 - ring->ready = false; 2127 + r = amdgpu_ring_test_helper(ring); 2128 + if (r) 2131 2129 return r; 2132 - } 2133 2130 2134 2131 return 0; 2135 2132 } ··· 2224 2227 WREG32(mmCP_RB2_CNTL, tmp); 2225 2228 WREG32(mmCP_RB2_BASE, ring->gpu_addr >> 8); 2226 2229 2227 - adev->gfx.compute_ring[0].ready = false; 2228 - adev->gfx.compute_ring[1].ready = false; 2229 2230 2230 2231 for (i = 0; i < 2; i++) { 2231 - r = amdgpu_ring_test_ring(&adev->gfx.compute_ring[i]); 2232 + r = amdgpu_ring_test_helper(&adev->gfx.compute_ring[i]); 2232 2233 if (r) 2233 2234 return r; 2234 - adev->gfx.compute_ring[i].ready = true; 2235 2235 } 2236 2236 2237 2237 return 0;
+5 -11
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
··· 2403 2403 } else { 2404 2404 WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK)); 2405 2405 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 2406 - adev->gfx.gfx_ring[i].ready = false; 2406 + adev->gfx.gfx_ring[i].sched.ready = false; 2407 2407 } 2408 2408 udelay(50); 2409 2409 } ··· 2613 2613 2614 2614 /* start the ring */ 2615 2615 gfx_v7_0_cp_gfx_start(adev); 2616 - ring->ready = true; 2617 - r = amdgpu_ring_test_ring(ring); 2618 - if (r) { 2619 - ring->ready = false; 2616 + r = amdgpu_ring_test_helper(ring); 2617 + if (r) 2620 2618 return r; 2621 - } 2622 2619 2623 2620 return 0; 2624 2621 } ··· 2672 2675 } else { 2673 2676 WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); 2674 2677 for (i = 0; i < adev->gfx.num_compute_rings; i++) 2675 - adev->gfx.compute_ring[i].ready = false; 2678 + adev->gfx.compute_ring[i].sched.ready = false; 2676 2679 } 2677 2680 udelay(50); 2678 2681 } ··· 3103 3106 3104 3107 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3105 3108 ring = &adev->gfx.compute_ring[i]; 3106 - ring->ready = true; 3107 - r = amdgpu_ring_test_ring(ring); 3108 - if (r) 3109 - ring->ready = false; 3109 + amdgpu_ring_test_helper(ring); 3110 3110 } 3111 3111 3112 3112 return 0;
+11 -18
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
··· 1629 1629 return 0; 1630 1630 1631 1631 /* bail if the compute ring is not ready */ 1632 - if (!ring->ready) 1632 + if (!ring->sched.ready) 1633 1633 return 0; 1634 1634 1635 1635 tmp = RREG32(mmGB_EDC_MODE); ··· 4197 4197 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1); 4198 4198 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1); 4199 4199 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 4200 - adev->gfx.gfx_ring[i].ready = false; 4200 + adev->gfx.gfx_ring[i].sched.ready = false; 4201 4201 } 4202 4202 WREG32(mmCP_ME_CNTL, tmp); 4203 4203 udelay(50); ··· 4379 4379 /* start the ring */ 4380 4380 amdgpu_ring_clear_ring(ring); 4381 4381 gfx_v8_0_cp_gfx_start(adev); 4382 - ring->ready = true; 4383 - r = amdgpu_ring_test_ring(ring); 4384 - if (r) 4385 - ring->ready = false; 4382 + ring->sched.ready = true; 4383 + r = amdgpu_ring_test_helper(ring); 4386 4384 4387 4385 return r; 4388 4386 } ··· 4394 4396 } else { 4395 4397 WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); 4396 4398 for (i = 0; i < adev->gfx.num_compute_rings; i++) 4397 - adev->gfx.compute_ring[i].ready = false; 4398 - adev->gfx.kiq.ring.ready = false; 4399 + adev->gfx.compute_ring[i].sched.ready = false; 4400 + adev->gfx.kiq.ring.sched.ready = false; 4399 4401 } 4400 4402 udelay(50); 4401 4403 } ··· 4471 4473 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); 4472 4474 } 4473 4475 4474 - r = amdgpu_ring_test_ring(kiq_ring); 4475 - if (r) { 4476 + r = amdgpu_ring_test_helper(kiq_ring); 4477 + if (r) 4476 4478 DRM_ERROR("KCQ enable failed\n"); 4477 - kiq_ring->ready = false; 4478 - } 4479 4479 return r; 4480 4480 } 4481 4481 ··· 4777 4781 amdgpu_bo_kunmap(ring->mqd_obj); 4778 4782 ring->mqd_ptr = NULL; 4779 4783 amdgpu_bo_unreserve(ring->mqd_obj); 4780 - ring->ready = true; 4784 + ring->sched.ready = true; 4781 4785 return 0; 4782 4786 } 4783 4787 ··· 4816 4820 */ 4817 4821 for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) { 4818 4822 ring = 
&adev->gfx.compute_ring[i]; 4819 - ring->ready = true; 4820 - r = amdgpu_ring_test_ring(ring); 4821 - if (r) 4822 - ring->ready = false; 4823 + r = amdgpu_ring_test_helper(ring); 4823 4824 } 4824 4825 4825 4826 done: ··· 4892 4899 amdgpu_ring_write(kiq_ring, 0); 4893 4900 amdgpu_ring_write(kiq_ring, 0); 4894 4901 } 4895 - r = amdgpu_ring_test_ring(kiq_ring); 4902 + r = amdgpu_ring_test_helper(kiq_ring); 4896 4903 if (r) 4897 4904 DRM_ERROR("KCQ disable failed\n"); 4898 4905
+11 -19
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 2537 2537 tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1); 2538 2538 if (!enable) { 2539 2539 for (i = 0; i < adev->gfx.num_gfx_rings; i++) 2540 - adev->gfx.gfx_ring[i].ready = false; 2540 + adev->gfx.gfx_ring[i].sched.ready = false; 2541 2541 } 2542 2542 WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp); 2543 2543 udelay(50); ··· 2727 2727 2728 2728 /* start the ring */ 2729 2729 gfx_v9_0_cp_gfx_start(adev); 2730 - ring->ready = true; 2730 + ring->sched.ready = true; 2731 2731 2732 2732 return 0; 2733 2733 } ··· 2742 2742 WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 2743 2743 (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK)); 2744 2744 for (i = 0; i < adev->gfx.num_compute_rings; i++) 2745 - adev->gfx.compute_ring[i].ready = false; 2746 - adev->gfx.kiq.ring.ready = false; 2745 + adev->gfx.compute_ring[i].sched.ready = false; 2746 + adev->gfx.kiq.ring.sched.ready = false; 2747 2747 } 2748 2748 udelay(50); 2749 2749 } ··· 2866 2866 amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); 2867 2867 } 2868 2868 2869 - r = amdgpu_ring_test_ring(kiq_ring); 2870 - if (r) { 2869 + r = amdgpu_ring_test_helper(kiq_ring); 2870 + if (r) 2871 2871 DRM_ERROR("KCQ enable failed\n"); 2872 - kiq_ring->ready = false; 2873 - } 2874 2872 2875 2873 return r; 2876 2874 } ··· 3247 3249 amdgpu_bo_kunmap(ring->mqd_obj); 3248 3250 ring->mqd_ptr = NULL; 3249 3251 amdgpu_bo_unreserve(ring->mqd_obj); 3250 - ring->ready = true; 3252 + ring->sched.ready = true; 3251 3253 return 0; 3252 3254 } 3253 3255 ··· 3312 3314 return r; 3313 3315 3314 3316 ring = &adev->gfx.gfx_ring[0]; 3315 - r = amdgpu_ring_test_ring(ring); 3316 - if (r) { 3317 - ring->ready = false; 3317 + r = amdgpu_ring_test_helper(ring); 3318 + if (r) 3318 3319 return r; 3319 - } 3320 3320 3321 3321 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3322 3322 ring = &adev->gfx.compute_ring[i]; 3323 - 3324 - ring->ready = true; 3325 - r = amdgpu_ring_test_ring(ring); 3326 - if (r) 3327 - ring->ready = false; 3323 + 
amdgpu_ring_test_helper(ring); 3328 3324 } 3329 3325 3330 3326 gfx_v9_0_enable_gui_idle_interrupt(adev, true); ··· 3383 3391 amdgpu_ring_write(kiq_ring, 0); 3384 3392 amdgpu_ring_write(kiq_ring, 0); 3385 3393 } 3386 - r = amdgpu_ring_test_ring(kiq_ring); 3394 + r = amdgpu_ring_test_helper(kiq_ring); 3387 3395 if (r) 3388 3396 DRM_ERROR("KCQ disable failed\n"); 3389 3397
+1 -1
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
··· 381 381 struct amdgpu_vmhub *hub = &adev->vmhub[i]; 382 382 u32 tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type); 383 383 384 - if (adev->gfx.kiq.ring.ready && 384 + if (adev->gfx.kiq.ring.sched.ready && 385 385 (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) && 386 386 !adev->in_gpu_reset) { 387 387 r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng,
+5 -7
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
··· 349 349 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); 350 350 WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); 351 351 } 352 - sdma0->ready = false; 353 - sdma1->ready = false; 352 + sdma0->sched.ready = false; 353 + sdma1->sched.ready = false; 354 354 } 355 355 356 356 /** ··· 471 471 /* enable DMA IBs */ 472 472 WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); 473 473 474 - ring->ready = true; 474 + ring->sched.ready = true; 475 475 } 476 476 477 477 sdma_v2_4_enable(adev, true); 478 478 for (i = 0; i < adev->sdma.num_instances; i++) { 479 479 ring = &adev->sdma.instance[i].ring; 480 - r = amdgpu_ring_test_ring(ring); 481 - if (r) { 482 - ring->ready = false; 480 + r = amdgpu_ring_test_helper(ring); 481 + if (r) 483 482 return r; 484 - } 485 483 486 484 if (adev->mman.buffer_funcs_ring == ring) 487 485 amdgpu_ttm_set_buffer_funcs_status(adev, true);
+5 -7
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
··· 523 523 ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0); 524 524 WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); 525 525 } 526 - sdma0->ready = false; 527 - sdma1->ready = false; 526 + sdma0->sched.ready = false; 527 + sdma1->sched.ready = false; 528 528 } 529 529 530 530 /** ··· 739 739 /* enable DMA IBs */ 740 740 WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); 741 741 742 - ring->ready = true; 742 + ring->sched.ready = true; 743 743 } 744 744 745 745 /* unhalt the MEs */ ··· 749 749 750 750 for (i = 0; i < adev->sdma.num_instances; i++) { 751 751 ring = &adev->sdma.instance[i].ring; 752 - r = amdgpu_ring_test_ring(ring); 753 - if (r) { 754 - ring->ready = false; 752 + r = amdgpu_ring_test_helper(ring); 753 + if (r) 755 754 return r; 756 - } 757 755 758 756 if (adev->mman.buffer_funcs_ring == ring) 759 757 amdgpu_ttm_set_buffer_funcs_status(adev, true);
+10 -14
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
··· 634 634 WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl); 635 635 } 636 636 637 - sdma0->ready = false; 638 - sdma1->ready = false; 637 + sdma0->sched.ready = false; 638 + sdma1->sched.ready = false; 639 639 } 640 640 641 641 /** ··· 675 675 WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl); 676 676 } 677 677 678 - sdma0->ready = false; 679 - sdma1->ready = false; 678 + sdma0->sched.ready = false; 679 + sdma1->sched.ready = false; 680 680 } 681 681 682 682 /** ··· 863 863 /* enable DMA IBs */ 864 864 WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl); 865 865 866 - ring->ready = true; 866 + ring->sched.ready = true; 867 867 } 868 868 869 869 /** ··· 956 956 /* enable DMA IBs */ 957 957 WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl); 958 958 959 - ring->ready = true; 959 + ring->sched.ready = true; 960 960 } 961 961 962 962 static void ··· 1144 1144 for (i = 0; i < adev->sdma.num_instances; i++) { 1145 1145 ring = &adev->sdma.instance[i].ring; 1146 1146 1147 - r = amdgpu_ring_test_ring(ring); 1148 - if (r) { 1149 - ring->ready = false; 1147 + r = amdgpu_ring_test_helper(ring); 1148 + if (r) 1150 1149 return r; 1151 - } 1152 1150 1153 1151 if (adev->sdma.has_page_queue) { 1154 1152 struct amdgpu_ring *page = &adev->sdma.instance[i].page; 1155 1153 1156 - r = amdgpu_ring_test_ring(page); 1157 - if (r) { 1158 - page->ready = false; 1154 + r = amdgpu_ring_test_helper(page); 1155 + if (r) 1159 1156 return r; 1160 - } 1161 1157 } 1162 1158 1163 1159 if (adev->mman.buffer_funcs_ring == ring)
+4 -6
drivers/gpu/drm/amd/amdgpu/si_dma.c
··· 122 122 123 123 if (adev->mman.buffer_funcs_ring == ring) 124 124 amdgpu_ttm_set_buffer_funcs_status(adev, false); 125 - ring->ready = false; 125 + ring->sched.ready = false; 126 126 } 127 127 } 128 128 ··· 175 175 WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2); 176 176 WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE); 177 177 178 - ring->ready = true; 178 + ring->sched.ready = true; 179 179 180 - r = amdgpu_ring_test_ring(ring); 181 - if (r) { 182 - ring->ready = false; 180 + r = amdgpu_ring_test_helper(ring); 181 + if (r) 183 182 return r; 184 - } 185 183 186 184 if (adev->mman.buffer_funcs_ring == ring) 187 185 amdgpu_ttm_set_buffer_funcs_status(adev, true);
+3 -6
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
··· 162 162 uvd_v4_2_enable_mgcg(adev, true); 163 163 amdgpu_asic_set_uvd_clocks(adev, 10000, 10000); 164 164 165 - ring->ready = true; 166 - r = amdgpu_ring_test_ring(ring); 167 - if (r) { 168 - ring->ready = false; 165 + r = amdgpu_ring_test_helper(ring); 166 + if (r) 169 167 goto done; 170 - } 171 168 172 169 r = amdgpu_ring_alloc(ring, 10); 173 170 if (r) { ··· 215 218 if (RREG32(mmUVD_STATUS) != 0) 216 219 uvd_v4_2_stop(adev); 217 220 218 - ring->ready = false; 221 + ring->sched.ready = false; 219 222 220 223 return 0; 221 224 }
+3 -6
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
··· 158 158 uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); 159 159 uvd_v5_0_enable_mgcg(adev, true); 160 160 161 - ring->ready = true; 162 - r = amdgpu_ring_test_ring(ring); 163 - if (r) { 164 - ring->ready = false; 161 + r = amdgpu_ring_test_helper(ring); 162 + if (r) 165 163 goto done; 166 - } 167 164 168 165 r = amdgpu_ring_alloc(ring, 10); 169 166 if (r) { ··· 212 215 if (RREG32(mmUVD_STATUS) != 0) 213 216 uvd_v5_0_stop(adev); 214 217 215 - ring->ready = false; 218 + ring->sched.ready = false; 216 219 217 220 return 0; 218 221 }
+5 -11
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
··· 476 476 uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE); 477 477 uvd_v6_0_enable_mgcg(adev, true); 478 478 479 - ring->ready = true; 480 - r = amdgpu_ring_test_ring(ring); 481 - if (r) { 482 - ring->ready = false; 479 + r = amdgpu_ring_test_helper(ring); 480 + if (r) 483 481 goto done; 484 - } 485 482 486 483 r = amdgpu_ring_alloc(ring, 10); 487 484 if (r) { ··· 510 513 if (uvd_v6_0_enc_support(adev)) { 511 514 for (i = 0; i < adev->uvd.num_enc_rings; ++i) { 512 515 ring = &adev->uvd.inst->ring_enc[i]; 513 - ring->ready = true; 514 - r = amdgpu_ring_test_ring(ring); 515 - if (r) { 516 - ring->ready = false; 516 + r = amdgpu_ring_test_helper(ring); 517 + if (r) 517 518 goto done; 518 - } 519 519 } 520 520 } 521 521 ··· 542 548 if (RREG32(mmUVD_STATUS) != 0) 543 549 uvd_v6_0_stop(adev); 544 550 545 - ring->ready = false; 551 + ring->sched.ready = false; 546 552 547 553 return 0; 548 554 }
+5 -11
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
··· 540 540 ring = &adev->uvd.inst[j].ring; 541 541 542 542 if (!amdgpu_sriov_vf(adev)) { 543 - ring->ready = true; 544 - r = amdgpu_ring_test_ring(ring); 545 - if (r) { 546 - ring->ready = false; 543 + r = amdgpu_ring_test_helper(ring); 544 + if (r) 547 545 goto done; 548 - } 549 546 550 547 r = amdgpu_ring_alloc(ring, 10); 551 548 if (r) { ··· 579 582 580 583 for (i = 0; i < adev->uvd.num_enc_rings; ++i) { 581 584 ring = &adev->uvd.inst[j].ring_enc[i]; 582 - ring->ready = true; 583 - r = amdgpu_ring_test_ring(ring); 584 - if (r) { 585 - ring->ready = false; 585 + r = amdgpu_ring_test_helper(ring); 586 + if (r) 586 587 goto done; 587 - } 588 588 } 589 589 } 590 590 done: ··· 613 619 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { 614 620 if (adev->uvd.harvest_config & (1 << i)) 615 621 continue; 616 - adev->uvd.inst[i].ring.ready = false; 622 + adev->uvd.inst[i].ring.sched.ready = false; 617 623 } 618 624 619 625 return 0;
+1 -5
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
··· 463 463 464 464 amdgpu_asic_set_vce_clocks(adev, 10000, 10000); 465 465 vce_v2_0_enable_mgcg(adev, true, false); 466 - for (i = 0; i < adev->vce.num_rings; i++) 467 - adev->vce.ring[i].ready = false; 468 466 469 467 for (i = 0; i < adev->vce.num_rings; i++) { 470 - r = amdgpu_ring_test_ring(&adev->vce.ring[i]); 468 + r = amdgpu_ring_test_helper(&adev->vce.ring[i]); 471 469 if (r) 472 470 return r; 473 - else 474 - adev->vce.ring[i].ready = true; 475 471 } 476 472 477 473 DRM_INFO("VCE initialized successfully.\n");
+1 -6
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
··· 474 474 475 475 amdgpu_asic_set_vce_clocks(adev, 10000, 10000); 476 476 477 - for (i = 0; i < adev->vce.num_rings; i++) 478 - adev->vce.ring[i].ready = false; 479 - 480 477 for (i = 0; i < adev->vce.num_rings; i++) { 481 - r = amdgpu_ring_test_ring(&adev->vce.ring[i]); 478 + r = amdgpu_ring_test_helper(&adev->vce.ring[i]); 482 479 if (r) 483 480 return r; 484 - else 485 - adev->vce.ring[i].ready = true; 486 481 } 487 482 488 483 DRM_INFO("VCE initialized successfully.\n");
+2 -7
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
··· 519 519 if (r) 520 520 return r; 521 521 522 - for (i = 0; i < adev->vce.num_rings; i++) 523 - adev->vce.ring[i].ready = false; 524 - 525 522 for (i = 0; i < adev->vce.num_rings; i++) { 526 - r = amdgpu_ring_test_ring(&adev->vce.ring[i]); 523 + r = amdgpu_ring_test_helper(&adev->vce.ring[i]); 527 524 if (r) 528 525 return r; 529 - else 530 - adev->vce.ring[i].ready = true; 531 526 } 532 527 533 528 DRM_INFO("VCE initialized successfully.\n"); ··· 544 549 } 545 550 546 551 for (i = 0; i < adev->vce.num_rings; i++) 547 - adev->vce.ring[i].ready = false; 552 + adev->vce.ring[i].sched.ready = false; 548 553 549 554 return 0; 550 555 }
+8 -16
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
··· 176 176 struct amdgpu_ring *ring = &adev->vcn.ring_dec; 177 177 int i, r; 178 178 179 - ring->ready = true; 180 - r = amdgpu_ring_test_ring(ring); 181 - if (r) { 182 - ring->ready = false; 179 + r = amdgpu_ring_test_helper(ring); 180 + if (r) 183 181 goto done; 184 - } 185 182 186 183 for (i = 0; i < adev->vcn.num_enc_rings; ++i) { 187 184 ring = &adev->vcn.ring_enc[i]; 188 - ring->ready = true; 189 - r = amdgpu_ring_test_ring(ring); 190 - if (r) { 191 - ring->ready = false; 185 + ring->sched.ready = true; 186 + r = amdgpu_ring_test_helper(ring); 187 + if (r) 192 188 goto done; 193 - } 194 189 } 195 190 196 191 ring = &adev->vcn.ring_jpeg; 197 - ring->ready = true; 198 - r = amdgpu_ring_test_ring(ring); 199 - if (r) { 200 - ring->ready = false; 192 + r = amdgpu_ring_test_helper(ring); 193 + if (r) 201 194 goto done; 202 - } 203 195 204 196 done: 205 197 if (!r) ··· 216 224 if (RREG32_SOC15(VCN, 0, mmUVD_STATUS)) 217 225 vcn_v1_0_stop(adev); 218 226 219 - ring->ready = false; 227 + ring->sched.ready = false; 220 228 221 229 return 0; 222 230 }