Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/radeon: Only flush HDP cache for indirect buffers from userspace

It isn't necessary for command streams generated by the kernel (at least
not while we aren't storing ring or indirect buffers in VRAM).

Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Michel Dänzer, committed by Alex Deucher
1538a9e0 701e1e78

+73 -66
+4 -4
drivers/gpu/drm/radeon/cik.c
··· 3801 3801 radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1)); 3802 3802 radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2)); 3803 3803 radeon_ring_write(ring, 0xDEADBEEF); 3804 - radeon_ring_unlock_commit(rdev, ring); 3804 + radeon_ring_unlock_commit(rdev, ring, false); 3805 3805 3806 3806 for (i = 0; i < rdev->usec_timeout; i++) { 3807 3807 tmp = RREG32(scratch); ··· 4004 4004 return r; 4005 4005 } 4006 4006 4007 - radeon_ring_unlock_commit(rdev, ring); 4007 + radeon_ring_unlock_commit(rdev, ring, false); 4008 4008 radeon_semaphore_free(rdev, &sem, *fence); 4009 4009 4010 4010 return r; ··· 4103 4103 ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2); 4104 4104 ib.ptr[2] = 0xDEADBEEF; 4105 4105 ib.length_dw = 3; 4106 - r = radeon_ib_schedule(rdev, &ib, NULL); 4106 + r = radeon_ib_schedule(rdev, &ib, NULL, false); 4107 4107 if (r) { 4108 4108 radeon_scratch_free(rdev, scratch); 4109 4109 radeon_ib_free(rdev, &ib); ··· 4324 4324 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 4325 4325 radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ 4326 4326 4327 - radeon_ring_unlock_commit(rdev, ring); 4327 + radeon_ring_unlock_commit(rdev, ring, false); 4328 4328 4329 4329 return 0; 4330 4330 }
+3 -3
drivers/gpu/drm/radeon/cik_sdma.c
··· 596 596 return r; 597 597 } 598 598 599 - radeon_ring_unlock_commit(rdev, ring); 599 + radeon_ring_unlock_commit(rdev, ring, false); 600 600 radeon_semaphore_free(rdev, &sem, *fence); 601 601 602 602 return r; ··· 638 638 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr)); 639 639 radeon_ring_write(ring, 1); /* number of DWs to follow */ 640 640 radeon_ring_write(ring, 0xDEADBEEF); 641 - radeon_ring_unlock_commit(rdev, ring); 641 + radeon_ring_unlock_commit(rdev, ring, false); 642 642 643 643 for (i = 0; i < rdev->usec_timeout; i++) { 644 644 tmp = readl(ptr); ··· 695 695 ib.ptr[4] = 0xDEADBEEF; 696 696 ib.length_dw = 5; 697 697 698 - r = radeon_ib_schedule(rdev, &ib, NULL); 698 + r = radeon_ib_schedule(rdev, &ib, NULL, false); 699 699 if (r) { 700 700 radeon_ib_free(rdev, &ib); 701 701 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+2 -2
drivers/gpu/drm/radeon/evergreen.c
··· 2869 2869 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); 2870 2870 radeon_ring_write(ring, 0); 2871 2871 radeon_ring_write(ring, 0); 2872 - radeon_ring_unlock_commit(rdev, ring); 2872 + radeon_ring_unlock_commit(rdev, ring, false); 2873 2873 2874 2874 cp_me = 0xff; 2875 2875 WREG32(CP_ME_CNTL, cp_me); ··· 2912 2912 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 2913 2913 radeon_ring_write(ring, 0x00000010); /* */ 2914 2914 2915 - radeon_ring_unlock_commit(rdev, ring); 2915 + radeon_ring_unlock_commit(rdev, ring, false); 2916 2916 2917 2917 return 0; 2918 2918 }
+1 -1
drivers/gpu/drm/radeon/evergreen_dma.c
··· 155 155 return r; 156 156 } 157 157 158 - radeon_ring_unlock_commit(rdev, ring); 158 + radeon_ring_unlock_commit(rdev, ring, false); 159 159 radeon_semaphore_free(rdev, &sem, *fence); 160 160 161 161 return r;
+2 -2
drivers/gpu/drm/radeon/ni.c
··· 1505 1505 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); 1506 1506 radeon_ring_write(ring, 0); 1507 1507 radeon_ring_write(ring, 0); 1508 - radeon_ring_unlock_commit(rdev, ring); 1508 + radeon_ring_unlock_commit(rdev, ring, false); 1509 1509 1510 1510 cayman_cp_enable(rdev, true); 1511 1511 ··· 1547 1547 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 1548 1548 radeon_ring_write(ring, 0x00000010); /* */ 1549 1549 1550 - radeon_ring_unlock_commit(rdev, ring); 1550 + radeon_ring_unlock_commit(rdev, ring, false); 1551 1551 1552 1552 /* XXX init other rings */ 1553 1553
+4 -4
drivers/gpu/drm/radeon/r100.c
··· 925 925 if (fence) { 926 926 r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); 927 927 } 928 - radeon_ring_unlock_commit(rdev, ring); 928 + radeon_ring_unlock_commit(rdev, ring, false); 929 929 return r; 930 930 } 931 931 ··· 958 958 RADEON_ISYNC_ANY3D_IDLE2D | 959 959 RADEON_ISYNC_WAIT_IDLEGUI | 960 960 RADEON_ISYNC_CPSCRATCH_IDLEGUI); 961 - radeon_ring_unlock_commit(rdev, ring); 961 + radeon_ring_unlock_commit(rdev, ring, false); 962 962 } 963 963 964 964 ··· 3638 3638 } 3639 3639 radeon_ring_write(ring, PACKET0(scratch, 0)); 3640 3640 radeon_ring_write(ring, 0xDEADBEEF); 3641 - radeon_ring_unlock_commit(rdev, ring); 3641 + radeon_ring_unlock_commit(rdev, ring, false); 3642 3642 for (i = 0; i < rdev->usec_timeout; i++) { 3643 3643 tmp = RREG32(scratch); 3644 3644 if (tmp == 0xDEADBEEF) { ··· 3700 3700 ib.ptr[6] = PACKET2(0); 3701 3701 ib.ptr[7] = PACKET2(0); 3702 3702 ib.length_dw = 8; 3703 - r = radeon_ib_schedule(rdev, &ib, NULL); 3703 + r = radeon_ib_schedule(rdev, &ib, NULL, false); 3704 3704 if (r) { 3705 3705 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 3706 3706 goto free_ib;
+1 -1
drivers/gpu/drm/radeon/r200.c
··· 121 121 if (fence) { 122 122 r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX); 123 123 } 124 - radeon_ring_unlock_commit(rdev, ring); 124 + radeon_ring_unlock_commit(rdev, ring, false); 125 125 return r; 126 126 } 127 127
+1 -1
drivers/gpu/drm/radeon/r300.c
··· 295 295 radeon_ring_write(ring, 296 296 R300_GEOMETRY_ROUND_NEAREST | 297 297 R300_COLOR_ROUND_NEAREST); 298 - radeon_ring_unlock_commit(rdev, ring); 298 + radeon_ring_unlock_commit(rdev, ring, false); 299 299 } 300 300 301 301 static void r300_errata(struct radeon_device *rdev)
+2 -2
drivers/gpu/drm/radeon/r420.c
··· 219 219 radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1)); 220 220 radeon_ring_write(ring, rdev->config.r300.resync_scratch); 221 221 radeon_ring_write(ring, 0xDEADBEEF); 222 - radeon_ring_unlock_commit(rdev, ring); 222 + radeon_ring_unlock_commit(rdev, ring, false); 223 223 } 224 224 225 225 static void r420_cp_errata_fini(struct radeon_device *rdev) ··· 232 232 radeon_ring_lock(rdev, ring, 8); 233 233 radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); 234 234 radeon_ring_write(ring, R300_RB3D_DC_FINISH); 235 - radeon_ring_unlock_commit(rdev, ring); 235 + radeon_ring_unlock_commit(rdev, ring, false); 236 236 radeon_scratch_free(rdev, rdev->config.r300.resync_scratch); 237 237 } 238 238
+4 -4
drivers/gpu/drm/radeon/r600.c
··· 2547 2547 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1)); 2548 2548 radeon_ring_write(ring, 0); 2549 2549 radeon_ring_write(ring, 0); 2550 - radeon_ring_unlock_commit(rdev, ring); 2550 + radeon_ring_unlock_commit(rdev, ring, false); 2551 2551 2552 2552 cp_me = 0xff; 2553 2553 WREG32(R_0086D8_CP_ME_CNTL, cp_me); ··· 2683 2683 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2684 2684 radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 2685 2685 radeon_ring_write(ring, 0xDEADBEEF); 2686 - radeon_ring_unlock_commit(rdev, ring); 2686 + radeon_ring_unlock_commit(rdev, ring, false); 2687 2687 for (i = 0; i < rdev->usec_timeout; i++) { 2688 2688 tmp = RREG32(scratch); 2689 2689 if (tmp == 0xDEADBEEF) ··· 2845 2845 return r; 2846 2846 } 2847 2847 2848 - radeon_ring_unlock_commit(rdev, ring); 2848 + radeon_ring_unlock_commit(rdev, ring, false); 2849 2849 radeon_semaphore_free(rdev, &sem, *fence); 2850 2850 2851 2851 return r; ··· 3165 3165 ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); 3166 3166 ib.ptr[2] = 0xDEADBEEF; 3167 3167 ib.length_dw = 3; 3168 - r = radeon_ib_schedule(rdev, &ib, NULL); 3168 + r = radeon_ib_schedule(rdev, &ib, NULL, false); 3169 3169 if (r) { 3170 3170 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 3171 3171 goto free_ib;
+3 -3
drivers/gpu/drm/radeon/r600_dma.c
··· 261 261 radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc); 262 262 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff); 263 263 radeon_ring_write(ring, 0xDEADBEEF); 264 - radeon_ring_unlock_commit(rdev, ring); 264 + radeon_ring_unlock_commit(rdev, ring, false); 265 265 266 266 for (i = 0; i < rdev->usec_timeout; i++) { 267 267 tmp = readl(ptr); ··· 368 368 ib.ptr[3] = 0xDEADBEEF; 369 369 ib.length_dw = 4; 370 370 371 - r = radeon_ib_schedule(rdev, &ib, NULL); 371 + r = radeon_ib_schedule(rdev, &ib, NULL, false); 372 372 if (r) { 373 373 radeon_ib_free(rdev, &ib); 374 374 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); ··· 493 493 return r; 494 494 } 495 495 496 - radeon_ring_unlock_commit(rdev, ring); 496 + radeon_ring_unlock_commit(rdev, ring, false); 497 497 radeon_semaphore_free(rdev, &sem, *fence); 498 498 499 499 return r;
+5 -3
drivers/gpu/drm/radeon/radeon.h
··· 968 968 unsigned size); 969 969 void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib); 970 970 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, 971 - struct radeon_ib *const_ib); 971 + struct radeon_ib *const_ib, bool hdp_flush); 972 972 int radeon_ib_pool_init(struct radeon_device *rdev); 973 973 void radeon_ib_pool_fini(struct radeon_device *rdev); 974 974 int radeon_ib_ring_tests(struct radeon_device *rdev); ··· 978 978 void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp); 979 979 int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); 980 980 int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); 981 - void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp); 982 - void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp); 981 + void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp, 982 + bool hdp_flush); 983 + void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp, 984 + bool hdp_flush); 983 985 void radeon_ring_undo(struct radeon_ring *ring); 984 986 void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp); 985 987 int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+3 -3
drivers/gpu/drm/radeon/radeon_cs.c
··· 451 451 radeon_vce_note_usage(rdev); 452 452 453 453 radeon_cs_sync_rings(parser); 454 - r = radeon_ib_schedule(rdev, &parser->ib, NULL); 454 + r = radeon_ib_schedule(rdev, &parser->ib, NULL, true); 455 455 if (r) { 456 456 DRM_ERROR("Failed to schedule IB !\n"); 457 457 } ··· 542 542 543 543 if ((rdev->family >= CHIP_TAHITI) && 544 544 (parser->chunk_const_ib_idx != -1)) { 545 - r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib); 545 + r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true); 546 546 } else { 547 - r = radeon_ib_schedule(rdev, &parser->ib, NULL); 547 + r = radeon_ib_schedule(rdev, &parser->ib, NULL, true); 548 548 } 549 549 550 550 out:
+3 -2
drivers/gpu/drm/radeon/radeon_ib.c
··· 107 107 * @rdev: radeon_device pointer 108 108 * @ib: IB object to schedule 109 109 * @const_ib: Const IB to schedule (SI only) 110 + * @hdp_flush: Whether or not to perform an HDP cache flush 110 111 * 111 112 * Schedule an IB on the associated ring (all asics). 112 113 * Returns 0 on success, error on failure. ··· 123 122 * to SI there was just a DE IB. 124 123 */ 125 124 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, 126 - struct radeon_ib *const_ib) 125 + struct radeon_ib *const_ib, bool hdp_flush) 127 126 { 128 127 struct radeon_ring *ring = &rdev->ring[ib->ring]; 129 128 int r = 0; ··· 177 176 if (ib->vm) 178 177 radeon_vm_fence(rdev, ib->vm, ib->fence); 179 178 180 - radeon_ring_unlock_commit(rdev, ring); 179 + radeon_ring_unlock_commit(rdev, ring, hdp_flush); 181 180 return 0; 182 181 } 183 182
+10 -6
drivers/gpu/drm/radeon/radeon_ring.c
··· 177 177 * 178 178 * @rdev: radeon_device pointer 179 179 * @ring: radeon_ring structure holding ring information 180 + * @hdp_flush: Whether or not to perform an HDP cache flush 180 181 * 181 182 * Update the wptr (write pointer) to tell the GPU to 182 183 * execute new commands on the ring buffer (all asics). 183 184 */ 184 - void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring) 185 + void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring, 186 + bool hdp_flush) 185 187 { 186 188 /* If we are emitting the HDP flush via the ring buffer, we need to 187 189 * do it before padding. 188 190 */ 189 - if (rdev->asic->ring[ring->idx]->hdp_flush) 191 + if (hdp_flush && rdev->asic->ring[ring->idx]->hdp_flush) 190 192 rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring); 191 193 /* We pad to match fetch size */ 192 194 while (ring->wptr & ring->align_mask) { ··· 198 196 /* If we are emitting the HDP flush via MMIO, we need to do it after 199 197 * all CPU writes to VRAM finished. 200 198 */ 201 - if (rdev->asic->mmio_hdp_flush) 199 + if (hdp_flush && rdev->asic->mmio_hdp_flush) 202 200 rdev->asic->mmio_hdp_flush(rdev); 203 201 radeon_ring_set_wptr(rdev, ring); 204 202 } ··· 209 207 * 210 208 * @rdev: radeon_device pointer 211 209 * @ring: radeon_ring structure holding ring information 210 + * @hdp_flush: Whether or not to perform an HDP cache flush 212 211 * 213 212 * Call radeon_ring_commit() then unlock the ring (all asics). 
214 213 */ 215 - void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring) 214 + void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring, 215 + bool hdp_flush) 216 216 { 217 - radeon_ring_commit(rdev, ring); 217 + radeon_ring_commit(rdev, ring, hdp_flush); 218 218 mutex_unlock(&rdev->ring_lock); 219 219 } 220 220 ··· 376 372 radeon_ring_write(ring, data[i]); 377 373 } 378 374 379 - radeon_ring_unlock_commit(rdev, ring); 375 + radeon_ring_unlock_commit(rdev, ring, false); 380 376 kfree(data); 381 377 return 0; 382 378 }
+1 -1
drivers/gpu/drm/radeon/radeon_semaphore.c
··· 179 179 continue; 180 180 } 181 181 182 - radeon_ring_commit(rdev, &rdev->ring[i]); 182 + radeon_ring_commit(rdev, &rdev->ring[i], false); 183 183 radeon_fence_note_sync(fence, ring); 184 184 185 185 semaphore->gpu_addr += 8;
+9 -9
drivers/gpu/drm/radeon/radeon_test.c
··· 288 288 return r; 289 289 } 290 290 radeon_fence_emit(rdev, fence, ring->idx); 291 - radeon_ring_unlock_commit(rdev, ring); 291 + radeon_ring_unlock_commit(rdev, ring, false); 292 292 } 293 293 return 0; 294 294 } ··· 313 313 goto out_cleanup; 314 314 } 315 315 radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); 316 - radeon_ring_unlock_commit(rdev, ringA); 316 + radeon_ring_unlock_commit(rdev, ringA, false); 317 317 318 318 r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1); 319 319 if (r) ··· 325 325 goto out_cleanup; 326 326 } 327 327 radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); 328 - radeon_ring_unlock_commit(rdev, ringA); 328 + radeon_ring_unlock_commit(rdev, ringA, false); 329 329 330 330 r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2); 331 331 if (r) ··· 344 344 goto out_cleanup; 345 345 } 346 346 radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore); 347 - radeon_ring_unlock_commit(rdev, ringB); 347 + radeon_ring_unlock_commit(rdev, ringB, false); 348 348 349 349 r = radeon_fence_wait(fence1, false); 350 350 if (r) { ··· 365 365 goto out_cleanup; 366 366 } 367 367 radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore); 368 - radeon_ring_unlock_commit(rdev, ringB); 368 + radeon_ring_unlock_commit(rdev, ringB, false); 369 369 370 370 r = radeon_fence_wait(fence2, false); 371 371 if (r) { ··· 408 408 goto out_cleanup; 409 409 } 410 410 radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); 411 - radeon_ring_unlock_commit(rdev, ringA); 411 + radeon_ring_unlock_commit(rdev, ringA, false); 412 412 413 413 r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA); 414 414 if (r) ··· 420 420 goto out_cleanup; 421 421 } 422 422 radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore); 423 - radeon_ring_unlock_commit(rdev, ringB); 423 + radeon_ring_unlock_commit(rdev, ringB, false); 424 424 r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB); 425 425 if (r) 426 426 goto out_cleanup; ··· 442 442 goto 
out_cleanup; 443 443 } 444 444 radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore); 445 - radeon_ring_unlock_commit(rdev, ringC); 445 + radeon_ring_unlock_commit(rdev, ringC, false); 446 446 447 447 for (i = 0; i < 30; ++i) { 448 448 mdelay(100); ··· 468 468 goto out_cleanup; 469 469 } 470 470 radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore); 471 - radeon_ring_unlock_commit(rdev, ringC); 471 + radeon_ring_unlock_commit(rdev, ringC, false); 472 472 473 473 mdelay(1000); 474 474
+1 -1
drivers/gpu/drm/radeon/radeon_uvd.c
··· 646 646 ib.ptr[i] = PACKET2(0); 647 647 ib.length_dw = 16; 648 648 649 - r = radeon_ib_schedule(rdev, &ib, NULL); 649 + r = radeon_ib_schedule(rdev, &ib, NULL, false); 650 650 if (r) 651 651 goto err; 652 652 ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
+3 -3
drivers/gpu/drm/radeon/radeon_vce.c
··· 368 368 for (i = ib.length_dw; i < ib_size_dw; ++i) 369 369 ib.ptr[i] = 0x0; 370 370 371 - r = radeon_ib_schedule(rdev, &ib, NULL); 371 + r = radeon_ib_schedule(rdev, &ib, NULL, false); 372 372 if (r) { 373 373 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 374 374 } ··· 425 425 for (i = ib.length_dw; i < ib_size_dw; ++i) 426 426 ib.ptr[i] = 0x0; 427 427 428 - r = radeon_ib_schedule(rdev, &ib, NULL); 428 + r = radeon_ib_schedule(rdev, &ib, NULL, false); 429 429 if (r) { 430 430 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 431 431 } ··· 715 715 return r; 716 716 } 717 717 radeon_ring_write(ring, VCE_CMD_END); 718 - radeon_ring_unlock_commit(rdev, ring); 718 + radeon_ring_unlock_commit(rdev, ring, false); 719 719 720 720 for (i = 0; i < rdev->usec_timeout; i++) { 721 721 if (vce_v1_0_get_rptr(rdev, ring) != rptr)
+3 -3
drivers/gpu/drm/radeon/radeon_vm.c
··· 422 422 radeon_asic_vm_pad_ib(rdev, &ib); 423 423 WARN_ON(ib.length_dw > 64); 424 424 425 - r = radeon_ib_schedule(rdev, &ib, NULL); 425 + r = radeon_ib_schedule(rdev, &ib, NULL, false); 426 426 if (r) 427 427 goto error; 428 428 ··· 699 699 radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj); 700 700 radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use); 701 701 WARN_ON(ib.length_dw > ndw); 702 - r = radeon_ib_schedule(rdev, &ib, NULL); 702 + r = radeon_ib_schedule(rdev, &ib, NULL, false); 703 703 if (r) { 704 704 radeon_ib_free(rdev, &ib); 705 705 return r; ··· 963 963 WARN_ON(ib.length_dw > ndw); 964 964 965 965 radeon_semaphore_sync_to(ib.semaphore, vm->fence); 966 - r = radeon_ib_schedule(rdev, &ib, NULL); 966 + r = radeon_ib_schedule(rdev, &ib, NULL, false); 967 967 if (r) { 968 968 radeon_ib_free(rdev, &ib); 969 969 return r;
+1 -1
drivers/gpu/drm/radeon/rv515.c
··· 124 124 radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST); 125 125 radeon_ring_write(ring, PACKET0(0x20C8, 0)); 126 126 radeon_ring_write(ring, 0); 127 - radeon_ring_unlock_commit(rdev, ring); 127 + radeon_ring_unlock_commit(rdev, ring, false); 128 128 } 129 129 130 130 int rv515_mc_wait_for_idle(struct radeon_device *rdev)
+1 -1
drivers/gpu/drm/radeon/rv770_dma.c
··· 90 90 return r; 91 91 } 92 92 93 - radeon_ring_unlock_commit(rdev, ring); 93 + radeon_ring_unlock_commit(rdev, ring, false); 94 94 radeon_semaphore_free(rdev, &sem, *fence); 95 95 96 96 return r;
+3 -3
drivers/gpu/drm/radeon/si.c
··· 3541 3541 radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); 3542 3542 radeon_ring_write(ring, 0xc000); 3543 3543 radeon_ring_write(ring, 0xe000); 3544 - radeon_ring_unlock_commit(rdev, ring); 3544 + radeon_ring_unlock_commit(rdev, ring, false); 3545 3545 3546 3546 si_cp_enable(rdev, true); 3547 3547 ··· 3570 3570 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 3571 3571 radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ 3572 3572 3573 - radeon_ring_unlock_commit(rdev, ring); 3573 + radeon_ring_unlock_commit(rdev, ring, false); 3574 3574 3575 3575 for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) { 3576 3576 ring = &rdev->ring[i]; ··· 3580 3580 radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0)); 3581 3581 radeon_ring_write(ring, 0); 3582 3582 3583 - radeon_ring_unlock_commit(rdev, ring); 3583 + radeon_ring_unlock_commit(rdev, ring, false); 3584 3584 } 3585 3585 3586 3586 return 0;
+1 -1
drivers/gpu/drm/radeon/si_dma.c
··· 275 275 return r; 276 276 } 277 277 278 - radeon_ring_unlock_commit(rdev, ring); 278 + radeon_ring_unlock_commit(rdev, ring, false); 279 279 radeon_semaphore_free(rdev, &sem, *fence); 280 280 281 281 return r;
+2 -2
drivers/gpu/drm/radeon/uvd_v1_0.c
··· 124 124 radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0)); 125 125 radeon_ring_write(ring, 3); 126 126 127 - radeon_ring_unlock_commit(rdev, ring); 127 + radeon_ring_unlock_commit(rdev, ring, false); 128 128 129 129 done: 130 130 /* lower clocks again */ ··· 331 331 } 332 332 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0)); 333 333 radeon_ring_write(ring, 0xDEADBEEF); 334 - radeon_ring_unlock_commit(rdev, ring); 334 + radeon_ring_unlock_commit(rdev, ring, false); 335 335 for (i = 0; i < rdev->usec_timeout; i++) { 336 336 tmp = RREG32(UVD_CONTEXT_ID); 337 337 if (tmp == 0xDEADBEEF)