Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/radeon: cope with foreign fences inside the reservation object

Not the whole world is a radeon! :-)

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Maarten Lankhorst and committed by
Alex Deucher
392a250b a0e84764

+66 -25
+1 -1
drivers/gpu/drm/radeon/cik.c
@@ -3993,7 +3993,7 @@
 		return ERR_PTR(r);
 	}
 
-	radeon_semaphore_sync_resv(sem, resv, false);
+	radeon_semaphore_sync_resv(rdev, sem, resv, false);
 	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
+1 -1
drivers/gpu/drm/radeon/cik_sdma.c
@@ -571,7 +571,7 @@
 		return ERR_PTR(r);
 	}
 
-	radeon_semaphore_sync_resv(sem, resv, false);
+	radeon_semaphore_sync_resv(rdev, sem, resv, false);
 	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
+1 -1
drivers/gpu/drm/radeon/evergreen_dma.c
@@ -133,7 +133,7 @@
 		return ERR_PTR(r);
 	}
 
-	radeon_semaphore_sync_resv(sem, resv, false);
+	radeon_semaphore_sync_resv(rdev, sem, resv, false);
 	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
+1 -1
drivers/gpu/drm/radeon/r600.c
@@ -2912,7 +2912,7 @@
 		return ERR_PTR(r);
 	}
 
-	radeon_semaphore_sync_resv(sem, resv, false);
+	radeon_semaphore_sync_resv(rdev, sem, resv, false);
 	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+1 -1
drivers/gpu/drm/radeon/r600_dma.c
@@ -470,7 +470,7 @@
 		return ERR_PTR(r);
 	}
 
-	radeon_semaphore_sync_resv(sem, resv, false);
+	radeon_semaphore_sync_resv(rdev, sem, resv, false);
 	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
+4 -3
drivers/gpu/drm/radeon/radeon.h
@@ -589,9 +589,10 @@
 			  struct radeon_semaphore *semaphore);
 void radeon_semaphore_sync_fence(struct radeon_semaphore *semaphore,
 				 struct radeon_fence *fence);
-void radeon_semaphore_sync_resv(struct radeon_semaphore *semaphore,
-				struct reservation_object *resv,
-				bool shared);
+int radeon_semaphore_sync_resv(struct radeon_device *rdev,
+			       struct radeon_semaphore *semaphore,
+			       struct reservation_object *resv,
+			       bool shared);
 int radeon_semaphore_sync_rings(struct radeon_device *rdev,
 				struct radeon_semaphore *semaphore,
 				int waiting_ring);
+22 -6
drivers/gpu/drm/radeon/radeon_cs.c
@@ -249,9 +249,9 @@
 	return 0;
 }
 
-static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
+static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
 {
-	int i;
+	int i, r = 0;
 
 	for (i = 0; i < p->nrelocs; i++) {
 		struct reservation_object *resv;
@@ -260,7 +260,11 @@
 			continue;
 
 		resv = p->relocs[i].robj->tbo.resv;
-		radeon_semaphore_sync_resv(p->ib.semaphore, resv,
-					   p->relocs[i].tv.shared);
+		r = radeon_semaphore_sync_resv(p->rdev, p->ib.semaphore, resv,
+					       p->relocs[i].tv.shared);
+
+		if (r)
+			break;
 	}
+	return r;
 }
@@ -476,13 +472,19 @@
 		return r;
 	}
 
+	r = radeon_cs_sync_rings(parser);
+	if (r) {
+		if (r != -ERESTARTSYS)
+			DRM_ERROR("Failed to sync rings: %i\n", r);
+		return r;
+	}
+
 	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
 		radeon_uvd_note_usage(rdev);
 	else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
 		 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
 		radeon_vce_note_usage(rdev);
 
-	radeon_cs_sync_rings(parser);
 	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
 	if (r) {
 		DRM_ERROR("Failed to schedule IB !\n");
@@ -575,7 +565,13 @@
 	if (r) {
 		goto out;
 	}
-	radeon_cs_sync_rings(parser);
+
+	r = radeon_cs_sync_rings(parser);
+	if (r) {
+		if (r != -ERESTARTSYS)
+			DRM_ERROR("Failed to sync rings: %i\n", r);
+		goto out;
+	}
 	radeon_semaphore_sync_fence(parser->ib.semaphore, vm->fence);
 
 	if ((rdev->family >= CHIP_TAHITI) &&
+9
drivers/gpu/drm/radeon/radeon_fence.c
@@ -541,6 +541,15 @@
 	uint64_t seq[RADEON_NUM_RINGS] = {};
 	long r;
 
+	/*
+	 * This function should not be called on !radeon fences.
+	 * If this is the case, it would mean this function can
+	 * also be called on radeon fences belonging to another card.
+	 * exclusive_lock is not held in that case.
+	 */
+	if (WARN_ON_ONCE(!to_radeon_fence(&fence->base)))
+		return fence_wait(&fence->base, intr);
+
 	seq[fence->ring] = fence->seq;
 	r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, MAX_SCHEDULE_TIMEOUT);
 	if (r < 0) {
+22 -7
drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -124,27 +124,42 @@
  *
  * Sync to the fence using this semaphore object
  */
-void radeon_semaphore_sync_resv(struct radeon_semaphore *sema,
-				struct reservation_object *resv,
-				bool shared)
+int radeon_semaphore_sync_resv(struct radeon_device *rdev,
+			       struct radeon_semaphore *sema,
+			       struct reservation_object *resv,
+			       bool shared)
 {
 	struct reservation_object_list *flist;
 	struct fence *f;
+	struct radeon_fence *fence;
 	unsigned i;
+	int r = 0;
 
 	/* always sync to the exclusive fence */
 	f = reservation_object_get_excl(resv);
-	radeon_semaphore_sync_fence(sema, (struct radeon_fence*)f);
+	fence = f ? to_radeon_fence(f) : NULL;
+	if (fence && fence->rdev == rdev)
+		radeon_semaphore_sync_fence(sema, fence);
+	else if (f)
+		r = fence_wait(f, true);
 
 	flist = reservation_object_get_list(resv);
-	if (shared || !flist)
-		return;
+	if (shared || !flist || r)
+		return r;
 
 	for (i = 0; i < flist->shared_count; ++i) {
 		f = rcu_dereference_protected(flist->shared[i],
					      reservation_object_held(resv));
-		radeon_semaphore_sync_fence(sema, (struct radeon_fence*)f);
+		fence = to_radeon_fence(f);
+		if (fence && fence->rdev == rdev)
+			radeon_semaphore_sync_fence(sema, fence);
+		else
+			r = fence_wait(f, true);
+
+		if (r)
+			break;
 	}
+	return r;
 }
 
 /**
+2 -2
drivers/gpu/drm/radeon/radeon_vm.c
@@ -698,7 +698,7 @@
 	if (ib.length_dw != 0) {
 		radeon_asic_vm_pad_ib(rdev, &ib);
 
-		radeon_semaphore_sync_resv(ib.semaphore, pd->tbo.resv, false);
+		radeon_semaphore_sync_resv(rdev, ib.semaphore, pd->tbo.resv, false);
 		radeon_semaphore_sync_fence(ib.semaphore, vm->last_id_use);
 		WARN_ON(ib.length_dw > ndw);
 		r = radeon_ib_schedule(rdev, &ib, NULL, false);
@@ -825,7 +825,7 @@
 		unsigned nptes;
 		uint64_t pte;
 
-		radeon_semaphore_sync_resv(ib->semaphore, pt->tbo.resv, false);
+		radeon_semaphore_sync_resv(rdev, ib->semaphore, pt->tbo.resv, false);
 
 		if ((addr & ~mask) == (end & ~mask))
 			nptes = end - addr;
+1 -1
drivers/gpu/drm/radeon/rv770_dma.c
@@ -67,7 +67,7 @@
 		return ERR_PTR(r);
 	}
 
-	radeon_semaphore_sync_resv(sem, resv, false);
+	radeon_semaphore_sync_resv(rdev, sem, resv, false);
 	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {
+1 -1
drivers/gpu/drm/radeon/si_dma.c
@@ -252,7 +252,7 @@
 		return ERR_PTR(r);
 	}
 
-	radeon_semaphore_sync_resv(sem, resv, false);
+	radeon_semaphore_sync_resv(rdev, sem, resv, false);
 	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	for (i = 0; i < num_loops; i++) {