
drm/amdgpu: fix PRT teardown on VM fini v3

v2: new approach, fixing this by registering a fence callback for
all users of the VM on teardown
v3: agd: rebase

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
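
The approach the changelog describes is built on the kernel's dma_fence callback API: a small callback object is attached to each fence that may still reference the VM, and the PRT reference is dropped only when that fence signals. A minimal sketch of that idiom, independent of amdgpu (all names below are illustrative, not from the patch):

#include <linux/dma-fence.h>
#include <linux/slab.h>

/* Carrier tying a deferred release to a fence. */
struct teardown_cb {
	struct dma_fence_cb cb;
	void (*release)(void *data);
	void *data;
};

static void teardown_cb_func(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
	struct teardown_cb *tcb = container_of(_cb, struct teardown_cb, cb);

	tcb->release(tcb->data);
	kfree(tcb);
}

/* Defer release(data) until fence signals; run it immediately when
 * there is no fence or it has already signaled.
 */
static int defer_until_signaled(struct dma_fence *fence,
				void (*release)(void *data), void *data)
{
	struct teardown_cb *tcb = kmalloc(sizeof(*tcb), GFP_KERNEL);

	if (!tcb)
		return -ENOMEM;

	tcb->release = release;
	tcb->data = data;

	/* dma_fence_add_callback() returns -ENOENT if already signaled */
	if (!fence || dma_fence_add_callback(fence, &tcb->cb,
					     teardown_cb_func))
		teardown_cb_func(fence, &tcb->cb);
	return 0;
}

amdgpu_vm_add_prt_cb() in the diff below has exactly this shape: allocate a callback carrier, attach it with dma_fence_add_callback(), and invoke the callback inline when the fence is NULL or has already signaled.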

Authored by Christian König, committed by Alex Deucher
451bc8eb 0b15f2fc

2 files changed: +84 -28

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c (+83 -27)
@@ -1188,9 +1188,18 @@
 	bool enable;
 
 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
-	enable = !!atomic_read(&adev->vm_manager.num_prt_mappings);
+	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
 	adev->gart.gart_funcs->set_prt(adev, enable);
 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
+}
+
+/**
+ * amdgpu_vm_prt_get - add a PRT user
+ */
+static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
+{
+	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
+		amdgpu_vm_update_prt_state(adev);
 }
 
 /**
@@ -1198,12 +1207,12 @@
  */
 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
 {
-	if (atomic_dec_return(&adev->vm_manager.num_prt_mappings) == 0)
+	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
 		amdgpu_vm_update_prt_state(adev);
 }
 
 /**
- * amdgpu_vm_prt - callback for updating the PRT status
+ * amdgpu_vm_prt_cb - callback for updating the PRT status
  */
 static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
 {
@@ -1211,6 +1220,29 @@
 
 	amdgpu_vm_prt_put(cb->adev);
 	kfree(cb);
+}
+
+/**
+ * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
+ */
+static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
+				 struct dma_fence *fence)
+{
+	struct amdgpu_prt_cb *cb = kmalloc(sizeof(struct amdgpu_prt_cb),
+					   GFP_KERNEL);
+
+	if (!cb) {
+		/* Last resort when we are OOM */
+		if (fence)
+			dma_fence_wait(fence, false);
+
+		amdgpu_vm_prt_put(adev);
+	} else {
+		cb->adev = adev;
+		if (!fence || dma_fence_add_callback(fence, &cb->cb,
+						     amdgpu_vm_prt_cb))
+			amdgpu_vm_prt_cb(fence, &cb->cb);
+	}
 }
 
 /**
@@ -1228,24 +1260,47 @@
 			       struct amdgpu_bo_va_mapping *mapping,
 			       struct dma_fence *fence)
 {
-	if (mapping->flags & AMDGPU_PTE_PRT) {
-		struct amdgpu_prt_cb *cb = kmalloc(sizeof(struct amdgpu_prt_cb),
-						   GFP_KERNEL);
-
-		if (!cb) {
-			/* Last resort when we are OOM */
-			if (fence)
-				dma_fence_wait(fence, false);
-
-			amdgpu_vm_prt_put(cb->adev);
-		} else {
-			cb->adev = adev;
-			if (!fence || dma_fence_add_callback(fence, &cb->cb,
-							     amdgpu_vm_prt_cb))
-				amdgpu_vm_prt_cb(fence, &cb->cb);
-		}
-	}
+	if (mapping->flags & AMDGPU_PTE_PRT)
+		amdgpu_vm_add_prt_cb(adev, fence);
 	kfree(mapping);
+}
+
+/**
+ * amdgpu_vm_prt_fini - finish all prt mappings
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ *
+ * Register a cleanup callback to disable PRT support after VM dies.
+ */
+static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+{
+	struct reservation_object *resv = vm->page_directory->tbo.resv;
+	struct dma_fence *excl, **shared;
+	unsigned i, shared_count;
+	int r;
+
+	r = reservation_object_get_fences_rcu(resv, &excl,
+					      &shared_count, &shared);
+	if (r) {
+		/* Not enough memory to grab the fence list, as last resort
+		 * block for all the fences to complete.
+		 */
+		reservation_object_wait_timeout_rcu(resv, true, false,
						    MAX_SCHEDULE_TIMEOUT);
+		return;
+	}
+
+	/* Add a callback for each fence in the reservation object */
+	amdgpu_vm_prt_get(adev);
+	amdgpu_vm_add_prt_cb(adev, excl);
+
+	for (i = 0; i < shared_count; ++i) {
+		amdgpu_vm_prt_get(adev);
+		amdgpu_vm_add_prt_cb(adev, shared[i]);
+	}
+
+	kfree(shared);
 }
 
 /**
@@ -1395,8 +1450,7 @@
 		if (!adev->gart.gart_funcs->set_prt)
 			return -EINVAL;
 
-		if (atomic_inc_return(&adev->vm_manager.num_prt_mappings) == 1)
-			amdgpu_vm_update_prt_state(adev);
+		amdgpu_vm_prt_get(adev);
 	}
 
 	/* make sure object fit at this offset */
@@ -1699,6 +1753,7 @@
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
 	struct amdgpu_bo_va_mapping *mapping, *tmp;
+	bool prt_fini_called = false;
 	int i;
 
 	amd_sched_entity_fini(vm->entity.sched, &vm->entity);
@@ -1712,13 +1767,14 @@
 		kfree(mapping);
 	}
 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
-		if (mapping->flags & AMDGPU_PTE_PRT)
-			continue;
+		if (mapping->flags & AMDGPU_PTE_PRT && !prt_fini_called) {
+			amdgpu_vm_prt_fini(adev, vm);
+			prt_fini_called = true;
+		}
 
 		list_del(&mapping->list);
-		kfree(mapping);
+		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
 	}
-	amdgpu_vm_clear_freed(adev, vm);
 
 	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
 		struct amdgpu_bo *pt = vm->page_tables[i].bo;
@@ -1765,7 +1821,7 @@
 	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
 	atomic64_set(&adev->vm_manager.client_counter, 0);
 	spin_lock_init(&adev->vm_manager.prt_lock);
-	atomic_set(&adev->vm_manager.num_prt_mappings, 0);
+	atomic_set(&adev->vm_manager.num_prt_users, 0);
 }
 
 /**
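
The rename from num_prt_mappings to num_prt_users above is the heart of the fix: a "user" is now either a live PRT mapping or a pending fence callback registered by amdgpu_vm_prt_fini(), so the hardware PRT flag stays set until the last fence on the dying VM's page directory signals. Reduced to its counter semantics, the scheme looks like this (a sketch; hw_set_prt() stands in for the adev->gart.gart_funcs->set_prt() hook, and the prt_lock serialization shown in the diff is elided):

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t num_prt_users = ATOMIC_INIT(0);

static void hw_set_prt(bool enable);	/* stand-in for the GART hook */

/* The first user switches hardware PRT support on... */
static void prt_get(void)
{
	if (atomic_inc_return(&num_prt_users) == 1)
		hw_set_prt(true);
}

/* ...and only the last user switches it back off. */
static void prt_put(void)
{
	if (atomic_dec_return(&num_prt_users) == 0)
		hw_set_prt(false);
}

Each fence callback added by amdgpu_vm_prt_fini() holds one such reference and drops it from amdgpu_vm_prt_cb(); only when grabbing the fence list fails under memory pressure does teardown fall back to a plain blocking wait.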
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h (+1 -1)
@@ -164,7 +164,7 @@
 
 	/* partial resident texture handling */
 	spinlock_t prt_lock;
-	atomic_t num_prt_mappings;
+	atomic_t num_prt_users;
 };
 
 void amdgpu_vm_manager_init(struct amdgpu_device *adev);