Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/ttm: remove no_wait_reserve, v3

All items on the lru list are always reservable, so this is a stupid
thing to keep. Not only that, it is used in a way which would
guarantee deadlocks if it were ever to be set to block on reserve.

This is a lot of churn, but mostly because of the removal of the
argument which can be nested arbitrarily deeply in many places.

No change of code in this patch except removal of the no_wait_reserve
argument; the previous patch removed the use of no_wait_reserve.

v2:
- Warn if -EBUSY is returned on reservation, all objects on the list
should be reservable. Adjusted patch slightly due to conflicts.
v3:
- Focus on no_wait_reserve removal only.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>

Authored by Maarten Lankhorst, committed by Dave Airlie
97a875cb e7ab2019

+107 -114
+5 -5
drivers/gpu/drm/ast/ast_ttm.c
··· 186 186 187 187 static int ast_bo_move(struct ttm_buffer_object *bo, 188 188 bool evict, bool interruptible, 189 - bool no_wait_reserve, bool no_wait_gpu, 189 + bool no_wait_gpu, 190 190 struct ttm_mem_reg *new_mem) 191 191 { 192 192 int r; 193 - r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 193 + r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); 194 194 return r; 195 195 } 196 196 ··· 383 383 ast_ttm_placement(bo, pl_flag); 384 384 for (i = 0; i < bo->placement.num_placement; i++) 385 385 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 386 - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 386 + ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 387 387 if (ret) 388 388 return ret; 389 389 ··· 406 406 407 407 for (i = 0; i < bo->placement.num_placement ; i++) 408 408 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; 409 - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 409 + ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 410 410 if (ret) 411 411 return ret; 412 412 ··· 431 431 for (i = 0; i < bo->placement.num_placement ; i++) 432 432 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 433 433 434 - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 434 + ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 435 435 if (ret) { 436 436 DRM_ERROR("pushing to VRAM failed\n"); 437 437 return ret;
+5 -5
drivers/gpu/drm/cirrus/cirrus_ttm.c
··· 186 186 187 187 static int cirrus_bo_move(struct ttm_buffer_object *bo, 188 188 bool evict, bool interruptible, 189 - bool no_wait_reserve, bool no_wait_gpu, 189 + bool no_wait_gpu, 190 190 struct ttm_mem_reg *new_mem) 191 191 { 192 192 int r; 193 - r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 193 + r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); 194 194 return r; 195 195 } 196 196 ··· 388 388 cirrus_ttm_placement(bo, pl_flag); 389 389 for (i = 0; i < bo->placement.num_placement; i++) 390 390 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 391 - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 391 + ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 392 392 if (ret) 393 393 return ret; 394 394 ··· 411 411 412 412 for (i = 0; i < bo->placement.num_placement ; i++) 413 413 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; 414 - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 414 + ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 415 415 if (ret) 416 416 return ret; 417 417 ··· 436 436 for (i = 0; i < bo->placement.num_placement ; i++) 437 437 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 438 438 439 - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 439 + ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 440 440 if (ret) { 441 441 DRM_ERROR("pushing to VRAM failed\n"); 442 442 return ret;
+5 -5
drivers/gpu/drm/mgag200/mgag200_ttm.c
··· 186 186 187 187 static int mgag200_bo_move(struct ttm_buffer_object *bo, 188 188 bool evict, bool interruptible, 189 - bool no_wait_reserve, bool no_wait_gpu, 189 + bool no_wait_gpu, 190 190 struct ttm_mem_reg *new_mem) 191 191 { 192 192 int r; 193 - r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 193 + r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); 194 194 return r; 195 195 } 196 196 ··· 382 382 mgag200_ttm_placement(bo, pl_flag); 383 383 for (i = 0; i < bo->placement.num_placement; i++) 384 384 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 385 - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 385 + ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 386 386 if (ret) 387 387 return ret; 388 388 ··· 405 405 406 406 for (i = 0; i < bo->placement.num_placement ; i++) 407 407 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; 408 - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 408 + ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 409 409 if (ret) 410 410 return ret; 411 411 ··· 430 430 for (i = 0; i < bo->placement.num_placement ; i++) 431 431 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 432 432 433 - ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 433 + ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); 434 434 if (ret) { 435 435 DRM_ERROR("pushing to VRAM failed\n"); 436 436 return ret;
+26 -29
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 315 315 316 316 nouveau_bo_placement_set(nvbo, memtype, 0); 317 317 318 - ret = nouveau_bo_validate(nvbo, false, false, false); 318 + ret = nouveau_bo_validate(nvbo, false, false); 319 319 if (ret == 0) { 320 320 switch (bo->mem.mem_type) { 321 321 case TTM_PL_VRAM: ··· 351 351 352 352 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); 353 353 354 - ret = nouveau_bo_validate(nvbo, false, false, false); 354 + ret = nouveau_bo_validate(nvbo, false, false); 355 355 if (ret == 0) { 356 356 switch (bo->mem.mem_type) { 357 357 case TTM_PL_VRAM: ··· 392 392 393 393 int 394 394 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, 395 - bool no_wait_reserve, bool no_wait_gpu) 395 + bool no_wait_gpu) 396 396 { 397 397 int ret; 398 398 399 - ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible, 400 - no_wait_reserve, no_wait_gpu); 399 + ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, 400 + interruptible, no_wait_gpu); 401 401 if (ret) 402 402 return ret; 403 403 ··· 556 556 static int 557 557 nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, 558 558 struct nouveau_bo *nvbo, bool evict, 559 - bool no_wait_reserve, bool no_wait_gpu, 560 - struct ttm_mem_reg *new_mem) 559 + bool no_wait_gpu, struct ttm_mem_reg *new_mem) 561 560 { 562 561 struct nouveau_fence *fence = NULL; 563 562 int ret; ··· 566 567 return ret; 567 568 568 569 ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict, 569 - no_wait_reserve, no_wait_gpu, new_mem); 570 + no_wait_gpu, new_mem); 570 571 nouveau_fence_unref(&fence); 571 572 return ret; 572 573 } ··· 964 965 965 966 static int 966 967 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, 967 - bool no_wait_reserve, bool no_wait_gpu, 968 - struct ttm_mem_reg *new_mem) 968 + bool no_wait_gpu, struct ttm_mem_reg *new_mem) 969 969 { 970 970 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 971 971 struct nouveau_channel *chan = chan = drm->channel; ··· 993 995 ret = drm->ttm.move(chan, bo, 
&bo->mem, new_mem); 994 996 if (ret == 0) { 995 997 ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict, 996 - no_wait_reserve, 997 998 no_wait_gpu, new_mem); 998 999 } 999 1000 ··· 1061 1064 1062 1065 static int 1063 1066 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, 1064 - bool no_wait_reserve, bool no_wait_gpu, 1065 - struct ttm_mem_reg *new_mem) 1067 + bool no_wait_gpu, struct ttm_mem_reg *new_mem) 1066 1068 { 1067 1069 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; 1068 1070 struct ttm_placement placement; ··· 1074 1078 1075 1079 tmp_mem = *new_mem; 1076 1080 tmp_mem.mm_node = NULL; 1077 - ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu); 1081 + ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu); 1078 1082 if (ret) 1079 1083 return ret; 1080 1084 ··· 1082 1086 if (ret) 1083 1087 goto out; 1084 1088 1085 - ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem); 1089 + ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem); 1086 1090 if (ret) 1087 1091 goto out; 1088 1092 1089 - ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); 1093 + ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); 1090 1094 out: 1091 1095 ttm_bo_mem_put(bo, &tmp_mem); 1092 1096 return ret; ··· 1094 1098 1095 1099 static int 1096 1100 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, 1097 - bool no_wait_reserve, bool no_wait_gpu, 1098 - struct ttm_mem_reg *new_mem) 1101 + bool no_wait_gpu, struct ttm_mem_reg *new_mem) 1099 1102 { 1100 1103 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; 1101 1104 struct ttm_placement placement; ··· 1107 1112 1108 1113 tmp_mem = *new_mem; 1109 1114 tmp_mem.mm_node = NULL; 1110 - ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu); 1115 + ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu); 1111 1116 if (ret) 1112 
1117 return ret; 1113 1118 1114 - ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem); 1119 + ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); 1115 1120 if (ret) 1116 1121 goto out; 1117 1122 1118 - ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem); 1123 + ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem); 1119 1124 if (ret) 1120 1125 goto out; 1121 1126 ··· 1190 1195 1191 1196 static int 1192 1197 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, 1193 - bool no_wait_reserve, bool no_wait_gpu, 1194 - struct ttm_mem_reg *new_mem) 1198 + bool no_wait_gpu, struct ttm_mem_reg *new_mem) 1195 1199 { 1196 1200 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 1197 1201 struct nouveau_bo *nvbo = nouveau_bo(bo); ··· 1214 1220 1215 1221 /* CPU copy if we have no accelerated method available */ 1216 1222 if (!drm->ttm.move) { 1217 - ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 1223 + ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); 1218 1224 goto out; 1219 1225 } 1220 1226 1221 1227 /* Hardware assisted copy. */ 1222 1228 if (new_mem->mem_type == TTM_PL_SYSTEM) 1223 - ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); 1229 + ret = nouveau_bo_move_flipd(bo, evict, intr, 1230 + no_wait_gpu, new_mem); 1224 1231 else if (old_mem->mem_type == TTM_PL_SYSTEM) 1225 - ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); 1232 + ret = nouveau_bo_move_flips(bo, evict, intr, 1233 + no_wait_gpu, new_mem); 1226 1234 else 1227 - ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); 1235 + ret = nouveau_bo_move_m2mf(bo, evict, intr, 1236 + no_wait_gpu, new_mem); 1228 1237 1229 1238 if (!ret) 1230 1239 goto out; 1231 1240 1232 1241 /* Fallback to software copy. 
*/ 1233 - ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 1242 + ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); 1234 1243 1235 1244 out: 1236 1245 if (nv_device(drm->device)->card_type < NV_50) { ··· 1340 1343 nvbo->placement.fpfn = 0; 1341 1344 nvbo->placement.lpfn = mappable; 1342 1345 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0); 1343 - return nouveau_bo_validate(nvbo, false, true, false); 1346 + return nouveau_bo_validate(nvbo, false, false); 1344 1347 } 1345 1348 1346 1349 static int
+1 -1
drivers/gpu/drm/nouveau/nouveau_bo.h
··· 76 76 void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val); 77 77 void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *); 78 78 int nouveau_bo_validate(struct nouveau_bo *, bool interruptible, 79 - bool no_wait_reserve, bool no_wait_gpu); 79 + bool no_wait_gpu); 80 80 81 81 struct nouveau_vma * 82 82 nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
+1 -1
drivers/gpu/drm/nouveau/nouveau_gem.c
··· 433 433 return ret; 434 434 } 435 435 436 - ret = nouveau_bo_validate(nvbo, true, false, false); 436 + ret = nouveau_bo_validate(nvbo, true, false); 437 437 if (unlikely(ret)) { 438 438 if (ret != -ERESTARTSYS) 439 439 NV_ERROR(drm, "fail ttm_validate\n");
+4 -4
drivers/gpu/drm/radeon/radeon_object.c
··· 250 250 } 251 251 for (i = 0; i < bo->placement.num_placement; i++) 252 252 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 253 - r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false); 253 + r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 254 254 if (likely(r == 0)) { 255 255 bo->pin_count = 1; 256 256 if (gpu_addr != NULL) ··· 279 279 return 0; 280 280 for (i = 0; i < bo->placement.num_placement; i++) 281 281 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; 282 - r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false); 282 + r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 283 283 if (unlikely(r != 0)) 284 284 dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo); 285 285 return r; ··· 365 365 retry: 366 366 radeon_ttm_placement_from_domain(bo, domain); 367 367 r = ttm_bo_validate(&bo->tbo, &bo->placement, 368 - true, false, false); 368 + true, false); 369 369 if (unlikely(r)) { 370 370 if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) { 371 371 domain |= RADEON_GEM_DOMAIN_GTT; ··· 585 585 /* hurrah the memory is not visible ! */ 586 586 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); 587 587 rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; 588 - r = ttm_bo_validate(bo, &rbo->placement, false, true, false); 588 + r = ttm_bo_validate(bo, &rbo->placement, false, false); 589 589 if (unlikely(r != 0)) 590 590 return r; 591 591 offset = bo->mem.start << PAGE_SHIFT;
+16 -15
drivers/gpu/drm/radeon/radeon_ttm.c
··· 216 216 } 217 217 218 218 static int radeon_move_blit(struct ttm_buffer_object *bo, 219 - bool evict, int no_wait_reserve, bool no_wait_gpu, 219 + bool evict, bool no_wait_gpu, 220 220 struct ttm_mem_reg *new_mem, 221 221 struct ttm_mem_reg *old_mem) 222 222 { ··· 266 266 &fence); 267 267 /* FIXME: handle copy error */ 268 268 r = ttm_bo_move_accel_cleanup(bo, (void *)fence, 269 - evict, no_wait_reserve, no_wait_gpu, new_mem); 269 + evict, no_wait_gpu, new_mem); 270 270 radeon_fence_unref(&fence); 271 271 return r; 272 272 } 273 273 274 274 static int radeon_move_vram_ram(struct ttm_buffer_object *bo, 275 275 bool evict, bool interruptible, 276 - bool no_wait_reserve, bool no_wait_gpu, 276 + bool no_wait_gpu, 277 277 struct ttm_mem_reg *new_mem) 278 278 { 279 279 struct radeon_device *rdev; ··· 294 294 placement.busy_placement = &placements; 295 295 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; 296 296 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, 297 - interruptible, no_wait_reserve, no_wait_gpu); 297 + interruptible, no_wait_gpu); 298 298 if (unlikely(r)) { 299 299 return r; 300 300 } ··· 308 308 if (unlikely(r)) { 309 309 goto out_cleanup; 310 310 } 311 - r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem); 311 + r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem); 312 312 if (unlikely(r)) { 313 313 goto out_cleanup; 314 314 } 315 - r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); 315 + r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); 316 316 out_cleanup: 317 317 ttm_bo_mem_put(bo, &tmp_mem); 318 318 return r; ··· 320 320 321 321 static int radeon_move_ram_vram(struct ttm_buffer_object *bo, 322 322 bool evict, bool interruptible, 323 - bool no_wait_reserve, bool no_wait_gpu, 323 + bool no_wait_gpu, 324 324 struct ttm_mem_reg *new_mem) 325 325 { 326 326 struct radeon_device *rdev; ··· 340 340 placement.num_busy_placement = 1; 341 341 placement.busy_placement = &placements; 342 342 
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; 343 - r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu); 343 + r = ttm_bo_mem_space(bo, &placement, &tmp_mem, 344 + interruptible, no_wait_gpu); 344 345 if (unlikely(r)) { 345 346 return r; 346 347 } 347 - r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem); 348 + r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); 348 349 if (unlikely(r)) { 349 350 goto out_cleanup; 350 351 } 351 - r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem); 352 + r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem); 352 353 if (unlikely(r)) { 353 354 goto out_cleanup; 354 355 } ··· 360 359 361 360 static int radeon_bo_move(struct ttm_buffer_object *bo, 362 361 bool evict, bool interruptible, 363 - bool no_wait_reserve, bool no_wait_gpu, 362 + bool no_wait_gpu, 364 363 struct ttm_mem_reg *new_mem) 365 364 { 366 365 struct radeon_device *rdev; ··· 389 388 if (old_mem->mem_type == TTM_PL_VRAM && 390 389 new_mem->mem_type == TTM_PL_SYSTEM) { 391 390 r = radeon_move_vram_ram(bo, evict, interruptible, 392 - no_wait_reserve, no_wait_gpu, new_mem); 391 + no_wait_gpu, new_mem); 393 392 } else if (old_mem->mem_type == TTM_PL_SYSTEM && 394 393 new_mem->mem_type == TTM_PL_VRAM) { 395 394 r = radeon_move_ram_vram(bo, evict, interruptible, 396 - no_wait_reserve, no_wait_gpu, new_mem); 395 + no_wait_gpu, new_mem); 397 396 } else { 398 - r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem); 397 + r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem); 399 398 } 400 399 401 400 if (r) { 402 401 memcpy: 403 - r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 402 + r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); 404 403 } 405 404 return r; 406 405 }
+24 -22
drivers/gpu/drm/ttm/ttm_bo.c
··· 366 366 static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, 367 367 struct ttm_mem_reg *mem, 368 368 bool evict, bool interruptible, 369 - bool no_wait_reserve, bool no_wait_gpu) 369 + bool no_wait_gpu) 370 370 { 371 371 struct ttm_bo_device *bdev = bo->bdev; 372 372 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem); ··· 420 420 421 421 if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && 422 422 !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) 423 - ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem); 423 + ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem); 424 424 else if (bdev->driver->move) 425 425 ret = bdev->driver->move(bo, evict, interruptible, 426 - no_wait_reserve, no_wait_gpu, mem); 426 + no_wait_gpu, mem); 427 427 else 428 - ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem); 428 + ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem); 429 429 430 430 if (ret) { 431 431 if (bdev->driver->move_notify) { ··· 749 749 EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue); 750 750 751 751 static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, 752 - bool no_wait_reserve, bool no_wait_gpu) 752 + bool no_wait_gpu) 753 753 { 754 754 struct ttm_bo_device *bdev = bo->bdev; 755 755 struct ttm_mem_reg evict_mem; ··· 780 780 placement.num_busy_placement = 0; 781 781 bdev->driver->evict_flags(bo, &placement); 782 782 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible, 783 - no_wait_reserve, no_wait_gpu); 783 + no_wait_gpu); 784 784 if (ret) { 785 785 if (ret != -ERESTARTSYS) { 786 786 pr_err("Failed to find memory space for buffer 0x%p eviction\n", ··· 791 791 } 792 792 793 793 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, 794 - no_wait_reserve, no_wait_gpu); 794 + no_wait_gpu); 795 795 if (ret) { 796 796 if (ret != -ERESTARTSYS) 797 797 pr_err("Buffer eviction failed\n"); ··· 805 805 806 806 static int ttm_mem_evict_first(struct ttm_bo_device *bdev, 807 807 uint32_t 
mem_type, 808 - bool interruptible, bool no_wait_reserve, 808 + bool interruptible, 809 809 bool no_wait_gpu) 810 810 { 811 811 struct ttm_bo_global *glob = bdev->glob; ··· 841 841 842 842 ttm_bo_list_ref_sub(bo, put_count, true); 843 843 844 - ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu); 844 + ret = ttm_bo_evict(bo, interruptible, no_wait_gpu); 845 845 ttm_bo_unreserve(bo); 846 846 847 847 kref_put(&bo->list_kref, ttm_bo_release_list); ··· 866 866 struct ttm_placement *placement, 867 867 struct ttm_mem_reg *mem, 868 868 bool interruptible, 869 - bool no_wait_reserve, 870 869 bool no_wait_gpu) 871 870 { 872 871 struct ttm_bo_device *bdev = bo->bdev; ··· 878 879 return ret; 879 880 if (mem->mm_node) 880 881 break; 881 - ret = ttm_mem_evict_first(bdev, mem_type, interruptible, 882 - no_wait_reserve, no_wait_gpu); 882 + ret = ttm_mem_evict_first(bdev, mem_type, 883 + interruptible, no_wait_gpu); 883 884 if (unlikely(ret != 0)) 884 885 return ret; 885 886 } while (1); ··· 944 945 int ttm_bo_mem_space(struct ttm_buffer_object *bo, 945 946 struct ttm_placement *placement, 946 947 struct ttm_mem_reg *mem, 947 - bool interruptible, bool no_wait_reserve, 948 + bool interruptible, 948 949 bool no_wait_gpu) 949 950 { 950 951 struct ttm_bo_device *bdev = bo->bdev; ··· 1035 1036 } 1036 1037 1037 1038 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, 1038 - interruptible, no_wait_reserve, no_wait_gpu); 1039 + interruptible, no_wait_gpu); 1039 1040 if (ret == 0 && mem->mm_node) { 1040 1041 mem->placement = cur_flags; 1041 1042 return 0; ··· 1050 1051 1051 1052 int ttm_bo_move_buffer(struct ttm_buffer_object *bo, 1052 1053 struct ttm_placement *placement, 1053 - bool interruptible, bool no_wait_reserve, 1054 + bool interruptible, 1054 1055 bool no_wait_gpu) 1055 1056 { 1056 1057 int ret = 0; ··· 1077 1078 /* 1078 1079 * Determine where to move the buffer. 
1079 1080 */ 1080 - ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu); 1081 + ret = ttm_bo_mem_space(bo, placement, &mem, 1082 + interruptible, no_wait_gpu); 1081 1083 if (ret) 1082 1084 goto out_unlock; 1083 - ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu); 1085 + ret = ttm_bo_handle_move_mem(bo, &mem, false, 1086 + interruptible, no_wait_gpu); 1084 1087 out_unlock: 1085 1088 if (ret && mem.mm_node) 1086 1089 ttm_bo_mem_put(bo, &mem); ··· 1111 1110 1112 1111 int ttm_bo_validate(struct ttm_buffer_object *bo, 1113 1112 struct ttm_placement *placement, 1114 - bool interruptible, bool no_wait_reserve, 1113 + bool interruptible, 1115 1114 bool no_wait_gpu) 1116 1115 { 1117 1116 int ret; ··· 1127 1126 */ 1128 1127 ret = ttm_bo_mem_compat(placement, &bo->mem); 1129 1128 if (ret < 0) { 1130 - ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu); 1129 + ret = ttm_bo_move_buffer(bo, placement, interruptible, 1130 + no_wait_gpu); 1131 1131 if (ret) 1132 1132 return ret; 1133 1133 } else { ··· 1241 1239 goto out_err; 1242 1240 } 1243 1241 1244 - ret = ttm_bo_validate(bo, placement, interruptible, false, false); 1242 + ret = ttm_bo_validate(bo, placement, interruptible, false); 1245 1243 if (ret) 1246 1244 goto out_err; 1247 1245 ··· 1327 1325 spin_lock(&glob->lru_lock); 1328 1326 while (!list_empty(&man->lru)) { 1329 1327 spin_unlock(&glob->lru_lock); 1330 - ret = ttm_mem_evict_first(bdev, mem_type, false, false, false); 1328 + ret = ttm_mem_evict_first(bdev, mem_type, false, false); 1331 1329 if (ret) { 1332 1330 if (allow_errors) { 1333 1331 return ret; ··· 1839 1837 evict_mem.mem_type = TTM_PL_SYSTEM; 1840 1838 1841 1839 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, 1842 - false, false, false); 1840 + false, false); 1843 1841 if (unlikely(ret != 0)) 1844 1842 goto out; 1845 1843 }
+3 -3
drivers/gpu/drm/ttm/ttm_bo_util.c
··· 43 43 } 44 44 45 45 int ttm_bo_move_ttm(struct ttm_buffer_object *bo, 46 - bool evict, bool no_wait_reserve, 46 + bool evict, 47 47 bool no_wait_gpu, struct ttm_mem_reg *new_mem) 48 48 { 49 49 struct ttm_tt *ttm = bo->ttm; ··· 314 314 } 315 315 316 316 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, 317 - bool evict, bool no_wait_reserve, bool no_wait_gpu, 317 + bool evict, bool no_wait_gpu, 318 318 struct ttm_mem_reg *new_mem) 319 319 { 320 320 struct ttm_bo_device *bdev = bo->bdev; ··· 611 611 612 612 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, 613 613 void *sync_obj, 614 - bool evict, bool no_wait_reserve, 614 + bool evict, 615 615 bool no_wait_gpu, 616 616 struct ttm_mem_reg *new_mem) 617 617 {
+6 -7
drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
··· 66 66 if (unlikely(ret != 0)) 67 67 goto err; 68 68 69 - ret = ttm_bo_validate(bo, placement, interruptible, false, false); 69 + ret = ttm_bo_validate(bo, placement, interruptible, false); 70 70 71 71 ttm_bo_unreserve(bo); 72 72 ··· 123 123 else 124 124 placement = &vmw_vram_gmr_placement; 125 125 126 - ret = ttm_bo_validate(bo, placement, interruptible, false, false); 126 + ret = ttm_bo_validate(bo, placement, interruptible, false); 127 127 if (likely(ret == 0) || ret == -ERESTARTSYS) 128 128 goto err_unreserve; 129 129 ··· 138 138 else 139 139 placement = &vmw_vram_placement; 140 140 141 - ret = ttm_bo_validate(bo, placement, interruptible, false, false); 141 + ret = ttm_bo_validate(bo, placement, interruptible, false); 142 142 143 143 err_unreserve: 144 144 ttm_bo_unreserve(bo); ··· 223 223 if (bo->mem.mem_type == TTM_PL_VRAM && 224 224 bo->mem.start < bo->num_pages && 225 225 bo->mem.start > 0) 226 - (void) ttm_bo_validate(bo, &vmw_sys_placement, false, 227 - false, false); 226 + (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false); 228 227 229 - ret = ttm_bo_validate(bo, &placement, interruptible, false, false); 228 + ret = ttm_bo_validate(bo, &placement, interruptible, false); 230 229 231 230 /* For some reason we didn't up at the start of vram */ 232 231 WARN_ON(ret == 0 && bo->offset != 0); ··· 314 315 placement.num_placement = 1; 315 316 placement.placement = &pl_flags; 316 317 317 - ret = ttm_bo_validate(bo, &placement, false, true, true); 318 + ret = ttm_bo_validate(bo, &placement, false, true); 318 319 319 320 BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type); 320 321 }
+2 -2
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 1245 1245 * used as a GMR, this will return -ENOMEM. 1246 1246 */ 1247 1247 1248 - ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false); 1248 + ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false); 1249 1249 if (likely(ret == 0 || ret == -ERESTARTSYS)) 1250 1250 return ret; 1251 1251 ··· 1255 1255 */ 1256 1256 1257 1257 DRM_INFO("Falling through to VRAM.\n"); 1258 - ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false); 1258 + ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false); 1259 1259 return ret; 1260 1260 } 1261 1261
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 1018 1018 backup_dirty = res->backup_dirty; 1019 1019 ret = ttm_bo_validate(&res->backup->base, 1020 1020 res->func->backup_placement, 1021 - true, false, false); 1021 + true, false); 1022 1022 1023 1023 if (unlikely(ret != 0)) 1024 1024 goto out_no_validate;
+1 -2
include/drm/ttm/ttm_bo_api.h
··· 337 337 * @bo: The buffer object. 338 338 * @placement: Proposed placement for the buffer object. 339 339 * @interruptible: Sleep interruptible if sleeping. 340 - * @no_wait_reserve: Return immediately if other buffers are busy. 341 340 * @no_wait_gpu: Return immediately if the GPU is busy. 342 341 * 343 342 * Changes placement and caching policy of the buffer object ··· 349 350 */ 350 351 extern int ttm_bo_validate(struct ttm_buffer_object *bo, 351 352 struct ttm_placement *placement, 352 - bool interruptible, bool no_wait_reserve, 353 + bool interruptible, 353 354 bool no_wait_gpu); 354 355 355 356 /**
+7 -12
include/drm/ttm/ttm_bo_driver.h
··· 394 394 */ 395 395 int (*move) (struct ttm_buffer_object *bo, 396 396 bool evict, bool interruptible, 397 - bool no_wait_reserve, bool no_wait_gpu, 397 + bool no_wait_gpu, 398 398 struct ttm_mem_reg *new_mem); 399 399 400 400 /** ··· 703 703 * @proposed_placement: Proposed new placement for the buffer object. 704 704 * @mem: A struct ttm_mem_reg. 705 705 * @interruptible: Sleep interruptible when sliping. 706 - * @no_wait_reserve: Return immediately if other buffers are busy. 707 706 * @no_wait_gpu: Return immediately if the GPU is busy. 708 707 * 709 708 * Allocate memory space for the buffer object pointed to by @bo, using ··· 718 719 struct ttm_placement *placement, 719 720 struct ttm_mem_reg *mem, 720 721 bool interruptible, 721 - bool no_wait_reserve, bool no_wait_gpu); 722 + bool no_wait_gpu); 722 723 723 724 extern void ttm_bo_mem_put(struct ttm_buffer_object *bo, 724 725 struct ttm_mem_reg *mem); ··· 900 901 * 901 902 * @bo: A pointer to a struct ttm_buffer_object. 902 903 * @evict: 1: This is an eviction. Don't try to pipeline. 903 - * @no_wait_reserve: Return immediately if other buffers are busy. 904 904 * @no_wait_gpu: Return immediately if the GPU is busy. 905 905 * @new_mem: struct ttm_mem_reg indicating where to move. 906 906 * ··· 914 916 */ 915 917 916 918 extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo, 917 - bool evict, bool no_wait_reserve, 918 - bool no_wait_gpu, struct ttm_mem_reg *new_mem); 919 + bool evict, bool no_wait_gpu, 920 + struct ttm_mem_reg *new_mem); 919 921 920 922 /** 921 923 * ttm_bo_move_memcpy 922 924 * 923 925 * @bo: A pointer to a struct ttm_buffer_object. 924 926 * @evict: 1: This is an eviction. Don't try to pipeline. 925 - * @no_wait_reserve: Return immediately if other buffers are busy. 926 927 * @no_wait_gpu: Return immediately if the GPU is busy. 927 928 * @new_mem: struct ttm_mem_reg indicating where to move. 
928 929 * ··· 936 939 */ 937 940 938 941 extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, 939 - bool evict, bool no_wait_reserve, 940 - bool no_wait_gpu, struct ttm_mem_reg *new_mem); 942 + bool evict, bool no_wait_gpu, 943 + struct ttm_mem_reg *new_mem); 941 944 942 945 /** 943 946 * ttm_bo_free_old_node ··· 954 957 * @bo: A pointer to a struct ttm_buffer_object. 955 958 * @sync_obj: A sync object that signals when moving is complete. 956 959 * @evict: This is an evict move. Don't return until the buffer is idle. 957 - * @no_wait_reserve: Return immediately if other buffers are busy. 958 960 * @no_wait_gpu: Return immediately if the GPU is busy. 959 961 * @new_mem: struct ttm_mem_reg indicating where to move. 960 962 * ··· 967 971 968 972 extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, 969 973 void *sync_obj, 970 - bool evict, bool no_wait_reserve, 971 - bool no_wait_gpu, 974 + bool evict, bool no_wait_gpu, 972 975 struct ttm_mem_reg *new_mem); 973 976 /** 974 977 * ttm_io_prot