Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
drm: don't drop handle reference on unload
drm/ttm: Fix two race conditions + fix busy codepaths

6 files changed, 75 insertions(+), 17 deletions(-)

drivers/gpu/drm/i915/intel_fb.c | +1 -1
@@ -238,8 +238,8 @@
 
 	drm_framebuffer_cleanup(&ifb->base);
 	if (ifb->obj) {
-		drm_gem_object_handle_unreference(ifb->obj);
 		drm_gem_object_unreference(ifb->obj);
+		ifb->obj = NULL;
 	}
 
 	return 0;

drivers/gpu/drm/nouveau/nouveau_fbcon.c | +0 -1
@@ -352,7 +352,6 @@
 
 	if (nouveau_fb->nvbo) {
 		nouveau_bo_unmap(nouveau_fb->nvbo);
-		drm_gem_object_handle_unreference_unlocked(nouveau_fb->nvbo->gem);
 		drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
 		nouveau_fb->nvbo = NULL;
 	}

drivers/gpu/drm/nouveau/nouveau_notifier.c | +0 -1
@@ -79,7 +79,6 @@
 	mutex_lock(&dev->struct_mutex);
 	nouveau_bo_unpin(chan->notifier_bo);
 	mutex_unlock(&dev->struct_mutex);
-	drm_gem_object_handle_unreference_unlocked(chan->notifier_bo->gem);
 	drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
 	drm_mm_takedown(&chan->notifier_heap);
 }

drivers/gpu/drm/radeon/radeon_fb.c | +0 -1
@@ -97,7 +97,6 @@
 		radeon_bo_unpin(rbo);
 		radeon_bo_unreserve(rbo);
 	}
-	drm_gem_object_handle_unreference(gobj);
 	drm_gem_object_unreference_unlocked(gobj);
 }
 
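
All four hunks above undo the same mistake: the unload paths dropped a GEM handle reference they never owned. A handle reference is created by drm_gem_handle_create() on behalf of a userspace client and is dropped by drm_gem_handle_delete() when that client closes the handle; driver teardown owns only the plain object reference taken at allocation time. The sketch below illustrates that ownership rule against the GEM API of this era; the example_fb structure and the example_* helpers are hypothetical, used only for illustration:

#include "drmP.h"

/* Hypothetical container, for illustration only. */
struct example_fb {
	struct drm_gem_object *obj;
};

static int example_fb_init(struct drm_device *dev, struct example_fb *efb)
{
	/*
	 * drm_gem_object_alloc() returns an object holding a single
	 * reference, owned by this framebuffer. No userspace handle
	 * exists yet; handles are created later with
	 * drm_gem_handle_create(), and each handle pins its own
	 * handle reference.
	 */
	efb->obj = drm_gem_object_alloc(dev, PAGE_SIZE);
	return efb->obj ? 0 : -ENOMEM;
}

static void example_fb_fini(struct example_fb *efb)
{
	/*
	 * Drop only the reference taken in example_fb_init(). A handle
	 * reference belongs to the client owning the handle and is
	 * released via drm_gem_handle_delete() when that handle is
	 * closed; dropping it here as well, as the removed
	 * drm_gem_object_handle_unreference() calls did, underflows
	 * the count and can free an object userspace still uses.
	 */
	if (efb->obj) {
		drm_gem_object_unreference_unlocked(efb->obj);
		efb->obj = NULL;
	}
}

With the extra handle unreference gone, every reference dropped at unload matches one the driver itself acquired.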

drivers/gpu/drm/ttm/ttm_bo.c | +71 -12
@@ -442,6 +442,43 @@
 }
 
 /**
+ * Call bo::reserved and with the lru lock held.
+ * Will release GPU memory type usage on destruction.
+ * This is the place to put in driver specific hooks.
+ * Will release the bo::reserved lock and the
+ * lru lock on exit.
+ */
+
+static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_global *glob = bo->glob;
+
+	if (bo->ttm) {
+
+		/**
+		 * Release the lru_lock, since we don't want to have
+		 * an atomic requirement on ttm_tt[unbind|destroy].
+		 */
+
+		spin_unlock(&glob->lru_lock);
+		ttm_tt_unbind(bo->ttm);
+		ttm_tt_destroy(bo->ttm);
+		bo->ttm = NULL;
+		spin_lock(&glob->lru_lock);
+	}
+
+	if (bo->mem.mm_node) {
+		drm_mm_put_block(bo->mem.mm_node);
+		bo->mem.mm_node = NULL;
+	}
+
+	atomic_set(&bo->reserved, 0);
+	wake_up_all(&bo->event_queue);
+	spin_unlock(&glob->lru_lock);
+}
+
+
+/**
  * If bo idle, remove from delayed- and lru lists, and unref.
  * If not idle, and already on delayed list, do nothing.
  * If not idle, and not on delayed list, put on delayed list,
@@ -456,6 +493,7 @@
 	int ret;
 
 	spin_lock(&bo->lock);
+retry:
 	(void) ttm_bo_wait(bo, false, false, !remove_all);
 
 	if (!bo->sync_obj) {
@@ -464,31 +502,52 @@
 		spin_unlock(&bo->lock);
 
 		spin_lock(&glob->lru_lock);
-		put_count = ttm_bo_del_from_lru(bo);
+		ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);
 
-		ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
-		BUG_ON(ret);
-		if (bo->ttm)
-			ttm_tt_unbind(bo->ttm);
+		/**
+		 * Someone else has the object reserved. Bail and retry.
+		 */
+
+		if (unlikely(ret == -EBUSY)) {
+			spin_unlock(&glob->lru_lock);
+			spin_lock(&bo->lock);
+			goto requeue;
+		}
+
+		/**
+		 * We can re-check for sync object without taking
+		 * the bo::lock since setting the sync object requires
+		 * also bo::reserved. A busy object at this point may
+		 * be caused by another thread starting an accelerated
+		 * eviction.
+		 */
+
+		if (unlikely(bo->sync_obj)) {
+			atomic_set(&bo->reserved, 0);
+			wake_up_all(&bo->event_queue);
+			spin_unlock(&glob->lru_lock);
+			spin_lock(&bo->lock);
+			if (remove_all)
+				goto retry;
+			else
+				goto requeue;
+		}
+
+		put_count = ttm_bo_del_from_lru(bo);
 
 		if (!list_empty(&bo->ddestroy)) {
 			list_del_init(&bo->ddestroy);
 			++put_count;
 		}
-		if (bo->mem.mm_node) {
-			drm_mm_put_block(bo->mem.mm_node);
-			bo->mem.mm_node = NULL;
-		}
-		spin_unlock(&glob->lru_lock);
 
-		atomic_set(&bo->reserved, 0);
+		ttm_bo_cleanup_memtype_use(bo);
 
 		while (put_count--)
 			kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
 		return 0;
 	}
-
+requeue:
 	spin_lock(&glob->lru_lock);
 	if (list_empty(&bo->ddestroy)) {
 		void *sync_obj = bo->sync_obj;
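
The ttm_bo.c change replaces an assertion with a negotiation. The old destroy path called ttm_bo_reserve_locked() and did BUG_ON(ret), crashing whenever the reservation was contended, and it tore the buffer down without re-checking whether a new fence had been attached between ttm_bo_wait() and the reserve. The new path treats both situations as ordinary outcomes: a contended reservation (-EBUSY) requeues the buffer, and a re-armed sync_obj either retries the wait (when remove_all is set) or requeues. The standalone sketch below compresses that control flow into portable C with pthreads; every name in it is hypothetical and it is not the TTM API, only the try-reserve / re-check / back-off shape of the fix:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct object {
	pthread_mutex_t reserve;	/* stands in for bo::reserved */
	bool busy;			/* stands in for bo->sync_obj != NULL */
};

/* Returns true if the object was torn down, false if it must be requeued. */
static bool try_destroy(struct object *obj, pthread_mutex_t *list_lock)
{
	pthread_mutex_lock(list_lock);

	/* The analogue of ttm_bo_reserve_locked() returning -EBUSY. */
	if (pthread_mutex_trylock(&obj->reserve) != 0) {
		pthread_mutex_unlock(list_lock);
		return false;	/* someone else holds the reservation: requeue */
	}

	/*
	 * Reservation held: only a holder of the reservation may set the
	 * busy flag, so reading it here without any further lock is safe.
	 * This mirrors the new sync_obj rule documented in ttm_bo_api.h.
	 */
	if (obj->busy) {
		pthread_mutex_unlock(&obj->reserve);
		pthread_mutex_unlock(list_lock);
		return false;	/* fence re-armed concurrently: requeue */
	}

	/* Idle and reserved: safe to tear down (teardown elided). */
	pthread_mutex_unlock(&obj->reserve);
	pthread_mutex_unlock(list_lock);
	return true;
}

int main(void)
{
	pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
	struct object obj = { PTHREAD_MUTEX_INITIALIZER, false };

	printf("destroyed: %d\n", try_destroy(&obj, &list_lock));
	return 0;
}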

include/drm/ttm/ttm_bo_api.h | +3 -1
@@ -246,9 +246,11 @@
 
 	atomic_t reserved;
 
-
 	/**
 	 * Members protected by the bo::lock
+	 * In addition, setting sync_obj to anything else
+	 * than NULL requires bo::reserved to be held. This allows for
+	 * checking NULL while reserved but not holding bo::lock.
 	 */
 
 	void *sync_obj_arg;
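
The comment added here documents the invariant the race fix relies on: a fence may only be attached to a buffer while both the reservation and bo::lock are held, so a thread that holds the reservation knows no new fence can appear, and may test sync_obj for NULL without taking bo::lock. A sketch of both sides of that contract; the example_* helpers are hypothetical, while lock, sync_obj, and struct ttm_buffer_object are the real members from this header:

#include "ttm/ttm_bo_api.h"

/* Writer side: the caller must already hold bo::reserved. */
static void example_publish_fence(struct ttm_buffer_object *bo, void *fence)
{
	spin_lock(&bo->lock);
	bo->sync_obj = fence;	/* legal: reserved and bo::lock both held */
	spin_unlock(&bo->lock);
}

/*
 * Reader side: the caller holds bo::reserved, so once sync_obj reads
 * as NULL it stays NULL until the reservation is released; no bo::lock
 * is needed for the check itself.
 */
static bool example_bo_is_idle(struct ttm_buffer_object *bo)
{
	return bo->sync_obj == NULL;
}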