Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/radeon: switch to drm_*{get,put} helpers

drm_*_reference() and drm_*_unreference() functions are just
compatibility aliases for drm_*_get() and drm_*_put() and should not be
used by new code. So convert all users of compatibility functions to use
the new APIs.

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Cihangir Akturk <cakturk@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

authored by

Cihangir Akturk and committed by
Alex Deucher
07f65bb2 f62facc2

+28 -28
+1 -1
drivers/gpu/drm/radeon/radeon_cs.c
··· 437 437 if (bo == NULL) 438 438 continue; 439 439 440 - drm_gem_object_unreference_unlocked(&bo->gem_base); 440 + drm_gem_object_put_unlocked(&bo->gem_base); 441 441 } 442 442 } 443 443 kfree(parser->track);
+3 -3
drivers/gpu/drm/radeon/radeon_cursor.c
··· 307 307 robj = gem_to_radeon_bo(obj); 308 308 ret = radeon_bo_reserve(robj, false); 309 309 if (ret != 0) { 310 - drm_gem_object_unreference_unlocked(obj); 310 + drm_gem_object_put_unlocked(obj); 311 311 return ret; 312 312 } 313 313 /* Only 27 bit offset for legacy cursor */ ··· 317 317 radeon_bo_unreserve(robj); 318 318 if (ret) { 319 319 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); 320 - drm_gem_object_unreference_unlocked(obj); 320 + drm_gem_object_put_unlocked(obj); 321 321 return ret; 322 322 } 323 323 ··· 352 352 radeon_bo_unpin(robj); 353 353 radeon_bo_unreserve(robj); 354 354 } 355 - drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo); 355 + drm_gem_object_put_unlocked(radeon_crtc->cursor_bo); 356 356 } 357 357 358 358 radeon_crtc->cursor_bo = obj;
+6 -6
drivers/gpu/drm/radeon/radeon_display.c
··· 267 267 } else 268 268 DRM_ERROR("failed to reserve buffer after flip\n"); 269 269 270 - drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); 270 + drm_gem_object_put_unlocked(&work->old_rbo->gem_base); 271 271 kfree(work); 272 272 } 273 273 ··· 504 504 obj = old_radeon_fb->obj; 505 505 506 506 /* take a reference to the old object */ 507 - drm_gem_object_reference(obj); 507 + drm_gem_object_get(obj); 508 508 work->old_rbo = gem_to_radeon_bo(obj); 509 509 510 510 new_radeon_fb = to_radeon_framebuffer(fb); ··· 603 603 radeon_bo_unreserve(new_rbo); 604 604 605 605 cleanup: 606 - drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); 606 + drm_gem_object_put_unlocked(&work->old_rbo->gem_base); 607 607 dma_fence_put(work->fence); 608 608 kfree(work); 609 609 return r; ··· 1288 1288 { 1289 1289 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); 1290 1290 1291 - drm_gem_object_unreference_unlocked(radeon_fb->obj); 1291 + drm_gem_object_put_unlocked(radeon_fb->obj); 1292 1292 drm_framebuffer_cleanup(fb); 1293 1293 kfree(radeon_fb); 1294 1294 } ··· 1348 1348 1349 1349 radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); 1350 1350 if (radeon_fb == NULL) { 1351 - drm_gem_object_unreference_unlocked(obj); 1351 + drm_gem_object_put_unlocked(obj); 1352 1352 return ERR_PTR(-ENOMEM); 1353 1353 } 1354 1354 1355 1355 ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj); 1356 1356 if (ret) { 1357 1357 kfree(radeon_fb); 1358 - drm_gem_object_unreference_unlocked(obj); 1358 + drm_gem_object_put_unlocked(obj); 1359 1359 return ERR_PTR(ret); 1360 1360 } 1361 1361
+2 -2
drivers/gpu/drm/radeon/radeon_fb.c
··· 118 118 radeon_bo_unpin(rbo); 119 119 radeon_bo_unreserve(rbo); 120 120 } 121 - drm_gem_object_unreference_unlocked(gobj); 121 + drm_gem_object_put_unlocked(gobj); 122 122 } 123 123 124 124 static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, ··· 299 299 300 300 } 301 301 if (fb && ret) { 302 - drm_gem_object_unreference_unlocked(gobj); 302 + drm_gem_object_put_unlocked(gobj); 303 303 drm_framebuffer_unregister_private(fb); 304 304 drm_framebuffer_cleanup(fb); 305 305 kfree(fb);
+15 -15
drivers/gpu/drm/radeon/radeon_gem.c
··· 271 271 } 272 272 r = drm_gem_handle_create(filp, gobj, &handle); 273 273 /* drop reference from allocate - handle holds it now */ 274 - drm_gem_object_unreference_unlocked(gobj); 274 + drm_gem_object_put_unlocked(gobj); 275 275 if (r) { 276 276 up_read(&rdev->exclusive_lock); 277 277 r = radeon_gem_handle_lockup(rdev, r); ··· 352 352 353 353 r = drm_gem_handle_create(filp, gobj, &handle); 354 354 /* drop reference from allocate - handle holds it now */ 355 - drm_gem_object_unreference_unlocked(gobj); 355 + drm_gem_object_put_unlocked(gobj); 356 356 if (r) 357 357 goto handle_lockup; 358 358 ··· 361 361 return 0; 362 362 363 363 release_object: 364 - drm_gem_object_unreference_unlocked(gobj); 364 + drm_gem_object_put_unlocked(gobj); 365 365 366 366 handle_lockup: 367 367 up_read(&rdev->exclusive_lock); ··· 395 395 396 396 r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); 397 397 398 - drm_gem_object_unreference_unlocked(gobj); 398 + drm_gem_object_put_unlocked(gobj); 399 399 up_read(&rdev->exclusive_lock); 400 400 r = radeon_gem_handle_lockup(robj->rdev, r); 401 401 return r; ··· 414 414 } 415 415 robj = gem_to_radeon_bo(gobj); 416 416 if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) { 417 - drm_gem_object_unreference_unlocked(gobj); 417 + drm_gem_object_put_unlocked(gobj); 418 418 return -EPERM; 419 419 } 420 420 *offset_p = radeon_bo_mmap_offset(robj); 421 - drm_gem_object_unreference_unlocked(gobj); 421 + drm_gem_object_put_unlocked(gobj); 422 422 return 0; 423 423 } 424 424 ··· 453 453 454 454 cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type); 455 455 args->domain = radeon_mem_type_to_domain(cur_placement); 456 - drm_gem_object_unreference_unlocked(gobj); 456 + drm_gem_object_put_unlocked(gobj); 457 457 return r; 458 458 } 459 459 ··· 485 485 if (rdev->asic->mmio_hdp_flush && 486 486 radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM) 487 487 robj->rdev->asic->mmio_hdp_flush(rdev); 488 - drm_gem_object_unreference_unlocked(gobj); 488 + drm_gem_object_put_unlocked(gobj); 489 489 r = radeon_gem_handle_lockup(rdev, r); 490 490 return r; 491 491 } ··· 504 504 return -ENOENT; 505 505 robj = gem_to_radeon_bo(gobj); 506 506 r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); 507 - drm_gem_object_unreference_unlocked(gobj); 507 + drm_gem_object_put_unlocked(gobj); 508 508 return r; 509 509 } 510 510 ··· 527 527 radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch); 528 528 radeon_bo_unreserve(rbo); 529 529 out: 530 - drm_gem_object_unreference_unlocked(gobj); 530 + drm_gem_object_put_unlocked(gobj); 531 531 return r; 532 532 } 533 533 ··· 661 661 r = radeon_bo_reserve(rbo, false); 662 662 if (r) { 663 663 args->operation = RADEON_VA_RESULT_ERROR; 664 - drm_gem_object_unreference_unlocked(gobj); 664 + drm_gem_object_put_unlocked(gobj); 665 665 return r; 666 666 } 667 667 bo_va = radeon_vm_bo_find(&fpriv->vm, rbo); 668 668 if (!bo_va) { 669 669 args->operation = RADEON_VA_RESULT_ERROR; 670 670 radeon_bo_unreserve(rbo); 671 - drm_gem_object_unreference_unlocked(gobj); 671 + drm_gem_object_put_unlocked(gobj); 672 672 return -ENOENT; 673 673 } 674 674 ··· 695 695 args->operation = RADEON_VA_RESULT_ERROR; 696 696 } 697 697 out: 698 - drm_gem_object_unreference_unlocked(gobj); 698 + drm_gem_object_put_unlocked(gobj); 699 699 return r; 700 700 } 701 701 ··· 736 736 737 737 radeon_bo_unreserve(robj); 738 738 out: 739 - drm_gem_object_unreference_unlocked(gobj); 739 + drm_gem_object_put_unlocked(gobj); 740 740 return r; 741 741 } 742 742 ··· 762 762 763 763 r = drm_gem_handle_create(file_priv, gobj, &handle); 764 764 /* drop reference from allocate - handle holds it now */ 765 - drm_gem_object_unreference_unlocked(gobj); 765 + drm_gem_object_put_unlocked(gobj); 766 766 if (r) { 767 767 return r; 768 768 }
+1 -1
drivers/gpu/drm/radeon/radeon_object.c
··· 445 445 list_del_init(&bo->list); 446 446 mutex_unlock(&bo->rdev->gem.mutex); 447 447 /* this should unref the ttm bo */ 448 - drm_gem_object_unreference_unlocked(&bo->gem_base); 448 + drm_gem_object_put_unlocked(&bo->gem_base); 449 449 } 450 450 } 451 451