Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/radeon: remove _unlocked suffix in drm_gem_object_put_unlocked

Spelling out _unlocked for each and every driver is annoying.
Especially if we consider how many drivers do not know (or need to know)
about the horror stories involving struct_mutex.

Just drop the suffix. It makes the API cleaner.

Done via the following script:

__from=drm_gem_object_put_unlocked
__to=drm_gem_object_put
for __file in $(git grep --name-only $__from); do
sed -i "s/$__from/$__to/g" $__file;
done

Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: "Christian König" <christian.koenig@amd.com>
Cc: "David (ChunMing) Zhou" <David1.Zhou@amd.com>
Signed-off-by: Emil Velikov <emil.velikov@collabora.com>
Acked-by: Sam Ravnborg <sam@ravnborg.org>
Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20200515095118.2743122-30-emil.l.velikov@gmail.com

authored by

Emil Velikov and committed by
Emil Velikov
f11fb66a 9c86fb18

+26 -26
+1 -1
drivers/gpu/drm/radeon/radeon_cs.c
··· 443 443 if (bo == NULL) 444 444 continue; 445 445 446 - drm_gem_object_put_unlocked(&bo->tbo.base); 446 + drm_gem_object_put(&bo->tbo.base); 447 447 } 448 448 } 449 449 kfree(parser->track);
+3 -3
drivers/gpu/drm/radeon/radeon_cursor.c
··· 309 309 robj = gem_to_radeon_bo(obj); 310 310 ret = radeon_bo_reserve(robj, false); 311 311 if (ret != 0) { 312 - drm_gem_object_put_unlocked(obj); 312 + drm_gem_object_put(obj); 313 313 return ret; 314 314 } 315 315 /* Only 27 bit offset for legacy cursor */ ··· 319 319 radeon_bo_unreserve(robj); 320 320 if (ret) { 321 321 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); 322 - drm_gem_object_put_unlocked(obj); 322 + drm_gem_object_put(obj); 323 323 return ret; 324 324 } 325 325 ··· 354 354 radeon_bo_unpin(robj); 355 355 radeon_bo_unreserve(robj); 356 356 } 357 - drm_gem_object_put_unlocked(radeon_crtc->cursor_bo); 357 + drm_gem_object_put(radeon_crtc->cursor_bo); 358 358 } 359 359 360 360 radeon_crtc->cursor_bo = obj;
+4 -4
drivers/gpu/drm/radeon/radeon_display.c
··· 281 281 } else 282 282 DRM_ERROR("failed to reserve buffer after flip\n"); 283 283 284 - drm_gem_object_put_unlocked(&work->old_rbo->tbo.base); 284 + drm_gem_object_put(&work->old_rbo->tbo.base); 285 285 kfree(work); 286 286 } 287 287 ··· 613 613 radeon_bo_unreserve(new_rbo); 614 614 615 615 cleanup: 616 - drm_gem_object_put_unlocked(&work->old_rbo->tbo.base); 616 + drm_gem_object_put(&work->old_rbo->tbo.base); 617 617 dma_fence_put(work->fence); 618 618 kfree(work); 619 619 return r; ··· 1337 1337 1338 1338 fb = kzalloc(sizeof(*fb), GFP_KERNEL); 1339 1339 if (fb == NULL) { 1340 - drm_gem_object_put_unlocked(obj); 1340 + drm_gem_object_put(obj); 1341 1341 return ERR_PTR(-ENOMEM); 1342 1342 } 1343 1343 1344 1344 ret = radeon_framebuffer_init(dev, fb, mode_cmd, obj); 1345 1345 if (ret) { 1346 1346 kfree(fb); 1347 - drm_gem_object_put_unlocked(obj); 1347 + drm_gem_object_put(obj); 1348 1348 return ERR_PTR(ret); 1349 1349 } 1350 1350
+2 -2
drivers/gpu/drm/radeon/radeon_fb.c
··· 119 119 radeon_bo_unpin(rbo); 120 120 radeon_bo_unreserve(rbo); 121 121 } 122 - drm_gem_object_put_unlocked(gobj); 122 + drm_gem_object_put(gobj); 123 123 } 124 124 125 125 static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, ··· 298 298 299 299 } 300 300 if (fb && ret) { 301 - drm_gem_object_put_unlocked(gobj); 301 + drm_gem_object_put(gobj); 302 302 drm_framebuffer_unregister_private(fb); 303 303 drm_framebuffer_cleanup(fb); 304 304 kfree(fb);
+15 -15
drivers/gpu/drm/radeon/radeon_gem.c
··· 275 275 } 276 276 r = drm_gem_handle_create(filp, gobj, &handle); 277 277 /* drop reference from allocate - handle holds it now */ 278 - drm_gem_object_put_unlocked(gobj); 278 + drm_gem_object_put(gobj); 279 279 if (r) { 280 280 up_read(&rdev->exclusive_lock); 281 281 r = radeon_gem_handle_lockup(rdev, r); ··· 359 359 360 360 r = drm_gem_handle_create(filp, gobj, &handle); 361 361 /* drop reference from allocate - handle holds it now */ 362 - drm_gem_object_put_unlocked(gobj); 362 + drm_gem_object_put(gobj); 363 363 if (r) 364 364 goto handle_lockup; 365 365 ··· 368 368 return 0; 369 369 370 370 release_object: 371 - drm_gem_object_put_unlocked(gobj); 371 + drm_gem_object_put(gobj); 372 372 373 373 handle_lockup: 374 374 up_read(&rdev->exclusive_lock); ··· 402 402 403 403 r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); 404 404 405 - drm_gem_object_put_unlocked(gobj); 405 + drm_gem_object_put(gobj); 406 406 up_read(&rdev->exclusive_lock); 407 407 r = radeon_gem_handle_lockup(robj->rdev, r); 408 408 return r; ··· 421 421 } 422 422 robj = gem_to_radeon_bo(gobj); 423 423 if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) { 424 - drm_gem_object_put_unlocked(gobj); 424 + drm_gem_object_put(gobj); 425 425 return -EPERM; 426 426 } 427 427 *offset_p = radeon_bo_mmap_offset(robj); 428 - drm_gem_object_put_unlocked(gobj); 428 + drm_gem_object_put(gobj); 429 429 return 0; 430 430 } 431 431 ··· 460 460 461 461 cur_placement = READ_ONCE(robj->tbo.mem.mem_type); 462 462 args->domain = radeon_mem_type_to_domain(cur_placement); 463 - drm_gem_object_put_unlocked(gobj); 463 + drm_gem_object_put(gobj); 464 464 return r; 465 465 } 466 466 ··· 492 492 if (rdev->asic->mmio_hdp_flush && 493 493 radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM) 494 494 robj->rdev->asic->mmio_hdp_flush(rdev); 495 - drm_gem_object_put_unlocked(gobj); 495 + drm_gem_object_put(gobj); 496 496 r = radeon_gem_handle_lockup(rdev, r); 497 497 return r; 498 498 } ··· 511 511 
return -ENOENT; 512 512 robj = gem_to_radeon_bo(gobj); 513 513 r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); 514 - drm_gem_object_put_unlocked(gobj); 514 + drm_gem_object_put(gobj); 515 515 return r; 516 516 } 517 517 ··· 534 534 radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch); 535 535 radeon_bo_unreserve(rbo); 536 536 out: 537 - drm_gem_object_put_unlocked(gobj); 537 + drm_gem_object_put(gobj); 538 538 return r; 539 539 } 540 540 ··· 668 668 r = radeon_bo_reserve(rbo, false); 669 669 if (r) { 670 670 args->operation = RADEON_VA_RESULT_ERROR; 671 - drm_gem_object_put_unlocked(gobj); 671 + drm_gem_object_put(gobj); 672 672 return r; 673 673 } 674 674 bo_va = radeon_vm_bo_find(&fpriv->vm, rbo); 675 675 if (!bo_va) { 676 676 args->operation = RADEON_VA_RESULT_ERROR; 677 677 radeon_bo_unreserve(rbo); 678 - drm_gem_object_put_unlocked(gobj); 678 + drm_gem_object_put(gobj); 679 679 return -ENOENT; 680 680 } 681 681 ··· 702 702 args->operation = RADEON_VA_RESULT_ERROR; 703 703 } 704 704 out: 705 - drm_gem_object_put_unlocked(gobj); 705 + drm_gem_object_put(gobj); 706 706 return r; 707 707 } 708 708 ··· 743 743 744 744 radeon_bo_unreserve(robj); 745 745 out: 746 - drm_gem_object_put_unlocked(gobj); 746 + drm_gem_object_put(gobj); 747 747 return r; 748 748 } 749 749 ··· 769 769 770 770 r = drm_gem_handle_create(file_priv, gobj, &handle); 771 771 /* drop reference from allocate - handle holds it now */ 772 - drm_gem_object_put_unlocked(gobj); 772 + drm_gem_object_put(gobj); 773 773 if (r) { 774 774 return r; 775 775 }
+1 -1
drivers/gpu/drm/radeon/radeon_object.c
··· 448 448 list_del_init(&bo->list); 449 449 mutex_unlock(&bo->rdev->gem.mutex); 450 450 /* this should unref the ttm bo */ 451 - drm_gem_object_put_unlocked(&bo->tbo.base); 451 + drm_gem_object_put(&bo->tbo.base); 452 452 } 453 453 } 454 454