Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/msm/gem: Add msm_gem_assert_locked()

All use of msm_gem_is_locked() is just for WARN_ON()s, so extract out
into an msm_gem_assert_locked() helper.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Patchwork: https://patchwork.freedesktop.org/patch/496136/
Link: https://lore.kernel.org/r/20220802155152.1727594-15-robdclark@gmail.com

+25 -19
+18 -18
drivers/gpu/drm/msm/msm_gem.c
··· 97 97 { 98 98 struct msm_gem_object *msm_obj = to_msm_bo(obj); 99 99 100 - GEM_WARN_ON(!msm_gem_is_locked(obj)); 100 + msm_gem_assert_locked(obj); 101 101 102 102 if (!msm_obj->pages) { 103 103 struct drm_device *dev = obj->dev; ··· 183 183 struct msm_gem_object *msm_obj = to_msm_bo(obj); 184 184 struct page **p; 185 185 186 - GEM_WARN_ON(!msm_gem_is_locked(obj)); 186 + msm_gem_assert_locked(obj); 187 187 188 188 if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) { 189 189 return ERR_PTR(-EBUSY); ··· 278 278 struct drm_device *dev = obj->dev; 279 279 int ret; 280 280 281 - GEM_WARN_ON(!msm_gem_is_locked(obj)); 281 + msm_gem_assert_locked(obj); 282 282 283 283 /* Make it mmapable */ 284 284 ret = drm_gem_create_mmap_offset(obj); ··· 307 307 struct msm_gem_object *msm_obj = to_msm_bo(obj); 308 308 struct msm_gem_vma *vma; 309 309 310 - GEM_WARN_ON(!msm_gem_is_locked(obj)); 310 + msm_gem_assert_locked(obj); 311 311 312 312 vma = kzalloc(sizeof(*vma), GFP_KERNEL); 313 313 if (!vma) ··· 326 326 struct msm_gem_object *msm_obj = to_msm_bo(obj); 327 327 struct msm_gem_vma *vma; 328 328 329 - GEM_WARN_ON(!msm_gem_is_locked(obj)); 329 + msm_gem_assert_locked(obj); 330 330 331 331 list_for_each_entry(vma, &msm_obj->vmas, list) { 332 332 if (vma->aspace == aspace) ··· 357 357 struct msm_gem_object *msm_obj = to_msm_bo(obj); 358 358 struct msm_gem_vma *vma; 359 359 360 - GEM_WARN_ON(!msm_gem_is_locked(obj)); 360 + msm_gem_assert_locked(obj); 361 361 362 362 list_for_each_entry(vma, &msm_obj->vmas, list) { 363 363 if (vma->aspace) { ··· 375 375 struct msm_gem_object *msm_obj = to_msm_bo(obj); 376 376 struct msm_gem_vma *vma, *tmp; 377 377 378 - GEM_WARN_ON(!msm_gem_is_locked(obj)); 378 + msm_gem_assert_locked(obj); 379 379 380 380 list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) { 381 381 del_vma(vma); ··· 388 388 { 389 389 struct msm_gem_vma *vma; 390 390 391 - GEM_WARN_ON(!msm_gem_is_locked(obj)); 391 + msm_gem_assert_locked(obj); 392 392 393 393 vma = 
lookup_vma(obj, aspace); 394 394 ··· 428 428 if (msm_obj->flags & MSM_BO_CACHED_COHERENT) 429 429 prot |= IOMMU_CACHE; 430 430 431 - GEM_WARN_ON(!msm_gem_is_locked(obj)); 431 + msm_gem_assert_locked(obj); 432 432 433 433 if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) 434 434 return -EBUSY; ··· 448 448 { 449 449 struct msm_gem_object *msm_obj = to_msm_bo(obj); 450 450 451 - GEM_WARN_ON(!msm_gem_is_locked(obj)); 451 + msm_gem_assert_locked(obj); 452 452 453 453 msm_obj->pin_count--; 454 454 GEM_WARN_ON(msm_obj->pin_count < 0); ··· 469 469 struct msm_gem_vma *vma; 470 470 int ret; 471 471 472 - GEM_WARN_ON(!msm_gem_is_locked(obj)); 472 + msm_gem_assert_locked(obj); 473 473 474 474 vma = get_vma_locked(obj, aspace, range_start, range_end); 475 475 if (IS_ERR(vma)) ··· 630 630 struct msm_gem_object *msm_obj = to_msm_bo(obj); 631 631 int ret = 0; 632 632 633 - GEM_WARN_ON(!msm_gem_is_locked(obj)); 633 + msm_gem_assert_locked(obj); 634 634 635 635 if (obj->import_attach) 636 636 return ERR_PTR(-ENODEV); ··· 703 703 { 704 704 struct msm_gem_object *msm_obj = to_msm_bo(obj); 705 705 706 - GEM_WARN_ON(!msm_gem_is_locked(obj)); 706 + msm_gem_assert_locked(obj); 707 707 GEM_WARN_ON(msm_obj->vmap_count < 1); 708 708 709 709 msm_obj->vmap_count--; ··· 745 745 struct drm_device *dev = obj->dev; 746 746 struct msm_gem_object *msm_obj = to_msm_bo(obj); 747 747 748 - GEM_WARN_ON(!msm_gem_is_locked(obj)); 748 + msm_gem_assert_locked(obj); 749 749 GEM_WARN_ON(!is_purgeable(msm_obj)); 750 750 751 751 /* Get rid of any iommu mapping(s): */ ··· 782 782 struct drm_device *dev = obj->dev; 783 783 struct msm_gem_object *msm_obj = to_msm_bo(obj); 784 784 785 - GEM_WARN_ON(!msm_gem_is_locked(obj)); 785 + msm_gem_assert_locked(obj); 786 786 GEM_WARN_ON(is_unevictable(msm_obj)); 787 787 788 788 /* Get rid of any iommu mapping(s): */ ··· 797 797 { 798 798 struct msm_gem_object *msm_obj = to_msm_bo(obj); 799 799 800 - GEM_WARN_ON(!msm_gem_is_locked(obj)); 800 + msm_gem_assert_locked(obj); 
801 801 802 802 if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj))) 803 803 return; ··· 811 811 struct msm_drm_private *priv = obj->dev->dev_private; 812 812 struct msm_gem_object *msm_obj = to_msm_bo(obj); 813 813 814 - GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base)); 814 + msm_gem_assert_locked(&msm_obj->base); 815 815 816 816 if (!msm_obj->pages) { 817 817 GEM_WARN_ON(msm_obj->pin_count); ··· 831 831 832 832 bool msm_gem_active(struct drm_gem_object *obj) 833 833 { 834 - GEM_WARN_ON(!msm_gem_is_locked(obj)); 834 + msm_gem_assert_locked(obj); 835 835 836 836 if (to_msm_bo(obj)->pin_count) 837 837 return true;
+7 -1
drivers/gpu/drm/msm/msm_gem.h
··· 215 215 return dma_resv_is_locked(obj->resv) || (kref_read(&obj->refcount) == 0); 216 216 } 217 217 218 + static inline void 219 + msm_gem_assert_locked(struct drm_gem_object *obj) 220 + { 221 + GEM_WARN_ON(!msm_gem_is_locked(obj)); 222 + } 223 + 218 224 /* imported/exported objects are not purgeable: */ 219 225 static inline bool is_unpurgeable(struct msm_gem_object *msm_obj) 220 226 { ··· 235 229 236 230 static inline bool is_vunmapable(struct msm_gem_object *msm_obj) 237 231 { 238 - GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base)); 232 + msm_gem_assert_locked(&msm_obj->base); 239 233 return (msm_obj->vmap_count == 0) && msm_obj->vaddr; 240 234 } 241 235