Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'etnaviv/next' of https://git.pengutronix.de/git/lst/linux into drm-next

This cycle has been fairly calm in etnaviv land, with most of the action
happening on the userspace side.

Notable changes:
- Improvements to CONFIG option handling to make it harder for users to
shoot themselves in the foot due to kernel misconfiguration.
- Tweaked GEM object population, so that userspace can take considered
action when memory allocation fails, rather than waking the raging OOM
killer beast.

* 'etnaviv/next' of https://git.pengutronix.de/git/lst/linux:
drm/etnaviv: switch GEM allocations to __GFP_RETRY_MAYFAIL
drm/etnaviv: don't fail GPU bind when CONFIG_THERMAL isn't enabled
drm/etnaviv: switch to drm_*{get,put} helpers
drm/etnaviv: select CMA and DMA_CMA if available
drm/etnaviv: populate GEM objects on cpu_prep
drm/etnaviv: reduce allocation failure message severity
drm/etnaviv: don't trigger OOM killer when page allocation fails

+37 -32
+2
drivers/gpu/drm/etnaviv/Kconfig
··· 10 10 select IOMMU_API 11 11 select IOMMU_SUPPORT 12 12 select WANT_DEV_COREDUMP 13 + select CMA if HAVE_DMA_CONTIGUOUS 14 + select DMA_CMA if HAVE_DMA_CONTIGUOUS 13 15 help 14 16 DRM driver for Vivante GPUs. 15 17
+4 -4
drivers/gpu/drm/etnaviv/etnaviv_drv.c
··· 316 316 317 317 ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout)); 318 318 319 - drm_gem_object_unreference_unlocked(obj); 319 + drm_gem_object_put_unlocked(obj); 320 320 321 321 return ret; 322 322 } ··· 337 337 338 338 ret = etnaviv_gem_cpu_fini(obj); 339 339 340 - drm_gem_object_unreference_unlocked(obj); 340 + drm_gem_object_put_unlocked(obj); 341 341 342 342 return ret; 343 343 } ··· 357 357 return -ENOENT; 358 358 359 359 ret = etnaviv_gem_mmap_offset(obj, &args->offset); 360 - drm_gem_object_unreference_unlocked(obj); 360 + drm_gem_object_put_unlocked(obj); 361 361 362 362 return ret; 363 363 } ··· 446 446 447 447 ret = etnaviv_gem_wait_bo(gpu, obj, timeout); 448 448 449 - drm_gem_object_unreference_unlocked(obj); 449 + drm_gem_object_put_unlocked(obj); 450 450 451 451 return ret; 452 452 }
+23 -22
drivers/gpu/drm/etnaviv/etnaviv_gem.c
··· 68 68 struct page **p = drm_gem_get_pages(&etnaviv_obj->base); 69 69 70 70 if (IS_ERR(p)) { 71 - dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p)); 71 + dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p)); 72 72 return PTR_ERR(p); 73 73 } 74 74 ··· 265 265 { 266 266 struct etnaviv_gem_object *etnaviv_obj = mapping->object; 267 267 268 - drm_gem_object_reference(&etnaviv_obj->base); 268 + drm_gem_object_get(&etnaviv_obj->base); 269 269 270 270 mutex_lock(&etnaviv_obj->lock); 271 271 WARN_ON(mapping->use == 0); ··· 282 282 mapping->use -= 1; 283 283 mutex_unlock(&etnaviv_obj->lock); 284 284 285 - drm_gem_object_unreference_unlocked(&etnaviv_obj->base); 285 + drm_gem_object_put_unlocked(&etnaviv_obj->base); 286 286 } 287 287 288 288 struct etnaviv_vram_mapping *etnaviv_gem_mapping_get( ··· 358 358 return ERR_PTR(ret); 359 359 360 360 /* Take a reference on the object */ 361 - drm_gem_object_reference(obj); 361 + drm_gem_object_get(obj); 362 362 return mapping; 363 363 } 364 364 ··· 413 413 bool write = !!(op & ETNA_PREP_WRITE); 414 414 int ret; 415 415 416 + if (!etnaviv_obj->sgt) { 417 + void *ret; 418 + 419 + mutex_lock(&etnaviv_obj->lock); 420 + ret = etnaviv_gem_get_pages(etnaviv_obj); 421 + mutex_unlock(&etnaviv_obj->lock); 422 + if (IS_ERR(ret)) 423 + return PTR_ERR(ret); 424 + } 425 + 416 426 if (op & ETNA_PREP_NOSYNC) { 417 427 if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv, 418 428 write)) ··· 437 427 } 438 428 439 429 if (etnaviv_obj->flags & ETNA_BO_CACHED) { 440 - if (!etnaviv_obj->sgt) { 441 - void *ret; 442 - 443 - mutex_lock(&etnaviv_obj->lock); 444 - ret = etnaviv_gem_get_pages(etnaviv_obj); 445 - mutex_unlock(&etnaviv_obj->lock); 446 - if (IS_ERR(ret)) 447 - return PTR_ERR(ret); 448 - } 449 - 450 430 dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl, 451 431 etnaviv_obj->sgt->nents, 452 432 etnaviv_op_to_dma_dir(op)); ··· 662 662 * going to pin these pages. 
663 663 */ 664 664 mapping = obj->filp->f_mapping; 665 - mapping_set_gfp_mask(mapping, GFP_HIGHUSER); 665 + mapping_set_gfp_mask(mapping, GFP_HIGHUSER | 666 + __GFP_RETRY_MAYFAIL | __GFP_NOWARN); 666 667 } 667 668 668 669 if (ret) ··· 672 671 return obj; 673 672 674 673 fail: 675 - drm_gem_object_unreference_unlocked(obj); 674 + drm_gem_object_put_unlocked(obj); 676 675 return ERR_PTR(ret); 677 676 } 678 677 ··· 689 688 690 689 ret = etnaviv_gem_obj_add(dev, obj); 691 690 if (ret < 0) { 692 - drm_gem_object_unreference_unlocked(obj); 691 + drm_gem_object_put_unlocked(obj); 693 692 return ret; 694 693 } 695 694 696 695 ret = drm_gem_handle_create(file, obj, handle); 697 696 698 697 /* drop reference from allocate - handle holds it now */ 699 - drm_gem_object_unreference_unlocked(obj); 698 + drm_gem_object_put_unlocked(obj); 700 699 701 700 return ret; 702 701 } ··· 713 712 714 713 ret = etnaviv_gem_obj_add(dev, obj); 715 714 if (ret < 0) { 716 - drm_gem_object_unreference_unlocked(obj); 715 + drm_gem_object_put_unlocked(obj); 717 716 return ERR_PTR(ret); 718 717 } 719 718 ··· 801 800 } 802 801 803 802 mutex_unlock(&etnaviv_obj->lock); 804 - drm_gem_object_unreference_unlocked(&etnaviv_obj->base); 803 + drm_gem_object_put_unlocked(&etnaviv_obj->base); 805 804 806 805 mmput(work->mm); 807 806 put_task_struct(work->task); ··· 859 858 } 860 859 861 860 get_task_struct(current); 862 - drm_gem_object_reference(&etnaviv_obj->base); 861 + drm_gem_object_get(&etnaviv_obj->base); 863 862 864 863 work->mm = mm; 865 864 work->task = current; ··· 925 924 ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle); 926 925 unreference: 927 926 /* drop reference from allocate - handle holds it now */ 928 - drm_gem_object_unreference_unlocked(&etnaviv_obj->base); 927 + drm_gem_object_put_unlocked(&etnaviv_obj->base); 929 928 return ret; 930 929 }
+1 -1
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
··· 146 146 return &etnaviv_obj->base; 147 147 148 148 fail: 149 - drm_gem_object_unreference_unlocked(&etnaviv_obj->base); 149 + drm_gem_object_put_unlocked(&etnaviv_obj->base); 150 150 151 151 return ERR_PTR(ret); 152 152 }
+2 -2
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
··· 88 88 * Take a refcount on the object. The file table lock 89 89 * prevents the object_idr's refcount on this being dropped. 90 90 */ 91 - drm_gem_object_reference(obj); 91 + drm_gem_object_get(obj); 92 92 93 93 submit->bos[i].obj = to_etnaviv_bo(obj); 94 94 } ··· 291 291 struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj; 292 292 293 293 submit_unlock_object(submit, i); 294 - drm_gem_object_unreference_unlocked(&etnaviv_obj->base); 294 + drm_gem_object_put_unlocked(&etnaviv_obj->base); 295 295 } 296 296 297 297 ww_acquire_fini(&submit->ticket);
+5 -3
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
··· 1622 1622 struct etnaviv_gpu *gpu = dev_get_drvdata(dev); 1623 1623 int ret; 1624 1624 1625 - gpu->cooling = thermal_of_cooling_device_register(dev->of_node, 1625 + if (IS_ENABLED(CONFIG_THERMAL)) { 1626 + gpu->cooling = thermal_of_cooling_device_register(dev->of_node, 1626 1627 (char *)dev_name(dev), gpu, &cooling_ops); 1627 - if (IS_ERR(gpu->cooling)) 1628 - return PTR_ERR(gpu->cooling); 1628 + if (IS_ERR(gpu->cooling)) 1629 + return PTR_ERR(gpu->cooling); 1630 + } 1629 1631 1630 1632 #ifdef CONFIG_PM 1631 1633 ret = pm_runtime_get_sync(gpu->dev);