Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/shmem: Add madvise state and purge helpers

Add support to the shmem GEM helpers for tracking madvise state and
purging pages. This is based on the msm implementation.

The BO provides a list_head, but the list management is handled outside
of the shmem helpers as there are different locking requirements.

Cc: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Maxime Ripard <maxime.ripard@bootlin.com>
Cc: Sean Paul <sean@poorly.run>
Cc: David Airlie <airlied@linux.ie>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Eric Anholt <eric@anholt.net>
Acked-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Signed-off-by: Rob Herring <robh@kernel.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20190805143358.21245-1-robh@kernel.org

+72
+57
drivers/gpu/drm/drm_gem_shmem_helper.c
··· 75 75 shmem = to_drm_gem_shmem_obj(obj); 76 76 mutex_init(&shmem->pages_lock); 77 77 mutex_init(&shmem->vmap_lock); 78 + INIT_LIST_HEAD(&shmem->madv_list); 78 79 79 80 /* 80 81 * Our buffers are kept pinned, so allocating them ··· 362 361 return shmem; 363 362 } 364 363 EXPORT_SYMBOL(drm_gem_shmem_create_with_handle); 364 + 365 + /* Update madvise status, returns true if not purged, else 366 + * false or -errno. 367 + */ 368 + int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv) 369 + { 370 + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); 371 + 372 + mutex_lock(&shmem->pages_lock); 373 + 374 + if (shmem->madv >= 0) 375 + shmem->madv = madv; 376 + 377 + madv = shmem->madv; 378 + 379 + mutex_unlock(&shmem->pages_lock); 380 + 381 + return (madv >= 0); 382 + } 383 + EXPORT_SYMBOL(drm_gem_shmem_madvise); 384 + 385 + void drm_gem_shmem_purge_locked(struct drm_gem_object *obj) 386 + { 387 + struct drm_device *dev = obj->dev; 388 + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); 389 + 390 + WARN_ON(!drm_gem_shmem_is_purgeable(shmem)); 391 + 392 + drm_gem_shmem_put_pages_locked(shmem); 393 + 394 + shmem->madv = -1; 395 + 396 + drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping); 397 + drm_gem_free_mmap_offset(obj); 398 + 399 + /* Our goal here is to return as much of the memory as 400 + * is possible back to the system as we are called from OOM. 401 + * To do this we must instruct the shmfs to drop all of its 402 + * backing pages, *now*. 
403 + */ 404 + shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1); 405 + 406 + invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 407 + 0, (loff_t)-1); 408 + } 409 + EXPORT_SYMBOL(drm_gem_shmem_purge_locked); 410 + 411 + void drm_gem_shmem_purge(struct drm_gem_object *obj) 412 + { 413 + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); 414 + 415 + mutex_lock(&shmem->pages_lock); 416 + drm_gem_shmem_purge_locked(obj); 417 + mutex_unlock(&shmem->pages_lock); 418 + } 419 + EXPORT_SYMBOL(drm_gem_shmem_purge); 365 420 366 421 /** 367 422 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
+15
include/drm/drm_gem_shmem_helper.h
··· 44 44 */ 45 45 unsigned int pages_use_count; 46 46 47 + int madv; 48 + struct list_head madv_list; 49 + 47 50 /** 48 51 * @pages_mark_dirty_on_put: 49 52 * ··· 123 120 void drm_gem_shmem_unpin(struct drm_gem_object *obj); 124 121 void *drm_gem_shmem_vmap(struct drm_gem_object *obj); 125 122 void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr); 123 + 124 + int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv); 125 + 126 + static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem) 127 + { 128 + return (shmem->madv > 0) && 129 + !shmem->vmap_use_count && shmem->sgt && 130 + !shmem->base.dma_buf && !shmem->base.import_attach; 131 + } 132 + 133 + void drm_gem_shmem_purge_locked(struct drm_gem_object *obj); 134 + void drm_gem_shmem_purge(struct drm_gem_object *obj); 126 135 127 136 struct drm_gem_shmem_object * 128 137 drm_gem_shmem_create_with_handle(struct drm_file *file_priv,