Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/sched: Fix drm_sched_fence_free() so it can be passed an uninitialized fence

drm_sched_job_cleanup() will pass an uninitialized fence to
drm_sched_fence_free(), which will cause to_drm_sched_fence() to return
a NULL fence object, causing a NULL pointer deref when this NULL object
is passed to kmem_cache_free().

Let's create a new drm_sched_fence_free() function that takes a
drm_sched_fence pointer and suffix the old function with _rcu. While at
it, complain if drm_sched_fence_free() is passed an initialized fence
or if drm_sched_fence_free_rcu() is passed an uninitialized fence.

Fixes: dbe48d030b28 ("drm/sched: Split drm_sched_job_init")
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210903120554.444101-1-boris.brezillon@collabora.com

+21 -12
+19 -10
drivers/gpu/drm/scheduler/sched_fence.c
@@ -69,16 +69,25 @@
 	return (const char *)fence->sched->name;
 }
 
-/**
- * drm_sched_fence_free - free up the fence memory
- *
- * @rcu: RCU callback head
- *
- * Free up the fence memory after the RCU grace period.
- */
-void drm_sched_fence_free(struct rcu_head *rcu)
+static void drm_sched_fence_free_rcu(struct rcu_head *rcu)
 {
 	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
 	struct drm_sched_fence *fence = to_drm_sched_fence(f);
 
-	kmem_cache_free(sched_fence_slab, fence);
+	if (!WARN_ON_ONCE(!fence))
+		kmem_cache_free(sched_fence_slab, fence);
+}
+
+/**
+ * drm_sched_fence_free - free up an uninitialized fence
+ *
+ * @fence: fence to free
+ *
+ * Free up the fence memory. Should only be used if drm_sched_fence_init()
+ * has not been called yet.
+ */
+void drm_sched_fence_free(struct drm_sched_fence *fence)
+{
+	/* This function should not be called if the fence has been initialized. */
+	if (!WARN_ON_ONCE(fence->sched))
+		kmem_cache_free(sched_fence_slab, fence);
 }
 
 /**
@@ -106,7 +115,7 @@
 	struct drm_sched_fence *fence = to_drm_sched_fence(f);
 
 	dma_fence_put(fence->parent);
-	call_rcu(&fence->finished.rcu, drm_sched_fence_free);
+	call_rcu(&fence->finished.rcu, drm_sched_fence_free_rcu);
 }
 
 /**
+1 -1
drivers/gpu/drm/scheduler/sched_main.c
@@ -750,7 +750,7 @@
 		dma_fence_put(&job->s_fence->finished);
 	} else {
 		/* aborted job before committing to run it */
-		drm_sched_fence_free(&job->s_fence->finished.rcu);
+		drm_sched_fence_free(job->s_fence);
 	}
 
 	job->s_fence = NULL;
+1 -1
include/drm/gpu_scheduler.h
@@ -509,7 +509,7 @@
 				struct drm_sched_entity *s_entity, void *owner);
 void drm_sched_fence_init(struct drm_sched_fence *fence,
 			  struct drm_sched_entity *entity);
-void drm_sched_fence_free(struct rcu_head *rcu);
+void drm_sched_fence_free(struct drm_sched_fence *fence);
 
 void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
 void drm_sched_fence_finished(struct drm_sched_fence *fence);