Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/panthor: Prevent potential UAF in group creation

This commit prevents a possible use-after-free in the GROUP_CREATE ioctl
function, which arose because a pointer to the group is still accessed in
that ioctl function after the group has been stored in the XArray.
A malicious userspace can guess the handle of a group and call the
GROUP_DESTROY ioctl from another thread at roughly the same time as the
GROUP_CREATE ioctl.

To prevent the use-after-free exploit, this commit sets a mark on the
group pool XArray entry just before returning from the GROUP_CREATE
ioctl function. The mark is checked by all ioctls that take a group
handle, so userspace won't be able to destroy a group that hasn't been
marked yet.

v2: Add R-bs and fixes tags

Fixes: de85488138247 ("drm/panthor: Add the scheduler logical block")
Co-developed-by: Boris Brezillon <boris.brezillon@collabora.com>
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Signed-off-by: Akash Goel <akash.goel@arm.com>
Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
Link: https://patch.msgid.link/20251127164912.3788155-1-akash.goel@arm.com

authored by

Akash Goel and committed by
Boris Brezillon
eec7e23d 31d3354f

+15 -4
+15 -4
drivers/gpu/drm/panthor/panthor_sched.c
··· 776 776 */ 777 777 #define MAX_GROUPS_PER_POOL 128 778 778 779 + /* 780 + * Mark added on an entry of group pool Xarray to identify if the group has 781 + * been fully initialized and can be accessed elsewhere in the driver code. 782 + */ 783 + #define GROUP_REGISTERED XA_MARK_1 784 + 779 785 /** 780 786 * struct panthor_group_pool - Group pool 781 787 * ··· 2912 2906 return; 2913 2907 2914 2908 xa_lock(&gpool->xa); 2915 - xa_for_each(&gpool->xa, i, group) { 2909 + xa_for_each_marked(&gpool->xa, i, group, GROUP_REGISTERED) { 2916 2910 guard(spinlock)(&group->fdinfo.lock); 2917 2911 pfile->stats.cycles += group->fdinfo.data.cycles; 2918 2912 pfile->stats.time += group->fdinfo.data.time; ··· 3597 3591 3598 3592 group_init_task_info(group); 3599 3593 3594 + xa_set_mark(&gpool->xa, gid, GROUP_REGISTERED); 3595 + 3600 3596 return gid; 3601 3597 3602 3598 err_erase_gid: ··· 3615 3607 struct panthor_device *ptdev = pfile->ptdev; 3616 3608 struct panthor_scheduler *sched = ptdev->scheduler; 3617 3609 struct panthor_group *group; 3610 + 3611 + if (!xa_get_mark(&gpool->xa, group_handle, GROUP_REGISTERED)) 3612 + return -EINVAL; 3618 3613 3619 3614 group = xa_erase(&gpool->xa, group_handle); 3620 3615 if (!group) ··· 3644 3633 } 3645 3634 3646 3635 static struct panthor_group *group_from_handle(struct panthor_group_pool *pool, 3647 - u32 group_handle) 3636 + unsigned long group_handle) 3648 3637 { 3649 3638 struct panthor_group *group; 3650 3639 3651 3640 xa_lock(&pool->xa); 3652 - group = group_get(xa_load(&pool->xa, group_handle)); 3641 + group = group_get(xa_find(&pool->xa, &group_handle, group_handle, GROUP_REGISTERED)); 3653 3642 xa_unlock(&pool->xa); 3654 3643 3655 3644 return group; ··· 3736 3725 return; 3737 3726 3738 3727 xa_lock(&gpool->xa); 3739 - xa_for_each(&gpool->xa, i, group) { 3728 + xa_for_each_marked(&gpool->xa, i, group, GROUP_REGISTERED) { 3740 3729 stats->resident += group->fdinfo.kbo_sizes; 3741 3730 if (group->csg_id >= 0) 3742 3731 stats->active 
+= group->fdinfo.kbo_sizes;