Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block/loop: Use global lock for ioctl() operation.

syzbot is reporting a NULL pointer dereference [1] which is caused by
a race condition between ioctl(loop_fd, LOOP_CLR_FD, 0) and
ioctl(other_loop_fd, LOOP_SET_FD, loop_fd), due to traversing other
loop devices in loop_validate_file() without holding the corresponding
lo->lo_ctl_mutex locks.

Since an ioctl() request on a loop device is not a frequent operation,
we don't need fine-grained locking. Let's use a global lock in order to
allow safe traversal in loop_validate_file().

Note that syzbot is also reporting a circular locking dependency between
bdev->bd_mutex and lo->lo_ctl_mutex [2], which is caused by calling
blkdev_reread_part() with the lock held. This patch does not address it.

[1] https://syzkaller.appspot.com/bug?id=f3cfe26e785d85f9ee259f385515291d21bd80a3
[2] https://syzkaller.appspot.com/bug?id=bf154052f0eea4bc7712499e4569505907d15889

Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Reported-by: syzbot <syzbot+bf89c128e05dd6c62523@syzkaller.appspotmail.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Tetsuo Handa and committed by Jens Axboe.
310ca162 b1ab5fa3

+29 -30
+29 -29
drivers/block/loop.c
··· 84 84 85 85 static DEFINE_IDR(loop_index_idr); 86 86 static DEFINE_MUTEX(loop_index_mutex); 87 + static DEFINE_MUTEX(loop_ctl_mutex); 87 88 88 89 static int max_part; 89 90 static int part_shift; ··· 1047 1046 */ 1048 1047 if (atomic_read(&lo->lo_refcnt) > 1) { 1049 1048 lo->lo_flags |= LO_FLAGS_AUTOCLEAR; 1050 - mutex_unlock(&lo->lo_ctl_mutex); 1049 + mutex_unlock(&loop_ctl_mutex); 1051 1050 return 0; 1052 1051 } 1053 1052 ··· 1100 1099 if (!part_shift) 1101 1100 lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN; 1102 1101 loop_unprepare_queue(lo); 1103 - mutex_unlock(&lo->lo_ctl_mutex); 1102 + mutex_unlock(&loop_ctl_mutex); 1104 1103 /* 1105 - * Need not hold lo_ctl_mutex to fput backing file. 1106 - * Calling fput holding lo_ctl_mutex triggers a circular 1104 + * Need not hold loop_ctl_mutex to fput backing file. 1105 + * Calling fput holding loop_ctl_mutex triggers a circular 1107 1106 * lock dependency possibility warning as fput can take 1108 - * bd_mutex which is usually taken before lo_ctl_mutex. 1107 + * bd_mutex which is usually taken before loop_ctl_mutex. 1109 1108 */ 1110 1109 fput(filp); 1111 1110 return 0; ··· 1210 1209 int ret; 1211 1210 1212 1211 if (lo->lo_state != Lo_bound) { 1213 - mutex_unlock(&lo->lo_ctl_mutex); 1212 + mutex_unlock(&loop_ctl_mutex); 1214 1213 return -ENXIO; 1215 1214 } 1216 1215 ··· 1229 1228 lo->lo_encrypt_key_size); 1230 1229 } 1231 1230 1232 - /* Drop lo_ctl_mutex while we call into the filesystem. */ 1231 + /* Drop loop_ctl_mutex while we call into the filesystem. 
*/ 1233 1232 path = lo->lo_backing_file->f_path; 1234 1233 path_get(&path); 1235 - mutex_unlock(&lo->lo_ctl_mutex); 1234 + mutex_unlock(&loop_ctl_mutex); 1236 1235 ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT); 1237 1236 if (!ret) { 1238 1237 info->lo_device = huge_encode_dev(stat.dev); ··· 1324 1323 int err; 1325 1324 1326 1325 if (!arg) { 1327 - mutex_unlock(&lo->lo_ctl_mutex); 1326 + mutex_unlock(&loop_ctl_mutex); 1328 1327 return -EINVAL; 1329 1328 } 1330 1329 err = loop_get_status(lo, &info64); ··· 1342 1341 int err; 1343 1342 1344 1343 if (!arg) { 1345 - mutex_unlock(&lo->lo_ctl_mutex); 1344 + mutex_unlock(&loop_ctl_mutex); 1346 1345 return -EINVAL; 1347 1346 } 1348 1347 err = loop_get_status(lo, &info64); ··· 1400 1399 struct loop_device *lo = bdev->bd_disk->private_data; 1401 1400 int err; 1402 1401 1403 - err = mutex_lock_killable_nested(&lo->lo_ctl_mutex, 1); 1402 + err = mutex_lock_killable_nested(&loop_ctl_mutex, 1); 1404 1403 if (err) 1405 1404 goto out_unlocked; 1406 1405 ··· 1412 1411 err = loop_change_fd(lo, bdev, arg); 1413 1412 break; 1414 1413 case LOOP_CLR_FD: 1415 - /* loop_clr_fd would have unlocked lo_ctl_mutex on success */ 1414 + /* loop_clr_fd would have unlocked loop_ctl_mutex on success */ 1416 1415 err = loop_clr_fd(lo); 1417 1416 if (!err) 1418 1417 goto out_unlocked; ··· 1425 1424 break; 1426 1425 case LOOP_GET_STATUS: 1427 1426 err = loop_get_status_old(lo, (struct loop_info __user *) arg); 1428 - /* loop_get_status() unlocks lo_ctl_mutex */ 1427 + /* loop_get_status() unlocks loop_ctl_mutex */ 1429 1428 goto out_unlocked; 1430 1429 case LOOP_SET_STATUS64: 1431 1430 err = -EPERM; ··· 1435 1434 break; 1436 1435 case LOOP_GET_STATUS64: 1437 1436 err = loop_get_status64(lo, (struct loop_info64 __user *) arg); 1438 - /* loop_get_status() unlocks lo_ctl_mutex */ 1437 + /* loop_get_status() unlocks loop_ctl_mutex */ 1439 1438 goto out_unlocked; 1440 1439 case LOOP_SET_CAPACITY: 1441 1440 err = -EPERM; ··· 1455 1454 
default: 1456 1455 err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL; 1457 1456 } 1458 - mutex_unlock(&lo->lo_ctl_mutex); 1457 + mutex_unlock(&loop_ctl_mutex); 1459 1458 1460 1459 out_unlocked: 1461 1460 return err; ··· 1572 1571 int err; 1573 1572 1574 1573 if (!arg) { 1575 - mutex_unlock(&lo->lo_ctl_mutex); 1574 + mutex_unlock(&loop_ctl_mutex); 1576 1575 return -EINVAL; 1577 1576 } 1578 1577 err = loop_get_status(lo, &info64); ··· 1589 1588 1590 1589 switch(cmd) { 1591 1590 case LOOP_SET_STATUS: 1592 - err = mutex_lock_killable(&lo->lo_ctl_mutex); 1591 + err = mutex_lock_killable(&loop_ctl_mutex); 1593 1592 if (!err) { 1594 1593 err = loop_set_status_compat(lo, 1595 1594 (const struct compat_loop_info __user *)arg); 1596 - mutex_unlock(&lo->lo_ctl_mutex); 1595 + mutex_unlock(&loop_ctl_mutex); 1597 1596 } 1598 1597 break; 1599 1598 case LOOP_GET_STATUS: 1600 - err = mutex_lock_killable(&lo->lo_ctl_mutex); 1599 + err = mutex_lock_killable(&loop_ctl_mutex); 1601 1600 if (!err) { 1602 1601 err = loop_get_status_compat(lo, 1603 1602 (struct compat_loop_info __user *)arg); 1604 - /* loop_get_status() unlocks lo_ctl_mutex */ 1603 + /* loop_get_status() unlocks loop_ctl_mutex */ 1605 1604 } 1606 1605 break; 1607 1606 case LOOP_SET_CAPACITY: ··· 1648 1647 if (atomic_dec_return(&lo->lo_refcnt)) 1649 1648 return; 1650 1649 1651 - mutex_lock(&lo->lo_ctl_mutex); 1650 + mutex_lock(&loop_ctl_mutex); 1652 1651 if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) { 1653 1652 /* 1654 1653 * In autoclear mode, stop the loop thread ··· 1666 1665 blk_mq_unfreeze_queue(lo->lo_queue); 1667 1666 } 1668 1667 1669 - mutex_unlock(&lo->lo_ctl_mutex); 1668 + mutex_unlock(&loop_ctl_mutex); 1670 1669 } 1671 1670 1672 1671 static void lo_release(struct gendisk *disk, fmode_t mode) ··· 1712 1711 struct loop_device *lo = ptr; 1713 1712 struct loop_func_table *xfer = data; 1714 1713 1715 - mutex_lock(&lo->lo_ctl_mutex); 1714 + mutex_lock(&loop_ctl_mutex); 1716 1715 if (lo->lo_encryption == xfer) 1717 1716 
loop_release_xfer(lo); 1718 - mutex_unlock(&lo->lo_ctl_mutex); 1717 + mutex_unlock(&loop_ctl_mutex); 1719 1718 return 0; 1720 1719 } 1721 1720 ··· 1896 1895 if (!part_shift) 1897 1896 disk->flags |= GENHD_FL_NO_PART_SCAN; 1898 1897 disk->flags |= GENHD_FL_EXT_DEVT; 1899 - mutex_init(&lo->lo_ctl_mutex); 1900 1898 atomic_set(&lo->lo_refcnt, 0); 1901 1899 lo->lo_number = i; 1902 1900 spin_lock_init(&lo->lo_lock); ··· 2008 2008 ret = loop_lookup(&lo, parm); 2009 2009 if (ret < 0) 2010 2010 break; 2011 - ret = mutex_lock_killable(&lo->lo_ctl_mutex); 2011 + ret = mutex_lock_killable(&loop_ctl_mutex); 2012 2012 if (ret) 2013 2013 break; 2014 2014 if (lo->lo_state != Lo_unbound) { 2015 2015 ret = -EBUSY; 2016 - mutex_unlock(&lo->lo_ctl_mutex); 2016 + mutex_unlock(&loop_ctl_mutex); 2017 2017 break; 2018 2018 } 2019 2019 if (atomic_read(&lo->lo_refcnt) > 0) { 2020 2020 ret = -EBUSY; 2021 - mutex_unlock(&lo->lo_ctl_mutex); 2021 + mutex_unlock(&loop_ctl_mutex); 2022 2022 break; 2023 2023 } 2024 2024 lo->lo_disk->private_data = NULL; 2025 - mutex_unlock(&lo->lo_ctl_mutex); 2025 + mutex_unlock(&loop_ctl_mutex); 2026 2026 idr_remove(&loop_index_idr, lo->lo_number); 2027 2027 loop_remove(lo); 2028 2028 break;
-1
drivers/block/loop.h
··· 54 54 55 55 spinlock_t lo_lock; 56 56 int lo_state; 57 - struct mutex lo_ctl_mutex; 58 57 struct kthread_worker worker; 59 58 struct task_struct *worker_task; 60 59 bool use_dio;