Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'fs-for_v6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs

Pull ext2, udf, reiserfs, and quota updates from Jan Kara:

- Fix for udf to make splicing work again

- More disk format sanity checks for ext2 to avoid crashes found by
syzbot

- More quota disk format checks to avoid crashes found by fuzzing

- Reiserfs & isofs cleanups

* tag 'fs-for_v6.1-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
quota: Add more checking after reading from quota file
quota: Replace all block number checking with helper function
quota: Check next/prev free block number after reading from quota file
ext2: Use kvmalloc() for group descriptor array
ext2: Add sanity checks for group and filesystem size
udf: Support splicing to file
isofs: delete unnecessary checks before brelse()
fs/reiserfs: replace ternary operator with min() and min_t()

+86 -30
+17 -5
fs/ext2/super.c
··· 163 163 db_count = sbi->s_gdb_count; 164 164 for (i = 0; i < db_count; i++) 165 165 brelse(sbi->s_group_desc[i]); 166 - kfree(sbi->s_group_desc); 166 + kvfree(sbi->s_group_desc); 167 167 kfree(sbi->s_debts); 168 168 percpu_counter_destroy(&sbi->s_freeblocks_counter); 169 169 percpu_counter_destroy(&sbi->s_freeinodes_counter); ··· 1052 1052 sbi->s_blocks_per_group); 1053 1053 goto failed_mount; 1054 1054 } 1055 + /* At least inode table, bitmaps, and sb have to fit in one group */ 1056 + if (sbi->s_blocks_per_group <= sbi->s_itb_per_group + 3) { 1057 + ext2_msg(sb, KERN_ERR, 1058 + "error: #blocks per group smaller than metadata size: %lu <= %lu", 1059 + sbi->s_blocks_per_group, sbi->s_inodes_per_group + 3); 1060 + goto failed_mount; 1061 + } 1055 1062 if (sbi->s_frags_per_group > sb->s_blocksize * 8) { 1056 1063 ext2_msg(sb, KERN_ERR, 1057 1064 "error: #fragments per group too big: %lu", ··· 1072 1065 sbi->s_inodes_per_group); 1073 1066 goto failed_mount; 1074 1067 } 1068 + if (sb_bdev_nr_blocks(sb) < le32_to_cpu(es->s_blocks_count)) { 1069 + ext2_msg(sb, KERN_ERR, 1070 + "bad geometry: block count %u exceeds size of device (%u blocks)", 1071 + le32_to_cpu(es->s_blocks_count), 1072 + (unsigned)sb_bdev_nr_blocks(sb)); 1073 + goto failed_mount; 1074 + } 1075 1075 1076 - if (EXT2_BLOCKS_PER_GROUP(sb) == 0) 1077 - goto cantfind_ext2; 1078 1076 sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) - 1079 1077 le32_to_cpu(es->s_first_data_block) - 1) 1080 1078 / EXT2_BLOCKS_PER_GROUP(sb)) + 1; ··· 1092 1080 } 1093 1081 db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) / 1094 1082 EXT2_DESC_PER_BLOCK(sb); 1095 - sbi->s_group_desc = kmalloc_array(db_count, 1083 + sbi->s_group_desc = kvmalloc_array(db_count, 1096 1084 sizeof(struct buffer_head *), 1097 1085 GFP_KERNEL); 1098 1086 if (sbi->s_group_desc == NULL) { ··· 1218 1206 for (i = 0; i < db_count; i++) 1219 1207 brelse(sbi->s_group_desc[i]); 1220 1208 failed_mount_group_desc: 1221 - kfree(sbi->s_group_desc); 1209 + kvfree(sbi->s_group_desc); 1222 1210 kfree(sbi->s_debts); 1223 1211 failed_mount: 1224 1212 brelse(bh);
+3 -6
fs/isofs/inode.c
··· 1277 1277 } while (more_entries); 1278 1278 out: 1279 1279 kfree(tmpde); 1280 - if (bh) 1281 - brelse(bh); 1280 + brelse(bh); 1282 1281 return 0; 1283 1282 1284 1283 out_nomem: 1285 - if (bh) 1286 - brelse(bh); 1284 + brelse(bh); 1287 1285 return -ENOMEM; 1288 1286 1289 1287 out_noread: ··· 1484 1486 ret = 0; 1485 1487 out: 1486 1488 kfree(tmpde); 1487 - if (bh) 1488 - brelse(bh); 1489 + brelse(bh); 1489 1490 return ret; 1490 1491 1491 1492 out_badread:
+61 -12
fs/quota/quota_tree.c
··· 71 71 return ret; 72 72 } 73 73 74 + static inline int do_check_range(struct super_block *sb, const char *val_name, 75 + uint val, uint min_val, uint max_val) 76 + { 77 + if (val < min_val || val > max_val) { 78 + quota_error(sb, "Getting %s %u out of range %u-%u", 79 + val_name, val, min_val, max_val); 80 + return -EUCLEAN; 81 + } 82 + 83 + return 0; 84 + } 85 + 86 + static int check_dquot_block_header(struct qtree_mem_dqinfo *info, 87 + struct qt_disk_dqdbheader *dh) 88 + { 89 + int err = 0; 90 + 91 + err = do_check_range(info->dqi_sb, "dqdh_next_free", 92 + le32_to_cpu(dh->dqdh_next_free), 0, 93 + info->dqi_blocks - 1); 94 + if (err) 95 + return err; 96 + err = do_check_range(info->dqi_sb, "dqdh_prev_free", 97 + le32_to_cpu(dh->dqdh_prev_free), 0, 98 + info->dqi_blocks - 1); 99 + if (err) 100 + return err; 101 + err = do_check_range(info->dqi_sb, "dqdh_entries", 102 + le16_to_cpu(dh->dqdh_entries), 0, 103 + qtree_dqstr_in_blk(info)); 104 + 105 + return err; 106 + } 107 + 74 108 /* Remove empty block from list and return it */ 75 109 static int get_free_dqblk(struct qtree_mem_dqinfo *info) 76 110 { ··· 118 84 blk = info->dqi_free_blk; 119 85 ret = read_blk(info, blk, buf); 120 86 if (ret < 0) 87 + goto out_buf; 88 + ret = check_dquot_block_header(info, dh); 89 + if (ret) 121 90 goto out_buf; 122 91 info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free); 123 92 } ··· 269 232 *err = read_blk(info, blk, buf); 270 233 if (*err < 0) 271 234 goto out_buf; 235 + *err = check_dquot_block_header(info, dh); 236 + if (*err) 237 + goto out_buf; 272 238 } else { 273 239 blk = get_free_dqblk(info); 274 240 if ((int)blk < 0) { ··· 353 313 } 354 314 ref = (__le32 *)buf; 355 315 newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); 316 + ret = do_check_range(dquot->dq_sb, "block", newblk, 0, 317 + info->dqi_blocks - 1); 318 + if (ret) 319 + goto out_buf; 356 320 if (!newblk) 357 321 newson = 1; 358 322 if (depth == info->dqi_qtree_depth - 1) { ··· 468 424 goto out_buf; 469 425 } 470 426 dh = (struct qt_disk_dqdbheader *)buf; 427 + ret = check_dquot_block_header(info, dh); 428 + if (ret) 429 + goto out_buf; 471 430 le16_add_cpu(&dh->dqdh_entries, -1); 472 431 if (!le16_to_cpu(dh->dqdh_entries)) { /* Block got free? */ 473 432 ret = remove_free_dqentry(info, buf, blk); ··· 527 480 goto out_buf; 528 481 } 529 482 newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); 530 - if (newblk < QT_TREEOFF || newblk >= info->dqi_blocks) { 531 - quota_error(dquot->dq_sb, "Getting block too big (%u >= %u)", 532 - newblk, info->dqi_blocks); 533 - ret = -EUCLEAN; 483 + ret = do_check_range(dquot->dq_sb, "block", newblk, QT_TREEOFF, 484 + info->dqi_blocks - 1); 485 + if (ret) 534 486 goto out_buf; 535 - } 536 487 537 488 if (depth == info->dqi_qtree_depth - 1) { 538 489 ret = free_dqentry(info, dquot, newblk); ··· 631 586 blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); 632 587 if (!blk) /* No reference? */ 633 588 goto out_buf; 634 - if (blk < QT_TREEOFF || blk >= info->dqi_blocks) { 635 - quota_error(dquot->dq_sb, "Getting block too big (%u >= %u)", 636 - blk, info->dqi_blocks); 637 - ret = -EUCLEAN; 589 + ret = do_check_range(dquot->dq_sb, "block", blk, QT_TREEOFF, 590 + info->dqi_blocks - 1); 591 + if (ret) 638 592 goto out_buf; 639 - } 640 593 641 594 if (depth < info->dqi_qtree_depth - 1) 642 595 ret = find_tree_dqentry(info, dquot, blk, depth+1); ··· 748 705 goto out_buf; 749 706 } 750 707 for (i = __get_index(info, *id, depth); i < epb; i++) { 751 - if (ref[i] == cpu_to_le32(0)) { 708 + uint blk_no = le32_to_cpu(ref[i]); 709 + 710 + if (blk_no == 0) { 752 711 *id += level_inc; 753 712 continue; 754 713 } 714 + ret = do_check_range(info->dqi_sb, "block", blk_no, 0, 715 + info->dqi_blocks - 1); 716 + if (ret) 717 + goto out_buf; 755 718 if (depth == info->dqi_qtree_depth - 1) { 756 719 ret = 0; 757 720 goto out_buf; 758 721 } 759 - ret = find_next_id(info, id, le32_to_cpu(ref[i]), depth + 1); 722 + ret = find_next_id(info, id, blk_no, depth + 1); 760 723 if (ret != -ENOENT) 761 724 break; 762 725 }
+1 -1
fs/reiserfs/prints.c
··· 456 456 to = B_NR_ITEMS(bh); 457 457 } else { 458 458 from = first; 459 - to = last < B_NR_ITEMS(bh) ? last : B_NR_ITEMS(bh); 459 + to = min_t(int, last, B_NR_ITEMS(bh)); 460 460 } 461 461 462 462 reiserfs_printk("INTERNAL NODE (%ld) contains %z\n", bh->b_blocknr, bh);
+1 -1
fs/reiserfs/resize.c
··· 97 97 * using the copy_size var below allows this code to work for 98 98 * both shrinking and expanding the FS. 99 99 */ 100 - copy_size = bmap_nr_new < bmap_nr ? bmap_nr_new : bmap_nr; 100 + copy_size = min(bmap_nr_new, bmap_nr); 101 101 copy_size = 102 102 copy_size * sizeof(struct reiserfs_list_bitmap_node *); 103 103 for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
+2 -5
fs/reiserfs/super.c
··· 2504 2504 len = i_size - off; 2505 2505 toread = len; 2506 2506 while (toread > 0) { 2507 - tocopy = 2508 - sb->s_blocksize - offset < 2509 - toread ? sb->s_blocksize - offset : toread; 2507 + tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread); 2510 2508 tmp_bh.b_state = 0; 2511 2509 /* 2512 2510 * Quota files are without tails so we can safely ··· 2552 2554 return -EIO; 2553 2555 } 2554 2556 while (towrite > 0) { 2555 - tocopy = sb->s_blocksize - offset < towrite ? 2556 - sb->s_blocksize - offset : towrite; 2557 + tocopy = min_t(unsigned long, sb->s_blocksize - offset, towrite); 2557 2558 tmp_bh.b_state = 0; 2558 2559 reiserfs_write_lock(sb); 2559 2560 err = reiserfs_get_block(inode, blk, &tmp_bh, GET_BLOCK_CREATE);
+1
fs/udf/file.c
··· 252 252 .release = udf_release_file, 253 253 .fsync = generic_file_fsync, 254 254 .splice_read = generic_file_splice_read, 255 + .splice_write = iter_file_splice_write, 255 256 .llseek = generic_file_llseek, 256 257 }; 257 258