Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

quota: Detect loops in quota tree

Syzbot has found that when it creates corrupted quota files where the
quota tree contains a loop, we will deadlock when trying to insert a
dquot. Add loop detection into functions traversing the quota tree.

Signed-off-by: Jan Kara <jack@suse.cz>

Jan Kara a898cb62 ccb49011

+105 -38
+96 -32
fs/quota/quota_tree.c
··· 21 21 MODULE_DESCRIPTION("Quota trie support"); 22 22 MODULE_LICENSE("GPL"); 23 23 24 + /* 25 + * Maximum quota tree depth we support. Only to limit recursion when working 26 + * with the tree. 27 + */ 28 + #define MAX_QTREE_DEPTH 6 29 + 24 30 #define __QUOTA_QT_PARANOIA 25 31 26 32 static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth) ··· 333 327 334 328 /* Insert reference to structure into the trie */ 335 329 static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, 336 - uint *treeblk, int depth) 330 + uint *blks, int depth) 337 331 { 338 332 char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL); 339 333 int ret = 0, newson = 0, newact = 0; 340 334 __le32 *ref; 341 335 uint newblk; 336 + int i; 342 337 343 338 if (!buf) 344 339 return -ENOMEM; 345 - if (!*treeblk) { 340 + if (!blks[depth]) { 346 341 ret = get_free_dqblk(info); 347 342 if (ret < 0) 348 343 goto out_buf; 349 - *treeblk = ret; 344 + for (i = 0; i < depth; i++) 345 + if (ret == blks[i]) { 346 + quota_error(dquot->dq_sb, 347 + "Free block already used in tree: block %u", 348 + ret); 349 + ret = -EIO; 350 + goto out_buf; 351 + } 352 + blks[depth] = ret; 350 353 memset(buf, 0, info->dqi_usable_bs); 351 354 newact = 1; 352 355 } else { 353 - ret = read_blk(info, *treeblk, buf); 356 + ret = read_blk(info, blks[depth], buf); 354 357 if (ret < 0) { 355 358 quota_error(dquot->dq_sb, "Can't read tree quota " 356 - "block %u", *treeblk); 359 + "block %u", blks[depth]); 357 360 goto out_buf; 358 361 } 359 362 } ··· 372 357 info->dqi_blocks - 1); 373 358 if (ret) 374 359 goto out_buf; 375 - if (!newblk) 360 + if (!newblk) { 376 361 newson = 1; 362 + } else { 363 + for (i = 0; i <= depth; i++) 364 + if (newblk == blks[i]) { 365 + quota_error(dquot->dq_sb, 366 + "Cycle in quota tree detected: block %u index %u", 367 + blks[depth], 368 + get_index(info, dquot->dq_id, depth)); 369 + ret = -EIO; 370 + goto out_buf; 371 + } 372 + } 373 + blks[depth + 1] = newblk; 377 374 
if (depth == info->dqi_qtree_depth - 1) { 378 375 #ifdef __QUOTA_QT_PARANOIA 379 376 if (newblk) { ··· 397 370 goto out_buf; 398 371 } 399 372 #endif 400 - newblk = find_free_dqentry(info, dquot, &ret); 373 + blks[depth + 1] = find_free_dqentry(info, dquot, &ret); 401 374 } else { 402 - ret = do_insert_tree(info, dquot, &newblk, depth+1); 375 + ret = do_insert_tree(info, dquot, blks, depth + 1); 403 376 } 404 377 if (newson && ret >= 0) { 405 378 ref[get_index(info, dquot->dq_id, depth)] = 406 - cpu_to_le32(newblk); 407 - ret = write_blk(info, *treeblk, buf); 379 + cpu_to_le32(blks[depth + 1]); 380 + ret = write_blk(info, blks[depth], buf); 408 381 } else if (newact && ret < 0) { 409 - put_free_dqblk(info, buf, *treeblk); 382 + put_free_dqblk(info, buf, blks[depth]); 410 383 } 411 384 out_buf: 412 385 kfree(buf); ··· 417 390 static inline int dq_insert_tree(struct qtree_mem_dqinfo *info, 418 391 struct dquot *dquot) 419 392 { 420 - int tmp = QT_TREEOFF; 393 + uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF }; 421 394 422 395 #ifdef __QUOTA_QT_PARANOIA 423 396 if (info->dqi_blocks <= QT_TREEOFF) { ··· 425 398 return -EIO; 426 399 } 427 400 #endif 428 - return do_insert_tree(info, dquot, &tmp, 0); 401 + if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) { 402 + quota_error(dquot->dq_sb, "Quota tree depth too big!"); 403 + return -EIO; 404 + } 405 + return do_insert_tree(info, dquot, blks, 0); 429 406 } 430 407 431 408 /* ··· 542 511 543 512 /* Remove reference to dquot from tree */ 544 513 static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, 545 - uint *blk, int depth) 514 + uint *blks, int depth) 546 515 { 547 516 char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL); 548 517 int ret = 0; 549 518 uint newblk; 550 519 __le32 *ref = (__le32 *)buf; 520 + int i; 551 521 552 522 if (!buf) 553 523 return -ENOMEM; 554 - ret = read_blk(info, *blk, buf); 524 + ret = read_blk(info, blks[depth], buf); 555 525 if (ret < 0) { 556 526 quota_error(dquot->dq_sb, "Can't 
read quota data block %u", 557 - *blk); 527 + blks[depth]); 558 528 goto out_buf; 559 529 } 560 530 newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); ··· 564 532 if (ret) 565 533 goto out_buf; 566 534 535 + for (i = 0; i <= depth; i++) 536 + if (newblk == blks[i]) { 537 + quota_error(dquot->dq_sb, 538 + "Cycle in quota tree detected: block %u index %u", 539 + blks[depth], 540 + get_index(info, dquot->dq_id, depth)); 541 + ret = -EIO; 542 + goto out_buf; 543 + } 567 544 if (depth == info->dqi_qtree_depth - 1) { 568 545 ret = free_dqentry(info, dquot, newblk); 569 - newblk = 0; 546 + blks[depth + 1] = 0; 570 547 } else { 571 - ret = remove_tree(info, dquot, &newblk, depth+1); 548 + blks[depth + 1] = newblk; 549 + ret = remove_tree(info, dquot, blks, depth + 1); 572 550 } 573 - if (ret >= 0 && !newblk) { 574 - int i; 551 + if (ret >= 0 && !blks[depth + 1]) { 575 552 ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0); 576 553 /* Block got empty? */ 577 554 for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++) 578 555 ; 579 556 /* Don't put the root block into the free block list */ 580 557 if (i == (info->dqi_usable_bs >> 2) 581 - && *blk != QT_TREEOFF) { 582 - put_free_dqblk(info, buf, *blk); 583 - *blk = 0; 558 + && blks[depth] != QT_TREEOFF) { 559 + put_free_dqblk(info, buf, blks[depth]); 560 + blks[depth] = 0; 584 561 } else { 585 - ret = write_blk(info, *blk, buf); 562 + ret = write_blk(info, blks[depth], buf); 586 563 if (ret < 0) 587 564 quota_error(dquot->dq_sb, 588 565 "Can't write quota tree block %u", 589 - *blk); 566 + blks[depth]); 590 567 } 591 568 } 592 569 out_buf: ··· 606 565 /* Delete dquot from tree */ 607 566 int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) 608 567 { 609 - uint tmp = QT_TREEOFF; 568 + uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF }; 610 569 611 570 if (!dquot->dq_off) /* Even not allocated? 
*/ 612 571 return 0; 613 - return remove_tree(info, dquot, &tmp, 0); 572 + if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) { 573 + quota_error(dquot->dq_sb, "Quota tree depth too big!"); 574 + return -EIO; 575 + } 576 + return remove_tree(info, dquot, blks, 0); 614 577 } 615 578 EXPORT_SYMBOL(qtree_delete_dquot); 616 579 ··· 658 613 659 614 /* Find entry for given id in the tree */ 660 615 static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info, 661 - struct dquot *dquot, uint blk, int depth) 616 + struct dquot *dquot, uint *blks, int depth) 662 617 { 663 618 char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL); 664 619 loff_t ret = 0; 665 620 __le32 *ref = (__le32 *)buf; 621 + uint blk; 622 + int i; 666 623 667 624 if (!buf) 668 625 return -ENOMEM; 669 - ret = read_blk(info, blk, buf); 626 + ret = read_blk(info, blks[depth], buf); 670 627 if (ret < 0) { 671 628 quota_error(dquot->dq_sb, "Can't read quota tree block %u", 672 - blk); 629 + blks[depth]); 673 630 goto out_buf; 674 631 } 675 632 ret = 0; ··· 683 636 if (ret) 684 637 goto out_buf; 685 638 639 + /* Check for cycles in the tree */ 640 + for (i = 0; i <= depth; i++) 641 + if (blk == blks[i]) { 642 + quota_error(dquot->dq_sb, 643 + "Cycle in quota tree detected: block %u index %u", 644 + blks[depth], 645 + get_index(info, dquot->dq_id, depth)); 646 + ret = -EIO; 647 + goto out_buf; 648 + } 649 + blks[depth + 1] = blk; 686 650 if (depth < info->dqi_qtree_depth - 1) 687 - ret = find_tree_dqentry(info, dquot, blk, depth+1); 651 + ret = find_tree_dqentry(info, dquot, blks, depth + 1); 688 652 else 689 653 ret = find_block_dqentry(info, dquot, blk); 690 654 out_buf: ··· 707 649 static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info, 708 650 struct dquot *dquot) 709 651 { 710 - return find_tree_dqentry(info, dquot, QT_TREEOFF, 0); 652 + uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF }; 653 + 654 + if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) { 655 + quota_error(dquot->dq_sb, "Quota tree depth too big!"); 
656 + return -EIO; 657 + } 658 + return find_tree_dqentry(info, dquot, blks, 0); 711 659 } 712 660 713 661 int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
+9 -6
fs/quota/quota_v2.c
··· 168 168 i_size_read(sb_dqopt(sb)->files[type])); 169 169 goto out_free; 170 170 } 171 - if (qinfo->dqi_free_blk >= qinfo->dqi_blocks) { 172 - quota_error(sb, "Free block number too big (%u >= %u).", 173 - qinfo->dqi_free_blk, qinfo->dqi_blocks); 171 + if (qinfo->dqi_free_blk && (qinfo->dqi_free_blk <= QT_TREEOFF || 172 + qinfo->dqi_free_blk >= qinfo->dqi_blocks)) { 173 + quota_error(sb, "Free block number %u out of range (%u, %u).", 174 + qinfo->dqi_free_blk, QT_TREEOFF, qinfo->dqi_blocks); 174 175 goto out_free; 175 176 } 176 - if (qinfo->dqi_free_entry >= qinfo->dqi_blocks) { 177 - quota_error(sb, "Block with free entry too big (%u >= %u).", 178 - qinfo->dqi_free_entry, qinfo->dqi_blocks); 177 + if (qinfo->dqi_free_entry && (qinfo->dqi_free_entry <= QT_TREEOFF || 178 + qinfo->dqi_free_entry >= qinfo->dqi_blocks)) { 179 + quota_error(sb, "Block with free entry %u out of range (%u, %u).", 180 + qinfo->dqi_free_entry, QT_TREEOFF, 181 + qinfo->dqi_blocks); 179 182 goto out_free; 180 183 } 181 184 ret = 0;