Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'fs_for_v6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs

Pull ext2, isofs, udf, and quota updates from Jan Kara:
"A lot of material this time:

- removal of a lot of GFP_NOFS usage from ext2, udf, and quota (it was
either a legacy leftover or was replaced with the scoped memalloc_nofs_*() API)

- removal of BUG_ONs in quota code

- conversion of UDF to the new mount API

- tightening of quota on-disk format verification

- fixes for some potentially unsafe uses of RCU pointers in quota code,
with everything properly annotated to make sparse happy

- a few other small quota, ext2, udf, and isofs fixes"

* tag 'fs_for_v6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs: (26 commits)
udf: remove SLAB_MEM_SPREAD flag usage
quota: remove SLAB_MEM_SPREAD flag usage
isofs: remove SLAB_MEM_SPREAD flag usage
ext2: remove SLAB_MEM_SPREAD flag usage
ext2: mark as deprecated
udf: convert to new mount API
udf: convert novrs to an option flag
MAINTAINERS: add missing git address for ext2 entry
quota: Detect loops in quota tree
quota: Properly annotate i_dquot arrays with __rcu
quota: Fix rcu annotations of inode dquot pointers
isofs: handle CDs with bad root inode but good Joliet root directory
udf: Avoid invalid LVID used on mount
quota: Fix potential NULL pointer dereference
quota: Drop GFP_NOFS instances under dquot->dq_lock and dqio_sem
quota: Set nofs allocation context when acquiring dqio_sem
ext2: Remove GFP_NOFS use in ext2_xattr_cache_insert()
ext2: Drop GFP_NOFS use in ext2_get_blocks()
ext2: Drop GFP_NOFS allocation from ext2_init_block_alloc_info()
udf: Remove GFP_NOFS allocation in udf_expand_file_adinicb()
...

+607 -422
+1
MAINTAINERS
··· 8021 8021 L: linux-ext4@vger.kernel.org 8022 8022 S: Maintained 8023 8023 F: Documentation/filesystems/ext2.rst 8024 + T: git git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs.git 8024 8025 F: fs/ext2/ 8025 8026 F: include/linux/ext2* 8026 8027
+11 -4
fs/ext2/Kconfig
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 config EXT2_FS 3 - tristate "Second extended fs support" 3 + tristate "Second extended fs support (DEPRECATED)" 4 4 select BUFFER_HEAD 5 5 select FS_IOMAP 6 6 select LEGACY_DIRECT_IO 7 7 help 8 8 Ext2 is a standard Linux file system for hard disks. 9 9 10 - To compile this file system support as a module, choose M here: the 11 - module will be called ext2. 10 + This filesystem driver is deprecated because it does not properly 11 + support inode time stamps beyond 03:14:07 UTC on 19 January 2038. 12 12 13 - If unsure, say Y. 13 + Ext2 users are advised to use ext4 driver to access their filesystem. 14 + The driver is fully compatible, supports filesystems without journal 15 + or extents, and also supports larger time stamps if the filesystem 16 + is created with at least 256 byte inodes. 17 + 18 + This code is kept as a simple reference for filesystem developers. 19 + 20 + If unsure, say N. 14 21 15 22 config EXT2_FS_XATTR 16 23 bool "Ext2 extended attributes"
+1 -1
fs/ext2/balloc.c
··· 412 412 struct ext2_block_alloc_info *block_i; 413 413 struct super_block *sb = inode->i_sb; 414 414 415 - block_i = kmalloc(sizeof(*block_i), GFP_NOFS); 415 + block_i = kmalloc(sizeof(*block_i), GFP_KERNEL); 416 416 if (block_i) { 417 417 struct ext2_reserve_window_node *rsv = &block_i->rsv_window_node; 418 418
+1 -1
fs/ext2/ext2.h
··· 674 674 struct inode vfs_inode; 675 675 struct list_head i_orphan; /* unlinked but open inodes */ 676 676 #ifdef CONFIG_QUOTA 677 - struct dquot *i_dquot[MAXQUOTAS]; 677 + struct dquot __rcu *i_dquot[MAXQUOTAS]; 678 678 #endif 679 679 }; 680 680
+1 -1
fs/ext2/inode.c
··· 754 754 */ 755 755 err = sb_issue_zeroout(inode->i_sb, 756 756 le32_to_cpu(chain[depth-1].key), count, 757 - GFP_NOFS); 757 + GFP_KERNEL); 758 758 if (err) { 759 759 mutex_unlock(&ei->truncate_mutex); 760 760 goto cleanup;
+1 -1
fs/ext2/super.c
··· 319 319 static ssize_t ext2_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off); 320 320 static int ext2_quota_on(struct super_block *sb, int type, int format_id, 321 321 const struct path *path); 322 - static struct dquot **ext2_get_dquots(struct inode *inode) 322 + static struct dquot __rcu **ext2_get_dquots(struct inode *inode) 323 323 { 324 324 return EXT2_I(inode)->i_dquot; 325 325 }
+1 -1
fs/ext2/xattr.c
··· 874 874 __u32 hash = le32_to_cpu(HDR(bh)->h_hash); 875 875 int error; 876 876 877 - error = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr, 877 + error = mb_cache_entry_create(cache, GFP_KERNEL, hash, bh->b_blocknr, 878 878 true); 879 879 if (error) { 880 880 if (error == -EBUSY) {
+1 -1
fs/ext4/ext4.h
··· 1158 1158 tid_t i_datasync_tid; 1159 1159 1160 1160 #ifdef CONFIG_QUOTA 1161 - struct dquot *i_dquot[MAXQUOTAS]; 1161 + struct dquot __rcu *i_dquot[MAXQUOTAS]; 1162 1162 #endif 1163 1163 1164 1164 /* Precomputed uuid+inum+igen checksum for seeding inode checksums */
+1 -1
fs/ext4/super.c
··· 1599 1599 static int ext4_quota_enable(struct super_block *sb, int type, int format_id, 1600 1600 unsigned int flags); 1601 1601 1602 - static struct dquot **ext4_get_dquots(struct inode *inode) 1602 + static struct dquot __rcu **ext4_get_dquots(struct inode *inode) 1603 1603 { 1604 1604 return EXT4_I(inode)->i_dquot; 1605 1605 }
+1 -1
fs/f2fs/f2fs.h
··· 830 830 spinlock_t i_size_lock; /* protect last_disk_size */ 831 831 832 832 #ifdef CONFIG_QUOTA 833 - struct dquot *i_dquot[MAXQUOTAS]; 833 + struct dquot __rcu *i_dquot[MAXQUOTAS]; 834 834 835 835 /* quota space reservation, managed internally by quota code */ 836 836 qsize_t i_reserved_quota;
+1 -1
fs/f2fs/super.c
··· 2768 2768 return dquot_initialize(inode); 2769 2769 } 2770 2770 2771 - static struct dquot **f2fs_get_dquots(struct inode *inode) 2771 + static struct dquot __rcu **f2fs_get_dquots(struct inode *inode) 2772 2772 { 2773 2773 return F2FS_I(inode)->i_dquot; 2774 2774 }
+16 -2
fs/isofs/inode.c
··· 908 908 * we then decide whether to use the Joliet descriptor. 909 909 */ 910 910 inode = isofs_iget(s, sbi->s_firstdatazone, 0); 911 - if (IS_ERR(inode)) 912 - goto out_no_root; 911 + 912 + /* 913 + * Fix for broken CDs with a corrupt root inode but a correct Joliet 914 + * root directory. 915 + */ 916 + if (IS_ERR(inode)) { 917 + if (joliet_level && sbi->s_firstdatazone != first_data_zone) { 918 + printk(KERN_NOTICE 919 + "ISOFS: root inode is unusable. " 920 + "Disabling Rock Ridge and switching to Joliet."); 921 + sbi->s_rock = 0; 922 + inode = NULL; 923 + } else { 924 + goto out_no_root; 925 + } 926 + } 913 927 914 928 /* 915 929 * Fix for broken CDs with Rock Ridge and empty ISO root directory but
+1 -1
fs/jfs/jfs_incore.h
··· 92 92 } link; 93 93 } u; 94 94 #ifdef CONFIG_QUOTA 95 - struct dquot *i_dquot[MAXQUOTAS]; 95 + struct dquot __rcu *i_dquot[MAXQUOTAS]; 96 96 #endif 97 97 u32 dev; /* will die when we get wide dev_t */ 98 98 struct inode vfs_inode;
+1 -1
fs/jfs/super.c
··· 824 824 return len - towrite; 825 825 } 826 826 827 - static struct dquot **jfs_get_dquots(struct inode *inode) 827 + static struct dquot __rcu **jfs_get_dquots(struct inode *inode) 828 828 { 829 829 return JFS_IP(inode)->i_dquot; 830 830 }
+1 -1
fs/ocfs2/inode.h
··· 65 65 tid_t i_sync_tid; 66 66 tid_t i_datasync_tid; 67 67 68 - struct dquot *i_dquot[MAXQUOTAS]; 68 + struct dquot __rcu *i_dquot[MAXQUOTAS]; 69 69 }; 70 70 71 71 /*
+12
fs/ocfs2/quota_global.c
··· 447 447 int err; 448 448 struct quota_info *dqopt = sb_dqopt(sb); 449 449 struct ocfs2_mem_dqinfo *info = dqopt->info[type].dqi_priv; 450 + unsigned int memalloc; 450 451 451 452 down_write(&dqopt->dqio_sem); 453 + memalloc = memalloc_nofs_save(); 452 454 err = ocfs2_qinfo_lock(info, 1); 453 455 if (err < 0) 454 456 goto out_sem; 455 457 err = __ocfs2_global_write_info(sb, type); 456 458 ocfs2_qinfo_unlock(info, 1); 457 459 out_sem: 460 + memalloc_nofs_restore(memalloc); 458 461 up_write(&dqopt->dqio_sem); 459 462 return err; 460 463 } ··· 604 601 struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv; 605 602 struct ocfs2_super *osb = OCFS2_SB(sb); 606 603 int status = 0; 604 + unsigned int memalloc; 607 605 608 606 trace_ocfs2_sync_dquot_helper(from_kqid(&init_user_ns, dquot->dq_id), 609 607 dquot->dq_id.type, ··· 622 618 goto out_ilock; 623 619 } 624 620 down_write(&sb_dqopt(sb)->dqio_sem); 621 + memalloc = memalloc_nofs_save(); 625 622 status = ocfs2_sync_dquot(dquot); 626 623 if (status < 0) 627 624 mlog_errno(status); ··· 630 625 status = ocfs2_local_write_dquot(dquot); 631 626 if (status < 0) 632 627 mlog_errno(status); 628 + memalloc_nofs_restore(memalloc); 633 629 up_write(&sb_dqopt(sb)->dqio_sem); 634 630 ocfs2_commit_trans(osb, handle); 635 631 out_ilock: ··· 668 662 handle_t *handle; 669 663 struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb); 670 664 int status = 0; 665 + unsigned int memalloc; 671 666 672 667 trace_ocfs2_write_dquot(from_kqid(&init_user_ns, dquot->dq_id), 673 668 dquot->dq_id.type); ··· 680 673 goto out; 681 674 } 682 675 down_write(&sb_dqopt(dquot->dq_sb)->dqio_sem); 676 + memalloc = memalloc_nofs_save(); 683 677 status = ocfs2_local_write_dquot(dquot); 678 + memalloc_nofs_restore(memalloc); 684 679 up_write(&sb_dqopt(dquot->dq_sb)->dqio_sem); 685 680 ocfs2_commit_trans(osb, handle); 686 681 out: ··· 929 920 struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv; 930 921 handle_t *handle; 931 922 struct ocfs2_super 
*osb = OCFS2_SB(sb); 923 + unsigned int memalloc; 932 924 933 925 trace_ocfs2_mark_dquot_dirty(from_kqid(&init_user_ns, dquot->dq_id), 934 926 type); ··· 956 946 goto out_ilock; 957 947 } 958 948 down_write(&sb_dqopt(sb)->dqio_sem); 949 + memalloc = memalloc_nofs_save(); 959 950 status = ocfs2_sync_dquot(dquot); 960 951 if (status < 0) { 961 952 mlog_errno(status); ··· 965 954 /* Now write updated local dquot structure */ 966 955 status = ocfs2_local_write_dquot(dquot); 967 956 out_dlock: 957 + memalloc_nofs_restore(memalloc); 968 958 up_write(&sb_dqopt(sb)->dqio_sem); 969 959 ocfs2_commit_trans(osb, handle); 970 960 out_ilock:
+3
fs/ocfs2/quota_local.c
··· 470 470 int bit, chunk; 471 471 struct ocfs2_recovery_chunk *rchunk, *next; 472 472 qsize_t spacechange, inodechange; 473 + unsigned int memalloc; 473 474 474 475 trace_ocfs2_recover_local_quota_file((unsigned long)lqinode->i_ino, type); 475 476 ··· 522 521 goto out_drop_lock; 523 522 } 524 523 down_write(&sb_dqopt(sb)->dqio_sem); 524 + memalloc = memalloc_nofs_save(); 525 525 spin_lock(&dquot->dq_dqb_lock); 526 526 /* Add usage from quota entry into quota changes 527 527 * of our node. Auxiliary variables are important ··· 555 553 unlock_buffer(qbh); 556 554 ocfs2_journal_dirty(handle, qbh); 557 555 out_commit: 556 + memalloc_nofs_restore(memalloc); 558 557 up_write(&sb_dqopt(sb)->dqio_sem); 559 558 ocfs2_commit_trans(OCFS2_SB(sb), handle); 560 559 out_drop_lock:
+1 -1
fs/ocfs2/super.c
··· 122 122 static int ocfs2_enable_quotas(struct ocfs2_super *osb); 123 123 static void ocfs2_disable_quotas(struct ocfs2_super *osb); 124 124 125 - static struct dquot **ocfs2_get_dquots(struct inode *inode) 125 + static struct dquot __rcu **ocfs2_get_dquots(struct inode *inode) 126 126 { 127 127 return OCFS2_I(inode)->i_dquot; 128 128 }
+98 -74
fs/quota/dquot.c
··· 399 399 EXPORT_SYMBOL(dquot_mark_dquot_dirty); 400 400 401 401 /* Dirtify all the dquots - this can block when journalling */ 402 - static inline int mark_all_dquot_dirty(struct dquot * const *dquot) 402 + static inline int mark_all_dquot_dirty(struct dquot __rcu * const *dquots) 403 403 { 404 404 int ret, err, cnt; 405 + struct dquot *dquot; 405 406 406 407 ret = err = 0; 407 408 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 408 - if (dquot[cnt]) 409 + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); 410 + if (dquot) 409 411 /* Even in case of error we have to continue */ 410 - ret = mark_dquot_dirty(dquot[cnt]); 412 + ret = mark_dquot_dirty(dquot); 411 413 if (!err) 412 414 err = ret; 413 415 } ··· 877 875 } 878 876 879 877 /* Need to release dquot? */ 880 - #ifdef CONFIG_QUOTA_DEBUG 881 - /* sanity check */ 882 - BUG_ON(!list_empty(&dquot->dq_free)); 883 - #endif 878 + WARN_ON_ONCE(!list_empty(&dquot->dq_free)); 884 879 put_releasing_dquots(dquot); 885 880 atomic_dec(&dquot->dq_count); 886 881 spin_unlock(&dq_list_lock); ··· 986 987 * smp_mb__before_atomic() in dquot_acquire(). 987 988 */ 988 989 smp_rmb(); 989 - #ifdef CONFIG_QUOTA_DEBUG 990 - BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */ 991 - #endif 990 + /* Has somebody invalidated entry under us? 
*/ 991 + WARN_ON_ONCE(hlist_unhashed(&dquot->dq_hash)); 992 992 out: 993 993 if (empty) 994 994 do_destroy_dquot(empty); ··· 996 998 } 997 999 EXPORT_SYMBOL(dqget); 998 1000 999 - static inline struct dquot **i_dquot(struct inode *inode) 1001 + static inline struct dquot __rcu **i_dquot(struct inode *inode) 1000 1002 { 1001 1003 return inode->i_sb->s_op->get_dquots(inode); 1002 1004 } 1003 1005 1004 1006 static int dqinit_needed(struct inode *inode, int type) 1005 1007 { 1006 - struct dquot * const *dquots; 1008 + struct dquot __rcu * const *dquots; 1007 1009 int cnt; 1008 1010 1009 1011 if (IS_NOQUOTA(inode)) ··· 1093 1095 */ 1094 1096 spin_lock(&dq_data_lock); 1095 1097 if (!IS_NOQUOTA(inode)) { 1096 - struct dquot **dquots = i_dquot(inode); 1097 - struct dquot *dquot = dquots[type]; 1098 + struct dquot __rcu **dquots = i_dquot(inode); 1099 + struct dquot *dquot = srcu_dereference_check( 1100 + dquots[type], &dquot_srcu, 1101 + lockdep_is_held(&dq_data_lock)); 1098 1102 1099 1103 #ifdef CONFIG_QUOTA_DEBUG 1100 1104 if (unlikely(inode_get_rsv_space(inode) > 0)) 1101 1105 reserved = 1; 1102 1106 #endif 1103 - dquots[type] = NULL; 1107 + rcu_assign_pointer(dquots[type], NULL); 1104 1108 if (dquot) 1105 1109 dqput(dquot); 1106 1110 } ··· 1455 1455 static int __dquot_initialize(struct inode *inode, int type) 1456 1456 { 1457 1457 int cnt, init_needed = 0; 1458 - struct dquot **dquots, *got[MAXQUOTAS] = {}; 1458 + struct dquot __rcu **dquots; 1459 + struct dquot *got[MAXQUOTAS] = {}; 1459 1460 struct super_block *sb = inode->i_sb; 1460 1461 qsize_t rsv; 1461 1462 int ret = 0; ··· 1531 1530 if (!got[cnt]) 1532 1531 continue; 1533 1532 if (!dquots[cnt]) { 1534 - dquots[cnt] = got[cnt]; 1533 + rcu_assign_pointer(dquots[cnt], got[cnt]); 1535 1534 got[cnt] = NULL; 1536 1535 /* 1537 1536 * Make quota reservation system happy if someone ··· 1539 1538 */ 1540 1539 rsv = inode_get_rsv_space(inode); 1541 1540 if (unlikely(rsv)) { 1541 + struct dquot *dquot = 
srcu_dereference_check( 1542 + dquots[cnt], &dquot_srcu, 1543 + lockdep_is_held(&dq_data_lock)); 1544 + 1542 1545 spin_lock(&inode->i_lock); 1543 1546 /* Get reservation again under proper lock */ 1544 1547 rsv = __inode_get_rsv_space(inode); 1545 - spin_lock(&dquots[cnt]->dq_dqb_lock); 1546 - dquots[cnt]->dq_dqb.dqb_rsvspace += rsv; 1547 - spin_unlock(&dquots[cnt]->dq_dqb_lock); 1548 + spin_lock(&dquot->dq_dqb_lock); 1549 + dquot->dq_dqb.dqb_rsvspace += rsv; 1550 + spin_unlock(&dquot->dq_dqb_lock); 1548 1551 spin_unlock(&inode->i_lock); 1549 1552 } 1550 1553 } ··· 1570 1565 1571 1566 bool dquot_initialize_needed(struct inode *inode) 1572 1567 { 1573 - struct dquot **dquots; 1568 + struct dquot __rcu **dquots; 1574 1569 int i; 1575 1570 1576 1571 if (!inode_quota_active(inode)) ··· 1595 1590 static void __dquot_drop(struct inode *inode) 1596 1591 { 1597 1592 int cnt; 1598 - struct dquot **dquots = i_dquot(inode); 1593 + struct dquot __rcu **dquots = i_dquot(inode); 1599 1594 struct dquot *put[MAXQUOTAS]; 1600 1595 1601 1596 spin_lock(&dq_data_lock); 1602 1597 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1603 - put[cnt] = dquots[cnt]; 1604 - dquots[cnt] = NULL; 1598 + put[cnt] = srcu_dereference_check(dquots[cnt], &dquot_srcu, 1599 + lockdep_is_held(&dq_data_lock)); 1600 + rcu_assign_pointer(dquots[cnt], NULL); 1605 1601 } 1606 1602 spin_unlock(&dq_data_lock); 1607 1603 dqput_all(put); ··· 1610 1604 1611 1605 void dquot_drop(struct inode *inode) 1612 1606 { 1613 - struct dquot * const *dquots; 1607 + struct dquot __rcu * const *dquots; 1614 1608 int cnt; 1615 1609 1616 1610 if (IS_NOQUOTA(inode)) ··· 1683 1677 int cnt, ret = 0, index; 1684 1678 struct dquot_warn warn[MAXQUOTAS]; 1685 1679 int reserve = flags & DQUOT_SPACE_RESERVE; 1686 - struct dquot **dquots; 1680 + struct dquot __rcu **dquots; 1681 + struct dquot *dquot; 1687 1682 1688 1683 if (!inode_quota_active(inode)) { 1689 1684 if (reserve) { ··· 1704 1697 index = srcu_read_lock(&dquot_srcu); 1705 1698 
spin_lock(&inode->i_lock); 1706 1699 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1707 - if (!dquots[cnt]) 1700 + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); 1701 + if (!dquot) 1708 1702 continue; 1709 1703 if (reserve) { 1710 - ret = dquot_add_space(dquots[cnt], 0, number, flags, 1711 - &warn[cnt]); 1704 + ret = dquot_add_space(dquot, 0, number, flags, &warn[cnt]); 1712 1705 } else { 1713 - ret = dquot_add_space(dquots[cnt], number, 0, flags, 1714 - &warn[cnt]); 1706 + ret = dquot_add_space(dquot, number, 0, flags, &warn[cnt]); 1715 1707 } 1716 1708 if (ret) { 1717 1709 /* Back out changes we already did */ 1718 1710 for (cnt--; cnt >= 0; cnt--) { 1719 - if (!dquots[cnt]) 1711 + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); 1712 + if (!dquot) 1720 1713 continue; 1721 - spin_lock(&dquots[cnt]->dq_dqb_lock); 1714 + spin_lock(&dquot->dq_dqb_lock); 1722 1715 if (reserve) 1723 - dquot_free_reserved_space(dquots[cnt], 1724 - number); 1716 + dquot_free_reserved_space(dquot, number); 1725 1717 else 1726 - dquot_decr_space(dquots[cnt], number); 1727 - spin_unlock(&dquots[cnt]->dq_dqb_lock); 1718 + dquot_decr_space(dquot, number); 1719 + spin_unlock(&dquot->dq_dqb_lock); 1728 1720 } 1729 1721 spin_unlock(&inode->i_lock); 1730 1722 goto out_flush_warn; ··· 1753 1747 { 1754 1748 int cnt, ret = 0, index; 1755 1749 struct dquot_warn warn[MAXQUOTAS]; 1756 - struct dquot * const *dquots; 1750 + struct dquot __rcu * const *dquots; 1751 + struct dquot *dquot; 1757 1752 1758 1753 if (!inode_quota_active(inode)) 1759 1754 return 0; ··· 1765 1758 index = srcu_read_lock(&dquot_srcu); 1766 1759 spin_lock(&inode->i_lock); 1767 1760 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1768 - if (!dquots[cnt]) 1761 + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); 1762 + if (!dquot) 1769 1763 continue; 1770 - ret = dquot_add_inodes(dquots[cnt], 1, &warn[cnt]); 1764 + ret = dquot_add_inodes(dquot, 1, &warn[cnt]); 1771 1765 if (ret) { 1772 1766 for (cnt--; cnt >= 0; cnt--) { 1773 - if 
(!dquots[cnt]) 1767 + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); 1768 + if (!dquot) 1774 1769 continue; 1775 1770 /* Back out changes we already did */ 1776 - spin_lock(&dquots[cnt]->dq_dqb_lock); 1777 - dquot_decr_inodes(dquots[cnt], 1); 1778 - spin_unlock(&dquots[cnt]->dq_dqb_lock); 1771 + spin_lock(&dquot->dq_dqb_lock); 1772 + dquot_decr_inodes(dquot, 1); 1773 + spin_unlock(&dquot->dq_dqb_lock); 1779 1774 } 1780 1775 goto warn_put_all; 1781 1776 } ··· 1798 1789 */ 1799 1790 void dquot_claim_space_nodirty(struct inode *inode, qsize_t number) 1800 1791 { 1801 - struct dquot **dquots; 1792 + struct dquot __rcu **dquots; 1793 + struct dquot *dquot; 1802 1794 int cnt, index; 1803 1795 1804 1796 if (!inode_quota_active(inode)) { ··· 1815 1805 spin_lock(&inode->i_lock); 1816 1806 /* Claim reserved quotas to allocated quotas */ 1817 1807 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1818 - if (dquots[cnt]) { 1819 - struct dquot *dquot = dquots[cnt]; 1820 - 1808 + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); 1809 + if (dquot) { 1821 1810 spin_lock(&dquot->dq_dqb_lock); 1822 1811 if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number)) 1823 1812 number = dquot->dq_dqb.dqb_rsvspace; ··· 1840 1831 */ 1841 1832 void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number) 1842 1833 { 1843 - struct dquot **dquots; 1834 + struct dquot __rcu **dquots; 1835 + struct dquot *dquot; 1844 1836 int cnt, index; 1845 1837 1846 1838 if (!inode_quota_active(inode)) { ··· 1857 1847 spin_lock(&inode->i_lock); 1858 1848 /* Claim reserved quotas to allocated quotas */ 1859 1849 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1860 - if (dquots[cnt]) { 1861 - struct dquot *dquot = dquots[cnt]; 1862 - 1850 + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); 1851 + if (dquot) { 1863 1852 spin_lock(&dquot->dq_dqb_lock); 1864 1853 if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number)) 1865 1854 number = dquot->dq_dqb.dqb_curspace; ··· 1884 1875 { 1885 1876 unsigned int cnt; 1886 1877 
struct dquot_warn warn[MAXQUOTAS]; 1887 - struct dquot **dquots; 1878 + struct dquot __rcu **dquots; 1879 + struct dquot *dquot; 1888 1880 int reserve = flags & DQUOT_SPACE_RESERVE, index; 1889 1881 1890 1882 if (!inode_quota_active(inode)) { ··· 1906 1896 int wtype; 1907 1897 1908 1898 warn[cnt].w_type = QUOTA_NL_NOWARN; 1909 - if (!dquots[cnt]) 1899 + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); 1900 + if (!dquot) 1910 1901 continue; 1911 - spin_lock(&dquots[cnt]->dq_dqb_lock); 1912 - wtype = info_bdq_free(dquots[cnt], number); 1902 + spin_lock(&dquot->dq_dqb_lock); 1903 + wtype = info_bdq_free(dquot, number); 1913 1904 if (wtype != QUOTA_NL_NOWARN) 1914 - prepare_warning(&warn[cnt], dquots[cnt], wtype); 1905 + prepare_warning(&warn[cnt], dquot, wtype); 1915 1906 if (reserve) 1916 - dquot_free_reserved_space(dquots[cnt], number); 1907 + dquot_free_reserved_space(dquot, number); 1917 1908 else 1918 - dquot_decr_space(dquots[cnt], number); 1919 - spin_unlock(&dquots[cnt]->dq_dqb_lock); 1909 + dquot_decr_space(dquot, number); 1910 + spin_unlock(&dquot->dq_dqb_lock); 1920 1911 } 1921 1912 if (reserve) 1922 1913 *inode_reserved_space(inode) -= number; ··· 1941 1930 { 1942 1931 unsigned int cnt; 1943 1932 struct dquot_warn warn[MAXQUOTAS]; 1944 - struct dquot * const *dquots; 1933 + struct dquot __rcu * const *dquots; 1934 + struct dquot *dquot; 1945 1935 int index; 1946 1936 1947 1937 if (!inode_quota_active(inode)) ··· 1953 1941 spin_lock(&inode->i_lock); 1954 1942 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1955 1943 int wtype; 1956 - 1957 1944 warn[cnt].w_type = QUOTA_NL_NOWARN; 1958 - if (!dquots[cnt]) 1945 + dquot = srcu_dereference(dquots[cnt], &dquot_srcu); 1946 + if (!dquot) 1959 1947 continue; 1960 - spin_lock(&dquots[cnt]->dq_dqb_lock); 1961 - wtype = info_idq_free(dquots[cnt], 1); 1948 + spin_lock(&dquot->dq_dqb_lock); 1949 + wtype = info_idq_free(dquot, 1); 1962 1950 if (wtype != QUOTA_NL_NOWARN) 1963 - prepare_warning(&warn[cnt], dquots[cnt], wtype); 
1964 - dquot_decr_inodes(dquots[cnt], 1); 1965 - spin_unlock(&dquots[cnt]->dq_dqb_lock); 1951 + prepare_warning(&warn[cnt], dquot, wtype); 1952 + dquot_decr_inodes(dquot, 1); 1953 + spin_unlock(&dquot->dq_dqb_lock); 1966 1954 } 1967 1955 spin_unlock(&inode->i_lock); 1968 1956 mark_all_dquot_dirty(dquots); ··· 1988 1976 qsize_t cur_space; 1989 1977 qsize_t rsv_space = 0; 1990 1978 qsize_t inode_usage = 1; 1979 + struct dquot __rcu **dquots; 1991 1980 struct dquot *transfer_from[MAXQUOTAS] = {}; 1992 - int cnt, ret = 0; 1981 + int cnt, index, ret = 0; 1993 1982 char is_valid[MAXQUOTAS] = {}; 1994 1983 struct dquot_warn warn_to[MAXQUOTAS]; 1995 1984 struct dquot_warn warn_from_inodes[MAXQUOTAS]; ··· 2021 2008 } 2022 2009 cur_space = __inode_get_bytes(inode); 2023 2010 rsv_space = __inode_get_rsv_space(inode); 2011 + dquots = i_dquot(inode); 2024 2012 /* 2025 2013 * Build the transfer_from list, check limits, and update usage in 2026 2014 * the target structures. ··· 2036 2022 if (!sb_has_quota_active(inode->i_sb, cnt)) 2037 2023 continue; 2038 2024 is_valid[cnt] = 1; 2039 - transfer_from[cnt] = i_dquot(inode)[cnt]; 2025 + transfer_from[cnt] = srcu_dereference_check(dquots[cnt], 2026 + &dquot_srcu, lockdep_is_held(&dq_data_lock)); 2040 2027 ret = dquot_add_inodes(transfer_to[cnt], inode_usage, 2041 2028 &warn_to[cnt]); 2042 2029 if (ret) ··· 2076 2061 rsv_space); 2077 2062 spin_unlock(&transfer_from[cnt]->dq_dqb_lock); 2078 2063 } 2079 - i_dquot(inode)[cnt] = transfer_to[cnt]; 2064 + rcu_assign_pointer(dquots[cnt], transfer_to[cnt]); 2080 2065 } 2081 2066 spin_unlock(&inode->i_lock); 2082 2067 spin_unlock(&dq_data_lock); 2083 2068 2084 - mark_all_dquot_dirty(transfer_from); 2085 - mark_all_dquot_dirty(transfer_to); 2069 + /* 2070 + * These arrays are local and we hold dquot references so we don't need 2071 + * the srcu protection but still take dquot_srcu to avoid warning in 2072 + * mark_all_dquot_dirty(). 
2073 + */ 2074 + index = srcu_read_lock(&dquot_srcu); 2075 + mark_all_dquot_dirty((struct dquot __rcu **)transfer_from); 2076 + mark_all_dquot_dirty((struct dquot __rcu **)transfer_to); 2077 + srcu_read_unlock(&dquot_srcu, index); 2078 + 2086 2079 flush_warnings(warn_to); 2087 2080 flush_warnings(warn_from_inodes); 2088 2081 flush_warnings(warn_from_space); ··· 2411 2388 lockdep_assert_held_write(&sb->s_umount); 2412 2389 2413 2390 /* Just unsuspend quotas? */ 2414 - BUG_ON(flags & DQUOT_SUSPENDED); 2391 + if (WARN_ON_ONCE(flags & DQUOT_SUSPENDED)) 2392 + return -EINVAL; 2415 2393 2416 2394 if (!fmt) 2417 2395 return -ESRCH;
+108 -44
fs/quota/quota_tree.c
··· 21 21 MODULE_DESCRIPTION("Quota trie support"); 22 22 MODULE_LICENSE("GPL"); 23 23 24 + /* 25 + * Maximum quota tree depth we support. Only to limit recursion when working 26 + * with the tree. 27 + */ 28 + #define MAX_QTREE_DEPTH 6 29 + 24 30 #define __QUOTA_QT_PARANOIA 25 31 26 32 static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth) ··· 114 108 /* Remove empty block from list and return it */ 115 109 static int get_free_dqblk(struct qtree_mem_dqinfo *info) 116 110 { 117 - char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); 111 + char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL); 118 112 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; 119 113 int ret, blk; 120 114 ··· 166 160 static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, 167 161 uint blk) 168 162 { 169 - char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_NOFS); 163 + char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_KERNEL); 170 164 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; 171 165 uint nextblk = le32_to_cpu(dh->dqdh_next_free); 172 166 uint prevblk = le32_to_cpu(dh->dqdh_prev_free); ··· 213 207 static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf, 214 208 uint blk) 215 209 { 216 - char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_NOFS); 210 + char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_KERNEL); 217 211 struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf; 218 212 int err; 219 213 ··· 261 255 { 262 256 uint blk, i; 263 257 struct qt_disk_dqdbheader *dh; 264 - char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); 258 + char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL); 265 259 char *ddquot; 266 260 267 261 *err = 0; ··· 333 327 334 328 /* Insert reference to structure into the trie */ 335 329 static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, 336 - uint *treeblk, int depth) 330 + uint *blks, int depth) 337 331 { 338 - char *buf = kmalloc(info->dqi_usable_bs, 
GFP_NOFS); 332 + char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL); 339 333 int ret = 0, newson = 0, newact = 0; 340 334 __le32 *ref; 341 335 uint newblk; 336 + int i; 342 337 343 338 if (!buf) 344 339 return -ENOMEM; 345 - if (!*treeblk) { 340 + if (!blks[depth]) { 346 341 ret = get_free_dqblk(info); 347 342 if (ret < 0) 348 343 goto out_buf; 349 - *treeblk = ret; 344 + for (i = 0; i < depth; i++) 345 + if (ret == blks[i]) { 346 + quota_error(dquot->dq_sb, 347 + "Free block already used in tree: block %u", 348 + ret); 349 + ret = -EIO; 350 + goto out_buf; 351 + } 352 + blks[depth] = ret; 350 353 memset(buf, 0, info->dqi_usable_bs); 351 354 newact = 1; 352 355 } else { 353 - ret = read_blk(info, *treeblk, buf); 356 + ret = read_blk(info, blks[depth], buf); 354 357 if (ret < 0) { 355 358 quota_error(dquot->dq_sb, "Can't read tree quota " 356 - "block %u", *treeblk); 359 + "block %u", blks[depth]); 357 360 goto out_buf; 358 361 } 359 362 } ··· 372 357 info->dqi_blocks - 1); 373 358 if (ret) 374 359 goto out_buf; 375 - if (!newblk) 360 + if (!newblk) { 376 361 newson = 1; 362 + } else { 363 + for (i = 0; i <= depth; i++) 364 + if (newblk == blks[i]) { 365 + quota_error(dquot->dq_sb, 366 + "Cycle in quota tree detected: block %u index %u", 367 + blks[depth], 368 + get_index(info, dquot->dq_id, depth)); 369 + ret = -EIO; 370 + goto out_buf; 371 + } 372 + } 373 + blks[depth + 1] = newblk; 377 374 if (depth == info->dqi_qtree_depth - 1) { 378 375 #ifdef __QUOTA_QT_PARANOIA 379 376 if (newblk) { ··· 397 370 goto out_buf; 398 371 } 399 372 #endif 400 - newblk = find_free_dqentry(info, dquot, &ret); 373 + blks[depth + 1] = find_free_dqentry(info, dquot, &ret); 401 374 } else { 402 - ret = do_insert_tree(info, dquot, &newblk, depth+1); 375 + ret = do_insert_tree(info, dquot, blks, depth + 1); 403 376 } 404 377 if (newson && ret >= 0) { 405 378 ref[get_index(info, dquot->dq_id, depth)] = 406 - cpu_to_le32(newblk); 407 - ret = write_blk(info, *treeblk, buf); 379 + 
cpu_to_le32(blks[depth + 1]); 380 + ret = write_blk(info, blks[depth], buf); 408 381 } else if (newact && ret < 0) { 409 - put_free_dqblk(info, buf, *treeblk); 382 + put_free_dqblk(info, buf, blks[depth]); 410 383 } 411 384 out_buf: 412 385 kfree(buf); ··· 417 390 static inline int dq_insert_tree(struct qtree_mem_dqinfo *info, 418 391 struct dquot *dquot) 419 392 { 420 - int tmp = QT_TREEOFF; 393 + uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF }; 421 394 422 395 #ifdef __QUOTA_QT_PARANOIA 423 396 if (info->dqi_blocks <= QT_TREEOFF) { ··· 425 398 return -EIO; 426 399 } 427 400 #endif 428 - return do_insert_tree(info, dquot, &tmp, 0); 401 + if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) { 402 + quota_error(dquot->dq_sb, "Quota tree depth too big!"); 403 + return -EIO; 404 + } 405 + return do_insert_tree(info, dquot, blks, 0); 429 406 } 430 407 431 408 /* ··· 441 410 int type = dquot->dq_id.type; 442 411 struct super_block *sb = dquot->dq_sb; 443 412 ssize_t ret; 444 - char *ddquot = kmalloc(info->dqi_entry_size, GFP_NOFS); 413 + char *ddquot = kmalloc(info->dqi_entry_size, GFP_KERNEL); 445 414 446 415 if (!ddquot) 447 416 return -ENOMEM; ··· 480 449 uint blk) 481 450 { 482 451 struct qt_disk_dqdbheader *dh; 483 - char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); 452 + char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL); 484 453 int ret = 0; 485 454 486 455 if (!buf) ··· 542 511 543 512 /* Remove reference to dquot from tree */ 544 513 static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot, 545 - uint *blk, int depth) 514 + uint *blks, int depth) 546 515 { 547 - char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); 516 + char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL); 548 517 int ret = 0; 549 518 uint newblk; 550 519 __le32 *ref = (__le32 *)buf; 520 + int i; 551 521 552 522 if (!buf) 553 523 return -ENOMEM; 554 - ret = read_blk(info, *blk, buf); 524 + ret = read_blk(info, blks[depth], buf); 555 525 if (ret < 0) { 556 526 quota_error(dquot->dq_sb, 
"Can't read quota data block %u", 557 - *blk); 527 + blks[depth]); 558 528 goto out_buf; 559 529 } 560 530 newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]); ··· 564 532 if (ret) 565 533 goto out_buf; 566 534 535 + for (i = 0; i <= depth; i++) 536 + if (newblk == blks[i]) { 537 + quota_error(dquot->dq_sb, 538 + "Cycle in quota tree detected: block %u index %u", 539 + blks[depth], 540 + get_index(info, dquot->dq_id, depth)); 541 + ret = -EIO; 542 + goto out_buf; 543 + } 567 544 if (depth == info->dqi_qtree_depth - 1) { 568 545 ret = free_dqentry(info, dquot, newblk); 569 - newblk = 0; 546 + blks[depth + 1] = 0; 570 547 } else { 571 - ret = remove_tree(info, dquot, &newblk, depth+1); 548 + blks[depth + 1] = newblk; 549 + ret = remove_tree(info, dquot, blks, depth + 1); 572 550 } 573 - if (ret >= 0 && !newblk) { 574 - int i; 551 + if (ret >= 0 && !blks[depth + 1]) { 575 552 ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0); 576 553 /* Block got empty? */ 577 554 for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++) 578 555 ; 579 556 /* Don't put the root block into the free block list */ 580 557 if (i == (info->dqi_usable_bs >> 2) 581 - && *blk != QT_TREEOFF) { 582 - put_free_dqblk(info, buf, *blk); 583 - *blk = 0; 558 + && blks[depth] != QT_TREEOFF) { 559 + put_free_dqblk(info, buf, blks[depth]); 560 + blks[depth] = 0; 584 561 } else { 585 - ret = write_blk(info, *blk, buf); 562 + ret = write_blk(info, blks[depth], buf); 586 563 if (ret < 0) 587 564 quota_error(dquot->dq_sb, 588 565 "Can't write quota tree block %u", 589 - *blk); 566 + blks[depth]); 590 567 } 591 568 } 592 569 out_buf: ··· 606 565 /* Delete dquot from tree */ 607 566 int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) 608 567 { 609 - uint tmp = QT_TREEOFF; 568 + uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF }; 610 569 611 570 if (!dquot->dq_off) /* Even not allocated? 
*/ 612 571 return 0; 613 - return remove_tree(info, dquot, &tmp, 0); 572 + if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) { 573 + quota_error(dquot->dq_sb, "Quota tree depth too big!"); 574 + return -EIO; 575 + } 576 + return remove_tree(info, dquot, blks, 0); 614 577 } 615 578 EXPORT_SYMBOL(qtree_delete_dquot); 616 579 ··· 622 577 static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info, 623 578 struct dquot *dquot, uint blk) 624 579 { 625 - char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); 580 + char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL); 626 581 loff_t ret = 0; 627 582 int i; 628 583 char *ddquot; ··· 658 613 659 614 /* Find entry for given id in the tree */ 660 615 static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info, 661 - struct dquot *dquot, uint blk, int depth) 616 + struct dquot *dquot, uint *blks, int depth) 662 617 { 663 - char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); 618 + char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL); 664 619 loff_t ret = 0; 665 620 __le32 *ref = (__le32 *)buf; 621 + uint blk; 622 + int i; 666 623 667 624 if (!buf) 668 625 return -ENOMEM; 669 - ret = read_blk(info, blk, buf); 626 + ret = read_blk(info, blks[depth], buf); 670 627 if (ret < 0) { 671 628 quota_error(dquot->dq_sb, "Can't read quota tree block %u", 672 - blk); 629 + blks[depth]); 673 630 goto out_buf; 674 631 } 675 632 ret = 0; ··· 683 636 if (ret) 684 637 goto out_buf; 685 638 639 + /* Check for cycles in the tree */ 640 + for (i = 0; i <= depth; i++) 641 + if (blk == blks[i]) { 642 + quota_error(dquot->dq_sb, 643 + "Cycle in quota tree detected: block %u index %u", 644 + blks[depth], 645 + get_index(info, dquot->dq_id, depth)); 646 + ret = -EIO; 647 + goto out_buf; 648 + } 649 + blks[depth + 1] = blk; 686 650 if (depth < info->dqi_qtree_depth - 1) 687 - ret = find_tree_dqentry(info, dquot, blk, depth+1); 651 + ret = find_tree_dqentry(info, dquot, blks, depth + 1); 688 652 else 689 653 ret = find_block_dqentry(info, dquot, blk); 690 
654 out_buf: ··· 707 649 static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info, 708 650 struct dquot *dquot) 709 651 { 710 - return find_tree_dqentry(info, dquot, QT_TREEOFF, 0); 652 + uint blks[MAX_QTREE_DEPTH] = { QT_TREEOFF }; 653 + 654 + if (info->dqi_qtree_depth >= MAX_QTREE_DEPTH) { 655 + quota_error(dquot->dq_sb, "Quota tree depth too big!"); 656 + return -EIO; 657 + } 658 + return find_tree_dqentry(info, dquot, blks, 0); 711 659 } 712 660 713 661 int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot) ··· 748 684 } 749 685 dquot->dq_off = offset; 750 686 } 751 - ddquot = kmalloc(info->dqi_entry_size, GFP_NOFS); 687 + ddquot = kmalloc(info->dqi_entry_size, GFP_KERNEL); 752 688 if (!ddquot) 753 689 return -ENOMEM; 754 690 ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size, ··· 792 728 static int find_next_id(struct qtree_mem_dqinfo *info, qid_t *id, 793 729 unsigned int blk, int depth) 794 730 { 795 - char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS); 731 + char *buf = kmalloc(info->dqi_usable_bs, GFP_KERNEL); 796 732 __le32 *ref = (__le32 *)buf; 797 733 ssize_t ret; 798 734 unsigned int epb = info->dqi_usable_bs >> 2;
+6
fs/quota/quota_v1.c
··· 160 160 { 161 161 struct quota_info *dqopt = sb_dqopt(sb); 162 162 struct v1_disk_dqblk dqblk; 163 + unsigned int memalloc; 163 164 int ret; 164 165 165 166 down_read(&dqopt->dqio_sem); 167 + memalloc = memalloc_nofs_save(); 166 168 ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, 167 169 sizeof(struct v1_disk_dqblk), v1_dqoff(0)); 168 170 if (ret != sizeof(struct v1_disk_dqblk)) { ··· 181 179 dqopt->info[type].dqi_bgrace = 182 180 dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME; 183 181 out: 182 + memalloc_nofs_restore(memalloc); 184 183 up_read(&dqopt->dqio_sem); 185 184 return ret; 186 185 } ··· 190 187 { 191 188 struct quota_info *dqopt = sb_dqopt(sb); 192 189 struct v1_disk_dqblk dqblk; 190 + unsigned int memalloc; 193 191 int ret; 194 192 195 193 down_write(&dqopt->dqio_sem); 194 + memalloc = memalloc_nofs_save(); 196 195 ret = sb->s_op->quota_read(sb, type, (char *)&dqblk, 197 196 sizeof(struct v1_disk_dqblk), v1_dqoff(0)); 198 197 if (ret != sizeof(struct v1_disk_dqblk)) { ··· 214 209 else if (ret >= 0) 215 210 ret = -EIO; 216 211 out: 212 + memalloc_nofs_restore(memalloc); 217 213 up_write(&dqopt->dqio_sem); 218 214 return ret; 219 215 }
+28 -7
fs/quota/quota_v2.c
··· 96 96 struct qtree_mem_dqinfo *qinfo; 97 97 ssize_t size; 98 98 unsigned int version; 99 + unsigned int memalloc; 99 100 int ret; 100 101 101 102 down_read(&dqopt->dqio_sem); 103 + memalloc = memalloc_nofs_save(); 102 104 ret = v2_read_header(sb, type, &dqhead); 103 105 if (ret < 0) 104 106 goto out; ··· 121 119 ret = -EIO; 122 120 goto out; 123 121 } 124 - info->dqi_priv = kmalloc(sizeof(struct qtree_mem_dqinfo), GFP_NOFS); 122 + info->dqi_priv = kmalloc(sizeof(struct qtree_mem_dqinfo), GFP_KERNEL); 125 123 if (!info->dqi_priv) { 126 124 ret = -ENOMEM; 127 125 goto out; ··· 168 166 i_size_read(sb_dqopt(sb)->files[type])); 169 167 goto out_free; 170 168 } 171 - if (qinfo->dqi_free_blk >= qinfo->dqi_blocks) { 172 - quota_error(sb, "Free block number too big (%u >= %u).", 173 - qinfo->dqi_free_blk, qinfo->dqi_blocks); 169 + if (qinfo->dqi_free_blk && (qinfo->dqi_free_blk <= QT_TREEOFF || 170 + qinfo->dqi_free_blk >= qinfo->dqi_blocks)) { 171 + quota_error(sb, "Free block number %u out of range (%u, %u).", 172 + qinfo->dqi_free_blk, QT_TREEOFF, qinfo->dqi_blocks); 174 173 goto out_free; 175 174 } 176 - if (qinfo->dqi_free_entry >= qinfo->dqi_blocks) { 177 - quota_error(sb, "Block with free entry too big (%u >= %u).", 178 - qinfo->dqi_free_entry, qinfo->dqi_blocks); 175 + if (qinfo->dqi_free_entry && (qinfo->dqi_free_entry <= QT_TREEOFF || 176 + qinfo->dqi_free_entry >= qinfo->dqi_blocks)) { 177 + quota_error(sb, "Block with free entry %u out of range (%u, %u).", 178 + qinfo->dqi_free_entry, QT_TREEOFF, 179 + qinfo->dqi_blocks); 179 180 goto out_free; 180 181 } 181 182 ret = 0; ··· 188 183 info->dqi_priv = NULL; 189 184 } 190 185 out: 186 + memalloc_nofs_restore(memalloc); 191 187 up_read(&dqopt->dqio_sem); 192 188 return ret; 193 189 } ··· 201 195 struct mem_dqinfo *info = &dqopt->info[type]; 202 196 struct qtree_mem_dqinfo *qinfo = info->dqi_priv; 203 197 ssize_t size; 198 + unsigned int memalloc; 204 199 205 200 down_write(&dqopt->dqio_sem); 201 + memalloc = 
memalloc_nofs_save(); 206 202 spin_lock(&dq_data_lock); 207 203 info->dqi_flags &= ~DQF_INFO_DIRTY; 208 204 dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace); ··· 217 209 dinfo.dqi_free_entry = cpu_to_le32(qinfo->dqi_free_entry); 218 210 size = sb->s_op->quota_write(sb, type, (char *)&dinfo, 219 211 sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); 212 + memalloc_nofs_restore(memalloc); 220 213 up_write(&dqopt->dqio_sem); 221 214 if (size != sizeof(struct v2_disk_dqinfo)) { 222 215 quota_error(sb, "Can't write info structure"); ··· 337 328 { 338 329 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); 339 330 int ret; 331 + unsigned int memalloc; 340 332 341 333 down_read(&dqopt->dqio_sem); 334 + memalloc = memalloc_nofs_save(); 342 335 ret = qtree_read_dquot( 343 336 sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, 344 337 dquot); 338 + memalloc_nofs_restore(memalloc); 345 339 up_read(&dqopt->dqio_sem); 346 340 return ret; 347 341 } ··· 354 342 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); 355 343 int ret; 356 344 bool alloc = false; 345 + unsigned int memalloc; 357 346 358 347 /* 359 348 * If space for dquot is already allocated, we don't need any ··· 368 355 } else { 369 356 down_read(&dqopt->dqio_sem); 370 357 } 358 + memalloc = memalloc_nofs_save(); 371 359 ret = qtree_write_dquot( 372 360 sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, 373 361 dquot); 362 + memalloc_nofs_restore(memalloc); 374 363 if (alloc) 375 364 up_write(&dqopt->dqio_sem); 376 365 else ··· 383 368 static int v2_release_dquot(struct dquot *dquot) 384 369 { 385 370 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); 371 + unsigned int memalloc; 386 372 int ret; 387 373 388 374 down_write(&dqopt->dqio_sem); 375 + memalloc = memalloc_nofs_save(); 389 376 ret = qtree_release_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_id.type)->dqi_priv, dquot); 377 + memalloc_nofs_restore(memalloc); 390 378 up_write(&dqopt->dqio_sem); 391 379 392 380 return ret; ··· 404 386 static int 
v2_get_next_id(struct super_block *sb, struct kqid *qid) 405 387 { 406 388 struct quota_info *dqopt = sb_dqopt(sb); 389 + unsigned int memalloc; 407 390 int ret; 408 391 409 392 down_read(&dqopt->dqio_sem); 393 + memalloc = memalloc_nofs_save(); 410 394 ret = qtree_get_next_id(sb_dqinfo(sb, qid->type)->dqi_priv, qid); 395 + memalloc_nofs_restore(memalloc); 411 396 up_read(&dqopt->dqio_sem); 412 397 return ret; 413 398 }
+1 -1
fs/reiserfs/reiserfs.h
··· 97 97 struct rw_semaphore i_xattr_sem; 98 98 #endif 99 99 #ifdef CONFIG_QUOTA 100 - struct dquot *i_dquot[MAXQUOTAS]; 100 + struct dquot __rcu *i_dquot[MAXQUOTAS]; 101 101 #endif 102 102 103 103 struct inode vfs_inode;
+1 -1
fs/reiserfs/super.c
··· 801 801 static ssize_t reiserfs_quota_read(struct super_block *, int, char *, size_t, 802 802 loff_t); 803 803 804 - static struct dquot **reiserfs_get_dquots(struct inode *inode) 804 + static struct dquot __rcu **reiserfs_get_dquots(struct inode *inode) 805 805 { 806 806 return REISERFS_I(inode)->i_dquot; 807 807 }
+1 -1
fs/udf/dir.c
··· 67 67 pos_valid = true; 68 68 } 69 69 70 - fname = kmalloc(UDF_NAME_LEN, GFP_NOFS); 70 + fname = kmalloc(UDF_NAME_LEN, GFP_KERNEL); 71 71 if (!fname) { 72 72 ret = -ENOMEM; 73 73 goto out;
+1 -1
fs/udf/inode.c
··· 357 357 return 0; 358 358 } 359 359 360 - page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS); 360 + page = find_or_create_page(inode->i_mapping, 0, GFP_KERNEL); 361 361 if (!page) 362 362 return -ENOMEM; 363 363
+13 -10
fs/udf/namei.c
··· 59 59 child->name[0] == '.' && child->name[1] == '.'; 60 60 int ret; 61 61 62 - fname = kmalloc(UDF_NAME_LEN, GFP_NOFS); 62 + fname = kmalloc(UDF_NAME_LEN, GFP_KERNEL); 63 63 if (!fname) 64 64 return -ENOMEM; 65 65 ··· 566 566 static int udf_symlink(struct mnt_idmap *idmap, struct inode *dir, 567 567 struct dentry *dentry, const char *symname) 568 568 { 569 - struct inode *inode = udf_new_inode(dir, S_IFLNK | 0777); 569 + struct inode *inode; 570 570 struct pathComponent *pc; 571 571 const char *compstart; 572 572 struct extent_position epos = {}; ··· 579 579 struct udf_inode_info *iinfo; 580 580 struct super_block *sb = dir->i_sb; 581 581 582 - if (IS_ERR(inode)) 583 - return PTR_ERR(inode); 582 + name = kmalloc(UDF_NAME_LEN_CS0, GFP_KERNEL); 583 + if (!name) { 584 + err = -ENOMEM; 585 + goto out; 586 + } 587 + 588 + inode = udf_new_inode(dir, S_IFLNK | 0777); 589 + if (IS_ERR(inode)) { 590 + err = PTR_ERR(inode); 591 + goto out; 592 + } 584 593 585 594 iinfo = UDF_I(inode); 586 595 down_write(&iinfo->i_data_sem); 587 - name = kmalloc(UDF_NAME_LEN_CS0, GFP_NOFS); 588 - if (!name) { 589 - err = -ENOMEM; 590 - goto out_no_entry; 591 - } 592 - 593 596 inode->i_data.a_ops = &udf_symlink_aops; 594 597 inode->i_op = &udf_symlink_inode_operations; 595 598 inode_nohighmem(inode);
+290 -261
fs/udf/super.c
··· 40 40 #include <linux/slab.h> 41 41 #include <linux/kernel.h> 42 42 #include <linux/module.h> 43 - #include <linux/parser.h> 44 43 #include <linux/stat.h> 45 44 #include <linux/cdrom.h> 46 45 #include <linux/nls.h> 47 46 #include <linux/vfs.h> 48 47 #include <linux/vmalloc.h> 49 48 #include <linux/errno.h> 50 - #include <linux/mount.h> 51 49 #include <linux/seq_file.h> 52 50 #include <linux/bitmap.h> 53 51 #include <linux/crc-itu-t.h> 54 52 #include <linux/log2.h> 55 53 #include <asm/byteorder.h> 56 54 #include <linux/iversion.h> 55 + #include <linux/fs_context.h> 56 + #include <linux/fs_parser.h> 57 57 58 58 #include "udf_sb.h" 59 59 #include "udf_i.h" ··· 91 91 #define UDF_MAX_FILESIZE (1ULL << 42) 92 92 93 93 /* These are the "meat" - everything else is stuffing */ 94 - static int udf_fill_super(struct super_block *, void *, int); 94 + static int udf_fill_super(struct super_block *sb, struct fs_context *fc); 95 95 static void udf_put_super(struct super_block *); 96 96 static int udf_sync_fs(struct super_block *, int); 97 - static int udf_remount_fs(struct super_block *, int *, char *); 98 97 static void udf_load_logicalvolint(struct super_block *, struct kernel_extent_ad); 99 98 static void udf_open_lvid(struct super_block *); 100 99 static void udf_close_lvid(struct super_block *); 101 100 static unsigned int udf_count_free(struct super_block *); 102 101 static int udf_statfs(struct dentry *, struct kstatfs *); 103 102 static int udf_show_options(struct seq_file *, struct dentry *); 103 + static int udf_init_fs_context(struct fs_context *fc); 104 + static int udf_parse_param(struct fs_context *fc, struct fs_parameter *param); 105 + static int udf_reconfigure(struct fs_context *fc); 106 + static void udf_free_fc(struct fs_context *fc); 107 + static const struct fs_parameter_spec udf_param_spec[]; 104 108 105 109 struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb) 106 110 { ··· 123 119 } 124 120 125 121 /* UDF filesystem type */ 126 - 
static struct dentry *udf_mount(struct file_system_type *fs_type, 127 - int flags, const char *dev_name, void *data) 122 + static int udf_get_tree(struct fs_context *fc) 128 123 { 129 - return mount_bdev(fs_type, flags, dev_name, data, udf_fill_super); 124 + return get_tree_bdev(fc, udf_fill_super); 130 125 } 126 + 127 + static const struct fs_context_operations udf_context_ops = { 128 + .parse_param = udf_parse_param, 129 + .get_tree = udf_get_tree, 130 + .reconfigure = udf_reconfigure, 131 + .free = udf_free_fc, 132 + }; 131 133 132 134 static struct file_system_type udf_fstype = { 133 135 .owner = THIS_MODULE, 134 136 .name = "udf", 135 - .mount = udf_mount, 136 137 .kill_sb = kill_block_super, 137 138 .fs_flags = FS_REQUIRES_DEV, 139 + .init_fs_context = udf_init_fs_context, 140 + .parameters = udf_param_spec, 138 141 }; 139 142 MODULE_ALIAS_FS("udf"); 140 143 ··· 214 203 .put_super = udf_put_super, 215 204 .sync_fs = udf_sync_fs, 216 205 .statfs = udf_statfs, 217 - .remount_fs = udf_remount_fs, 218 206 .show_options = udf_show_options, 219 207 }; 220 208 221 209 struct udf_options { 222 - unsigned char novrs; 223 210 unsigned int blocksize; 224 211 unsigned int session; 225 212 unsigned int lastblock; ··· 230 221 umode_t dmode; 231 222 struct nls_table *nls_map; 232 223 }; 224 + 225 + /* 226 + * UDF has historically preserved prior mount options across 227 + * a remount, so copy those here if remounting, otherwise set 228 + * initial mount defaults. 
229 + */ 230 + static void udf_init_options(struct fs_context *fc, struct udf_options *uopt) 231 + { 232 + if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) { 233 + struct super_block *sb = fc->root->d_sb; 234 + struct udf_sb_info *sbi = UDF_SB(sb); 235 + 236 + uopt->flags = sbi->s_flags; 237 + uopt->uid = sbi->s_uid; 238 + uopt->gid = sbi->s_gid; 239 + uopt->umask = sbi->s_umask; 240 + uopt->fmode = sbi->s_fmode; 241 + uopt->dmode = sbi->s_dmode; 242 + uopt->nls_map = NULL; 243 + } else { 244 + uopt->flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | 245 + (1 << UDF_FLAG_STRICT); 246 + /* 247 + * By default we'll use overflow[ug]id when UDF 248 + * inode [ug]id == -1 249 + */ 250 + uopt->uid = make_kuid(current_user_ns(), overflowuid); 251 + uopt->gid = make_kgid(current_user_ns(), overflowgid); 252 + uopt->umask = 0; 253 + uopt->fmode = UDF_INVALID_MODE; 254 + uopt->dmode = UDF_INVALID_MODE; 255 + uopt->nls_map = NULL; 256 + uopt->session = 0xFFFFFFFF; 257 + } 258 + } 259 + 260 + static int udf_init_fs_context(struct fs_context *fc) 261 + { 262 + struct udf_options *uopt; 263 + 264 + uopt = kzalloc(sizeof(*uopt), GFP_KERNEL); 265 + if (!uopt) 266 + return -ENOMEM; 267 + 268 + udf_init_options(fc, uopt); 269 + 270 + fc->fs_private = uopt; 271 + fc->ops = &udf_context_ops; 272 + 273 + return 0; 274 + } 275 + 276 + static void udf_free_fc(struct fs_context *fc) 277 + { 278 + struct udf_options *uopt = fc->fs_private; 279 + 280 + unload_nls(uopt->nls_map); 281 + kfree(fc->fs_private); 282 + } 233 283 234 284 static int __init init_udf_fs(void) 235 285 { ··· 425 357 } 426 358 427 359 /* 428 - * udf_parse_options 360 + * udf_parse_param 429 361 * 430 362 * PURPOSE 431 363 * Parse mount options. ··· 468 400 * yield highly unpredictable results. 469 401 * 470 402 * PRE-CONDITIONS 471 - * options Pointer to mount options string. 472 - * uopts Pointer to mount options variable. 403 + * fc fs_context with pointer to mount options variable. 
404 + * param Pointer to fs_parameter being parsed. 473 405 * 474 406 * POST-CONDITIONS 475 - * <return> 1 Mount options parsed okay. 476 - * <return> 0 Error parsing mount options. 407 + * <return> 0 Mount options parsed okay. 408 + * <return> errno Error parsing mount options. 477 409 * 478 410 * HISTORY 479 411 * July 1, 1997 - Andrew E. Mileski ··· 485 417 Opt_noadinicb, Opt_adinicb, Opt_shortad, Opt_longad, 486 418 Opt_gid, Opt_uid, Opt_umask, Opt_session, Opt_lastblock, 487 419 Opt_anchor, Opt_volume, Opt_partition, Opt_fileset, 488 - Opt_rootdir, Opt_utf8, Opt_iocharset, 489 - Opt_err, Opt_uforget, Opt_uignore, Opt_gforget, Opt_gignore, 490 - Opt_fmode, Opt_dmode 420 + Opt_rootdir, Opt_utf8, Opt_iocharset, Opt_err, Opt_fmode, Opt_dmode 491 421 }; 492 422 493 - static const match_table_t tokens = { 494 - {Opt_novrs, "novrs"}, 495 - {Opt_nostrict, "nostrict"}, 496 - {Opt_bs, "bs=%u"}, 497 - {Opt_unhide, "unhide"}, 498 - {Opt_undelete, "undelete"}, 499 - {Opt_noadinicb, "noadinicb"}, 500 - {Opt_adinicb, "adinicb"}, 501 - {Opt_shortad, "shortad"}, 502 - {Opt_longad, "longad"}, 503 - {Opt_uforget, "uid=forget"}, 504 - {Opt_uignore, "uid=ignore"}, 505 - {Opt_gforget, "gid=forget"}, 506 - {Opt_gignore, "gid=ignore"}, 507 - {Opt_gid, "gid=%u"}, 508 - {Opt_uid, "uid=%u"}, 509 - {Opt_umask, "umask=%o"}, 510 - {Opt_session, "session=%u"}, 511 - {Opt_lastblock, "lastblock=%u"}, 512 - {Opt_anchor, "anchor=%u"}, 513 - {Opt_volume, "volume=%u"}, 514 - {Opt_partition, "partition=%u"}, 515 - {Opt_fileset, "fileset=%u"}, 516 - {Opt_rootdir, "rootdir=%u"}, 517 - {Opt_utf8, "utf8"}, 518 - {Opt_iocharset, "iocharset=%s"}, 519 - {Opt_fmode, "mode=%o"}, 520 - {Opt_dmode, "dmode=%o"}, 521 - {Opt_err, NULL} 522 - }; 423 + static const struct fs_parameter_spec udf_param_spec[] = { 424 + fsparam_flag ("novrs", Opt_novrs), 425 + fsparam_flag ("nostrict", Opt_nostrict), 426 + fsparam_u32 ("bs", Opt_bs), 427 + fsparam_flag ("unhide", Opt_unhide), 428 + fsparam_flag ("undelete", 
Opt_undelete), 429 + fsparam_flag_no ("adinicb", Opt_adinicb), 430 + fsparam_flag ("shortad", Opt_shortad), 431 + fsparam_flag ("longad", Opt_longad), 432 + fsparam_string ("gid", Opt_gid), 433 + fsparam_string ("uid", Opt_uid), 434 + fsparam_u32 ("umask", Opt_umask), 435 + fsparam_u32 ("session", Opt_session), 436 + fsparam_u32 ("lastblock", Opt_lastblock), 437 + fsparam_u32 ("anchor", Opt_anchor), 438 + fsparam_u32 ("volume", Opt_volume), 439 + fsparam_u32 ("partition", Opt_partition), 440 + fsparam_u32 ("fileset", Opt_fileset), 441 + fsparam_u32 ("rootdir", Opt_rootdir), 442 + fsparam_flag ("utf8", Opt_utf8), 443 + fsparam_string ("iocharset", Opt_iocharset), 444 + fsparam_u32 ("mode", Opt_fmode), 445 + fsparam_u32 ("dmode", Opt_dmode), 446 + {} 447 + }; 523 448 524 - static int udf_parse_options(char *options, struct udf_options *uopt, 525 - bool remount) 449 + static int udf_parse_param(struct fs_context *fc, struct fs_parameter *param) 526 450 { 527 - char *p; 528 - int option; 529 451 unsigned int uv; 452 + unsigned int n; 453 + struct udf_options *uopt = fc->fs_private; 454 + struct fs_parse_result result; 455 + int token; 456 + bool remount = (fc->purpose & FS_CONTEXT_FOR_RECONFIGURE); 530 457 531 - uopt->novrs = 0; 532 - uopt->session = 0xFFFFFFFF; 533 - uopt->lastblock = 0; 534 - uopt->anchor = 0; 458 + token = fs_parse(fc, udf_param_spec, param, &result); 459 + if (token < 0) 460 + return token; 535 461 536 - if (!options) 537 - return 1; 538 - 539 - while ((p = strsep(&options, ",")) != NULL) { 540 - substring_t args[MAX_OPT_ARGS]; 541 - int token; 542 - unsigned n; 543 - if (!*p) 544 - continue; 545 - 546 - token = match_token(p, tokens, args); 547 - switch (token) { 548 - case Opt_novrs: 549 - uopt->novrs = 1; 550 - break; 551 - case Opt_bs: 552 - if (match_int(&args[0], &option)) 553 - return 0; 554 - n = option; 555 - if (n != 512 && n != 1024 && n != 2048 && n != 4096) 556 - return 0; 557 - uopt->blocksize = n; 558 - uopt->flags |= (1 << 
UDF_FLAG_BLOCKSIZE_SET); 559 - break; 560 - case Opt_unhide: 561 - uopt->flags |= (1 << UDF_FLAG_UNHIDE); 562 - break; 563 - case Opt_undelete: 564 - uopt->flags |= (1 << UDF_FLAG_UNDELETE); 565 - break; 566 - case Opt_noadinicb: 462 + switch (token) { 463 + case Opt_novrs: 464 + uopt->flags |= (1 << UDF_FLAG_NOVRS); 465 + break; 466 + case Opt_bs: 467 + n = result.uint_32; 468 + if (n != 512 && n != 1024 && n != 2048 && n != 4096) 469 + return -EINVAL; 470 + uopt->blocksize = n; 471 + uopt->flags |= (1 << UDF_FLAG_BLOCKSIZE_SET); 472 + break; 473 + case Opt_unhide: 474 + uopt->flags |= (1 << UDF_FLAG_UNHIDE); 475 + break; 476 + case Opt_undelete: 477 + uopt->flags |= (1 << UDF_FLAG_UNDELETE); 478 + break; 479 + case Opt_adinicb: 480 + if (result.negated) 567 481 uopt->flags &= ~(1 << UDF_FLAG_USE_AD_IN_ICB); 568 - break; 569 - case Opt_adinicb: 482 + else 570 483 uopt->flags |= (1 << UDF_FLAG_USE_AD_IN_ICB); 571 - break; 572 - case Opt_shortad: 573 - uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD); 574 - break; 575 - case Opt_longad: 576 - uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD); 577 - break; 578 - case Opt_gid: 579 - if (match_uint(args, &uv)) 580 - return 0; 581 - uopt->gid = make_kgid(current_user_ns(), uv); 582 - if (!gid_valid(uopt->gid)) 583 - return 0; 484 + break; 485 + case Opt_shortad: 486 + uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD); 487 + break; 488 + case Opt_longad: 489 + uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD); 490 + break; 491 + case Opt_gid: 492 + if (kstrtoint(param->string, 10, &uv) == 0) { 493 + kgid_t gid = make_kgid(current_user_ns(), uv); 494 + if (!gid_valid(gid)) 495 + return -EINVAL; 496 + uopt->gid = gid; 584 497 uopt->flags |= (1 << UDF_FLAG_GID_SET); 585 - break; 586 - case Opt_uid: 587 - if (match_uint(args, &uv)) 588 - return 0; 589 - uopt->uid = make_kuid(current_user_ns(), uv); 590 - if (!uid_valid(uopt->uid)) 591 - return 0; 592 - uopt->flags |= (1 << UDF_FLAG_UID_SET); 593 - break; 594 - case Opt_umask: 595 - if 
(match_octal(args, &option)) 596 - return 0; 597 - uopt->umask = option; 598 - break; 599 - case Opt_nostrict: 600 - uopt->flags &= ~(1 << UDF_FLAG_STRICT); 601 - break; 602 - case Opt_session: 603 - if (match_int(args, &option)) 604 - return 0; 605 - uopt->session = option; 606 - if (!remount) 607 - uopt->flags |= (1 << UDF_FLAG_SESSION_SET); 608 - break; 609 - case Opt_lastblock: 610 - if (match_int(args, &option)) 611 - return 0; 612 - uopt->lastblock = option; 613 - if (!remount) 614 - uopt->flags |= (1 << UDF_FLAG_LASTBLOCK_SET); 615 - break; 616 - case Opt_anchor: 617 - if (match_int(args, &option)) 618 - return 0; 619 - uopt->anchor = option; 620 - break; 621 - case Opt_volume: 622 - case Opt_partition: 623 - case Opt_fileset: 624 - case Opt_rootdir: 625 - /* Ignored (never implemented properly) */ 626 - break; 627 - case Opt_utf8: 628 - if (!remount) { 629 - unload_nls(uopt->nls_map); 630 - uopt->nls_map = NULL; 631 - } 632 - break; 633 - case Opt_iocharset: 634 - if (!remount) { 635 - unload_nls(uopt->nls_map); 636 - uopt->nls_map = NULL; 637 - } 638 - /* When nls_map is not loaded then UTF-8 is used */ 639 - if (!remount && strcmp(args[0].from, "utf8") != 0) { 640 - uopt->nls_map = load_nls(args[0].from); 641 - if (!uopt->nls_map) { 642 - pr_err("iocharset %s not found\n", 643 - args[0].from); 644 - return 0; 645 - } 646 - } 647 - break; 648 - case Opt_uforget: 649 - uopt->flags |= (1 << UDF_FLAG_UID_FORGET); 650 - break; 651 - case Opt_uignore: 652 - case Opt_gignore: 653 - /* These options are superseeded by uid=<number> */ 654 - break; 655 - case Opt_gforget: 498 + } else if (!strcmp(param->string, "forget")) { 656 499 uopt->flags |= (1 << UDF_FLAG_GID_FORGET); 657 - break; 658 - case Opt_fmode: 659 - if (match_octal(args, &option)) 660 - return 0; 661 - uopt->fmode = option & 0777; 662 - break; 663 - case Opt_dmode: 664 - if (match_octal(args, &option)) 665 - return 0; 666 - uopt->dmode = option & 0777; 667 - break; 668 - default: 669 - pr_err("bad 
mount option \"%s\" or missing value\n", p); 670 - return 0 500 + } else if (!strcmp(param->string, "ignore")) { 501 + /* this option is superseded by gid=<number> */ 502 + ; 503 + } else { 504 + return -EINVAL; 671 505 } 506 + break; 507 + case Opt_uid: 508 + if (kstrtoint(param->string, 10, &uv) == 0) { 509 + kuid_t uid = make_kuid(current_user_ns(), uv); 510 + if (!uid_valid(uid)) 511 + return -EINVAL; 512 + uopt->uid = uid; 513 + uopt->flags |= (1 << UDF_FLAG_UID_SET); 514 + } else if (!strcmp(param->string, "forget")) { 515 + uopt->flags |= (1 << UDF_FLAG_UID_FORGET); 516 + } else if (!strcmp(param->string, "ignore")) { 517 + /* this option is superseded by uid=<number> */ 518 + ; 519 + } else { 520 + return -EINVAL; 521 + } 522 + break; 523 + case Opt_umask: 524 + uopt->umask = result.uint_32; 525 + break; 526 + case Opt_nostrict: 527 + uopt->flags &= ~(1 << UDF_FLAG_STRICT); 528 + break; 529 + case Opt_session: 530 + uopt->session = result.uint_32; 531 + if (!remount) 532 + uopt->flags |= (1 << UDF_FLAG_SESSION_SET); 533 + break; 534 + case Opt_lastblock: 535 + uopt->lastblock = result.uint_32; 536 + if (!remount) 537 + uopt->flags |= (1 << UDF_FLAG_LASTBLOCK_SET); 538 + break; 539 + case Opt_anchor: 540 + uopt->anchor = result.uint_32; 541 + break; 542 + case Opt_volume: 543 + case Opt_partition: 544 + case Opt_fileset: 545 + case Opt_rootdir: 546 + /* Ignored (never implemented properly) */ 547 + break; 548 + case Opt_utf8: 549 + if (!remount) { 550 + unload_nls(uopt->nls_map); 551 + uopt->nls_map = NULL; 552 + } 553 + break; 554 + case Opt_iocharset: 555 + if (!remount) { 556 + unload_nls(uopt->nls_map); 557 + uopt->nls_map = NULL; 558 + } 559 + /* When nls_map is not loaded then UTF-8 is used */ 560 + if (!remount && strcmp(param->string, "utf8") != 0) { 561 + uopt->nls_map = load_nls(param->string); 562 + if (!uopt->nls_map) { 563 + errorf(fc, "iocharset %s not found", 564 + param->string); 565 + return -EINVAL; 566 + } 567 + } 568 + break; 569 + case 
Opt_fmode: 570 + uopt->fmode = result.uint_32 & 0777; 571 + break; 572 + case Opt_dmode: 573 + uopt->dmode = result.uint_32 & 0777; 574 + break; 575 + default: 576 + return -EINVAL; 672 577 } 673 - return 1; 578 + return 0; 674 579 } 675 580 676 - static int udf_remount_fs(struct super_block *sb, int *flags, char *options) 581 + static int udf_reconfigure(struct fs_context *fc) 677 582 { 678 - struct udf_options uopt; 583 + struct udf_options *uopt = fc->fs_private; 584 + struct super_block *sb = fc->root->d_sb; 679 585 struct udf_sb_info *sbi = UDF_SB(sb); 586 + int readonly = fc->sb_flags & SB_RDONLY; 680 587 int error = 0; 681 588 682 - if (!(*flags & SB_RDONLY) && UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT)) 589 + if (!readonly && UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT)) 683 590 return -EACCES; 684 591 685 592 sync_filesystem(sb); 686 593 687 - uopt.flags = sbi->s_flags; 688 - uopt.uid = sbi->s_uid; 689 - uopt.gid = sbi->s_gid; 690 - uopt.umask = sbi->s_umask; 691 - uopt.fmode = sbi->s_fmode; 692 - uopt.dmode = sbi->s_dmode; 693 - uopt.nls_map = NULL; 694 - 695 - if (!udf_parse_options(options, &uopt, true)) 696 - return -EINVAL; 697 - 698 594 write_lock(&sbi->s_cred_lock); 699 - sbi->s_flags = uopt.flags; 700 - sbi->s_uid = uopt.uid; 701 - sbi->s_gid = uopt.gid; 702 - sbi->s_umask = uopt.umask; 703 - sbi->s_fmode = uopt.fmode; 704 - sbi->s_dmode = uopt.dmode; 595 + sbi->s_flags = uopt->flags; 596 + sbi->s_uid = uopt->uid; 597 + sbi->s_gid = uopt->gid; 598 + sbi->s_umask = uopt->umask; 599 + sbi->s_fmode = uopt->fmode; 600 + sbi->s_dmode = uopt->dmode; 705 601 write_unlock(&sbi->s_cred_lock); 706 602 707 - if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb)) 603 + if (readonly == sb_rdonly(sb)) 708 604 goto out_unlock; 709 605 710 - if (*flags & SB_RDONLY) 606 + if (readonly) 711 607 udf_close_lvid(sb); 712 608 else 713 609 udf_open_lvid(sb); ··· 895 863 int ret; 896 864 struct timestamp *ts; 897 865 898 - outstr = kmalloc(128, GFP_NOFS); 866 + outstr = kmalloc(128, 
GFP_KERNEL); 899 867 if (!outstr) 900 868 return -ENOMEM; 901 869 ··· 1570 1538 return ret; 1571 1539 } 1572 1540 1541 + static bool udf_lvid_valid(struct super_block *sb, 1542 + struct logicalVolIntegrityDesc *lvid) 1543 + { 1544 + u32 parts, impuselen; 1545 + 1546 + parts = le32_to_cpu(lvid->numOfPartitions); 1547 + impuselen = le32_to_cpu(lvid->lengthOfImpUse); 1548 + if (parts >= sb->s_blocksize || impuselen >= sb->s_blocksize || 1549 + sizeof(struct logicalVolIntegrityDesc) + impuselen + 1550 + 2 * parts * sizeof(u32) > sb->s_blocksize) 1551 + return false; 1552 + return true; 1553 + } 1554 + 1573 1555 /* 1574 1556 * Find the prevailing Logical Volume Integrity Descriptor. 1575 1557 */ ··· 1594 1548 struct udf_sb_info *sbi = UDF_SB(sb); 1595 1549 struct logicalVolIntegrityDesc *lvid; 1596 1550 int indirections = 0; 1597 - u32 parts, impuselen; 1598 1551 1599 1552 while (++indirections <= UDF_MAX_LVID_NESTING) { 1600 1553 final_bh = NULL; ··· 1615 1570 if (!final_bh) 1616 1571 return; 1617 1572 1618 - brelse(sbi->s_lvid_bh); 1619 - sbi->s_lvid_bh = final_bh; 1620 - 1621 1573 lvid = (struct logicalVolIntegrityDesc *)final_bh->b_data; 1574 + if (udf_lvid_valid(sb, lvid)) { 1575 + brelse(sbi->s_lvid_bh); 1576 + sbi->s_lvid_bh = final_bh; 1577 + } else { 1578 + udf_warn(sb, "Corrupted LVID (parts=%u, impuselen=%u), " 1579 + "ignoring.\n", 1580 + le32_to_cpu(lvid->numOfPartitions), 1581 + le32_to_cpu(lvid->lengthOfImpUse)); 1582 + } 1583 + 1622 1584 if (lvid->nextIntegrityExt.extLength == 0) 1623 - goto check; 1585 + return; 1624 1586 1625 1587 loc = leea_to_cpu(lvid->nextIntegrityExt); 1626 1588 } 1627 1589 1628 1590 udf_warn(sb, "Too many LVID indirections (max %u), ignoring.\n", 1629 1591 UDF_MAX_LVID_NESTING); 1630 - out_err: 1631 1592 brelse(sbi->s_lvid_bh); 1632 1593 sbi->s_lvid_bh = NULL; 1633 - return; 1634 - check: 1635 - parts = le32_to_cpu(lvid->numOfPartitions); 1636 - impuselen = le32_to_cpu(lvid->lengthOfImpUse); 1637 - if (parts >= sb->s_blocksize || 
impuselen >= sb->s_blocksize || 1638 - sizeof(struct logicalVolIntegrityDesc) + impuselen + 1639 - 2 * parts * sizeof(u32) > sb->s_blocksize) { 1640 - udf_warn(sb, "Corrupted LVID (parts=%u, impuselen=%u), " 1641 - "ignoring.\n", parts, impuselen); 1642 - goto out_err; 1643 - } 1644 1594 } 1645 1595 1646 1596 /* ··· 1985 1945 return -EINVAL; 1986 1946 } 1987 1947 sbi->s_last_block = uopt->lastblock; 1988 - if (!uopt->novrs) { 1948 + if (!UDF_QUERY_FLAG(sb, UDF_FLAG_NOVRS)) { 1989 1949 /* Check that it is NSR02 compliant */ 1990 1950 nsr = udf_check_vsd(sb); 1991 1951 if (!nsr) { ··· 2123 2083 return ret; 2124 2084 } 2125 2085 2126 - static int udf_fill_super(struct super_block *sb, void *options, int silent) 2086 + static int udf_fill_super(struct super_block *sb, struct fs_context *fc) 2127 2087 { 2128 2088 int ret = -EINVAL; 2129 2089 struct inode *inode = NULL; 2130 - struct udf_options uopt; 2090 + struct udf_options *uopt = fc->fs_private; 2131 2091 struct kernel_lb_addr rootdir, fileset; 2132 2092 struct udf_sb_info *sbi; 2133 2093 bool lvid_open = false; 2134 - 2135 - uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT); 2136 - /* By default we'll use overflow[ug]id when UDF inode [ug]id == -1 */ 2137 - uopt.uid = make_kuid(current_user_ns(), overflowuid); 2138 - uopt.gid = make_kgid(current_user_ns(), overflowgid); 2139 - uopt.umask = 0; 2140 - uopt.fmode = UDF_INVALID_MODE; 2141 - uopt.dmode = UDF_INVALID_MODE; 2142 - uopt.nls_map = NULL; 2094 + int silent = fc->sb_flags & SB_SILENT; 2143 2095 2144 2096 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); 2145 2097 if (!sbi) ··· 2141 2109 2142 2110 mutex_init(&sbi->s_alloc_mutex); 2143 2111 2144 - if (!udf_parse_options((char *)options, &uopt, false)) 2145 - goto parse_options_failure; 2146 - 2147 2112 fileset.logicalBlockNum = 0xFFFFFFFF; 2148 2113 fileset.partitionReferenceNum = 0xFFFF; 2149 2114 2150 - sbi->s_flags = uopt.flags; 2151 - sbi->s_uid = uopt.uid; 2152 - sbi->s_gid = uopt.gid; 2153 - 
sbi->s_umask = uopt.umask; 2154 - sbi->s_fmode = uopt.fmode; 2155 - sbi->s_dmode = uopt.dmode; 2156 - sbi->s_nls_map = uopt.nls_map; 2115 + sbi->s_flags = uopt->flags; 2116 + sbi->s_uid = uopt->uid; 2117 + sbi->s_gid = uopt->gid; 2118 + sbi->s_umask = uopt->umask; 2119 + sbi->s_fmode = uopt->fmode; 2120 + sbi->s_dmode = uopt->dmode; 2121 + sbi->s_nls_map = uopt->nls_map; 2122 + uopt->nls_map = NULL; 2157 2123 rwlock_init(&sbi->s_cred_lock); 2158 2124 2159 - if (uopt.session == 0xFFFFFFFF) 2125 + if (uopt->session == 0xFFFFFFFF) 2160 2126 sbi->s_session = udf_get_last_session(sb); 2161 2127 else 2162 - sbi->s_session = uopt.session; 2128 + sbi->s_session = uopt->session; 2163 2129 2164 2130 udf_debug("Multi-session=%d\n", sbi->s_session); 2165 2131 ··· 2168 2138 sb->s_magic = UDF_SUPER_MAGIC; 2169 2139 sb->s_time_gran = 1000; 2170 2140 2171 - if (uopt.flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) { 2172 - ret = udf_load_vrs(sb, &uopt, silent, &fileset); 2141 + if (uopt->flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) { 2142 + ret = udf_load_vrs(sb, uopt, silent, &fileset); 2173 2143 } else { 2174 - uopt.blocksize = bdev_logical_block_size(sb->s_bdev); 2175 - while (uopt.blocksize <= 4096) { 2176 - ret = udf_load_vrs(sb, &uopt, silent, &fileset); 2144 + uopt->blocksize = bdev_logical_block_size(sb->s_bdev); 2145 + while (uopt->blocksize <= 4096) { 2146 + ret = udf_load_vrs(sb, uopt, silent, &fileset); 2177 2147 if (ret < 0) { 2178 2148 if (!silent && ret != -EACCES) { 2179 2149 pr_notice("Scanning with blocksize %u failed\n", 2180 - uopt.blocksize); 2150 + uopt->blocksize); 2181 2151 } 2182 2152 brelse(sbi->s_lvid_bh); 2183 2153 sbi->s_lvid_bh = NULL; ··· 2190 2160 } else 2191 2161 break; 2192 2162 2193 - uopt.blocksize <<= 1; 2163 + uopt->blocksize <<= 1; 2194 2164 } 2195 2165 } 2196 2166 if (ret < 0) { ··· 2295 2265 2296 2266 error_out: 2297 2267 iput(sbi->s_vat_inode); 2298 - parse_options_failure: 2299 - unload_nls(uopt.nls_map); 2268 + unload_nls(uopt->nls_map); 2300 2269 if 
(lvid_open) 2301 2270 udf_close_lvid(sb); 2302 2271 brelse(sbi->s_lvid_bh);
+1
fs/udf/udf_sb.h
··· 23 23 #define UDF_FLAG_STRICT 5 24 24 #define UDF_FLAG_UNDELETE 6 25 25 #define UDF_FLAG_UNHIDE 7 26 + #define UDF_FLAG_NOVRS 8 26 27 #define UDF_FLAG_UID_FORGET 11 /* save -1 for uid to disk */ 27 28 #define UDF_FLAG_GID_FORGET 12 28 29 #define UDF_FLAG_UID_SET 13
+1 -1
include/linux/fs.h
··· 2179 2179 #ifdef CONFIG_QUOTA 2180 2180 ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); 2181 2181 ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); 2182 - struct dquot **(*get_dquots)(struct inode *); 2182 + struct dquot __rcu **(*get_dquots)(struct inode *); 2183 2183 #endif 2184 2184 long (*nr_cached_objects)(struct super_block *, 2185 2185 struct shrink_control *);
+1 -1
include/linux/shmem_fs.h
··· 37 37 unsigned int fsflags; /* for FS_IOC_[SG]ETFLAGS */ 38 38 atomic_t stop_eviction; /* hold when working on inode */ 39 39 #ifdef CONFIG_TMPFS_QUOTA 40 - struct dquot *i_dquot[MAXQUOTAS]; 40 + struct dquot __rcu *i_dquot[MAXQUOTAS]; 41 41 #endif 42 42 struct inode vfs_inode; 43 43 };
+1 -1
mm/shmem.c
··· 317 317 dquot_quota_off(sb, type); 318 318 } 319 319 320 - static struct dquot **shmem_get_dquots(struct inode *inode) 320 + static struct dquot __rcu **shmem_get_dquots(struct inode *inode) 321 321 { 322 322 return SHMEM_I(inode)->i_dquot; 323 323 }