Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs

Pull quota, reiserfs, UDF updates from Jan Kara:
"Scalability improvements for quota, a few reiserfs fixes, and a couple
of misc cleanups (udf, ext2)"

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
reiserfs: Fix use after free in journal teardown
reiserfs: fix corruption introduced by balance_leaf refactor
udf: avoid redundant memcpy when writing data in ICB
fs/udf: re-use hex_asc_upper_{hi,lo} macros
fs/quota: kernel-doc warning fixes
udf: use linux/uaccess.h
fs/ext2/super.c: Drop memory allocation cast
quota: remove dqptr_sem
quota: simplify remove_inode_dquot_ref()
quota: avoid unnecessary dqget()/dqput() calls
quota: protect Q_GETFMT by dqonoff_mutex

+189 -196
+1 -1
fs/ext2/super.c
··· 161 161 static struct inode *ext2_alloc_inode(struct super_block *sb) 162 162 { 163 163 struct ext2_inode_info *ei; 164 - ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL); 164 + ei = kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL); 165 165 if (!ei) 166 166 return NULL; 167 167 ei->i_block_alloc_info = NULL;
+82 -98
fs/quota/dquot.c
··· 96 96 * Note that some things (eg. sb pointer, type, id) doesn't change during 97 97 * the life of the dquot structure and so needn't to be protected by a lock 98 98 * 99 - * Any operation working on dquots via inode pointers must hold dqptr_sem. If 100 - * operation is just reading pointers from inode (or not using them at all) the 101 - * read lock is enough. If pointers are altered function must hold write lock. 99 + * Operation accessing dquots via inode pointers are protected by dquot_srcu. 100 + * Operation of reading pointer needs srcu_read_lock(&dquot_srcu), and 101 + * synchronize_srcu(&dquot_srcu) is called after clearing pointers from 102 + * inode and before dropping dquot references to avoid use of dquots after 103 + * they are freed. dq_data_lock is used to serialize the pointer setting and 104 + * clearing operations. 102 105 * Special care needs to be taken about S_NOQUOTA inode flag (marking that 103 106 * inode is a quota file). Functions adding pointers from inode to dquots have 104 - * to check this flag under dqptr_sem and then (if S_NOQUOTA is not set) they 105 - * have to do all pointer modifications before dropping dqptr_sem. This makes 107 + * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they 108 + * have to do all pointer modifications before dropping dq_data_lock. This makes 106 109 * sure they cannot race with quotaon which first sets S_NOQUOTA flag and 107 110 * then drops all pointers to dquots from an inode. 108 111 * ··· 119 116 * spinlock to internal buffers before writing. 120 117 * 121 118 * Lock ordering (including related VFS locks) is the following: 122 - * dqonoff_mutex > i_mutex > journal_lock > dqptr_sem > dquot->dq_lock > 123 - * dqio_mutex 119 + * dqonoff_mutex > i_mutex > journal_lock > dquot->dq_lock > dqio_mutex 124 120 * dqonoff_mutex > i_mutex comes from dquot_quota_sync, dquot_enable, etc. 
125 - * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem > 126 - * dqptr_sem. But filesystem has to count with the fact that functions such as 127 - * dquot_alloc_space() acquire dqptr_sem and they usually have to be called 128 - * from inside a transaction to keep filesystem consistency after a crash. Also 129 - * filesystems usually want to do some IO on dquot from ->mark_dirty which is 130 - * called with dqptr_sem held. 131 121 */ 132 122 133 123 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock); 134 124 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock); 135 125 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock); 136 126 EXPORT_SYMBOL(dq_data_lock); 127 + DEFINE_STATIC_SRCU(dquot_srcu); 137 128 138 129 void __quota_error(struct super_block *sb, const char *func, 139 130 const char *fmt, ...) ··· 730 733 731 734 /* 732 735 * Put reference to dquot 733 - * NOTE: If you change this function please check whether dqput_blocks() works right... 734 736 */ 735 737 void dqput(struct dquot *dquot) 736 738 { ··· 959 963 } 960 964 961 965 /* 962 - * Return 0 if dqput() won't block. 963 - * (note that 1 doesn't necessarily mean blocking) 964 - */ 965 - static inline int dqput_blocks(struct dquot *dquot) 966 - { 967 - if (atomic_read(&dquot->dq_count) <= 1) 968 - return 1; 969 - return 0; 970 - } 971 - 972 - /* 973 966 * Remove references to dquots from inode and add dquot to list for freeing 974 967 * if we have the last reference to dquot 975 - * We can't race with anybody because we hold dqptr_sem for writing... 
976 968 */ 977 - static int remove_inode_dquot_ref(struct inode *inode, int type, 978 - struct list_head *tofree_head) 969 + static void remove_inode_dquot_ref(struct inode *inode, int type, 970 + struct list_head *tofree_head) 979 971 { 980 972 struct dquot *dquot = inode->i_dquot[type]; 981 973 982 974 inode->i_dquot[type] = NULL; 983 - if (dquot) { 984 - if (dqput_blocks(dquot)) { 985 - #ifdef CONFIG_QUOTA_DEBUG 986 - if (atomic_read(&dquot->dq_count) != 1) 987 - quota_error(inode->i_sb, "Adding dquot with " 988 - "dq_count %d to dispose list", 989 - atomic_read(&dquot->dq_count)); 990 - #endif 991 - spin_lock(&dq_list_lock); 992 - /* As dquot must have currently users it can't be on 993 - * the free list... */ 994 - list_add(&dquot->dq_free, tofree_head); 995 - spin_unlock(&dq_list_lock); 996 - return 1; 997 - } 998 - else 999 - dqput(dquot); /* We have guaranteed we won't block */ 975 + if (!dquot) 976 + return; 977 + 978 + if (list_empty(&dquot->dq_free)) { 979 + /* 980 + * The inode still has reference to dquot so it can't be in the 981 + * free list 982 + */ 983 + spin_lock(&dq_list_lock); 984 + list_add(&dquot->dq_free, tofree_head); 985 + spin_unlock(&dq_list_lock); 986 + } else { 987 + /* 988 + * Dquot is already in a list to put so we won't drop the last 989 + * reference here. 990 + */ 991 + dqput(dquot); 1000 992 } 1001 - return 0; 1002 993 } 1003 994 1004 995 /* ··· 1020 1037 * We have to scan also I_NEW inodes because they can already 1021 1038 * have quota pointer initialized. Luckily, we need to touch 1022 1039 * only quota pointers and these have separate locking 1023 - * (dqptr_sem). 1040 + * (dq_data_lock). 
1024 1041 */ 1042 + spin_lock(&dq_data_lock); 1025 1043 if (!IS_NOQUOTA(inode)) { 1026 1044 if (unlikely(inode_get_rsv_space(inode) > 0)) 1027 1045 reserved = 1; 1028 1046 remove_inode_dquot_ref(inode, type, tofree_head); 1029 1047 } 1048 + spin_unlock(&dq_data_lock); 1030 1049 } 1031 1050 spin_unlock(&inode_sb_list_lock); 1032 1051 #ifdef CONFIG_QUOTA_DEBUG ··· 1046 1061 LIST_HEAD(tofree_head); 1047 1062 1048 1063 if (sb->dq_op) { 1049 - down_write(&sb_dqopt(sb)->dqptr_sem); 1050 1064 remove_dquot_ref(sb, type, &tofree_head); 1051 - up_write(&sb_dqopt(sb)->dqptr_sem); 1065 + synchronize_srcu(&dquot_srcu); 1052 1066 put_dquot_list(&tofree_head); 1053 1067 } 1054 1068 } ··· 1378 1394 /* 1379 1395 * Initialize quota pointers in inode 1380 1396 * 1381 - * We do things in a bit complicated way but by that we avoid calling 1382 - * dqget() and thus filesystem callbacks under dqptr_sem. 1383 - * 1384 1397 * It is better to call this function outside of any transaction as it 1385 1398 * might need a lot of space in journal for dquot structure allocation. 1386 1399 */ 1387 1400 static void __dquot_initialize(struct inode *inode, int type) 1388 1401 { 1389 - int cnt; 1402 + int cnt, init_needed = 0; 1390 1403 struct dquot *got[MAXQUOTAS]; 1391 1404 struct super_block *sb = inode->i_sb; 1392 1405 qsize_t rsv; 1393 1406 1394 - /* First test before acquiring mutex - solves deadlocks when we 1395 - * re-enter the quota code and are already holding the mutex */ 1396 1407 if (!dquot_active(inode)) 1397 1408 return; 1398 1409 ··· 1397 1418 got[cnt] = NULL; 1398 1419 if (type != -1 && cnt != type) 1399 1420 continue; 1421 + /* 1422 + * The i_dquot should have been initialized in most cases, 1423 + * we check it without locking here to avoid unnecessary 1424 + * dqget()/dqput() calls. 
1425 + */ 1426 + if (inode->i_dquot[cnt]) 1427 + continue; 1428 + init_needed = 1; 1429 + 1400 1430 switch (cnt) { 1401 1431 case USRQUOTA: 1402 1432 qid = make_kqid_uid(inode->i_uid); ··· 1417 1429 got[cnt] = dqget(sb, qid); 1418 1430 } 1419 1431 1420 - down_write(&sb_dqopt(sb)->dqptr_sem); 1432 + /* All required i_dquot has been initialized */ 1433 + if (!init_needed) 1434 + return; 1435 + 1436 + spin_lock(&dq_data_lock); 1421 1437 if (IS_NOQUOTA(inode)) 1422 1438 goto out_err; 1423 1439 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { ··· 1441 1449 * did a write before quota was turned on 1442 1450 */ 1443 1451 rsv = inode_get_rsv_space(inode); 1444 - if (unlikely(rsv)) { 1445 - spin_lock(&dq_data_lock); 1452 + if (unlikely(rsv)) 1446 1453 dquot_resv_space(inode->i_dquot[cnt], rsv); 1447 - spin_unlock(&dq_data_lock); 1448 - } 1449 1454 } 1450 1455 } 1451 1456 out_err: 1452 - up_write(&sb_dqopt(sb)->dqptr_sem); 1457 + spin_unlock(&dq_data_lock); 1453 1458 /* Drop unused references */ 1454 1459 dqput_all(got); 1455 1460 } ··· 1458 1469 EXPORT_SYMBOL(dquot_initialize); 1459 1470 1460 1471 /* 1461 - * Release all quotas referenced by inode 1472 + * Release all quotas referenced by inode. 1473 + * 1474 + * This function only be called on inode free or converting 1475 + * a file to quota file, no other users for the i_dquot in 1476 + * both cases, so we needn't call synchronize_srcu() after 1477 + * clearing i_dquot. 
1462 1478 */ 1463 1479 static void __dquot_drop(struct inode *inode) 1464 1480 { 1465 1481 int cnt; 1466 1482 struct dquot *put[MAXQUOTAS]; 1467 1483 1468 - down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1484 + spin_lock(&dq_data_lock); 1469 1485 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1470 1486 put[cnt] = inode->i_dquot[cnt]; 1471 1487 inode->i_dquot[cnt] = NULL; 1472 1488 } 1473 - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1489 + spin_unlock(&dq_data_lock); 1474 1490 dqput_all(put); 1475 1491 } 1476 1492 ··· 1593 1599 */ 1594 1600 int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags) 1595 1601 { 1596 - int cnt, ret = 0; 1602 + int cnt, ret = 0, index; 1597 1603 struct dquot_warn warn[MAXQUOTAS]; 1598 1604 struct dquot **dquots = inode->i_dquot; 1599 1605 int reserve = flags & DQUOT_SPACE_RESERVE; 1600 1606 1601 - /* 1602 - * First test before acquiring mutex - solves deadlocks when we 1603 - * re-enter the quota code and are already holding the mutex 1604 - */ 1605 1607 if (!dquot_active(inode)) { 1606 1608 inode_incr_space(inode, number, reserve); 1607 1609 goto out; ··· 1606 1616 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1607 1617 warn[cnt].w_type = QUOTA_NL_NOWARN; 1608 1618 1609 - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1619 + index = srcu_read_lock(&dquot_srcu); 1610 1620 spin_lock(&dq_data_lock); 1611 1621 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1612 1622 if (!dquots[cnt]) ··· 1633 1643 goto out_flush_warn; 1634 1644 mark_all_dquot_dirty(dquots); 1635 1645 out_flush_warn: 1636 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1646 + srcu_read_unlock(&dquot_srcu, index); 1637 1647 flush_warnings(warn); 1638 1648 out: 1639 1649 return ret; ··· 1645 1655 */ 1646 1656 int dquot_alloc_inode(const struct inode *inode) 1647 1657 { 1648 - int cnt, ret = 0; 1658 + int cnt, ret = 0, index; 1649 1659 struct dquot_warn warn[MAXQUOTAS]; 1650 1660 struct dquot * const *dquots = inode->i_dquot; 1651 1661 1652 - /* First test before acquiring mutex - 
solves deadlocks when we 1653 - * re-enter the quota code and are already holding the mutex */ 1654 1662 if (!dquot_active(inode)) 1655 1663 return 0; 1656 1664 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1657 1665 warn[cnt].w_type = QUOTA_NL_NOWARN; 1658 - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1666 + 1667 + index = srcu_read_lock(&dquot_srcu); 1659 1668 spin_lock(&dq_data_lock); 1660 1669 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1661 1670 if (!dquots[cnt]) ··· 1674 1685 spin_unlock(&dq_data_lock); 1675 1686 if (ret == 0) 1676 1687 mark_all_dquot_dirty(dquots); 1677 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1688 + srcu_read_unlock(&dquot_srcu, index); 1678 1689 flush_warnings(warn); 1679 1690 return ret; 1680 1691 } ··· 1685 1696 */ 1686 1697 int dquot_claim_space_nodirty(struct inode *inode, qsize_t number) 1687 1698 { 1688 - int cnt; 1699 + int cnt, index; 1689 1700 1690 1701 if (!dquot_active(inode)) { 1691 1702 inode_claim_rsv_space(inode, number); 1692 1703 return 0; 1693 1704 } 1694 1705 1695 - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1706 + index = srcu_read_lock(&dquot_srcu); 1696 1707 spin_lock(&dq_data_lock); 1697 1708 /* Claim reserved quotas to allocated quotas */ 1698 1709 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { ··· 1704 1715 inode_claim_rsv_space(inode, number); 1705 1716 spin_unlock(&dq_data_lock); 1706 1717 mark_all_dquot_dirty(inode->i_dquot); 1707 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1718 + srcu_read_unlock(&dquot_srcu, index); 1708 1719 return 0; 1709 1720 } 1710 1721 EXPORT_SYMBOL(dquot_claim_space_nodirty); ··· 1714 1725 */ 1715 1726 void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number) 1716 1727 { 1717 - int cnt; 1728 + int cnt, index; 1718 1729 1719 1730 if (!dquot_active(inode)) { 1720 1731 inode_reclaim_rsv_space(inode, number); 1721 1732 return; 1722 1733 } 1723 1734 1724 - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1735 + index = srcu_read_lock(&dquot_srcu); 1725 1736 spin_lock(&dq_data_lock); 1726 
1737 /* Claim reserved quotas to allocated quotas */ 1727 1738 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { ··· 1733 1744 inode_reclaim_rsv_space(inode, number); 1734 1745 spin_unlock(&dq_data_lock); 1735 1746 mark_all_dquot_dirty(inode->i_dquot); 1736 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1747 + srcu_read_unlock(&dquot_srcu, index); 1737 1748 return; 1738 1749 } 1739 1750 EXPORT_SYMBOL(dquot_reclaim_space_nodirty); ··· 1746 1757 unsigned int cnt; 1747 1758 struct dquot_warn warn[MAXQUOTAS]; 1748 1759 struct dquot **dquots = inode->i_dquot; 1749 - int reserve = flags & DQUOT_SPACE_RESERVE; 1760 + int reserve = flags & DQUOT_SPACE_RESERVE, index; 1750 1761 1751 - /* First test before acquiring mutex - solves deadlocks when we 1752 - * re-enter the quota code and are already holding the mutex */ 1753 1762 if (!dquot_active(inode)) { 1754 1763 inode_decr_space(inode, number, reserve); 1755 1764 return; 1756 1765 } 1757 1766 1758 - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1767 + index = srcu_read_lock(&dquot_srcu); 1759 1768 spin_lock(&dq_data_lock); 1760 1769 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1761 1770 int wtype; ··· 1776 1789 goto out_unlock; 1777 1790 mark_all_dquot_dirty(dquots); 1778 1791 out_unlock: 1779 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1792 + srcu_read_unlock(&dquot_srcu, index); 1780 1793 flush_warnings(warn); 1781 1794 } 1782 1795 EXPORT_SYMBOL(__dquot_free_space); ··· 1789 1802 unsigned int cnt; 1790 1803 struct dquot_warn warn[MAXQUOTAS]; 1791 1804 struct dquot * const *dquots = inode->i_dquot; 1805 + int index; 1792 1806 1793 - /* First test before acquiring mutex - solves deadlocks when we 1794 - * re-enter the quota code and are already holding the mutex */ 1795 1807 if (!dquot_active(inode)) 1796 1808 return; 1797 1809 1798 - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1810 + index = srcu_read_lock(&dquot_srcu); 1799 1811 spin_lock(&dq_data_lock); 1800 1812 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1801 1813 int wtype; ··· 1809 
1823 } 1810 1824 spin_unlock(&dq_data_lock); 1811 1825 mark_all_dquot_dirty(dquots); 1812 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1826 + srcu_read_unlock(&dquot_srcu, index); 1813 1827 flush_warnings(warn); 1814 1828 } 1815 1829 EXPORT_SYMBOL(dquot_free_inode); ··· 1823 1837 * This operation can block, but only after everything is updated 1824 1838 * A transaction must be started when entering this function. 1825 1839 * 1840 + * We are holding reference on transfer_from & transfer_to, no need to 1841 + * protect them by srcu_read_lock(). 1826 1842 */ 1827 1843 int __dquot_transfer(struct inode *inode, struct dquot **transfer_to) 1828 1844 { ··· 1837 1849 struct dquot_warn warn_from_inodes[MAXQUOTAS]; 1838 1850 struct dquot_warn warn_from_space[MAXQUOTAS]; 1839 1851 1840 - /* First test before acquiring mutex - solves deadlocks when we 1841 - * re-enter the quota code and are already holding the mutex */ 1842 1852 if (IS_NOQUOTA(inode)) 1843 1853 return 0; 1844 1854 /* Initialize the arrays */ ··· 1845 1859 warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN; 1846 1860 warn_from_space[cnt].w_type = QUOTA_NL_NOWARN; 1847 1861 } 1848 - down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1862 + 1863 + spin_lock(&dq_data_lock); 1849 1864 if (IS_NOQUOTA(inode)) { /* File without quota accounting? 
*/ 1850 - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1865 + spin_unlock(&dq_data_lock); 1851 1866 return 0; 1852 1867 } 1853 - spin_lock(&dq_data_lock); 1854 1868 cur_space = inode_get_bytes(inode); 1855 1869 rsv_space = inode_get_rsv_space(inode); 1856 1870 space = cur_space + rsv_space; ··· 1904 1918 inode->i_dquot[cnt] = transfer_to[cnt]; 1905 1919 } 1906 1920 spin_unlock(&dq_data_lock); 1907 - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1908 1921 1909 1922 mark_all_dquot_dirty(transfer_from); 1910 1923 mark_all_dquot_dirty(transfer_to); ··· 1917 1932 return 0; 1918 1933 over_quota: 1919 1934 spin_unlock(&dq_data_lock); 1920 - up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1921 1935 flush_warnings(warn_to); 1922 1936 return ret; 1923 1937 }
+1 -1
fs/quota/kqid.c
··· 55 55 /** 56 56 * from_kqid - Create a qid from a kqid user-namespace pair. 57 57 * @targ: The user namespace we want a qid in. 58 - * @kuid: The kernel internal quota identifier to start with. 58 + * @kqid: The kernel internal quota identifier to start with. 59 59 * 60 60 * Map @kqid into the user-namespace specified by @targ and 61 61 * return the resulting qid.
+1 -2
fs/quota/netlink.c
··· 32 32 33 33 /** 34 34 * quota_send_warning - Send warning to userspace about exceeded quota 35 - * @type: The quota type: USRQQUOTA, GRPQUOTA,... 36 - * @id: The user or group id of the quota that was exceeded 35 + * @qid: The kernel internal quota identifier. 37 36 * @dev: The device on which the fs is mounted (sb->s_dev) 38 37 * @warntype: The type of the warning: QUOTA_NL_... 39 38 *
+3 -3
fs/quota/quota.c
··· 79 79 { 80 80 __u32 fmt; 81 81 82 - down_read(&sb_dqopt(sb)->dqptr_sem); 82 + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); 83 83 if (!sb_has_quota_active(sb, type)) { 84 - up_read(&sb_dqopt(sb)->dqptr_sem); 84 + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); 85 85 return -ESRCH; 86 86 } 87 87 fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id; 88 - up_read(&sb_dqopt(sb)->dqptr_sem); 88 + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); 89 89 if (copy_to_user(addr, &fmt, sizeof(fmt))) 90 90 return -EFAULT; 91 91 return 0;
+63 -48
fs/reiserfs/do_balan.c
··· 286 286 return 0; 287 287 } 288 288 289 - static void balance_leaf_insert_left(struct tree_balance *tb, 290 - struct item_head *ih, const char *body) 289 + static unsigned int balance_leaf_insert_left(struct tree_balance *tb, 290 + struct item_head *const ih, 291 + const char * const body) 291 292 { 292 293 int ret; 293 294 struct buffer_info bi; 294 295 int n = B_NR_ITEMS(tb->L[0]); 296 + unsigned body_shift_bytes = 0; 295 297 296 298 if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) { 297 299 /* part of new item falls into L[0] */ ··· 331 329 332 330 put_ih_item_len(ih, new_item_len); 333 331 if (tb->lbytes > tb->zeroes_num) { 334 - body += (tb->lbytes - tb->zeroes_num); 332 + body_shift_bytes = tb->lbytes - tb->zeroes_num; 335 333 tb->zeroes_num = 0; 336 334 } else 337 335 tb->zeroes_num -= tb->lbytes; ··· 351 349 tb->insert_size[0] = 0; 352 350 tb->zeroes_num = 0; 353 351 } 352 + return body_shift_bytes; 354 353 } 355 354 356 355 static void balance_leaf_paste_left_shift_dirent(struct tree_balance *tb, 357 - struct item_head *ih, 358 - const char *body) 356 + struct item_head * const ih, 357 + const char * const body) 359 358 { 360 359 int n = B_NR_ITEMS(tb->L[0]); 361 360 struct buffer_info bi; ··· 416 413 tb->pos_in_item -= tb->lbytes; 417 414 } 418 415 419 - static void balance_leaf_paste_left_shift(struct tree_balance *tb, 420 - struct item_head *ih, 421 - const char *body) 416 + static unsigned int balance_leaf_paste_left_shift(struct tree_balance *tb, 417 + struct item_head * const ih, 418 + const char * const body) 422 419 { 423 420 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); 424 421 int n = B_NR_ITEMS(tb->L[0]); 425 422 struct buffer_info bi; 423 + int body_shift_bytes = 0; 426 424 427 425 if (is_direntry_le_ih(item_head(tbS0, tb->item_pos))) { 428 426 balance_leaf_paste_left_shift_dirent(tb, ih, body); 429 - return; 427 + return 0; 430 428 } 431 429 432 430 RFALSE(tb->lbytes <= 0, ··· 501 497 * insert_size[0] 502 498 */ 503 
499 if (l_n > tb->zeroes_num) { 504 - body += (l_n - tb->zeroes_num); 500 + body_shift_bytes = l_n - tb->zeroes_num; 505 501 tb->zeroes_num = 0; 506 502 } else 507 503 tb->zeroes_num -= l_n; ··· 530 526 */ 531 527 leaf_shift_left(tb, tb->lnum[0], tb->lbytes); 532 528 } 529 + return body_shift_bytes; 533 530 } 534 531 535 532 536 533 /* appended item will be in L[0] in whole */ 537 534 static void balance_leaf_paste_left_whole(struct tree_balance *tb, 538 - struct item_head *ih, 539 - const char *body) 535 + struct item_head * const ih, 536 + const char * const body) 540 537 { 541 538 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); 542 539 int n = B_NR_ITEMS(tb->L[0]); ··· 589 584 tb->zeroes_num = 0; 590 585 } 591 586 592 - static void balance_leaf_paste_left(struct tree_balance *tb, 593 - struct item_head *ih, const char *body) 587 + static unsigned int balance_leaf_paste_left(struct tree_balance *tb, 588 + struct item_head * const ih, 589 + const char * const body) 594 590 { 595 591 /* we must shift the part of the appended item */ 596 592 if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) 597 - balance_leaf_paste_left_shift(tb, ih, body); 593 + return balance_leaf_paste_left_shift(tb, ih, body); 598 594 else 599 595 balance_leaf_paste_left_whole(tb, ih, body); 596 + return 0; 600 597 } 601 598 602 599 /* Shift lnum[0] items from S[0] to the left neighbor L[0] */ 603 - static void balance_leaf_left(struct tree_balance *tb, struct item_head *ih, 604 - const char *body, int flag) 600 + static unsigned int balance_leaf_left(struct tree_balance *tb, 601 + struct item_head * const ih, 602 + const char * const body, int flag) 605 603 { 606 604 if (tb->lnum[0] <= 0) 607 - return; 605 + return 0; 608 606 609 607 /* new item or it part falls to L[0], shift it too */ 610 608 if (tb->item_pos < tb->lnum[0]) { 611 609 BUG_ON(flag != M_INSERT && flag != M_PASTE); 612 610 613 611 if (flag == M_INSERT) 614 - balance_leaf_insert_left(tb, ih, body); 612 + return 
balance_leaf_insert_left(tb, ih, body); 615 613 else /* M_PASTE */ 616 - balance_leaf_paste_left(tb, ih, body); 614 + return balance_leaf_paste_left(tb, ih, body); 617 615 } else 618 616 /* new item doesn't fall into L[0] */ 619 617 leaf_shift_left(tb, tb->lnum[0], tb->lbytes); 618 + return 0; 620 619 } 621 620 622 621 623 622 static void balance_leaf_insert_right(struct tree_balance *tb, 624 - struct item_head *ih, const char *body) 623 + struct item_head * const ih, 624 + const char * const body) 625 625 { 626 626 627 627 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); ··· 714 704 715 705 716 706 static void balance_leaf_paste_right_shift_dirent(struct tree_balance *tb, 717 - struct item_head *ih, const char *body) 707 + struct item_head * const ih, 708 + const char * const body) 718 709 { 719 710 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); 720 711 struct buffer_info bi; ··· 765 754 } 766 755 767 756 static void balance_leaf_paste_right_shift(struct tree_balance *tb, 768 - struct item_head *ih, const char *body) 757 + struct item_head * const ih, 758 + const char * const body) 769 759 { 770 760 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); 771 761 int n_shift, n_rem, r_zeroes_number, version; ··· 843 831 } 844 832 845 833 static void balance_leaf_paste_right_whole(struct tree_balance *tb, 846 - struct item_head *ih, const char *body) 834 + struct item_head * const ih, 835 + const char * const body) 847 836 { 848 837 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); 849 838 int n = B_NR_ITEMS(tbS0); ··· 887 874 } 888 875 889 876 static void balance_leaf_paste_right(struct tree_balance *tb, 890 - struct item_head *ih, const char *body) 877 + struct item_head * const ih, 878 + const char * const body) 891 879 { 892 880 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); 893 881 int n = B_NR_ITEMS(tbS0); ··· 910 896 } 911 897 912 898 /* shift rnum[0] items from S[0] to the right neighbor R[0] */ 913 - static void 
balance_leaf_right(struct tree_balance *tb, struct item_head *ih, 914 - const char *body, int flag) 899 + static void balance_leaf_right(struct tree_balance *tb, 900 + struct item_head * const ih, 901 + const char * const body, int flag) 915 902 { 916 903 if (tb->rnum[0] <= 0) 917 904 return; ··· 926 911 } 927 912 928 913 static void balance_leaf_new_nodes_insert(struct tree_balance *tb, 929 - struct item_head *ih, 930 - const char *body, 914 + struct item_head * const ih, 915 + const char * const body, 931 916 struct item_head *insert_key, 932 917 struct buffer_head **insert_ptr, 933 918 int i) ··· 1018 1003 1019 1004 /* we append to directory item */ 1020 1005 static void balance_leaf_new_nodes_paste_dirent(struct tree_balance *tb, 1021 - struct item_head *ih, 1022 - const char *body, 1006 + struct item_head * const ih, 1007 + const char * const body, 1023 1008 struct item_head *insert_key, 1024 1009 struct buffer_head **insert_ptr, 1025 1010 int i) ··· 1073 1058 } 1074 1059 1075 1060 static void balance_leaf_new_nodes_paste_shift(struct tree_balance *tb, 1076 - struct item_head *ih, 1077 - const char *body, 1061 + struct item_head * const ih, 1062 + const char * const body, 1078 1063 struct item_head *insert_key, 1079 1064 struct buffer_head **insert_ptr, 1080 1065 int i) ··· 1146 1131 } 1147 1132 1148 1133 static void balance_leaf_new_nodes_paste_whole(struct tree_balance *tb, 1149 - struct item_head *ih, 1150 - const char *body, 1134 + struct item_head * const ih, 1135 + const char * const body, 1151 1136 struct item_head *insert_key, 1152 1137 struct buffer_head **insert_ptr, 1153 1138 int i) ··· 1199 1184 1200 1185 } 1201 1186 static void balance_leaf_new_nodes_paste(struct tree_balance *tb, 1202 - struct item_head *ih, 1203 - const char *body, 1187 + struct item_head * const ih, 1188 + const char * const body, 1204 1189 struct item_head *insert_key, 1205 1190 struct buffer_head **insert_ptr, 1206 1191 int i) ··· 1229 1214 1230 1215 /* Fill new nodes that 
appear in place of S[0] */ 1231 1216 static void balance_leaf_new_nodes(struct tree_balance *tb, 1232 - struct item_head *ih, 1233 - const char *body, 1217 + struct item_head * const ih, 1218 + const char * const body, 1234 1219 struct item_head *insert_key, 1235 1220 struct buffer_head **insert_ptr, 1236 1221 int flag) ··· 1269 1254 } 1270 1255 1271 1256 static void balance_leaf_finish_node_insert(struct tree_balance *tb, 1272 - struct item_head *ih, 1273 - const char *body) 1257 + struct item_head * const ih, 1258 + const char * const body) 1274 1259 { 1275 1260 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); 1276 1261 struct buffer_info bi; ··· 1286 1271 } 1287 1272 1288 1273 static void balance_leaf_finish_node_paste_dirent(struct tree_balance *tb, 1289 - struct item_head *ih, 1290 - const char *body) 1274 + struct item_head * const ih, 1275 + const char * const body) 1291 1276 { 1292 1277 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); 1293 1278 struct item_head *pasted = item_head(tbS0, tb->item_pos); ··· 1320 1305 } 1321 1306 1322 1307 static void balance_leaf_finish_node_paste(struct tree_balance *tb, 1323 - struct item_head *ih, 1324 - const char *body) 1308 + struct item_head * const ih, 1309 + const char * const body) 1325 1310 { 1326 1311 struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path); 1327 1312 struct buffer_info bi; ··· 1364 1349 * of the affected item which remains in S 1365 1350 */ 1366 1351 static void balance_leaf_finish_node(struct tree_balance *tb, 1367 - struct item_head *ih, 1368 - const char *body, int flag) 1352 + struct item_head * const ih, 1353 + const char * const body, int flag) 1369 1354 { 1370 1355 /* if we must insert or append into buffer S[0] */ 1371 1356 if (0 <= tb->item_pos && tb->item_pos < tb->s0num) { ··· 1417 1402 && is_indirect_le_ih(item_head(tbS0, tb->item_pos))) 1418 1403 tb->pos_in_item *= UNFM_P_SIZE; 1419 1404 1420 - balance_leaf_left(tb, ih, body, flag); 1405 + body += 
balance_leaf_left(tb, ih, body, flag); 1421 1406 1422 1407 /* tb->lnum[0] > 0 */ 1423 1408 /* Calculate new item position */
+16 -6
fs/reiserfs/journal.c
··· 1947 1947 } 1948 1948 } 1949 1949 1950 - /* wait for all commits to finish */ 1951 - cancel_delayed_work(&SB_JOURNAL(sb)->j_work); 1952 1950 1953 1951 /* 1954 1952 * We must release the write lock here because ··· 1954 1956 */ 1955 1957 reiserfs_write_unlock(sb); 1956 1958 1959 + /* 1960 + * Cancel flushing of old commits. Note that neither of these works 1961 + * will be requeued because superblock is being shutdown and doesn't 1962 + * have MS_ACTIVE set. 1963 + */ 1957 1964 cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work); 1958 - flush_workqueue(REISERFS_SB(sb)->commit_wq); 1965 + /* wait for all commits to finish */ 1966 + cancel_delayed_work_sync(&SB_JOURNAL(sb)->j_work); 1959 1967 1960 1968 free_journal_ram(sb); 1961 1969 ··· 4296 4292 if (flush) { 4297 4293 flush_commit_list(sb, jl, 1); 4298 4294 flush_journal_list(sb, jl, 1); 4299 - } else if (!(jl->j_state & LIST_COMMIT_PENDING)) 4300 - queue_delayed_work(REISERFS_SB(sb)->commit_wq, 4301 - &journal->j_work, HZ / 10); 4295 + } else if (!(jl->j_state & LIST_COMMIT_PENDING)) { 4296 + /* 4297 + * Avoid queueing work when sb is being shut down. Transaction 4298 + * will be flushed on journal shutdown. 4299 + */ 4300 + if (sb->s_flags & MS_ACTIVE) 4301 + queue_delayed_work(REISERFS_SB(sb)->commit_wq, 4302 + &journal->j_work, HZ / 10); 4303 + } 4302 4304 4303 4305 /* 4304 4306 * if the next transaction has any chance of wrapping, flush
+3 -2
fs/reiserfs/lbalance.c
··· 899 899 900 900 /* insert item into the leaf node in position before */ 901 901 void leaf_insert_into_buf(struct buffer_info *bi, int before, 902 - struct item_head *inserted_item_ih, 903 - const char *inserted_item_body, int zeros_number) 902 + struct item_head * const inserted_item_ih, 903 + const char * const inserted_item_body, 904 + int zeros_number) 904 905 { 905 906 struct buffer_head *bh = bi->bi_bh; 906 907 int nr, free_space;
+5 -4
fs/reiserfs/reiserfs.h
··· 3216 3216 void leaf_delete_items(struct buffer_info *cur_bi, int last_first, int first, 3217 3217 int del_num, int del_bytes); 3218 3218 void leaf_insert_into_buf(struct buffer_info *bi, int before, 3219 - struct item_head *inserted_item_ih, 3220 - const char *inserted_item_body, int zeros_number); 3221 - void leaf_paste_in_buffer(struct buffer_info *bi, int pasted_item_num, 3222 - int pos_in_item, int paste_size, const char *body, 3219 + struct item_head * const inserted_item_ih, 3220 + const char * const inserted_item_body, 3223 3221 int zeros_number); 3222 + void leaf_paste_in_buffer(struct buffer_info *bi, int pasted_item_num, 3223 + int pos_in_item, int paste_size, 3224 + const char * const body, int zeros_number); 3224 3225 void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num, 3225 3226 int pos_in_item, int cut_size); 3226 3227 void leaf_paste_entries(struct buffer_info *bi, int item_num, int before,
+5 -1
fs/reiserfs/super.c
··· 100 100 struct reiserfs_sb_info *sbi = REISERFS_SB(s); 101 101 unsigned long delay; 102 102 103 - if (s->s_flags & MS_RDONLY) 103 + /* 104 + * Avoid scheduling flush when sb is being shut down. It can race 105 + * with journal shutdown and free still queued delayed work. 106 + */ 107 + if (s->s_flags & MS_RDONLY || !(s->s_flags & MS_ACTIVE)) 104 108 return; 105 109 106 110 spin_lock(&sbi->old_work_lock);
-1
fs/super.c
··· 217 217 lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key); 218 218 mutex_init(&s->s_dquot.dqio_mutex); 219 219 mutex_init(&s->s_dquot.dqonoff_mutex); 220 - init_rwsem(&s->s_dquot.dqptr_sem); 221 220 s->s_maxbytes = MAX_NON_LFS; 222 221 s->s_op = &default_op; 223 222 s->s_time_gran = 1000000000;
+2 -20
fs/udf/file.c
··· 27 27 28 28 #include "udfdecl.h" 29 29 #include <linux/fs.h> 30 - #include <asm/uaccess.h> 30 + #include <linux/uaccess.h> 31 31 #include <linux/kernel.h> 32 32 #include <linux/string.h> /* memset */ 33 33 #include <linux/capability.h> ··· 100 100 return 0; 101 101 } 102 102 103 - static int udf_adinicb_write_end(struct file *file, 104 - struct address_space *mapping, 105 - loff_t pos, unsigned len, unsigned copied, 106 - struct page *page, void *fsdata) 107 - { 108 - struct inode *inode = mapping->host; 109 - unsigned offset = pos & (PAGE_CACHE_SIZE - 1); 110 - char *kaddr; 111 - struct udf_inode_info *iinfo = UDF_I(inode); 112 - 113 - kaddr = kmap_atomic(page); 114 - memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr + offset, 115 - kaddr + offset, copied); 116 - kunmap_atomic(kaddr); 117 - 118 - return simple_write_end(file, mapping, pos, len, copied, page, fsdata); 119 - } 120 - 121 103 static ssize_t udf_adinicb_direct_IO(int rw, struct kiocb *iocb, 122 104 struct iov_iter *iter, 123 105 loff_t offset) ··· 112 130 .readpage = udf_adinicb_readpage, 113 131 .writepage = udf_adinicb_writepage, 114 132 .write_begin = udf_adinicb_write_begin, 115 - .write_end = udf_adinicb_write_end, 133 + .write_end = simple_write_end, 116 134 .direct_IO = udf_adinicb_direct_IO, 117 135 }; 118 136
+1 -1
fs/udf/lowlevel.c
··· 21 21 22 22 #include <linux/blkdev.h> 23 23 #include <linux/cdrom.h> 24 - #include <asm/uaccess.h> 24 + #include <linux/uaccess.h> 25 25 26 26 #include "udf_sb.h" 27 27
+1 -1
fs/udf/super.c
··· 63 63 #include "udf_i.h" 64 64 65 65 #include <linux/init.h> 66 - #include <asm/uaccess.h> 66 + #include <linux/uaccess.h> 67 67 68 68 #define VDS_POS_PRIMARY_VOL_DESC 0 69 69 #define VDS_POS_UNALLOC_SPACE_DESC 1
+1 -1
fs/udf/symlink.c
··· 20 20 */ 21 21 22 22 #include "udfdecl.h" 23 - #include <asm/uaccess.h> 23 + #include <linux/uaccess.h> 24 24 #include <linux/errno.h> 25 25 #include <linux/fs.h> 26 26 #include <linux/time.h>
+4 -5
fs/udf/unicode.c
··· 412 412 int extIndex = 0, newExtIndex = 0, hasExt = 0; 413 413 unsigned short valueCRC; 414 414 uint8_t curr; 415 - const uint8_t hexChar[] = "0123456789ABCDEF"; 416 415 417 416 if (udfName[0] == '.' && 418 417 (udfLen == 1 || (udfLen == 2 && udfName[1] == '.'))) { ··· 476 477 newIndex = 250; 477 478 newName[newIndex++] = CRC_MARK; 478 479 valueCRC = crc_itu_t(0, fidName, fidNameLen); 479 - newName[newIndex++] = hexChar[(valueCRC & 0xf000) >> 12]; 480 - newName[newIndex++] = hexChar[(valueCRC & 0x0f00) >> 8]; 481 - newName[newIndex++] = hexChar[(valueCRC & 0x00f0) >> 4]; 482 - newName[newIndex++] = hexChar[(valueCRC & 0x000f)]; 480 + newName[newIndex++] = hex_asc_upper_hi(valueCRC >> 8); 481 + newName[newIndex++] = hex_asc_upper_lo(valueCRC >> 8); 482 + newName[newIndex++] = hex_asc_upper_hi(valueCRC); 483 + newName[newIndex++] = hex_asc_upper_lo(valueCRC); 483 484 484 485 if (hasExt) { 485 486 newName[newIndex++] = EXT_MARK;
-1
include/linux/quota.h
··· 390 390 unsigned int flags; /* Flags for diskquotas on this device */ 391 391 struct mutex dqio_mutex; /* lock device while I/O in progress */ 392 392 struct mutex dqonoff_mutex; /* Serialize quotaon & quotaoff */ 393 - struct rw_semaphore dqptr_sem; /* serialize ops using quota_info struct, pointers from inode to dquots */ 394 393 struct inode *files[MAXQUOTAS]; /* inodes of quotafiles */ 395 394 struct mem_dqinfo info[MAXQUOTAS]; /* Information for each quota type */ 396 395 const struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */