Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs-2.6

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs-2.6: (33 commits)
quota: stop using QUOTA_OK / NO_QUOTA
dquot: cleanup dquot initialize routine
dquot: move dquot initialization responsibility into the filesystem
dquot: cleanup dquot drop routine
dquot: move dquot drop responsibility into the filesystem
dquot: cleanup dquot transfer routine
dquot: move dquot transfer responsibility into the filesystem
dquot: cleanup inode allocation / freeing routines
dquot: cleanup space allocation / freeing routines
ext3: add writepage sanity checks
ext3: Truncate allocated blocks if direct IO write fails to update i_size
quota: Properly invalidate caches even for filesystems with blocksize < pagesize
quota: generalize quota transfer interface
quota: sb_quota state flags cleanup
jbd: Delay discarding buffers in journal_unmap_buffer
ext3: quota_write cross block boundary behaviour
quota: drop permission checks from xfs_fs_set_xstate/xfs_fs_set_xquota
quota: split out compat_sys_quotactl support from quota.c
quota: split out netlink notification support from quota.c
quota: remove invalid optimization from quota_sync_all
...

Fixed trivial conflicts in fs/namei.c and fs/ufs/inode.c

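The change these commits share: quota work is no longer done behind the filesystem's back
through generic hooks, and the dquot helpers now return 0 or a negative errno instead of the
old QUOTA_OK / NO_QUOTA style results. The before/after shape of a typical call site,
distilled from the ext2/ext3 block-allocation hunks below (a schematic fragment, not a
complete function from any one file):

	/* before: helper returns a bare failure flag, the caller invents -EDQUOT */
	if (vfs_dq_alloc_block(inode, num)) {
		*errp = -EDQUOT;
		return 0;
	}

	/* after: the helper reports the errno itself, the caller just forwards it */
	ret = dquot_alloc_block(inode, num);
	if (ret) {
		*errp = ret;
		return 0;
	}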
+1578 -1569
-18
Documentation/filesystems/Locking
···
 
 --------------------------- dquot_operations -------------------------------
 prototypes:
-	int (*initialize) (struct inode *, int);
-	int (*drop) (struct inode *);
-	int (*alloc_space) (struct inode *, qsize_t, int);
-	int (*alloc_inode) (const struct inode *, unsigned long);
-	int (*free_space) (struct inode *, qsize_t);
-	int (*free_inode) (const struct inode *, unsigned long);
-	int (*transfer) (struct inode *, struct iattr *);
 	int (*write_dquot) (struct dquot *);
 	int (*acquire_dquot) (struct dquot *);
 	int (*release_dquot) (struct dquot *);
···
 What filesystem should expect from the generic quota functions:
 
 		FS recursion	Held locks when called
-initialize:	yes		maybe dqonoff_sem
-drop:		yes		-
-alloc_space:	->mark_dirty()	-
-alloc_inode:	->mark_dirty()	-
-free_space:	->mark_dirty()	-
-free_inode:	->mark_dirty()	-
-transfer:	yes		-
 write_dquot:	yes		dqonoff_sem or dqptr_sem
 acquire_dquot:	yes		dqonoff_sem or dqptr_sem
 release_dquot:	yes		dqonoff_sem or dqptr_sem
···
 
 FS recursion means calling ->quota_read() and ->quota_write() from superblock
 operations.
-
-->alloc_space(), ->alloc_inode(), ->free_space(), ->free_inode() are called
-only directly by the filesystem and do not call any fs functions only
-the ->mark_dirty() operation.
 
 More details about quota locking can be found in fs/dquot.c.
 
+1 -1
drivers/staging/pohmelfs/inode.c
···
 
 	if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
 	    (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
-		err = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
+		err = dquot_transfer(inode, attr);
 		if (err)
 			goto err_out_exit;
 	}
+2 -9
fs/attr.c
···
 #include <linux/capability.h>
 #include <linux/fsnotify.h>
 #include <linux/fcntl.h>
-#include <linux/quotaops.h>
 #include <linux/security.h>
 
 /* Taken over from the old code... */
···
 		error = inode->i_op->setattr(dentry, attr);
 	} else {
 		error = inode_change_ok(inode, attr);
-		if (!error) {
-			if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
-			    (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid))
-				error = vfs_dq_transfer(inode, attr) ?
-					-EDQUOT : 0;
-			if (!error)
-				error = inode_setattr(inode, attr);
-		}
+		if (!error)
+			error = inode_setattr(inode, attr);
 	}
 
 	if (ia_valid & ATTR_SIZE)
+7 -5
fs/ext2/balloc.c
···
 error_return:
 	brelse(bitmap_bh);
 	release_blocks(sb, freed);
-	vfs_dq_free_block(inode, freed);
+	dquot_free_block(inode, freed);
 }
 
 /**
···
 	unsigned short windowsz = 0;
 	unsigned long ngroups;
 	unsigned long num = *count;
+	int ret;
 
 	*errp = -ENOSPC;
 	sb = inode->i_sb;
···
 	/*
 	 * Check quota for allocation of this block.
 	 */
-	if (vfs_dq_alloc_block(inode, num)) {
-		*errp = -EDQUOT;
+	ret = dquot_alloc_block(inode, num);
+	if (ret) {
+		*errp = ret;
 		return 0;
 	}
 
···
 
 	*errp = 0;
 	brelse(bitmap_bh);
-	vfs_dq_free_block(inode, *count-num);
+	dquot_free_block(inode, *count-num);
 	*count = num;
 	return ret_block;
 
···
 	 * Undo the block allocation
 	 */
 	if (!performed_allocation)
-		vfs_dq_free_block(inode, *count);
+		dquot_free_block(inode, *count);
 	brelse(bitmap_bh);
 	return 0;
 }
+3 -2
fs/ext2/file.c
···
 
 #include <linux/time.h>
 #include <linux/pagemap.h>
+#include <linux/quotaops.h>
 #include "ext2.h"
 #include "xattr.h"
 #include "acl.h"
···
 	.compat_ioctl	= ext2_compat_ioctl,
 #endif
 	.mmap		= generic_file_mmap,
-	.open		= generic_file_open,
+	.open		= dquot_file_open,
 	.release	= ext2_release_file,
 	.fsync		= ext2_fsync,
 	.splice_read	= generic_file_splice_read,
···
 	.compat_ioctl	= ext2_compat_ioctl,
 #endif
 	.mmap		= xip_file_mmap,
-	.open		= generic_file_open,
+	.open		= dquot_file_open,
 	.release	= ext2_release_file,
 	.fsync		= ext2_fsync,
 };
+7 -7
fs/ext2/ialloc.c
···
 	if (!is_bad_inode(inode)) {
 		/* Quota is already initialized in iput() */
 		ext2_xattr_delete_inode(inode);
-		vfs_dq_free_inode(inode);
-		vfs_dq_drop(inode);
+		dquot_free_inode(inode);
+		dquot_drop(inode);
 	}
 
 	es = EXT2_SB(sb)->s_es;
···
 		goto fail_drop;
 	}
 
-	if (vfs_dq_alloc_inode(inode)) {
-		err = -EDQUOT;
+	dquot_initialize(inode);
+	err = dquot_alloc_inode(inode);
+	if (err)
 		goto fail_drop;
-	}
 
 	err = ext2_init_acl(inode, dir);
 	if (err)
···
 	return inode;
 
 fail_free_drop:
-	vfs_dq_free_inode(inode);
+	dquot_free_inode(inode);
 
 fail_drop:
-	vfs_dq_drop(inode);
+	dquot_drop(inode);
 	inode->i_flags |= S_NOQUOTA;
 	inode->i_nlink = 0;
 	unlock_new_inode(inode);
+6 -1
fs/ext2/inode.c
···
  */
 void ext2_delete_inode (struct inode * inode)
 {
+	if (!is_bad_inode(inode))
+		dquot_initialize(inode);
 	truncate_inode_pages(&inode->i_data, 0);
 
 	if (is_bad_inode(inode))
···
 	error = inode_change_ok(inode, iattr);
 	if (error)
 		return error;
+
+	if (iattr->ia_valid & ATTR_SIZE)
+		dquot_initialize(inode);
 	if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
 	    (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
-		error = vfs_dq_transfer(inode, iattr) ? -EDQUOT : 0;
+		error = dquot_transfer(inode, iattr);
 		if (error)
 			return error;
 	}
+34 -17
fs/ext2/namei.c
···
  */
 
 #include <linux/pagemap.h>
+#include <linux/quotaops.h>
 #include "ext2.h"
 #include "xattr.h"
 #include "acl.h"
···
  */
 static int ext2_create (struct inode * dir, struct dentry * dentry, int mode, struct nameidata *nd)
 {
-	struct inode * inode = ext2_new_inode (dir, mode);
-	int err = PTR_ERR(inode);
-	if (!IS_ERR(inode)) {
-		inode->i_op = &ext2_file_inode_operations;
-		if (ext2_use_xip(inode->i_sb)) {
-			inode->i_mapping->a_ops = &ext2_aops_xip;
-			inode->i_fop = &ext2_xip_file_operations;
-		} else if (test_opt(inode->i_sb, NOBH)) {
-			inode->i_mapping->a_ops = &ext2_nobh_aops;
-			inode->i_fop = &ext2_file_operations;
-		} else {
-			inode->i_mapping->a_ops = &ext2_aops;
-			inode->i_fop = &ext2_file_operations;
-		}
-		mark_inode_dirty(inode);
-		err = ext2_add_nondir(dentry, inode);
+	struct inode *inode;
+
+	dquot_initialize(dir);
+
+	inode = ext2_new_inode(dir, mode);
+	if (IS_ERR(inode))
+		return PTR_ERR(inode);
+
+	inode->i_op = &ext2_file_inode_operations;
+	if (ext2_use_xip(inode->i_sb)) {
+		inode->i_mapping->a_ops = &ext2_aops_xip;
+		inode->i_fop = &ext2_xip_file_operations;
+	} else if (test_opt(inode->i_sb, NOBH)) {
+		inode->i_mapping->a_ops = &ext2_nobh_aops;
+		inode->i_fop = &ext2_file_operations;
+	} else {
+		inode->i_mapping->a_ops = &ext2_aops;
+		inode->i_fop = &ext2_file_operations;
 	}
-	return err;
+	mark_inode_dirty(inode);
+	return ext2_add_nondir(dentry, inode);
 }
 
 static int ext2_mknod (struct inode * dir, struct dentry *dentry, int mode, dev_t rdev)
···
 
 	if (!new_valid_dev(rdev))
 		return -EINVAL;
+
+	dquot_initialize(dir);
 
 	inode = ext2_new_inode (dir, mode);
 	err = PTR_ERR(inode);
···
 
 	if (l > sb->s_blocksize)
 		goto out;
+
+	dquot_initialize(dir);
 
 	inode = ext2_new_inode (dir, S_IFLNK | S_IRWXUGO);
 	err = PTR_ERR(inode);
···
 	if (inode->i_nlink >= EXT2_LINK_MAX)
 		return -EMLINK;
 
+	dquot_initialize(dir);
+
 	inode->i_ctime = CURRENT_TIME_SEC;
 	inode_inc_link_count(inode);
 	atomic_inc(&inode->i_count);
···
 
 	if (dir->i_nlink >= EXT2_LINK_MAX)
 		goto out;
+
+	dquot_initialize(dir);
 
 	inode_inc_link_count(dir);
 
···
 	struct page * page;
 	int err = -ENOENT;
 
+	dquot_initialize(dir);
+
 	de = ext2_find_entry (dir, &dentry->d_name, &page);
 	if (!de)
 		goto out;
···
 	struct page * old_page;
 	struct ext2_dir_entry_2 * old_de;
 	int err = -ENOENT;
+
+	dquot_initialize(old_dir);
+	dquot_initialize(new_dir);
 
 	old_de = ext2_find_entry (old_dir, &old_dentry->d_name, &old_page);
 	if (!old_de)
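Since the generic ->initialize hook is gone, nothing initializes quotas on the filesystem's
behalf any more; every directory-modifying operation is expected to call dquot_initialize()
itself before it starts accounting, as each of the ext2 namei hunks above does. A minimal
sketch of the convention (the myfs_* names are placeholders, not part of this series):

	static int myfs_unlink(struct inode *dir, struct dentry *dentry)
	{
		/* set up quota structures before any block/inode accounting */
		dquot_initialize(dir);
		dquot_initialize(dentry->d_inode);

		/* ... the filesystem's own unlink work follows ... */
	}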
+2
fs/ext2/super.c
···
 static void ext2_clear_inode(struct inode *inode)
 {
 	struct ext2_block_alloc_info *rsv = EXT2_I(inode)->i_block_alloc_info;
+
+	dquot_drop(inode);
 	ext2_discard_reservation(inode);
 	EXT2_I(inode)->i_block_alloc_info = NULL;
 	if (unlikely(rsv))
+5 -5
fs/ext2/xattr.c
···
 		   the inode. */
 		ea_bdebug(new_bh, "reusing block");
 
-		error = -EDQUOT;
-		if (vfs_dq_alloc_block(inode, 1)) {
+		error = dquot_alloc_block(inode, 1);
+		if (error) {
 			unlock_buffer(new_bh);
 			goto cleanup;
 		}
···
 		 * as if nothing happened and cleanup the unused block */
 		if (error && error != -ENOSPC) {
 			if (new_bh && new_bh != old_bh)
-				vfs_dq_free_block(inode, 1);
+				dquot_free_block(inode, 1);
 			goto cleanup;
 		}
 	} else
···
 		le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
 		if (ce)
 			mb_cache_entry_release(ce);
-		vfs_dq_free_block(inode, 1);
+		dquot_free_block(inode, 1);
 		mark_buffer_dirty(old_bh);
 		ea_bdebug(old_bh, "refcount now=%d",
 			  le32_to_cpu(HDR(old_bh)->h_refcount));
···
 		mark_buffer_dirty(bh);
 		if (IS_SYNC(inode))
 			sync_dirty_buffer(bh);
-		vfs_dq_free_block(inode, 1);
+		dquot_free_block(inode, 1);
 	}
 	EXT2_I(inode)->i_file_acl = 0;
 
+6 -5
fs/ext3/balloc.c
···
 	}
 	ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
 	if (dquot_freed_blocks)
-		vfs_dq_free_block(inode, dquot_freed_blocks);
+		dquot_free_block(inode, dquot_freed_blocks);
 	return;
 }
 
···
 	/*
 	 * Check quota for allocation of this block.
 	 */
-	if (vfs_dq_alloc_block(inode, num)) {
-		*errp = -EDQUOT;
+	err = dquot_alloc_block(inode, num);
+	if (err) {
+		*errp = err;
 		return 0;
 	}
 
···
 
 	*errp = 0;
 	brelse(bitmap_bh);
-	vfs_dq_free_block(inode, *count-num);
+	dquot_free_block(inode, *count-num);
 	*count = num;
 	return ret_block;
 
···
 	 * Undo the block allocation
 	 */
 	if (!performed_allocation)
-		vfs_dq_free_block(inode, *count);
+		dquot_free_block(inode, *count);
 	brelse(bitmap_bh);
 	return 0;
 }
+4 -3
fs/ext3/file.c
···
 #include <linux/time.h>
 #include <linux/fs.h>
 #include <linux/jbd.h>
+#include <linux/quotaops.h>
 #include <linux/ext3_fs.h>
 #include <linux/ext3_jbd.h>
 #include "xattr.h"
···
  */
 static int ext3_release_file (struct inode * inode, struct file * filp)
 {
-	if (EXT3_I(inode)->i_state & EXT3_STATE_FLUSH_ON_CLOSE) {
+	if (ext3_test_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE)) {
 		filemap_flush(inode->i_mapping);
-		EXT3_I(inode)->i_state &= ~EXT3_STATE_FLUSH_ON_CLOSE;
+		ext3_clear_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE);
 	}
 	/* if we are the last writer on the inode, drop the block reservation */
 	if ((filp->f_mode & FMODE_WRITE) &&
···
 	.compat_ioctl	= ext3_compat_ioctl,
 #endif
 	.mmap		= generic_file_mmap,
-	.open		= generic_file_open,
+	.open		= dquot_file_open,
 	.release	= ext3_release_file,
 	.fsync		= ext3_sync_file,
 	.splice_read	= generic_file_splice_read,
+8 -8
fs/ext3/ialloc.c
···
 	 * Note: we must free any quota before locking the superblock,
 	 * as writing the quota to disk may need the lock as well.
 	 */
-	vfs_dq_init(inode);
+	dquot_initialize(inode);
 	ext3_xattr_delete_inode(handle, inode);
-	vfs_dq_free_inode(inode);
-	vfs_dq_drop(inode);
+	dquot_free_inode(inode);
+	dquot_drop(inode);
 
 	is_directory = S_ISDIR(inode->i_mode);
 
···
 		sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;
 
 	ret = inode;
-	if (vfs_dq_alloc_inode(inode)) {
-		err = -EDQUOT;
+	dquot_initialize(inode);
+	err = dquot_alloc_inode(inode);
+	if (err)
 		goto fail_drop;
-	}
 
 	err = ext3_init_acl(handle, inode, dir);
 	if (err)
···
 	return ret;
 
 fail_free_drop:
-	vfs_dq_free_inode(inode);
+	dquot_free_inode(inode);
 
 fail_drop:
-	vfs_dq_drop(inode);
+	dquot_drop(inode);
 	inode->i_flags |= S_NOQUOTA;
 	inode->i_nlink = 0;
 	unlock_new_inode(inode);
+27 -14
fs/ext3/inode.c
···
 {
 	handle_t *handle;
 
+	if (!is_bad_inode(inode))
+		dquot_initialize(inode);
+
 	truncate_inode_pages(&inode->i_data, 0);
 
 	if (is_bad_inode(inode))
···
 	 */
 	if (pos + len > inode->i_size && ext3_can_truncate(inode))
 		ext3_orphan_add(handle, inode);
-	EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
+	ext3_set_inode_state(inode, EXT3_STATE_JDATA);
 	if (inode->i_size > EXT3_I(inode)->i_disksize) {
 		EXT3_I(inode)->i_disksize = inode->i_size;
 		ret2 = ext3_mark_inode_dirty(handle, inode);
···
 	journal_t *journal;
 	int err;
 
-	if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
+	if (ext3_test_inode_state(inode, EXT3_STATE_JDATA)) {
 		/*
 		 * This is a REALLY heavyweight approach, but the use of
 		 * bmap on dirty files is expected to be extremely rare:
···
 		 * everything they get.
 		 */
 
-		EXT3_I(inode)->i_state &= ~EXT3_STATE_JDATA;
+		ext3_clear_inode_state(inode, EXT3_STATE_JDATA);
 		journal = EXT3_JOURNAL(inode);
 		journal_lock_updates(journal);
 		err = journal_flush(journal);
···
 	int err;
 
 	J_ASSERT(PageLocked(page));
+	WARN_ON_ONCE(IS_RDONLY(inode));
 
 	/*
 	 * We give up here if we're reentered, because it might be for a
···
 	int ret = 0;
 	int err;
 
+	J_ASSERT(PageLocked(page));
+	WARN_ON_ONCE(IS_RDONLY(inode));
+
 	if (ext3_journal_current_handle())
 		goto out_fail;
 
···
 	int ret = 0;
 	int err;
 
+	J_ASSERT(PageLocked(page));
+	WARN_ON_ONCE(IS_RDONLY(inode));
+
 	if (ext3_journal_current_handle())
 		goto no_write;
 
···
 				PAGE_CACHE_SIZE, NULL, write_end_fn);
 		if (ret == 0)
 			ret = err;
-		EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
+		ext3_set_inode_state(inode, EXT3_STATE_JDATA);
 		unlock_page(page);
 	} else {
 		/*
···
 		handle = ext3_journal_start(inode, 2);
 		if (IS_ERR(handle)) {
 			/* This is really bad luck. We've written the data
-			 * but cannot extend i_size. Bail out and pretend
-			 * the write failed... */
+			 * but cannot extend i_size. Truncate allocated blocks
+			 * and pretend the write failed... */
+			ext3_truncate(inode);
 			ret = PTR_ERR(handle);
 			goto out;
 		}
···
 		goto out_notrans;
 
 	if (inode->i_size == 0 && ext3_should_writeback_data(inode))
-		ei->i_state |= EXT3_STATE_FLUSH_ON_CLOSE;
+		ext3_set_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE);
 
 	/*
 	 * We have to lock the EOF page here, because lock_page() nests
···
 {
 	/* We have all inode data except xattrs in memory here. */
 	return __ext3_get_inode_loc(inode, iloc,
-		!(EXT3_I(inode)->i_state & EXT3_STATE_XATTR));
+		!ext3_test_inode_state(inode, EXT3_STATE_XATTR));
 }
 
 void ext3_set_inode_flags(struct inode *inode)
···
 					EXT3_GOOD_OLD_INODE_SIZE +
 					ei->i_extra_isize;
 			if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC))
-				ei->i_state |= EXT3_STATE_XATTR;
+				ext3_set_inode_state(inode, EXT3_STATE_XATTR);
 		}
 	} else
 		ei->i_extra_isize = 0;
···
 
 	/* For fields not not tracking in the in-memory inode,
 	 * initialise them to zero for new inodes. */
-	if (ei->i_state & EXT3_STATE_NEW)
+	if (ext3_test_inode_state(inode, EXT3_STATE_NEW))
 		memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
 
 	ext3_get_inode_flags(ei);
···
 	rc = ext3_journal_dirty_metadata(handle, bh);
 	if (!err)
 		err = rc;
-	ei->i_state &= ~EXT3_STATE_NEW;
+	ext3_clear_inode_state(inode, EXT3_STATE_NEW);
 
 	atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid);
 out_brelse:
···
 	if (error)
 		return error;
 
+	if (ia_valid & ATTR_SIZE)
+		dquot_initialize(inode);
 	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
 	    (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
 		handle_t *handle;
···
 			error = PTR_ERR(handle);
 			goto err_out;
 		}
-		error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
+		error = dquot_transfer(inode, attr);
 		if (error) {
 			ext3_journal_stop(handle);
 			return error;
···
 		ret = 2 * (bpp + indirects) + 2;
 
 #ifdef CONFIG_QUOTA
-	/* We know that structure was already allocated during vfs_dq_init so
+	/* We know that structure was already allocated during dquot_initialize so
 	 * we will be updating only the data blocks + inodes */
 	ret += EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
 #endif
···
  * i_size has been changed by generic_commit_write() and we thus need
  * to include the updated inode in the current transaction.
  *
- * Also, vfs_dq_alloc_space() will always dirty the inode when blocks
+ * Also, dquot_alloc_space() will always dirty the inode when blocks
  * are allocated to the file.
  *
  * If the inode is marked synchronous, we don't honour that here - doing
+21 -3
fs/ext3/namei.c
···
 	struct inode * inode;
 	int err, retries = 0;
 
+	dquot_initialize(dir);
+
retry:
 	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
 					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
···
 
 	if (!new_valid_dev(rdev))
 		return -EINVAL;
+
+	dquot_initialize(dir);
 
retry:
 	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
···
 
 	if (dir->i_nlink >= EXT3_LINK_MAX)
 		return -EMLINK;
+
+	dquot_initialize(dir);
 
retry:
 	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
···
 
 	/* Initialize quotas before so that eventual writes go in
 	 * separate transaction */
-	vfs_dq_init(dentry->d_inode);
+	dquot_initialize(dir);
+	dquot_initialize(dentry->d_inode);
+
 	handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
 	if (IS_ERR(handle))
 		return PTR_ERR(handle);
···
 
 	/* Initialize quotas before so that eventual writes go
 	 * in separate transaction */
-	vfs_dq_init(dentry->d_inode);
+	dquot_initialize(dir);
+	dquot_initialize(dentry->d_inode);
+
 	handle = ext3_journal_start(dir, EXT3_DELETE_TRANS_BLOCKS(dir->i_sb));
 	if (IS_ERR(handle))
 		return PTR_ERR(handle);
···
 	l = strlen(symname)+1;
 	if (l > dir->i_sb->s_blocksize)
 		return -ENAMETOOLONG;
+
+	dquot_initialize(dir);
 
retry:
 	handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
···
 
 	if (inode->i_nlink >= EXT3_LINK_MAX)
 		return -EMLINK;
+
+	dquot_initialize(dir);
+
 	/*
 	 * Return -ENOENT if we've raced with unlink and i_nlink is 0. Doing
 	 * otherwise has the potential to corrupt the orphan inode list.
···
 	struct ext3_dir_entry_2 * old_de, * new_de;
 	int retval, flush_file = 0;
 
+	dquot_initialize(old_dir);
+	dquot_initialize(new_dir);
+
 	old_bh = new_bh = dir_bh = NULL;
 
 	/* Initialize quotas before so that eventual writes go
 	 * in separate transaction */
 	if (new_dentry->d_inode)
-		vfs_dq_init(new_dentry->d_inode);
+		dquot_initialize(new_dentry->d_inode);
 	handle = ext3_journal_start(old_dir, 2 *
 					EXT3_DATA_TRANS_BLOCKS(old_dir->i_sb) +
 					EXT3_INDEX_EXTRA_TRANS_BLOCKS + 2);
+127 -127
fs/ext3/super.c
···
 	if (!test_opt (sb, ERRORS_CONT)) {
 		journal_t *journal = EXT3_SB(sb)->s_journal;
 
-		EXT3_SB(sb)->s_mount_opt |= EXT3_MOUNT_ABORT;
+		set_opt(EXT3_SB(sb)->s_mount_opt, ABORT);
 		if (journal)
 			journal_abort(journal, -EIO);
 	}
···
 		"error: remounting filesystem read-only");
 	EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
 	sb->s_flags |= MS_RDONLY;
-	EXT3_SB(sb)->s_mount_opt |= EXT3_MOUNT_ABORT;
+	set_opt(EXT3_SB(sb)->s_mount_opt, ABORT);
 	if (EXT3_SB(sb)->s_journal)
 		journal_abort(EXT3_SB(sb)->s_journal, -EIO);
 }
···
 static void ext3_clear_inode(struct inode *inode)
 {
 	struct ext3_block_alloc_info *rsv = EXT3_I(inode)->i_block_alloc_info;
+
+	dquot_drop(inode);
 	ext3_discard_reservation(inode);
 	EXT3_I(inode)->i_block_alloc_info = NULL;
 	if (unlikely(rsv))
···
 	if (sbi->s_qf_names[GRPQUOTA])
 		seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]);
 
-	if (sbi->s_mount_opt & EXT3_MOUNT_USRQUOTA)
+	if (test_opt(sb, USRQUOTA))
 		seq_puts(seq, ",usrquota");
 
-	if (sbi->s_mount_opt & EXT3_MOUNT_GRPQUOTA)
+	if (test_opt(sb, GRPQUOTA))
 		seq_puts(seq, ",grpquota");
 #endif
 }
···
 	if (test_opt(sb, NOBH))
 		seq_puts(seq, ",nobh");
 
-	seq_printf(seq, ",data=%s", data_mode_string(sbi->s_mount_opt &
-						     EXT3_MOUNT_DATA_FLAGS));
+	seq_printf(seq, ",data=%s", data_mode_string(test_opt(sb, DATA_FLAGS)));
 	if (test_opt(sb, DATA_ERR_ABORT))
 		seq_puts(seq, ",data_err=abort");
 
···
 			       const char *data, size_t len, loff_t off);
 
 static const struct dquot_operations ext3_quota_operations = {
-	.initialize	= dquot_initialize,
-	.drop		= dquot_drop,
-	.alloc_space	= dquot_alloc_space,
-	.alloc_inode	= dquot_alloc_inode,
-	.free_space	= dquot_free_space,
-	.free_inode	= dquot_free_inode,
-	.transfer	= dquot_transfer,
 	.write_dquot	= ext3_write_dquot,
 	.acquire_dquot	= ext3_acquire_dquot,
 	.release_dquot	= ext3_release_dquot,
···
 	return sb_block;
 }
 
+#ifdef CONFIG_QUOTA
+static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
+{
+	struct ext3_sb_info *sbi = EXT3_SB(sb);
+	char *qname;
+
+	if (sb_any_quota_loaded(sb) &&
+	    !sbi->s_qf_names[qtype]) {
+		ext3_msg(sb, KERN_ERR,
+			"Cannot change journaled "
+			"quota options when quota turned on");
+		return 0;
+	}
+	qname = match_strdup(args);
+	if (!qname) {
+		ext3_msg(sb, KERN_ERR,
+			"Not enough memory for storing quotafile name");
+		return 0;
+	}
+	if (sbi->s_qf_names[qtype] &&
+	    strcmp(sbi->s_qf_names[qtype], qname)) {
+		ext3_msg(sb, KERN_ERR,
+			"%s quota file already specified", QTYPE2NAME(qtype));
+		kfree(qname);
+		return 0;
+	}
+	sbi->s_qf_names[qtype] = qname;
+	if (strchr(sbi->s_qf_names[qtype], '/')) {
+		ext3_msg(sb, KERN_ERR,
+			"quotafile must be on filesystem root");
+		kfree(sbi->s_qf_names[qtype]);
+		sbi->s_qf_names[qtype] = NULL;
+		return 0;
+	}
+	set_opt(sbi->s_mount_opt, QUOTA);
+	return 1;
+}
+
+static int clear_qf_name(struct super_block *sb, int qtype) {
+
+	struct ext3_sb_info *sbi = EXT3_SB(sb);
+
+	if (sb_any_quota_loaded(sb) &&
+	    sbi->s_qf_names[qtype]) {
+		ext3_msg(sb, KERN_ERR, "Cannot change journaled quota options"
+			" when quota turned on");
+		return 0;
+	}
+	/*
+	 * The space will be released later when all options are confirmed
+	 * to be correct
+	 */
+	sbi->s_qf_names[qtype] = NULL;
+	return 1;
+}
+#endif
+
 static int parse_options (char *options, struct super_block *sb,
 			  unsigned int *inum, unsigned long *journal_devnum,
 			  ext3_fsblk_t *n_blocks_count, int is_remount)
···
 	int data_opt = 0;
 	int option;
 #ifdef CONFIG_QUOTA
-	int qtype, qfmt;
-	char *qname;
+	int qfmt;
 #endif
 
 	if (!options)
···
 			data_opt = EXT3_MOUNT_WRITEBACK_DATA;
 		datacheck:
 			if (is_remount) {
-				if ((sbi->s_mount_opt & EXT3_MOUNT_DATA_FLAGS)
-						== data_opt)
+				if (test_opt(sb, DATA_FLAGS) == data_opt)
 					break;
 				ext3_msg(sb, KERN_ERR,
 					"error: cannot change "
 					"data mode on remount. The filesystem "
 					"is mounted in data=%s mode and you "
 					"try to remount it in data=%s mode.",
-					data_mode_string(sbi->s_mount_opt &
-							EXT3_MOUNT_DATA_FLAGS),
+					data_mode_string(test_opt(sb,
+							DATA_FLAGS)),
 					data_mode_string(data_opt));
 				return 0;
 			} else {
-				sbi->s_mount_opt &= ~EXT3_MOUNT_DATA_FLAGS;
+				clear_opt(sbi->s_mount_opt, DATA_FLAGS);
 				sbi->s_mount_opt |= data_opt;
 			}
 			break;
···
 			break;
 #ifdef CONFIG_QUOTA
 		case Opt_usrjquota:
-			qtype = USRQUOTA;
-			goto set_qf_name;
+			if (!set_qf_name(sb, USRQUOTA, &args[0]))
+				return 0;
+			break;
 		case Opt_grpjquota:
-			qtype = GRPQUOTA;
-set_qf_name:
-			if (sb_any_quota_loaded(sb) &&
-			    !sbi->s_qf_names[qtype]) {
-				ext3_msg(sb, KERN_ERR,
-					"error: cannot change journaled "
-					"quota options when quota turned on.");
+			if (!set_qf_name(sb, GRPQUOTA, &args[0]))
 				return 0;
-			}
-			qname = match_strdup(&args[0]);
-			if (!qname) {
-				ext3_msg(sb, KERN_ERR,
-					"error: not enough memory for "
-					"storing quotafile name.");
-				return 0;
-			}
-			if (sbi->s_qf_names[qtype] &&
-			    strcmp(sbi->s_qf_names[qtype], qname)) {
-				ext3_msg(sb, KERN_ERR,
-					"error: %s quota file already "
-					"specified.", QTYPE2NAME(qtype));
-				kfree(qname);
-				return 0;
-			}
-			sbi->s_qf_names[qtype] = qname;
-			if (strchr(sbi->s_qf_names[qtype], '/')) {
-				ext3_msg(sb, KERN_ERR,
-					"error: quotafile must be on "
-					"filesystem root.");
-				kfree(sbi->s_qf_names[qtype]);
-				sbi->s_qf_names[qtype] = NULL;
-				return 0;
-			}
-			set_opt(sbi->s_mount_opt, QUOTA);
 			break;
 		case Opt_offusrjquota:
-			qtype = USRQUOTA;
-			goto clear_qf_name;
-		case Opt_offgrpjquota:
-			qtype = GRPQUOTA;
-clear_qf_name:
-			if (sb_any_quota_loaded(sb) &&
-			    sbi->s_qf_names[qtype]) {
-				ext3_msg(sb, KERN_ERR, "error: cannot change "
-						"journaled quota options when "
-						"quota turned on.");
+			if (!clear_qf_name(sb, USRQUOTA))
 				return 0;
-			}
-			/*
-			 * The space will be released later when all options
-			 * are confirmed to be correct
-			 */
-			sbi->s_qf_names[qtype] = NULL;
+			break;
+		case Opt_offgrpjquota:
+			if (!clear_qf_name(sb, GRPQUOTA))
+				return 0;
 			break;
 		case Opt_jqfmt_vfsold:
 			qfmt = QFMT_VFS_OLD;
···
 	}
 #ifdef CONFIG_QUOTA
 	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
-		if ((sbi->s_mount_opt & EXT3_MOUNT_USRQUOTA) &&
-		     sbi->s_qf_names[USRQUOTA])
+		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
 			clear_opt(sbi->s_mount_opt, USRQUOTA);
-
-		if ((sbi->s_mount_opt & EXT3_MOUNT_GRPQUOTA) &&
-		     sbi->s_qf_names[GRPQUOTA])
+		if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
 			clear_opt(sbi->s_mount_opt, GRPQUOTA);
 
-		if ((sbi->s_qf_names[USRQUOTA] &&
-				(sbi->s_mount_opt & EXT3_MOUNT_GRPQUOTA)) ||
-		    (sbi->s_qf_names[GRPQUOTA] &&
-				(sbi->s_mount_opt & EXT3_MOUNT_USRQUOTA))) {
+		if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
 			ext3_msg(sb, KERN_ERR, "error: old and new quota "
 					"format mixing.");
 			return 0;
···
 	}
 
 	list_add(&EXT3_I(inode)->i_orphan, &EXT3_SB(sb)->s_orphan);
-	vfs_dq_init(inode);
+	dquot_initialize(inode);
 	if (inode->i_nlink) {
 		printk(KERN_DEBUG
 			"%s: truncating inode %lu to %Ld bytes\n",
···
 		set_opt(sbi->s_mount_opt, POSIX_ACL);
 #endif
 	if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_DATA)
-		sbi->s_mount_opt |= EXT3_MOUNT_JOURNAL_DATA;
+		set_opt(sbi->s_mount_opt, JOURNAL_DATA);
 	else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_ORDERED)
-		sbi->s_mount_opt |= EXT3_MOUNT_ORDERED_DATA;
+		set_opt(sbi->s_mount_opt, ORDERED_DATA);
 	else if ((def_mount_opts & EXT3_DEFM_JMODE) == EXT3_DEFM_JMODE_WBACK)
-		sbi->s_mount_opt |= EXT3_MOUNT_WRITEBACK_DATA;
+		set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
 
 	if (le16_to_cpu(sbi->s_es->s_errors) == EXT3_ERRORS_PANIC)
 		set_opt(sbi->s_mount_opt, ERRORS_PANIC);
···
 		goto failed_mount;
 
 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-		((sbi->s_mount_opt & EXT3_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
 
 	if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV &&
 	    (EXT3_HAS_COMPAT_FEATURE(sb, ~0U) ||
···
 		goto restore_opts;
 	}
 
-	if (sbi->s_mount_opt & EXT3_MOUNT_ABORT)
+	if (test_opt(sb, ABORT))
 		ext3_abort(sb, __func__, "Abort forced by user");
 
 	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
-		((sbi->s_mount_opt & EXT3_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
 
 	es = sbi->s_es;
 
···
 
 	if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY) ||
 		n_blocks_count > le32_to_cpu(es->s_blocks_count)) {
-		if (sbi->s_mount_opt & EXT3_MOUNT_ABORT) {
+		if (test_opt(sb, ABORT)) {
 			err = -EROFS;
 			goto restore_opts;
 		}
···
 * Process 1                         Process 2
 * ext3_create()                     quota_sync()
 *   journal_start()                   write_dquot()
- *   vfs_dq_init()                       down(dqio_mutex)
+ *   dquot_initialize()                  down(dqio_mutex)
 *   down(dqio_mutex)                    journal_start()
 *
 */
···
 	sector_t blk = off >> EXT3_BLOCK_SIZE_BITS(sb);
 	int err = 0;
 	int offset = off & (sb->s_blocksize - 1);
-	int tocopy;
 	int journal_quota = EXT3_SB(sb)->s_qf_names[type] != NULL;
-	size_t towrite = len;
 	struct buffer_head *bh;
 	handle_t *handle = journal_current_handle();
 
···
 			(unsigned long long)off, (unsigned long long)len);
 		return -EIO;
 	}
-	mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
-	while (towrite > 0) {
-		tocopy = sb->s_blocksize - offset < towrite ?
-				sb->s_blocksize - offset : towrite;
-		bh = ext3_bread(handle, inode, blk, 1, &err);
-		if (!bh)
-			goto out;
-		if (journal_quota) {
-			err = ext3_journal_get_write_access(handle, bh);
-			if (err) {
-				brelse(bh);
-				goto out;
-			}
-		}
-		lock_buffer(bh);
-		memcpy(bh->b_data+offset, data, tocopy);
-		flush_dcache_page(bh->b_page);
-		unlock_buffer(bh);
-		if (journal_quota)
-			err = ext3_journal_dirty_metadata(handle, bh);
-		else {
-			/* Always do at least ordered writes for quotas */
-			err = ext3_journal_dirty_data(handle, bh);
-			mark_buffer_dirty(bh);
-		}
-		brelse(bh);
-		if (err)
-			goto out;
-		offset = 0;
-		towrite -= tocopy;
-		data += tocopy;
-		blk++;
+
+	/*
+	 * Since we account only one data block in transaction credits,
+	 * then it is impossible to cross a block boundary.
+	 */
+	if (sb->s_blocksize - offset < len) {
+		ext3_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
+			" cancelled because not block aligned",
+			(unsigned long long)off, (unsigned long long)len);
+		return -EIO;
 	}
+	mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
+	bh = ext3_bread(handle, inode, blk, 1, &err);
+	if (!bh)
+		goto out;
+	if (journal_quota) {
+		err = ext3_journal_get_write_access(handle, bh);
+		if (err) {
+			brelse(bh);
+			goto out;
+		}
+	}
+	lock_buffer(bh);
+	memcpy(bh->b_data+offset, data, len);
+	flush_dcache_page(bh->b_page);
+	unlock_buffer(bh);
+	if (journal_quota)
+		err = ext3_journal_dirty_metadata(handle, bh);
+	else {
+		/* Always do at least ordered writes for quotas */
+		err = ext3_journal_dirty_data(handle, bh);
+		mark_buffer_dirty(bh);
+	}
+	brelse(bh);
 out:
-	if (len == towrite) {
+	if (err) {
 		mutex_unlock(&inode->i_mutex);
 		return err;
 	}
-	if (inode->i_size < off+len-towrite) {
-		i_size_write(inode, off+len-towrite);
+	if (inode->i_size < off + len) {
+		i_size_write(inode, off + len);
 		EXT3_I(inode)->i_disksize = inode->i_size;
 	}
 	inode->i_version++;
 	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 	ext3_mark_inode_dirty(handle, inode);
 	mutex_unlock(&inode->i_mutex);
-	return len - towrite;
+	return len;
 }
 
 #endif
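The quota_write rewrite above can drop the old copy loop because the transaction was only
ever given credits for a single quota data block; a write that is not block aligned would
need two. The guard arithmetic, worked through with illustrative numbers (the 1024-byte
blocksize is an assumption for the example, not taken from the patch):

	int offset = off & (sb->s_blocksize - 1);	/* position inside the first block */

	/* e.g. blocksize = 1024, off = 1000, len = 48:
	 * 1024 - 1000 = 24 bytes remain in this block and 24 < 48, so the
	 * write would cross into a second block; it is refused with -EIO
	 * instead of being split across buffers as the old loop did. */
	if (sb->s_blocksize - offset < len)
		return -EIO;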
+11 -11
fs/ext3/xattr.c
···
 	void *end;
 	int error;
 
-	if (!(EXT3_I(inode)->i_state & EXT3_STATE_XATTR))
+	if (!ext3_test_inode_state(inode, EXT3_STATE_XATTR))
 		return -ENODATA;
 	error = ext3_get_inode_loc(inode, &iloc);
 	if (error)
···
 	void *end;
 	int error;
 
-	if (!(EXT3_I(inode)->i_state & EXT3_STATE_XATTR))
+	if (!ext3_test_inode_state(inode, EXT3_STATE_XATTR))
 		return 0;
 	error = ext3_get_inode_loc(inode, &iloc);
 	if (error)
···
 		error = ext3_journal_dirty_metadata(handle, bh);
 		if (IS_SYNC(inode))
 			handle->h_sync = 1;
-		vfs_dq_free_block(inode, 1);
+		dquot_free_block(inode, 1);
 		ea_bdebug(bh, "refcount now=%d; releasing",
 			  le32_to_cpu(BHDR(bh)->h_refcount));
 		if (ce)
···
 		else {
 			/* The old block is released after updating
 			   the inode. */
-			error = -EDQUOT;
-			if (vfs_dq_alloc_block(inode, 1))
+			error = dquot_alloc_block(inode, 1);
+			if (error)
 				goto cleanup;
 			error = ext3_journal_get_write_access(handle,
 							      new_bh);
···
 	return error;
 
 cleanup_dquot:
-	vfs_dq_free_block(inode, 1);
+	dquot_free_block(inode, 1);
 	goto cleanup;
 
 bad_block:
···
 	is->s.base = is->s.first = IFIRST(header);
 	is->s.here = is->s.first;
 	is->s.end = (void *)raw_inode + EXT3_SB(inode->i_sb)->s_inode_size;
-	if (EXT3_I(inode)->i_state & EXT3_STATE_XATTR) {
+	if (ext3_test_inode_state(inode, EXT3_STATE_XATTR)) {
 		error = ext3_xattr_check_names(IFIRST(header), is->s.end);
 		if (error)
 			return error;
···
 	header = IHDR(inode, ext3_raw_inode(&is->iloc));
 	if (!IS_LAST_ENTRY(s->first)) {
 		header->h_magic = cpu_to_le32(EXT3_XATTR_MAGIC);
-		EXT3_I(inode)->i_state |= EXT3_STATE_XATTR;
+		ext3_set_inode_state(inode, EXT3_STATE_XATTR);
 	} else {
 		header->h_magic = cpu_to_le32(0);
-		EXT3_I(inode)->i_state &= ~EXT3_STATE_XATTR;
+		ext3_clear_inode_state(inode, EXT3_STATE_XATTR);
 	}
 	return 0;
 }
···
 	if (error)
 		goto cleanup;
 
-	if (EXT3_I(inode)->i_state & EXT3_STATE_NEW) {
+	if (ext3_test_inode_state(inode, EXT3_STATE_NEW)) {
 		struct ext3_inode *raw_inode = ext3_raw_inode(&is.iloc);
 		memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
-		EXT3_I(inode)->i_state &= ~EXT3_STATE_NEW;
+		ext3_clear_inode_state(inode, EXT3_STATE_NEW);
 	}
 
 	error = ext3_xattr_ibody_find(inode, &i, &is);
+2 -1
fs/ext4/file.c
···
 #include <linux/jbd2.h>
 #include <linux/mount.h>
 #include <linux/path.h>
+#include <linux/quotaops.h>
 #include "ext4.h"
 #include "ext4_jbd2.h"
 #include "xattr.h"
···
 			sb->s_dirt = 1;
 		}
 	}
-	return generic_file_open(inode, filp);
+	return dquot_file_open(inode, filp);
 }
 
 const struct file_operations ext4_file_operations = {
+8 -8
fs/ext4/ialloc.c
···
 	 * Note: we must free any quota before locking the superblock,
 	 * as writing the quota to disk may need the lock as well.
 	 */
-	vfs_dq_init(inode);
+	dquot_initialize(inode);
 	ext4_xattr_delete_inode(handle, inode);
-	vfs_dq_free_inode(inode);
-	vfs_dq_drop(inode);
+	dquot_free_inode(inode);
+	dquot_drop(inode);
 
 	is_directory = S_ISDIR(inode->i_mode);
 
···
 		ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
 
 	ret = inode;
-	if (vfs_dq_alloc_inode(inode)) {
-		err = -EDQUOT;
+	dquot_initialize(inode);
+	err = dquot_alloc_inode(inode);
+	if (err)
 		goto fail_drop;
-	}
 
 	err = ext4_init_acl(handle, inode, dir);
 	if (err)
···
 	return ret;
 
 fail_free_drop:
-	vfs_dq_free_inode(inode);
+	dquot_free_inode(inode);
 
 fail_drop:
-	vfs_dq_drop(inode);
+	dquot_drop(inode);
 	inode->i_flags |= S_NOQUOTA;
 	inode->i_nlink = 0;
 	unlock_new_inode(inode);
+17 -10
fs/ext4/inode.c
···
 	handle_t *handle;
 	int err;
 
+	if (!is_bad_inode(inode))
+		dquot_initialize(inode);
+
 	if (ext4_should_order_data(inode))
 		ext4_begin_ordered_truncate(inode, 0);
 	truncate_inode_pages(&inode->i_data, 0);
···
 
 	/* Update quota subsystem */
 	if (quota_claim) {
-		vfs_dq_claim_block(inode, used);
+		dquot_claim_block(inode, used);
 		if (mdb_free)
-			vfs_dq_release_reservation_block(inode, mdb_free);
+			dquot_release_reservation_block(inode, mdb_free);
 	} else {
 		/*
 		 * We did fallocate with an offset that is already delayed
···
 		 * that
 		 */
 		if (allocated_meta_blocks)
-			vfs_dq_claim_block(inode, allocated_meta_blocks);
-		vfs_dq_release_reservation_block(inode, mdb_free + used);
+			dquot_claim_block(inode, allocated_meta_blocks);
+		dquot_release_reservation_block(inode, mdb_free + used);
 	}
 
 	/*
···
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	unsigned long md_needed, md_reserved;
+	int ret;
 
 	/*
 	 * recalculate the amount of metadata blocks to reserve
···
 	 * later. Real quota accounting is done at pages writeout
 	 * time.
 	 */
-	if (vfs_dq_reserve_block(inode, md_needed + 1))
-		return -EDQUOT;
+	ret = dquot_reserve_block(inode, md_needed + 1);
+	if (ret)
+		return ret;
 
 	if (ext4_claim_free_blocks(sbi, md_needed + 1)) {
-		vfs_dq_release_reservation_block(inode, md_needed + 1);
+		dquot_release_reservation_block(inode, md_needed + 1);
 		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
 			yield();
 			goto repeat;
···
 
 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 
-	vfs_dq_release_reservation_block(inode, to_free);
+	dquot_release_reservation_block(inode, to_free);
 }
 
 static void ext4_da_page_release_reservation(struct page *page,
···
 	if (error)
 		return error;
 
+	if (ia_valid & ATTR_SIZE)
+		dquot_initialize(inode);
 	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
 	    (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
 		handle_t *handle;
···
 			error = PTR_ERR(handle);
 			goto err_out;
 		}
-		error = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
+		error = dquot_transfer(inode, attr);
 		if (error) {
 			ext4_journal_stop(handle);
 			return error;
···
  * i_size has been changed by generic_commit_write() and we thus need
  * to include the updated inode in the current transaction.
  *
- * Also, vfs_dq_alloc_block() will always dirty the inode when blocks
+ * Also, dquot_alloc_block() will always dirty the inode when blocks
  * are allocated to the file.
  *
  * If the inode is marked synchronous, we don't honour that here - doing
+3 -3
fs/ext4/mballoc.c
···
 		return 0;
 	}
 	reserv_blks = ar->len;
-	while (ar->len && vfs_dq_alloc_block(ar->inode, ar->len)) {
+	while (ar->len && dquot_alloc_block(ar->inode, ar->len)) {
 		ar->flags |= EXT4_MB_HINT_NOPREALLOC;
 		ar->len--;
 	}
···
 	kmem_cache_free(ext4_ac_cachep, ac);
out1:
 	if (inquota && ar->len < inquota)
-		vfs_dq_free_block(ar->inode, inquota - ar->len);
+		dquot_free_block(ar->inode, inquota - ar->len);
out3:
 	if (!ar->len) {
 		if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
···
 	sb->s_dirt = 1;
error_return:
 	if (freed)
-		vfs_dq_free_block(inode, freed);
+		dquot_free_block(inode, freed);
 	brelse(bitmap_bh);
 	ext4_std_error(sb, err);
 	if (ac)
+20 -3
fs/ext4/namei.c
···
 	struct inode *inode;
 	int err, retries = 0;
 
+	dquot_initialize(dir);
+
retry:
 	handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
 					EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
···
 
 	if (!new_valid_dev(rdev))
 		return -EINVAL;
+
+	dquot_initialize(dir);
 
retry:
 	handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
···
 
 	if (EXT4_DIR_LINK_MAX(dir))
 		return -EMLINK;
+
+	dquot_initialize(dir);
 
retry:
 	handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
···
 
 	/* Initialize quotas before so that eventual writes go in
 	 * separate transaction */
-	vfs_dq_init(dentry->d_inode);
+	dquot_initialize(dir);
+	dquot_initialize(dentry->d_inode);
+
 	handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
 	if (IS_ERR(handle))
 		return PTR_ERR(handle);
···
 
 	/* Initialize quotas before so that eventual writes go
 	 * in separate transaction */
-	vfs_dq_init(dentry->d_inode);
+	dquot_initialize(dir);
+	dquot_initialize(dentry->d_inode);
+
 	handle = ext4_journal_start(dir, EXT4_DELETE_TRANS_BLOCKS(dir->i_sb));
 	if (IS_ERR(handle))
 		return PTR_ERR(handle);
···
 	l = strlen(symname)+1;
 	if (l > dir->i_sb->s_blocksize)
 		return -ENAMETOOLONG;
+
+	dquot_initialize(dir);
 
retry:
 	handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
···
 	if (inode->i_nlink >= EXT4_LINK_MAX)
 		return -EMLINK;
 
+	dquot_initialize(dir);
+
 	/*
 	 * Return -ENOENT if we've raced with unlink and i_nlink is 0. Doing
 	 * otherwise has the potential to corrupt the orphan inode list.
···
 	struct ext4_dir_entry_2 *old_de, *new_de;
 	int retval, force_da_alloc = 0;
 
+	dquot_initialize(old_dir);
+	dquot_initialize(new_dir);
+
 	old_bh = new_bh = dir_bh = NULL;
 
 	/* Initialize quotas before so that eventual writes go
 	 * in separate transaction */
 	if (new_dentry->d_inode)
-		vfs_dq_init(new_dentry->d_inode);
+		dquot_initialize(new_dentry->d_inode);
 	handle = ext4_journal_start(old_dir, 2 *
 					EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) +
 					EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2);
+3 -12
fs/ext4/super.c
···
 
 static void ext4_clear_inode(struct inode *inode)
 {
+	dquot_drop(inode);
 	ext4_discard_preallocations(inode);
 	if (EXT4_JOURNAL(inode))
 		jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal,
···
 			       const char *data, size_t len, loff_t off);
 
 static const struct dquot_operations ext4_quota_operations = {
-	.initialize	= dquot_initialize,
-	.drop		= dquot_drop,
-	.alloc_space	= dquot_alloc_space,
-	.reserve_space	= dquot_reserve_space,
-	.claim_space	= dquot_claim_space,
-	.release_rsv	= dquot_release_reserved_space,
 #ifdef CONFIG_QUOTA
 	.get_reserved_space = ext4_get_reserved_space,
 #endif
-	.alloc_inode	= dquot_alloc_inode,
-	.free_space	= dquot_free_space,
-	.free_inode	= dquot_free_inode,
-	.transfer	= dquot_transfer,
 	.write_dquot	= ext4_write_dquot,
 	.acquire_dquot	= ext4_acquire_dquot,
 	.release_dquot	= ext4_release_dquot,
···
 	}
 
 	list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
-	vfs_dq_init(inode);
+	dquot_initialize(inode);
 	if (inode->i_nlink) {
 		ext4_msg(sb, KERN_DEBUG,
 			"%s: truncating inode %lu to %lld bytes",
···
 * Process 1                         Process 2
 * ext4_create()                     quota_sync()
 *   jbd2_journal_start()              write_dquot()
- *   vfs_dq_init()                       down(dqio_mutex)
+ *   dquot_initialize()                  down(dqio_mutex)
 *   down(dqio_mutex)                    jbd2_journal_start()
 *
 */
+4 -4
fs/ext4/xattr.c
···
 	error = ext4_handle_dirty_metadata(handle, inode, bh);
 	if (IS_SYNC(inode))
 		ext4_handle_sync(handle);
-	vfs_dq_free_block(inode, 1);
+	dquot_free_block(inode, 1);
 	ea_bdebug(bh, "refcount now=%d; releasing",
 		  le32_to_cpu(BHDR(bh)->h_refcount));
 	if (ce)
···
 		else {
 			/* The old block is released after updating
 			   the inode. */
-			error = -EDQUOT;
-			if (vfs_dq_alloc_block(inode, 1))
+			error = dquot_alloc_block(inode, 1);
+			if (error)
 				goto cleanup;
 			error = ext4_journal_get_write_access(handle,
 							      new_bh);
···
 	return error;
 
 cleanup_dquot:
-	vfs_dq_free_block(inode, 1);
+	dquot_free_block(inode, 1);
 	goto cleanup;
 
 bad_block:
+7 -2
fs/gfs2/quota.c
···
 		}
 	}
 
-int gfs2_quota_sync(struct super_block *sb, int type)
+int gfs2_quota_sync(struct super_block *sb, int type, int wait)
 {
 	struct gfs2_sbd *sdp = sb->s_fs_info;
 	struct gfs2_quota_data **qda;
···
 	kfree(qda);
 
 	return error;
+}
+
+static int gfs2_quota_sync_timeo(struct super_block *sb, int type)
+{
+	return gfs2_quota_sync(sb, type, 0);
 }
 
 int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
···
 				   &tune->gt_statfs_quantum);
 
 		/* Update quota file */
-		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
+		quotad_check_timeo(sdp, "sync", gfs2_quota_sync_timeo, t,
 				   &quotad_timeo, &tune->gt_quota_quantum);
 
 		/* Check for & recover partially truncated inodes */
+1 -1
fs/gfs2/quota.h
···
 extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
 			      u32 uid, u32 gid);
 
-extern int gfs2_quota_sync(struct super_block *sb, int type);
+extern int gfs2_quota_sync(struct super_block *sb, int type, int wait);
 extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id);
 
 extern int gfs2_quota_init(struct gfs2_sbd *sdp);
+1 -1
fs/gfs2/super.c
···
 	int error;
 
 	flush_workqueue(gfs2_delete_workqueue);
-	gfs2_quota_sync(sdp->sd_vfs, 0);
+	gfs2_quota_sync(sdp->sd_vfs, 0, 1);
 	gfs2_statfs_sync(sdp->sd_vfs, 0);
 
 	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE,
+1 -1
fs/gfs2/sys.c
···
 	if (simple_strtol(buf, NULL, 0) != 1)
 		return -EINVAL;
 
-	gfs2_quota_sync(sdp->sd_vfs, 0);
+	gfs2_quota_sync(sdp->sd_vfs, 0, 1);
 	return len;
 }
 
-4
fs/inode.c
···
 #include <linux/mm.h>
 #include <linux/dcache.h>
 #include <linux/init.h>
-#include <linux/quotaops.h>
 #include <linux/slab.h>
 #include <linux/writeback.h>
 #include <linux/module.h>
···
 	BUG_ON(!(inode->i_state & I_FREEING));
 	BUG_ON(inode->i_state & I_CLEAR);
 	inode_sync_wait(inode);
-	vfs_dq_drop(inode);
 	if (inode->i_sb->s_op->clear_inode)
 		inode->i_sb->s_op->clear_inode(inode);
 	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
···
 
 	if (op->delete_inode) {
 		void (*delete)(struct inode *) = op->delete_inode;
-		if (!is_bad_inode(inode))
-			vfs_dq_init(inode);
 		/* Filesystems implementing their own
 		 * s_op->delete_inode are required to call
 		 * truncate_inode_pages and clear_inode()
+5 -5
fs/jbd/commit.c
···
 		/* A buffer which has been freed while still being
 		 * journaled by a previous transaction may end up still
 		 * being dirty here, but we want to avoid writing back
-		 * that buffer in the future now that the last use has
-		 * been committed. That's not only a performance gain,
-		 * it also stops aliasing problems if the buffer is left
-		 * behind for writeback and gets reallocated for another
+		 * that buffer in the future after the "add to orphan"
+		 * operation been committed, That's not only a performance
+		 * gain, it also stops aliasing problems if the buffer is
+		 * left behind for writeback and gets reallocated for another
 		 * use in a different page. */
-		if (buffer_freed(bh)) {
+		if (buffer_freed(bh) && !jh->b_next_transaction) {
 			clear_buffer_freed(bh);
 			clear_buffer_jbddirty(bh);
 		}
+31 -12
fs/jbd/transaction.c
···
 	if (!jh)
 		goto zap_buffer_no_jh;
 
+	/*
+	 * We cannot remove the buffer from checkpoint lists until the
+	 * transaction adding inode to orphan list (let's call it T)
+	 * is committed.  Otherwise if the transaction changing the
+	 * buffer would be cleaned from the journal before T is
+	 * committed, a crash will cause that the correct contents of
+	 * the buffer will be lost.  On the other hand we have to
+	 * clear the buffer dirty bit at latest at the moment when the
+	 * transaction marking the buffer as freed in the filesystem
+	 * structures is committed because from that moment on the
+	 * buffer can be reallocated and used by a different page.
+	 * Since the block hasn't been freed yet but the inode has
+	 * already been added to orphan list, it is safe for us to add
+	 * the buffer to BJ_Forget list of the newest transaction.
+	 */
 	transaction = jh->b_transaction;
 	if (transaction == NULL) {
 		/* First case: not on any transaction.  If it
···
 		goto zap_buffer;
 	}
 	/*
-	 * If it is committing, we simply cannot touch it.  We
-	 * can remove it's next_transaction pointer from the
-	 * running transaction if that is set, but nothing
-	 * else. */
+	 * The buffer is committing, we simply cannot touch
+	 * it. So we just set j_next_transaction to the
+	 * running transaction (if there is one) and mark
+	 * buffer as freed so that commit code knows it should
+	 * clear dirty bits when it is done with the buffer.
+	 */
 	set_buffer_freed(bh);
-	if (jh->b_next_transaction) {
-		J_ASSERT(jh->b_next_transaction ==
-					journal->j_running_transaction);
-		jh->b_next_transaction = NULL;
-	}
+	if (journal->j_running_transaction && buffer_jbddirty(bh))
+		jh->b_next_transaction = journal->j_running_transaction;
 	journal_put_journal_head(jh);
 	spin_unlock(&journal->j_list_lock);
 	jbd_unlock_bh_state(bh);
···
 */
void __journal_refile_buffer(struct journal_head *jh)
{
-	int was_dirty;
+	int was_dirty, jlist;
 	struct buffer_head *bh = jh2bh(jh);
 
 	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
···
 	__journal_temp_unlink_buffer(jh);
 	jh->b_transaction = jh->b_next_transaction;
 	jh->b_next_transaction = NULL;
-	__journal_file_buffer(jh, jh->b_transaction,
-				jh->b_modified ? BJ_Metadata : BJ_Reserved);
+	if (buffer_freed(bh))
+		jlist = BJ_Forget;
+	else if (jh->b_modified)
+		jlist = BJ_Metadata;
+	else
+		jlist = BJ_Reserved;
+	__journal_file_buffer(jh, jh->b_transaction, jlist);
 	J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
 
 	if (was_dirty)
+1 -25
fs/jfs/acl.c
···
 
 #include <linux/sched.h>
 #include <linux/fs.h>
-#include <linux/quotaops.h>
 #include <linux/posix_acl_xattr.h>
 #include "jfs_incore.h"
 #include "jfs_txnmgr.h"
···
 	return rc;
 }
 
-static int jfs_acl_chmod(struct inode *inode)
+int jfs_acl_chmod(struct inode *inode)
 {
 	struct posix_acl *acl, *clone;
 	int rc;
···
 	}
 
 	posix_acl_release(clone);
-	return rc;
-}
-
-int jfs_setattr(struct dentry *dentry, struct iattr *iattr)
-{
-	struct inode *inode = dentry->d_inode;
-	int rc;
-
-	rc = inode_change_ok(inode, iattr);
-	if (rc)
-		return rc;
-
-	if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
-	    (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
-		if (vfs_dq_transfer(inode, iattr))
-			return -EDQUOT;
-	}
-
-	rc = inode_setattr(inode, iattr);
-
-	if (!rc && (iattr->ia_valid & ATTR_MODE))
-		rc = jfs_acl_chmod(inode);
-
 	return rc;
 }
+29 -2
fs/jfs/file.c
··· 18 18 */ 19 19 20 20 #include <linux/fs.h> 21 + #include <linux/quotaops.h> 21 22 #include "jfs_incore.h" 22 23 #include "jfs_inode.h" 23 24 #include "jfs_dmap.h" ··· 48 47 { 49 48 int rc; 50 49 51 - if ((rc = generic_file_open(inode, file))) 50 + if ((rc = dquot_file_open(inode, file))) 52 51 return rc; 53 52 54 53 /* ··· 89 88 return 0; 90 89 } 91 90 91 + int jfs_setattr(struct dentry *dentry, struct iattr *iattr) 92 + { 93 + struct inode *inode = dentry->d_inode; 94 + int rc; 95 + 96 + rc = inode_change_ok(inode, iattr); 97 + if (rc) 98 + return rc; 99 + 100 + if (iattr->ia_valid & ATTR_SIZE) 101 + dquot_initialize(inode); 102 + if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) || 103 + (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) { 104 + rc = dquot_transfer(inode, iattr); 105 + if (rc) 106 + return rc; 107 + } 108 + 109 + rc = inode_setattr(inode, iattr); 110 + 111 + if (!rc && (iattr->ia_valid & ATTR_MODE)) 112 + rc = jfs_acl_chmod(inode); 113 + 114 + return rc; 115 + } 116 + 92 117 const struct inode_operations jfs_file_inode_operations = { 93 118 .truncate = jfs_truncate, 94 119 .setxattr = jfs_setxattr, 95 120 .getxattr = jfs_getxattr, 96 121 .listxattr = jfs_listxattr, 97 122 .removexattr = jfs_removexattr, 98 - #ifdef CONFIG_JFS_POSIX_ACL 99 123 .setattr = jfs_setattr, 124 + #ifdef CONFIG_JFS_POSIX_ACL 100 125 .check_acl = jfs_check_acl, 101 126 #endif 102 127 };
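jfs_open() switching from generic_file_open() to dquot_file_open() is the new convention for every quota-aware filesystem: the helper performs the same open-time check and, for write opens, attaches dquots up front. A minimal sketch for a hypothetical filesystem (the foofs names are illustrative, not part of this merge):

#include <linux/fs.h>
#include <linux/quotaops.h>

static int foofs_open(struct inode *inode, struct file *file)
{
	/* generic_file_open() semantics, plus dquot_initialize() when the
	 * file is opened with FMODE_WRITE */
	return dquot_file_open(inode, file);
}

static const struct file_operations foofs_file_operations = {
	.open	= foofs_open,
	/* .read, .write, .fsync, ... elided */
};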
+6 -3
fs/jfs/inode.c
··· 149 149 { 150 150 jfs_info("In jfs_delete_inode, inode = 0x%p", inode); 151 151 152 + if (!is_bad_inode(inode)) 153 + dquot_initialize(inode); 154 + 152 155 if (!is_bad_inode(inode) && 153 156 (JFS_IP(inode)->fileset == FILESYSTEM_I)) { 154 157 truncate_inode_pages(&inode->i_data, 0); ··· 164 161 /* 165 162 * Free the inode from the quota allocation. 166 163 */ 167 - vfs_dq_init(inode); 168 - vfs_dq_free_inode(inode); 169 - vfs_dq_drop(inode); 164 + dquot_initialize(inode); 165 + dquot_free_inode(inode); 166 + dquot_drop(inode); 170 167 } 171 168 172 169 clear_inode(inode);
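The ordering in jfs_delete_inode() matters: dquots have to be attached before the truncate (which uncharges blocks) and before dquot_free_inode(). Condensed into a sketch (hypothetical foofs, on-disk freeing elided):

static void foofs_delete_inode(struct inode *inode)
{
	if (!is_bad_inode(inode))
		dquot_initialize(inode);	/* attach dquots first */

	truncate_inode_pages(&inode->i_data, 0);
	/* ... free the inode's blocks; dquot_free_block() runs in there ... */

	dquot_free_inode(inode);	/* give the inode back to quota */
	dquot_drop(inode);		/* detach the dquot pointers */
	clear_inode(inode);
}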
+6 -1
fs/jfs/jfs_acl.h
··· 22 22 23 23 int jfs_check_acl(struct inode *, int); 24 24 int jfs_init_acl(tid_t, struct inode *, struct inode *); 25 - int jfs_setattr(struct dentry *, struct iattr *); 25 + int jfs_acl_chmod(struct inode *inode); 26 26 27 27 #else 28 28 29 29 static inline int jfs_init_acl(tid_t tid, struct inode *inode, 30 30 struct inode *dir) 31 + { 32 + return 0; 33 + } 34 + 35 + static inline int jfs_acl_chmod(struct inode *inode) 31 36 { 32 37 return 0; 33 38 }
+15 -13
fs/jfs/jfs_dtree.c
··· 381 381 * It's time to move the inline table to an external 382 382 * page and begin to build the xtree 383 383 */ 384 - if (vfs_dq_alloc_block(ip, sbi->nbperpage)) 384 + if (dquot_alloc_block(ip, sbi->nbperpage)) 385 385 goto clean_up; 386 386 if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) { 387 - vfs_dq_free_block(ip, sbi->nbperpage); 387 + dquot_free_block(ip, sbi->nbperpage); 388 388 goto clean_up; 389 389 } 390 390 ··· 408 408 memcpy(&jfs_ip->i_dirtable, temp_table, 409 409 sizeof (temp_table)); 410 410 dbFree(ip, xaddr, sbi->nbperpage); 411 - vfs_dq_free_block(ip, sbi->nbperpage); 411 + dquot_free_block(ip, sbi->nbperpage); 412 412 goto clean_up; 413 413 } 414 414 ip->i_size = PSIZE; ··· 1027 1027 n = xlen; 1028 1028 1029 1029 /* Allocate blocks to quota. */ 1030 - if (vfs_dq_alloc_block(ip, n)) { 1031 - rc = -EDQUOT; 1030 + rc = dquot_alloc_block(ip, n); 1031 + if (rc) 1032 1032 goto extendOut; 1033 - } 1034 1033 quota_allocation += n; 1035 1034 1036 1035 if ((rc = dbReAlloc(sbi->ipbmap, xaddr, (s64) xlen, ··· 1307 1308 1308 1309 /* Rollback quota allocation */ 1309 1310 if (rc && quota_allocation) 1310 - vfs_dq_free_block(ip, quota_allocation); 1311 + dquot_free_block(ip, quota_allocation); 1311 1312 1312 1313 dtSplitUp_Exit: 1313 1314 ··· 1368 1369 return -EIO; 1369 1370 1370 1371 /* Allocate blocks to quota. */ 1371 - if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) { 1372 + rc = dquot_alloc_block(ip, lengthPXD(pxd)); 1373 + if (rc) { 1372 1374 release_metapage(rmp); 1373 - return -EDQUOT; 1375 + return rc; 1374 1376 } 1375 1377 1376 1378 jfs_info("dtSplitPage: ip:0x%p smp:0x%p rmp:0x%p", ip, smp, rmp); ··· 1892 1892 struct dt_lock *dtlck; 1893 1893 struct tlock *tlck; 1894 1894 struct lv *lv; 1895 + int rc; 1895 1896 1896 1897 /* get split root page */ 1897 1898 smp = split->mp; ··· 1917 1916 rp = rmp->data; 1918 1917 1919 1918 /* Allocate blocks to quota. */ 1920 - if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) { 1919 + rc = dquot_alloc_block(ip, lengthPXD(pxd)); 1920 + if (rc) { 1921 1921 release_metapage(rmp); 1922 - return -EDQUOT; 1922 + return rc; 1923 1923 } 1924 1924 1925 1925 BT_MARK_DIRTY(rmp, ip); ··· 2289 2287 xlen = lengthPXD(&fp->header.self); 2290 2288 2291 2289 /* Free quota allocation. */ 2292 - vfs_dq_free_block(ip, xlen); 2290 + dquot_free_block(ip, xlen); 2293 2291 2294 2292 /* free/invalidate its buffer page */ 2295 2293 discard_metapage(fmp); ··· 2365 2363 xlen = lengthPXD(&p->header.self); 2366 2364 2367 2365 /* Free quota allocation */ 2368 - vfs_dq_free_block(ip, xlen); 2366 + dquot_free_block(ip, xlen); 2369 2367 2370 2368 /* free/invalidate its buffer page */ 2371 2369 discard_metapage(mp);
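All of the jfs_dtree.c conversions follow one shape: dquot_alloc_block() now returns 0 or a negative errno (typically -EDQUOT) rather than the old boolean, so callers propagate the value as-is and roll back with dquot_free_block() when the real allocator fails afterwards. The shape, isolated as a sketch (foofs_do_alloc is a hypothetical stand-in for dbAlloc and friends):

/* hypothetical stand-in for the filesystem's real block allocator */
static int foofs_do_alloc(struct inode *inode, s64 nblocks)
{
	return 0;
}

static int foofs_alloc_blocks(struct inode *inode, s64 nblocks)
{
	int rc;

	rc = dquot_alloc_block(inode, nblocks);	/* charge quota first */
	if (rc)
		return rc;			/* no more hardcoded -EDQUOT */

	rc = foofs_do_alloc(inode, nblocks);
	if (rc)
		dquot_free_block(inode, nblocks); /* roll the charge back */
	return rc;
}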
+9 -7
fs/jfs/jfs_extent.c
··· 141 141 } 142 142 143 143 /* Allocate blocks to quota. */ 144 - if (vfs_dq_alloc_block(ip, nxlen)) { 144 + rc = dquot_alloc_block(ip, nxlen); 145 + if (rc) { 145 146 dbFree(ip, nxaddr, (s64) nxlen); 146 147 mutex_unlock(&JFS_IP(ip)->commit_mutex); 147 - return -EDQUOT; 148 + return rc; 148 149 } 149 150 150 151 /* determine the value of the extent flag */ ··· 165 164 */ 166 165 if (rc) { 167 166 dbFree(ip, nxaddr, nxlen); 168 - vfs_dq_free_block(ip, nxlen); 167 + dquot_free_block(ip, nxlen); 169 168 mutex_unlock(&JFS_IP(ip)->commit_mutex); 170 169 return (rc); 171 170 } ··· 257 256 goto exit; 258 257 259 258 /* Allocat blocks to quota. */ 260 - if (vfs_dq_alloc_block(ip, nxlen)) { 259 + rc = dquot_alloc_block(ip, nxlen); 260 + if (rc) { 261 261 dbFree(ip, nxaddr, (s64) nxlen); 262 262 mutex_unlock(&JFS_IP(ip)->commit_mutex); 263 - return -EDQUOT; 263 + return rc; 264 264 } 265 265 266 266 delta = nxlen - xlen; ··· 299 297 /* extend the extent */ 300 298 if ((rc = xtExtend(0, ip, xoff + xlen, (int) nextend, 0))) { 301 299 dbFree(ip, xaddr + xlen, delta); 302 - vfs_dq_free_block(ip, nxlen); 300 + dquot_free_block(ip, nxlen); 303 301 goto exit; 304 302 } 305 303 } else { ··· 310 308 */ 311 309 if ((rc = xtTailgate(0, ip, xoff, (int) ntail, nxaddr, 0))) { 312 310 dbFree(ip, nxaddr, nxlen); 313 - vfs_dq_free_block(ip, nxlen); 311 + dquot_free_block(ip, nxlen); 314 312 goto exit; 315 313 } 316 314 }
+4 -4
fs/jfs/jfs_inode.c
··· 116 116 /* 117 117 * Allocate inode to quota. 118 118 */ 119 - if (vfs_dq_alloc_inode(inode)) { 120 - rc = -EDQUOT; 119 + dquot_initialize(inode); 120 + rc = dquot_alloc_inode(inode); 121 + if (rc) 121 122 goto fail_drop; 122 - } 123 123 124 124 inode->i_mode = mode; 125 125 /* inherit flags from parent */ ··· 162 162 return inode; 163 163 164 164 fail_drop: 165 - vfs_dq_drop(inode); 165 + dquot_drop(inode); 166 166 inode->i_flags |= S_NOQUOTA; 167 167 fail_unlock: 168 168 inode->i_nlink = 0;
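ialloc() shows the create-time half: attach dquots, charge one inode, and on failure undo with dquot_drop() while setting S_NOQUOTA so the eviction path does not uncharge a second time. As a self-contained sketch (hypothetical foofs, compressed from the jfs change):

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/quotaops.h>

static struct inode *foofs_new_inode(struct super_block *sb, int mode)
{
	struct inode *inode = new_inode(sb);
	int rc;

	if (!inode)
		return ERR_PTR(-ENOMEM);

	dquot_initialize(inode);
	rc = dquot_alloc_inode(inode);		/* 0 or -errno */
	if (rc) {
		dquot_drop(inode);
		inode->i_flags |= S_NOQUOTA;	/* skip quota on eviction */
		inode->i_nlink = 0;
		iput(inode);
		return ERR_PTR(rc);
	}

	inode->i_mode = mode;
	return inode;
}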
+1
fs/jfs/jfs_inode.h
··· 40 40 int fh_len, int fh_type); 41 41 extern void jfs_set_inode_flags(struct inode *); 42 42 extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int); 43 + extern int jfs_setattr(struct dentry *, struct iattr *); 43 44 44 45 extern const struct address_space_operations jfs_aops; 45 46 extern const struct inode_operations jfs_dir_inode_operations;
+11 -10
fs/jfs/jfs_xtree.c
··· 585 585 hint = addressXAD(xad) + lengthXAD(xad) - 1; 586 586 } else 587 587 hint = 0; 588 - if ((rc = vfs_dq_alloc_block(ip, xlen))) 588 + if ((rc = dquot_alloc_block(ip, xlen))) 589 589 goto out; 590 590 if ((rc = dbAlloc(ip, hint, (s64) xlen, &xaddr))) { 591 - vfs_dq_free_block(ip, xlen); 591 + dquot_free_block(ip, xlen); 592 592 goto out; 593 593 } 594 594 } ··· 617 617 /* undo data extent allocation */ 618 618 if (*xaddrp == 0) { 619 619 dbFree(ip, xaddr, (s64) xlen); 620 - vfs_dq_free_block(ip, xlen); 620 + dquot_free_block(ip, xlen); 621 621 } 622 622 return rc; 623 623 } ··· 985 985 rbn = addressPXD(pxd); 986 986 987 987 /* Allocate blocks to quota. */ 988 - if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) { 989 - rc = -EDQUOT; 988 + rc = dquot_alloc_block(ip, lengthPXD(pxd)); 989 + if (rc) 990 990 goto clean_up; 991 - } 992 991 993 992 quota_allocation += lengthPXD(pxd); 994 993 ··· 1194 1195 1195 1196 /* Rollback quota allocation. */ 1196 1197 if (quota_allocation) 1197 - vfs_dq_free_block(ip, quota_allocation); 1198 + dquot_free_block(ip, quota_allocation); 1198 1199 1199 1200 return (rc); 1200 1201 } ··· 1234 1235 struct pxdlist *pxdlist; 1235 1236 struct tlock *tlck; 1236 1237 struct xtlock *xtlck; 1238 + int rc; 1237 1239 1238 1240 sp = &JFS_IP(ip)->i_xtroot; 1239 1241 ··· 1252 1252 return -EIO; 1253 1253 1254 1254 /* Allocate blocks to quota. */ 1255 - if (vfs_dq_alloc_block(ip, lengthPXD(pxd))) { 1255 + rc = dquot_alloc_block(ip, lengthPXD(pxd)); 1256 + if (rc) { 1256 1257 release_metapage(rmp); 1257 - return -EDQUOT; 1258 + return rc; 1258 1259 } 1259 1260 1260 1261 jfs_info("xtSplitRoot: ip:0x%p rmp:0x%p", ip, rmp); ··· 3681 3680 ip->i_size = newsize; 3682 3681 3683 3682 /* update quota allocation to reflect freed blocks */ 3684 - vfs_dq_free_block(ip, nfreed); 3683 + dquot_free_block(ip, nfreed); 3685 3684 3686 3685 /* 3687 3686 * free tlock of invalidated pages
+19 -4
fs/jfs/namei.c
··· 85 85 86 86 jfs_info("jfs_create: dip:0x%p name:%s", dip, dentry->d_name.name); 87 87 88 + dquot_initialize(dip); 89 + 88 90 /* 89 91 * search parent directory for entry/freespace 90 92 * (dtSearch() returns parent directory page pinned) ··· 216 214 struct tblock *tblk; 217 215 218 216 jfs_info("jfs_mkdir: dip:0x%p name:%s", dip, dentry->d_name.name); 217 + 218 + dquot_initialize(dip); 219 219 220 220 /* link count overflow on parent directory ? */ 221 221 if (dip->i_nlink == JFS_LINK_MAX) { ··· 360 356 jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name); 361 357 362 358 /* Init inode for quota operations. */ 363 - vfs_dq_init(ip); 359 + dquot_initialize(dip); 360 + dquot_initialize(ip); 364 361 365 362 /* directory must be empty to be removed */ 366 363 if (!dtEmpty(ip)) { ··· 488 483 jfs_info("jfs_unlink: dip:0x%p name:%s", dip, dentry->d_name.name); 489 484 490 485 /* Init inode for quota operations. */ 491 - vfs_dq_init(ip); 486 + dquot_initialize(dip); 487 + dquot_initialize(ip); 492 488 493 489 if ((rc = get_UCSname(&dname, dentry))) 494 490 goto out; ··· 811 805 if (ip->i_nlink == 0) 812 806 return -ENOENT; 813 807 808 + dquot_initialize(dir); 809 + 814 810 tid = txBegin(ip->i_sb, 0); 815 811 816 812 mutex_lock_nested(&JFS_IP(dir)->commit_mutex, COMMIT_MUTEX_PARENT); ··· 903 895 struct inode *iplist[2]; 904 896 905 897 jfs_info("jfs_symlink: dip:0x%p name:%s", dip, name); 898 + 899 + dquot_initialize(dip); 906 900 907 901 ssize = strlen(name) + 1; 908 902 ··· 1097 1087 jfs_info("jfs_rename: %s %s", old_dentry->d_name.name, 1098 1088 new_dentry->d_name.name); 1099 1089 1090 + dquot_initialize(old_dir); 1091 + dquot_initialize(new_dir); 1092 + 1100 1093 old_ip = old_dentry->d_inode; 1101 1094 new_ip = new_dentry->d_inode; 1102 1095 ··· 1149 1136 } else if (new_ip) { 1150 1137 IWRITE_LOCK(new_ip, RDWRLOCK_NORMAL); 1151 1138 /* Init inode for quota operations. */ 1152 - vfs_dq_init(new_ip); 1139 + dquot_initialize(new_ip); 1153 1140 } 1154 1141 1155 1142 /* ··· 1373 1360 1374 1361 jfs_info("jfs_mknod: %s", dentry->d_name.name); 1375 1362 1363 + dquot_initialize(dir); 1364 + 1376 1365 if ((rc = get_UCSname(&dname, dentry))) 1377 1366 goto out; 1378 1367 ··· 1556 1541 .getxattr = jfs_getxattr, 1557 1542 .listxattr = jfs_listxattr, 1558 1543 .removexattr = jfs_removexattr, 1559 - #ifdef CONFIG_JFS_POSIX_ACL 1560 1544 .setattr = jfs_setattr, 1545 + #ifdef CONFIG_JFS_POSIX_ACL 1561 1546 .check_acl = jfs_check_acl, 1562 1547 #endif 1563 1548 };
+6
fs/jfs/super.c
··· 131 131 kmem_cache_free(jfs_inode_cachep, ji); 132 132 } 133 133 134 + static void jfs_clear_inode(struct inode *inode) 135 + { 136 + dquot_drop(inode); 137 + } 138 + 134 139 static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf) 135 140 { 136 141 struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb); ··· 750 745 .dirty_inode = jfs_dirty_inode, 751 746 .write_inode = jfs_write_inode, 752 747 .delete_inode = jfs_delete_inode, 748 + .clear_inode = jfs_clear_inode, 753 749 .put_super = jfs_put_super, 754 750 .sync_fs = jfs_sync_fs, 755 751 .freeze_fs = jfs_freeze,
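Because ->drop disappeared from dquot_operations, every filesystem now frees its dquot references itself at inode eviction; wiring dquot_drop() into ->clear_inode, as jfs does here, is the expected minimum (hypothetical foofs again):

static void foofs_clear_inode(struct inode *inode)
{
	dquot_drop(inode);	/* cheap no-op when no dquots are attached */
}

static const struct super_operations foofs_super_ops = {
	.clear_inode	= foofs_clear_inode,
	/* .write_inode, .delete_inode, ... elided */
};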
+9 -8
fs/jfs/xattr.c
··· 260 260 nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits; 261 261 262 262 /* Allocate new blocks to quota. */ 263 - if (vfs_dq_alloc_block(ip, nblocks)) { 264 - return -EDQUOT; 265 - } 263 + rc = dquot_alloc_block(ip, nblocks); 264 + if (rc) 265 + return rc; 266 266 267 267 rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno); 268 268 if (rc) { 269 269 /*Rollback quota allocation. */ 270 - vfs_dq_free_block(ip, nblocks); 270 + dquot_free_block(ip, nblocks); 271 271 return rc; 272 272 } 273 273 ··· 332 332 333 333 failed: 334 334 /* Rollback quota allocation. */ 335 - vfs_dq_free_block(ip, nblocks); 335 + dquot_free_block(ip, nblocks); 336 336 337 337 dbFree(ip, blkno, nblocks); 338 338 return rc; ··· 538 538 539 539 if (blocks_needed > current_blocks) { 540 540 /* Allocate new blocks to quota. */ 541 - if (vfs_dq_alloc_block(inode, blocks_needed)) 541 + rc = dquot_alloc_block(inode, blocks_needed); 542 + if (rc) 542 543 return -EDQUOT; 543 544 544 545 quota_allocation = blocks_needed; ··· 603 602 clean_up: 604 603 /* Rollback quota allocation */ 605 604 if (quota_allocation) 606 - vfs_dq_free_block(inode, quota_allocation); 605 + dquot_free_block(inode, quota_allocation); 607 606 608 607 return (rc); 609 608 } ··· 678 677 679 678 /* If old blocks exist, they must be removed from quota allocation. */ 680 679 if (old_blocks) 681 - vfs_dq_free_block(inode, old_blocks); 680 + dquot_free_block(inode, old_blocks); 682 681 683 682 inode->i_ctime = CURRENT_TIME; 684 683
-16
fs/namei.c
··· 19 19 #include <linux/slab.h> 20 20 #include <linux/fs.h> 21 21 #include <linux/namei.h> 22 - #include <linux/quotaops.h> 23 22 #include <linux/pagemap.h> 24 23 #include <linux/fsnotify.h> 25 24 #include <linux/personality.h> ··· 1415 1416 error = security_inode_create(dir, dentry, mode); 1416 1417 if (error) 1417 1418 return error; 1418 - vfs_dq_init(dir); 1419 1419 error = dir->i_op->create(dir, dentry, mode, nd); 1420 1420 if (!error) 1421 1421 fsnotify_create(dir, dentry); ··· 1584 1586 } 1585 1587 } 1586 1588 if (!IS_ERR(filp)) { 1587 - if (acc_mode & MAY_WRITE) 1588 - vfs_dq_init(nd->path.dentry->d_inode); 1589 - 1590 1589 if (will_truncate) { 1591 1590 error = handle_truncate(&nd->path); 1592 1591 if (error) { ··· 1981 1986 if (error) 1982 1987 return error; 1983 1988 1984 - vfs_dq_init(dir); 1985 1989 error = dir->i_op->mknod(dir, dentry, mode, dev); 1986 1990 if (!error) 1987 1991 fsnotify_create(dir, dentry); ··· 2079 2085 if (error) 2080 2086 return error; 2081 2087 2082 - vfs_dq_init(dir); 2083 2088 error = dir->i_op->mkdir(dir, dentry, mode); 2084 2089 if (!error) 2085 2090 fsnotify_mkdir(dir, dentry); ··· 2164 2171 if (!dir->i_op->rmdir) 2165 2172 return -EPERM; 2166 2173 2167 - vfs_dq_init(dir); 2168 - 2169 2174 mutex_lock(&dentry->d_inode->i_mutex); 2170 2175 dentry_unhash(dentry); 2171 2176 if (d_mountpoint(dentry)) ··· 2248 2257 2249 2258 if (!dir->i_op->unlink) 2250 2259 return -EPERM; 2251 - 2252 - vfs_dq_init(dir); 2253 2260 2254 2261 mutex_lock(&dentry->d_inode->i_mutex); 2255 2262 if (d_mountpoint(dentry)) ··· 2361 2372 if (error) 2362 2373 return error; 2363 2374 2364 - vfs_dq_init(dir); 2365 2375 error = dir->i_op->symlink(dir, dentry, oldname); 2366 2376 if (!error) 2367 2377 fsnotify_create(dir, dentry); ··· 2444 2456 return error; 2445 2457 2446 2458 mutex_lock(&inode->i_mutex); 2447 - vfs_dq_init(dir); 2448 2459 error = dir->i_op->link(old_dentry, dir, new_dentry); 2449 2460 mutex_unlock(&inode->i_mutex); 2450 2461 if (!error) ··· 2643 2656 2644 2657 if (!old_dir->i_op->rename) 2645 2658 return -EPERM; 2646 - 2647 - vfs_dq_init(old_dir); 2648 - vfs_dq_init(new_dir); 2649 2659 2650 2660 old_name = fsnotify_oldname_init(old_dentry->d_name.name); 2651 2661
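With these vfs_dq_init() calls removed, the VFS no longer guarantees that dquots are attached before ->create, ->unlink, ->rename and the rest; the jfs and ocfs2 hunks elsewhere in this merge are the matching per-filesystem additions. What an ->unlink is now expected to do, sketched for a hypothetical filesystem (entry removal elided):

static int foofs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	/* formerly done by the VFS via vfs_dq_init() */
	dquot_initialize(dir);
	dquot_initialize(inode);

	/* ... remove the directory entry, drop_nlink(inode), etc. ... */
	return 0;
}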
-4
fs/nfsd/vfs.c
··· 20 20 #include <linux/fcntl.h> 21 21 #include <linux/namei.h> 22 22 #include <linux/delay.h> 23 - #include <linux/quotaops.h> 24 23 #include <linux/fsnotify.h> 25 24 #include <linux/posix_acl_xattr.h> 26 25 #include <linux/xattr.h> ··· 376 377 put_write_access(inode); 377 378 goto out_nfserr; 378 379 } 379 - vfs_dq_init(inode); 380 380 } 381 381 382 382 /* sanitize the mode change */ ··· 743 745 flags = O_RDWR|O_LARGEFILE; 744 746 else 745 747 flags = O_WRONLY|O_LARGEFILE; 746 - 747 - vfs_dq_init(inode); 748 748 } 749 749 *filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt), 750 750 flags, current_cred());
+6 -7
fs/ocfs2/alloc.c
··· 5713 5713 goto out; 5714 5714 } 5715 5715 5716 - vfs_dq_free_space_nodirty(inode, 5716 + dquot_free_space_nodirty(inode, 5717 5717 ocfs2_clusters_to_bytes(inode->i_sb, len)); 5718 5718 5719 5719 ret = ocfs2_remove_extent(handle, et, cpos, len, meta_ac, dealloc); ··· 6936 6936 goto bail; 6937 6937 } 6938 6938 6939 - vfs_dq_free_space_nodirty(inode, 6939 + dquot_free_space_nodirty(inode, 6940 6940 ocfs2_clusters_to_bytes(osb->sb, clusters_to_del)); 6941 6941 spin_lock(&OCFS2_I(inode)->ip_lock); 6942 6942 OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters) - ··· 7301 7301 unsigned int page_end; 7302 7302 u64 phys; 7303 7303 7304 - if (vfs_dq_alloc_space_nodirty(inode, 7305 - ocfs2_clusters_to_bytes(osb->sb, 1))) { 7306 - ret = -EDQUOT; 7304 + ret = dquot_alloc_space_nodirty(inode, 7305 + ocfs2_clusters_to_bytes(osb->sb, 1)); 7306 + if (ret) 7307 7307 goto out_commit; 7308 - } 7309 7308 did_quota = 1; 7310 7309 7311 7310 ret = ocfs2_claim_clusters(osb, handle, data_ac, 1, &bit_off, ··· 7380 7381 7381 7382 out_commit: 7382 7383 if (ret < 0 && did_quota) 7383 - vfs_dq_free_space_nodirty(inode, 7384 + dquot_free_space_nodirty(inode, 7384 7385 ocfs2_clusters_to_bytes(osb->sb, 1)); 7385 7386 7386 7387 ocfs2_commit_trans(osb, handle);
+6 -5
fs/ocfs2/aops.c
··· 1764 1764 1765 1765 wc->w_handle = handle; 1766 1766 1767 - if (clusters_to_alloc && vfs_dq_alloc_space_nodirty(inode, 1768 - ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc))) { 1769 - ret = -EDQUOT; 1770 - goto out_commit; 1767 + if (clusters_to_alloc) { 1768 + ret = dquot_alloc_space_nodirty(inode, 1769 + ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc)); 1770 + if (ret) 1771 + goto out_commit; 1771 1772 } 1772 1773 /* 1773 1774 * We don't want this to fail in ocfs2_write_end(), so do it ··· 1811 1810 return 0; 1812 1811 out_quota: 1813 1812 if (clusters_to_alloc) 1814 - vfs_dq_free_space(inode, 1813 + dquot_free_space(inode, 1815 1814 ocfs2_clusters_to_bytes(osb->sb, clusters_to_alloc)); 1816 1815 out_commit: 1817 1816 ocfs2_commit_trans(osb, handle);
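The _nodirty variants charge quota without calling mark_inode_dirty(), for paths like ocfs2's write_begin that dirty the inode later inside the same transaction; on abort the charge is returned. The reserve/rollback pattern, reduced to a sketch (byte-based interface; foofs_map_clusters is a hypothetical allocator hook):

/* hypothetical allocator hook */
static int foofs_map_clusters(struct inode *inode, qsize_t bytes)
{
	return 0;
}

static int foofs_write_begin(struct inode *inode, qsize_t bytes)
{
	int ret;

	ret = dquot_alloc_space_nodirty(inode, bytes);
	if (ret)
		return ret;		/* -EDQUOT propagates to the writer */

	ret = foofs_map_clusters(inode, bytes);
	if (ret)
		dquot_free_space_nodirty(inode, bytes);
	else
		mark_inode_dirty(inode); /* make up for the _nodirty charge */
	return ret;
}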
+16 -21
fs/ocfs2/dir.c
··· 2964 2964 goto out; 2965 2965 } 2966 2966 2967 - if (vfs_dq_alloc_space_nodirty(dir, 2968 - ocfs2_clusters_to_bytes(osb->sb, 2969 - alloc + dx_alloc))) { 2970 - ret = -EDQUOT; 2967 + ret = dquot_alloc_space_nodirty(dir, 2968 + ocfs2_clusters_to_bytes(osb->sb, alloc + dx_alloc)); 2969 + if (ret) 2971 2970 goto out_commit; 2972 - } 2973 2971 did_quota = 1; 2974 2972 2975 2973 if (ocfs2_supports_indexed_dirs(osb) && !dx_inline) { ··· 3176 3178 3177 3179 out_commit: 3178 3180 if (ret < 0 && did_quota) 3179 - vfs_dq_free_space_nodirty(dir, bytes_allocated); 3181 + dquot_free_space_nodirty(dir, bytes_allocated); 3180 3182 3181 3183 ocfs2_commit_trans(osb, handle); 3182 3184 ··· 3219 3221 if (extend) { 3220 3222 u32 offset = OCFS2_I(dir)->ip_clusters; 3221 3223 3222 - if (vfs_dq_alloc_space_nodirty(dir, 3223 - ocfs2_clusters_to_bytes(sb, 1))) { 3224 - status = -EDQUOT; 3224 + status = dquot_alloc_space_nodirty(dir, 3225 + ocfs2_clusters_to_bytes(sb, 1)); 3226 + if (status) 3225 3227 goto bail; 3226 - } 3227 3228 did_quota = 1; 3228 3229 3229 3230 status = ocfs2_add_inode_data(OCFS2_SB(sb), dir, &offset, ··· 3251 3254 status = 0; 3252 3255 bail: 3253 3256 if (did_quota && status < 0) 3254 - vfs_dq_free_space_nodirty(dir, ocfs2_clusters_to_bytes(sb, 1)); 3257 + dquot_free_space_nodirty(dir, ocfs2_clusters_to_bytes(sb, 1)); 3255 3258 mlog_exit(status); 3256 3259 return status; 3257 3260 } ··· 3886 3889 goto out; 3887 3890 } 3888 3891 3889 - if (vfs_dq_alloc_space_nodirty(dir, 3890 - ocfs2_clusters_to_bytes(dir->i_sb, 1))) { 3891 - ret = -EDQUOT; 3892 + ret = dquot_alloc_space_nodirty(dir, 3893 + ocfs2_clusters_to_bytes(dir->i_sb, 1)); 3894 + if (ret) 3892 3895 goto out_commit; 3893 - } 3894 3896 did_quota = 1; 3895 3897 3896 3898 ret = ocfs2_journal_access_dl(handle, INODE_CACHE(dir), dx_leaf_bh, ··· 3979 3983 3980 3984 out_commit: 3981 3985 if (ret < 0 && did_quota) 3982 - vfs_dq_free_space_nodirty(dir, 3986 + dquot_free_space_nodirty(dir, 3983 3987 ocfs2_clusters_to_bytes(dir->i_sb, 1)); 3984 3988 3985 3989 ocfs2_commit_trans(osb, handle); ··· 4161 4165 goto out; 4162 4166 } 4163 4167 4164 - if (vfs_dq_alloc_space_nodirty(dir, 4165 - ocfs2_clusters_to_bytes(osb->sb, 1))) { 4166 - ret = -EDQUOT; 4168 + ret = dquot_alloc_space_nodirty(dir, 4169 + ocfs2_clusters_to_bytes(osb->sb, 1)); 4170 + if (ret) 4167 4171 goto out_commit; 4168 - } 4169 4172 did_quota = 1; 4170 4173 4171 4174 /* ··· 4224 4229 4225 4230 out_commit: 4226 4231 if (ret < 0 && did_quota) 4227 - vfs_dq_free_space_nodirty(dir, 4232 + dquot_free_space_nodirty(dir, 4228 4233 ocfs2_clusters_to_bytes(dir->i_sb, 1)); 4229 4234 4230 4235 ocfs2_commit_trans(osb, handle);
+12 -8
fs/ocfs2/file.c
··· 107 107 mlog_entry("(0x%p, 0x%p, '%.*s')\n", inode, file, 108 108 file->f_path.dentry->d_name.len, file->f_path.dentry->d_name.name); 109 109 110 + if (file->f_mode & FMODE_WRITE) 111 + dquot_initialize(inode); 112 + 110 113 spin_lock(&oi->ip_lock); 111 114 112 115 /* Check that the inode hasn't been wiped from disk by another ··· 632 629 } 633 630 634 631 restarted_transaction: 635 - if (vfs_dq_alloc_space_nodirty(inode, ocfs2_clusters_to_bytes(osb->sb, 636 - clusters_to_add))) { 637 - status = -EDQUOT; 632 + status = dquot_alloc_space_nodirty(inode, 633 + ocfs2_clusters_to_bytes(osb->sb, clusters_to_add)); 634 + if (status) 638 635 goto leave; 639 - } 640 636 did_quota = 1; 641 637 642 638 /* reserve a write to the file entry early on - that we if we ··· 676 674 clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters); 677 675 spin_unlock(&OCFS2_I(inode)->ip_lock); 678 676 /* Release unused quota reservation */ 679 - vfs_dq_free_space(inode, 677 + dquot_free_space(inode, 680 678 ocfs2_clusters_to_bytes(osb->sb, clusters_to_add)); 681 679 did_quota = 0; 682 680 ··· 712 710 713 711 leave: 714 712 if (status < 0 && did_quota) 715 - vfs_dq_free_space(inode, 713 + dquot_free_space(inode, 716 714 ocfs2_clusters_to_bytes(osb->sb, clusters_to_add)); 717 715 if (handle) { 718 716 ocfs2_commit_trans(osb, handle); ··· 980 978 981 979 size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE; 982 980 if (size_change) { 981 + dquot_initialize(inode); 982 + 983 983 status = ocfs2_rw_lock(inode, 1); 984 984 if (status < 0) { 985 985 mlog_errno(status); ··· 1024 1020 /* 1025 1021 * Gather pointers to quota structures so that allocation / 1026 1022 * freeing of quota structures happens here and not inside 1027 - * vfs_dq_transfer() where we have problems with lock ordering 1023 + * dquot_transfer() where we have problems with lock ordering 1028 1024 */ 1029 1025 if (attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid 1030 1026 && OCFS2_HAS_RO_COMPAT_FEATURE(sb, ··· 1057 1053 mlog_errno(status); 1058 1054 goto bail_unlock; 1059 1055 } 1060 - status = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0; 1056 + status = dquot_transfer(inode, attr); 1061 1057 if (status < 0) 1062 1058 goto bail_commit; 1063 1059 } else {
+5 -1
fs/ocfs2/inode.c
··· 665 665 } 666 666 667 667 ocfs2_remove_from_cache(INODE_CACHE(inode), di_bh); 668 - vfs_dq_free_inode(inode); 668 + dquot_free_inode(inode); 669 669 670 670 status = ocfs2_free_dinode(handle, inode_alloc_inode, 671 671 inode_alloc_bh, di); ··· 971 971 goto bail; 972 972 } 973 973 974 + dquot_initialize(inode); 975 + 974 976 if (!ocfs2_inode_is_valid_to_delete(inode)) { 975 977 /* It's probably not necessary to truncate_inode_pages 976 978 * here but we do it for safety anyway (it will most ··· 1088 1086 1089 1087 mlog_bug_on_msg(OCFS2_SB(inode->i_sb) == NULL, 1090 1088 "Inode=%lu\n", inode->i_ino); 1089 + 1090 + dquot_drop(inode); 1091 1091 1092 1092 /* To preven remote deletes we hold open lock before, now it 1093 1093 * is time to unlock PR and EX open locks. */
+25 -27
fs/ocfs2/namei.c
··· 212 212 } else 213 213 inode->i_gid = current_fsgid(); 214 214 inode->i_mode = mode; 215 - vfs_dq_init(inode); 215 + dquot_initialize(inode); 216 216 return inode; 217 217 } 218 218 ··· 243 243 mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry, mode, 244 244 (unsigned long)dev, dentry->d_name.len, 245 245 dentry->d_name.name); 246 + 247 + dquot_initialize(dir); 246 248 247 249 /* get our super block */ 248 250 osb = OCFS2_SB(dir->i_sb); ··· 350 348 goto leave; 351 349 } 352 350 353 - /* We don't use standard VFS wrapper because we don't want vfs_dq_init 354 - * to be called. */ 355 - if (sb_any_quota_active(osb->sb) && 356 - osb->sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA) { 357 - status = -EDQUOT; 351 + status = dquot_alloc_inode(inode); 352 + if (status) 358 353 goto leave; 359 - } 360 354 did_quota_inode = 1; 361 355 362 356 mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry, ··· 429 431 status = 0; 430 432 leave: 431 433 if (status < 0 && did_quota_inode) 432 - vfs_dq_free_inode(inode); 434 + dquot_free_inode(inode); 433 435 if (handle) 434 436 ocfs2_commit_trans(osb, handle); 435 437 ··· 634 636 if (S_ISDIR(inode->i_mode)) 635 637 return -EPERM; 636 638 639 + dquot_initialize(dir); 640 + 637 641 err = ocfs2_inode_lock_nested(dir, &parent_fe_bh, 1, OI_LS_PARENT); 638 642 if (err < 0) { 639 643 if (err != -ENOENT) ··· 790 790 791 791 mlog_entry("(0x%p, 0x%p, '%.*s')\n", dir, dentry, 792 792 dentry->d_name.len, dentry->d_name.name); 793 + 794 + dquot_initialize(dir); 793 795 794 796 BUG_ON(dentry->d_parent->d_inode != dir); ··· 1052 1050 old_dir, old_dentry, new_dir, new_dentry, 1053 1051 old_dentry->d_name.len, old_dentry->d_name.name, 1054 1052 new_dentry->d_name.len, new_dentry->d_name.name); 1053 + 1054 + dquot_initialize(old_dir); 1055 + dquot_initialize(new_dir); 1055 1056 1056 1057 osb = OCFS2_SB(old_dir->i_sb); ··· 1604 1599 mlog_entry("(0x%p, 0x%p, symname='%s' actual='%.*s')\n", dir, 1605 1600 dentry, symname, dentry->d_name.len, dentry->d_name.name); 1606 1601 1602 + dquot_initialize(dir); 1603 + 1607 1604 sb = dir->i_sb; 1608 1605 osb = OCFS2_SB(sb); ··· 1695 1688 goto bail; 1696 1689 } 1697 1690 1698 - /* We don't use standard VFS wrapper because we don't want vfs_dq_init 1699 - * to be called. */ 1700 - if (sb_any_quota_active(osb->sb) && 1701 - osb->sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA) { 1702 - status = -EDQUOT; 1691 + status = dquot_alloc_inode(inode); 1692 + if (status) 1703 1693 goto bail; 1704 - } 1705 1694 did_quota_inode = 1; 1706 1695 mlog_entry("(0x%p, 0x%p, %d, '%.*s')\n", dir, dentry, ··· 1719 1716 u32 offset = 0; 1720 1717 1721 1718 inode->i_op = &ocfs2_symlink_inode_operations; 1722 - if (vfs_dq_alloc_space_nodirty(inode, 1723 - ocfs2_clusters_to_bytes(osb->sb, 1))) { 1724 - status = -EDQUOT; 1719 + status = dquot_alloc_space_nodirty(inode, 1720 + ocfs2_clusters_to_bytes(osb->sb, 1)); 1721 + if (status) 1725 1722 goto bail; 1726 - } 1727 1723 did_quota = 1; 1728 1724 status = ocfs2_add_inode_data(osb, inode, &offset, 1, 0, 1729 1725 new_fe_bh, ··· 1790 1788 d_instantiate(dentry, inode); 1791 1789 bail: 1792 1790 if (status < 0 && did_quota) 1793 - vfs_dq_free_space_nodirty(inode, 1791 + dquot_free_space_nodirty(inode, 1794 1792 ocfs2_clusters_to_bytes(osb->sb, 1)); 1795 1793 if (status < 0 && did_quota_inode) 1796 - vfs_dq_free_inode(inode); 1794 + dquot_free_inode(inode); 1797 1795 if (handle) 1798 1796 ocfs2_commit_trans(osb, handle); 1799 1797 ··· 2101 2099 goto leave; 2102 2100 } 2103 2101 2104 - /* We don't use standard VFS wrapper because we don't want vfs_dq_init 2105 - * to be called. */ 2106 - if (sb_any_quota_active(osb->sb) && 2107 - osb->sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA) { 2108 - status = -EDQUOT; 2102 + status = dquot_alloc_inode(inode); 2103 + if (status) 2109 2104 goto leave; 2110 - } 2111 2105 did_quota_inode = 1; 2112 2106 2113 2107 inode->i_nlink = 0; ··· 2138 2140 insert_inode_hash(inode); 2139 2141 leave: 2140 2142 if (status < 0 && did_quota_inode) 2141 - vfs_dq_free_inode(inode); 2143 + dquot_free_inode(inode); 2142 2144 if (handle) 2143 2145 ocfs2_commit_trans(osb, handle); 2144 2146
-7
fs/ocfs2/quota_global.c
··· 851 851 } 852 852 853 853 const struct dquot_operations ocfs2_quota_operations = { 854 - .initialize = dquot_initialize, 855 - .drop = dquot_drop, 856 - .alloc_space = dquot_alloc_space, 857 - .alloc_inode = dquot_alloc_inode, 858 - .free_space = dquot_free_space, 859 - .free_inode = dquot_free_inode, 860 - .transfer = dquot_transfer, 861 854 .write_dquot = ocfs2_write_dquot, 862 855 .acquire_dquot = ocfs2_acquire_dquot, 863 856 .release_dquot = ocfs2_release_dquot,
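After deleting the seven generic entries, only the dquot-lifecycle callbacks remain in the vector. For a filesystem content with stock behaviour, the table reduces to the generic helpers; the .mark_dirty and .write_info fields below are quoted from memory of the surrounding 2.6.33 defaults, so treat this as a sketch:

static const struct dquot_operations foofs_quota_operations = {
	.write_dquot	= dquot_commit,
	.acquire_dquot	= dquot_acquire,
	.release_dquot	= dquot_release,
	.mark_dirty	= dquot_mark_dquot_dirty,
	.write_info	= dquot_commit_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
};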
+1 -1
fs/ocfs2/refcounttree.c
··· 4390 4390 } 4391 4391 4392 4392 mutex_lock(&inode->i_mutex); 4393 - vfs_dq_init(dir); 4393 + dquot_initialize(dir); 4394 4394 error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve); 4395 4395 mutex_unlock(&inode->i_mutex); 4396 4396 if (!error)
+1 -4
fs/open.c
··· 8 8 #include <linux/mm.h> 9 9 #include <linux/file.h> 10 10 #include <linux/fdtable.h> 11 - #include <linux/quotaops.h> 12 11 #include <linux/fsnotify.h> 13 12 #include <linux/module.h> 14 13 #include <linux/slab.h> ··· 277 278 error = locks_verify_truncate(inode, NULL, length); 278 279 if (!error) 279 280 error = security_path_truncate(&path, length, 0); 280 - if (!error) { 281 - vfs_dq_init(inode); 281 + if (!error) 282 282 error = do_truncate(path.dentry, length, 0, NULL); 283 - } 284 283 285 284 put_write_and_out: 286 285 put_write_access(inode);
+5
fs/quota/Kconfig
··· 59 59 bool 60 60 depends on XFS_QUOTA || QUOTA 61 61 default y 62 + 63 + config QUOTACTL_COMPAT 64 + bool 65 + depends on QUOTACTL && COMPAT_FOR_U64_ALIGNMENT 66 + default y
+2
fs/quota/Makefile
··· 3 3 obj-$(CONFIG_QFMT_V2) += quota_v2.o 4 4 obj-$(CONFIG_QUOTA_TREE) += quota_tree.o 5 5 obj-$(CONFIG_QUOTACTL) += quota.o 6 + obj-$(CONFIG_QUOTACTL_COMPAT) += compat.o 7 + obj-$(CONFIG_QUOTA_NETLINK_INTERFACE) += netlink.o
+118
fs/quota/compat.c
··· 1 + 2 + #include <linux/syscalls.h> 3 + #include <linux/compat.h> 4 + #include <linux/quotaops.h> 5 + 6 + /* 7 + * This code works only for 32 bit quota tools over 64 bit OS (x86_64, ia64) 8 + * and is necessary due to alignment problems. 9 + */ 10 + struct compat_if_dqblk { 11 + compat_u64 dqb_bhardlimit; 12 + compat_u64 dqb_bsoftlimit; 13 + compat_u64 dqb_curspace; 14 + compat_u64 dqb_ihardlimit; 15 + compat_u64 dqb_isoftlimit; 16 + compat_u64 dqb_curinodes; 17 + compat_u64 dqb_btime; 18 + compat_u64 dqb_itime; 19 + compat_uint_t dqb_valid; 20 + }; 21 + 22 + /* XFS structures */ 23 + struct compat_fs_qfilestat { 24 + compat_u64 dqb_bhardlimit; 25 + compat_u64 qfs_nblks; 26 + compat_uint_t qfs_nextents; 27 + }; 28 + 29 + struct compat_fs_quota_stat { 30 + __s8 qs_version; 31 + __u16 qs_flags; 32 + __s8 qs_pad; 33 + struct compat_fs_qfilestat qs_uquota; 34 + struct compat_fs_qfilestat qs_gquota; 35 + compat_uint_t qs_incoredqs; 36 + compat_int_t qs_btimelimit; 37 + compat_int_t qs_itimelimit; 38 + compat_int_t qs_rtbtimelimit; 39 + __u16 qs_bwarnlimit; 40 + __u16 qs_iwarnlimit; 41 + }; 42 + 43 + asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special, 44 + qid_t id, void __user *addr) 45 + { 46 + unsigned int cmds; 47 + struct if_dqblk __user *dqblk; 48 + struct compat_if_dqblk __user *compat_dqblk; 49 + struct fs_quota_stat __user *fsqstat; 50 + struct compat_fs_quota_stat __user *compat_fsqstat; 51 + compat_uint_t data; 52 + u16 xdata; 53 + long ret; 54 + 55 + cmds = cmd >> SUBCMDSHIFT; 56 + 57 + switch (cmds) { 58 + case Q_GETQUOTA: 59 + dqblk = compat_alloc_user_space(sizeof(struct if_dqblk)); 60 + compat_dqblk = addr; 61 + ret = sys_quotactl(cmd, special, id, dqblk); 62 + if (ret) 63 + break; 64 + if (copy_in_user(compat_dqblk, dqblk, sizeof(*compat_dqblk)) || 65 + get_user(data, &dqblk->dqb_valid) || 66 + put_user(data, &compat_dqblk->dqb_valid)) 67 + ret = -EFAULT; 68 + break; 69 + case Q_SETQUOTA: 70 + dqblk = compat_alloc_user_space(sizeof(struct if_dqblk)); 71 + compat_dqblk = addr; 72 + ret = -EFAULT; 73 + if (copy_in_user(dqblk, compat_dqblk, sizeof(*compat_dqblk)) || 74 + get_user(data, &compat_dqblk->dqb_valid) || 75 + put_user(data, &dqblk->dqb_valid)) 76 + break; 77 + ret = sys_quotactl(cmd, special, id, dqblk); 78 + break; 79 + case Q_XGETQSTAT: 80 + fsqstat = compat_alloc_user_space(sizeof(struct fs_quota_stat)); 81 + compat_fsqstat = addr; 82 + ret = sys_quotactl(cmd, special, id, fsqstat); 83 + if (ret) 84 + break; 85 + ret = -EFAULT; 86 + /* Copying qs_version, qs_flags, qs_pad */ 87 + if (copy_in_user(compat_fsqstat, fsqstat, 88 + offsetof(struct compat_fs_quota_stat, qs_uquota))) 89 + break; 90 + /* Copying qs_uquota */ 91 + if (copy_in_user(&compat_fsqstat->qs_uquota, 92 + &fsqstat->qs_uquota, 93 + sizeof(compat_fsqstat->qs_uquota)) || 94 + get_user(data, &fsqstat->qs_uquota.qfs_nextents) || 95 + put_user(data, &compat_fsqstat->qs_uquota.qfs_nextents)) 96 + break; 97 + /* Copying qs_gquota */ 98 + if (copy_in_user(&compat_fsqstat->qs_gquota, 99 + &fsqstat->qs_gquota, 100 + sizeof(compat_fsqstat->qs_gquota)) || 101 + get_user(data, &fsqstat->qs_gquota.qfs_nextents) || 102 + put_user(data, &compat_fsqstat->qs_gquota.qfs_nextents)) 103 + break; 104 + /* Copying the rest */ 105 + if (copy_in_user(&compat_fsqstat->qs_incoredqs, 106 + &fsqstat->qs_incoredqs, 107 + sizeof(struct compat_fs_quota_stat) - 108 + offsetof(struct compat_fs_quota_stat, qs_incoredqs)) || 109 + get_user(xdata, &fsqstat->qs_iwarnlimit) || 110 + put_user(xdata, &compat_fsqstat->qs_iwarnlimit)) 111 + break; 112 + ret = 0; 113 + break; 114 + default: 115 + ret = sys_quotactl(cmd, special, id, addr); 116 + } 117 + return ret; 118 + }
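The shim translates between the packed 32-bit layouts and the native ones with compat_alloc_user_space() plus copy_in_user(), then funnels everything through sys_quotactl(). From a 32-bit process the interface looks unchanged; a hypothetical userspace probe (the device path and uid are placeholders):

#include <stdio.h>
#include <sys/quota.h>

int main(void)
{
	struct dqblk dq;	/* glibc's mirror of the kernel if_dqblk */

	if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), "/dev/sda1", 1000,
		     (caddr_t)&dq) != 0) {
		perror("quotactl");
		return 1;
	}
	printf("space used: %llu bytes\n",
	       (unsigned long long)dq.dqb_curspace);
	return 0;
}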
+221 -191
fs/quota/dquot.c
··· 100 100 * 101 101 * Any operation working on dquots via inode pointers must hold dqptr_sem. If 102 102 * operation is just reading pointers from inode (or not using them at all) the 103 - * read lock is enough. If pointers are altered function must hold write lock 104 - * (these locking rules also apply for S_NOQUOTA flag in the inode - note that 105 - * for altering the flag i_mutex is also needed). 103 + * read lock is enough. If pointers are altered function must hold write lock. 104 + * Special care needs to be taken about S_NOQUOTA inode flag (marking that 105 + * inode is a quota file). Functions adding pointers from inode to dquots have 106 + * to check this flag under dqptr_sem and then (if S_NOQUOTA is not set) they 107 + * have to do all pointer modifications before dropping dqptr_sem. This makes 108 + * sure they cannot race with quotaon which first sets S_NOQUOTA flag and 109 + * then drops all pointers to dquots from an inode. 106 110 * 107 111 * Each dquot has its dq_lock mutex. Locked dquots might not be referenced 108 112 * from inodes (dquot_alloc_space() and such don't check the dq_lock). ··· 228 224 229 225 struct dqstats dqstats; 230 226 EXPORT_SYMBOL(dqstats); 227 + 228 + static qsize_t inode_get_rsv_space(struct inode *inode); 229 + static void __dquot_initialize(struct inode *inode, int type); 231 230 232 231 static inline unsigned int 233 232 hashfn(const struct super_block *sb, unsigned int id, int type) ··· 571 564 } 572 565 EXPORT_SYMBOL(dquot_scan_active); 573 566 574 - int vfs_quota_sync(struct super_block *sb, int type) 567 + int vfs_quota_sync(struct super_block *sb, int type, int wait) 575 568 { 576 569 struct list_head *dirty; 577 570 struct dquot *dquot; ··· 615 608 dqstats.syncs++; 616 609 spin_unlock(&dq_list_lock); 617 610 mutex_unlock(&dqopt->dqonoff_mutex); 611 + 612 + if (!wait || (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE)) 613 + return 0; 614 + 615 + /* This is not very clever (and fast) but currently I don't know about 616 + * any other simple way of getting quota data to disk and we must get 617 + * them there for userspace to be visible... */ 618 + if (sb->s_op->sync_fs) 619 + sb->s_op->sync_fs(sb, 1); 620 + sync_blockdev(sb->s_bdev); 621 + 622 + /* 623 + * Now when everything is written we can discard the pagecache so 624 + * that userspace sees the changes. 
625 + */ 626 + mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); 627 + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 628 + if (type != -1 && cnt != type) 629 + continue; 630 + if (!sb_has_quota_active(sb, cnt)) 631 + continue; 632 + mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, 633 + I_MUTEX_QUOTA); 634 + truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0); 635 + mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex); 636 + } 637 + mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); 618 638 619 639 return 0; 620 640 } ··· 874 840 static void add_dquot_ref(struct super_block *sb, int type) 875 841 { 876 842 struct inode *inode, *old_inode = NULL; 843 + int reserved = 0; 877 844 878 845 spin_lock(&inode_lock); 879 846 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { 880 847 if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW)) 881 848 continue; 849 + if (unlikely(inode_get_rsv_space(inode) > 0)) 850 + reserved = 1; 882 851 if (!atomic_read(&inode->i_writecount)) 883 852 continue; 884 853 if (!dqinit_needed(inode, type)) ··· 891 854 spin_unlock(&inode_lock); 892 855 893 856 iput(old_inode); 894 - sb->dq_op->initialize(inode, type); 857 + __dquot_initialize(inode, type); 895 858 /* We hold a reference to 'inode' so it couldn't have been 896 859 * removed from s_inodes list while we dropped the inode_lock. 897 860 * We cannot iput the inode now as we can be holding the last ··· 902 865 } 903 866 spin_unlock(&inode_lock); 904 867 iput(old_inode); 868 + 869 + if (reserved) { 870 + printk(KERN_WARNING "VFS (%s): Writes happened before quota" 871 + " was turned on thus quota information is probably " 872 + "inconsistent. Please run quotacheck(8).\n", sb->s_id); 873 + } 905 874 } 906 875 907 876 /* ··· 1021 978 /* 1022 979 * Claim reserved quota space 1023 980 */ 1024 - static void dquot_claim_reserved_space(struct dquot *dquot, 1025 - qsize_t number) 981 + static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number) 1026 982 { 1027 - WARN_ON(dquot->dq_dqb.dqb_rsvspace < number); 983 + if (dquot->dq_dqb.dqb_rsvspace < number) { 984 + WARN_ON_ONCE(1); 985 + number = dquot->dq_dqb.dqb_rsvspace; 986 + } 1028 987 dquot->dq_dqb.dqb_curspace += number; 1029 988 dquot->dq_dqb.dqb_rsvspace -= number; 1030 989 } ··· 1034 989 static inline 1035 990 void dquot_free_reserved_space(struct dquot *dquot, qsize_t number) 1036 991 { 1037 - dquot->dq_dqb.dqb_rsvspace -= number; 992 + if (dquot->dq_dqb.dqb_rsvspace >= number) 993 + dquot->dq_dqb.dqb_rsvspace -= number; 994 + else { 995 + WARN_ON_ONCE(1); 996 + dquot->dq_dqb.dqb_rsvspace = 0; 997 + } 1038 998 } 1039 999 1040 1000 static void dquot_decr_inodes(struct dquot *dquot, qsize_t number) ··· 1181 1131 *warntype = QUOTA_NL_NOWARN; 1182 1132 if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) || 1183 1133 test_bit(DQ_FAKE_B, &dquot->dq_flags)) 1184 - return QUOTA_OK; 1134 + return 0; 1185 1135 1186 1136 if (dquot->dq_dqb.dqb_ihardlimit && 1187 1137 newinodes > dquot->dq_dqb.dqb_ihardlimit && 1188 1138 !ignore_hardlimit(dquot)) { 1189 1139 *warntype = QUOTA_NL_IHARDWARN; 1190 - return NO_QUOTA; 1140 + return -EDQUOT; 1191 1141 } 1192 1142 1193 1143 if (dquot->dq_dqb.dqb_isoftlimit && ··· 1196 1146 get_seconds() >= dquot->dq_dqb.dqb_itime && 1197 1147 !ignore_hardlimit(dquot)) { 1198 1148 *warntype = QUOTA_NL_ISOFTLONGWARN; 1199 - return NO_QUOTA; 1149 + return -EDQUOT; 1200 1150 } 1201 1151 1202 1152 if (dquot->dq_dqb.dqb_isoftlimit && ··· 1207 1157 sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace; 1208 1158 } 1209 1159 1210 - 
return QUOTA_OK; 1160 + return 0; 1211 1161 } 1212 1162 1213 1163 /* needs dq_data_lock */ ··· 1219 1169 *warntype = QUOTA_NL_NOWARN; 1220 1170 if (!sb_has_quota_limits_enabled(sb, dquot->dq_type) || 1221 1171 test_bit(DQ_FAKE_B, &dquot->dq_flags)) 1222 - return QUOTA_OK; 1172 + return 0; 1223 1173 1224 1174 tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace 1225 1175 + space; ··· 1229 1179 !ignore_hardlimit(dquot)) { 1230 1180 if (!prealloc) 1231 1181 *warntype = QUOTA_NL_BHARDWARN; 1232 - return NO_QUOTA; 1182 + return -EDQUOT; 1233 1183 } 1234 1184 1235 1185 if (dquot->dq_dqb.dqb_bsoftlimit && ··· 1239 1189 !ignore_hardlimit(dquot)) { 1240 1190 if (!prealloc) 1241 1191 *warntype = QUOTA_NL_BSOFTLONGWARN; 1242 - return NO_QUOTA; 1192 + return -EDQUOT; 1243 1193 } 1244 1194 1245 1195 if (dquot->dq_dqb.dqb_bsoftlimit && ··· 1255 1205 * We don't allow preallocation to exceed softlimit so exceeding will 1256 1206 * be always printed 1257 1207 */ 1258 - return NO_QUOTA; 1208 + return -EDQUOT; 1259 1209 } 1260 1210 1261 - return QUOTA_OK; 1211 + return 0; 1262 1212 } 1263 1213 1264 1214 static int info_idq_free(struct dquot *dquot, qsize_t inodes) ··· 1292 1242 return QUOTA_NL_BHARDBELOW; 1293 1243 return QUOTA_NL_NOWARN; 1294 1244 } 1245 + 1295 1246 /* 1296 - * Initialize quota pointers in inode 1297 - * We do things in a bit complicated way but by that we avoid calling 1298 - * dqget() and thus filesystem callbacks under dqptr_sem. 1247 + * Initialize quota pointers in inode 1248 + * 1249 + * We do things in a bit complicated way but by that we avoid calling 1250 + * dqget() and thus filesystem callbacks under dqptr_sem. 1251 + * 1252 + * It is better to call this function outside of any transaction as it 1253 + * might need a lot of space in journal for dquot structure allocation. 1299 1254 */ 1300 - int dquot_initialize(struct inode *inode, int type) 1255 + static void __dquot_initialize(struct inode *inode, int type) 1301 1256 { 1302 1257 unsigned int id = 0; 1303 - int cnt, ret = 0; 1304 - struct dquot *got[MAXQUOTAS] = { NULL, NULL }; 1258 + int cnt; 1259 + struct dquot *got[MAXQUOTAS]; 1305 1260 struct super_block *sb = inode->i_sb; 1261 + qsize_t rsv; 1306 1262 1307 1263 /* First test before acquiring mutex - solves deadlocks when we 1308 1264 * re-enter the quota code and are already holding the mutex */ 1309 - if (IS_NOQUOTA(inode)) 1310 - return 0; 1265 + if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) 1266 + return; 1311 1267 1312 1268 /* First get references to structures we might need. */ 1313 1269 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1270 + got[cnt] = NULL; 1314 1271 if (type != -1 && cnt != type) 1315 1272 continue; 1316 1273 switch (cnt) { ··· 1332 1275 } 1333 1276 1334 1277 down_write(&sb_dqopt(sb)->dqptr_sem); 1335 - /* Having dqptr_sem we know NOQUOTA flags can't be altered... 
*/ 1336 1278 if (IS_NOQUOTA(inode)) 1337 1279 goto out_err; 1338 1280 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { ··· 1343 1287 if (!inode->i_dquot[cnt]) { 1344 1288 inode->i_dquot[cnt] = got[cnt]; 1345 1289 got[cnt] = NULL; 1290 + /* 1291 + * Make quota reservation system happy if someone 1292 + * did a write before quota was turned on 1293 + */ 1294 + rsv = inode_get_rsv_space(inode); 1295 + if (unlikely(rsv)) 1296 + dquot_resv_space(inode->i_dquot[cnt], rsv); 1346 1297 } 1347 1298 } 1348 1299 out_err: 1349 1300 up_write(&sb_dqopt(sb)->dqptr_sem); 1350 1301 /* Drop unused references */ 1351 1302 dqput_all(got); 1352 - return ret; 1303 + } 1304 + 1305 + void dquot_initialize(struct inode *inode) 1306 + { 1307 + __dquot_initialize(inode, -1); 1353 1308 } 1354 1309 EXPORT_SYMBOL(dquot_initialize); 1355 1310 1356 1311 /* 1357 1312 * Release all quotas referenced by inode 1358 1313 */ 1359 - int dquot_drop(struct inode *inode) 1314 + static void __dquot_drop(struct inode *inode) 1360 1315 { 1361 1316 int cnt; 1362 1317 struct dquot *put[MAXQUOTAS]; ··· 1379 1312 } 1380 1313 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1381 1314 dqput_all(put); 1382 - return 0; 1315 + } 1316 + 1317 + void dquot_drop(struct inode *inode) 1318 + { 1319 + int cnt; 1320 + 1321 + if (IS_NOQUOTA(inode)) 1322 + return; 1323 + 1324 + /* 1325 + * Test before calling to rule out calls from proc and such 1326 + * where we are not allowed to block. Note that this is 1327 + * actually reliable test even without the lock - the caller 1328 + * must assure that nobody can come after the DQUOT_DROP and 1329 + * add quota pointers back anyway. 1330 + */ 1331 + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1332 + if (inode->i_dquot[cnt]) 1333 + break; 1334 + } 1335 + 1336 + if (cnt < MAXQUOTAS) 1337 + __dquot_drop(inode); 1383 1338 } 1384 1339 EXPORT_SYMBOL(dquot_drop); 1385 - 1386 - /* Wrapper to remove references to quota structures from inode */ 1387 - void vfs_dq_drop(struct inode *inode) 1388 - { 1389 - /* Here we can get arbitrary inode from clear_inode() so we have 1390 - * to be careful. OTOH we don't need locking as quota operations 1391 - * are allowed to change only at mount time */ 1392 - if (!IS_NOQUOTA(inode) && inode->i_sb && inode->i_sb->dq_op 1393 - && inode->i_sb->dq_op->drop) { 1394 - int cnt; 1395 - /* Test before calling to rule out calls from proc and such 1396 - * where we are not allowed to block. 
Note that this is 1397 - * actually reliable test even without the lock - the caller 1398 - * must assure that nobody can come after the DQUOT_DROP and 1399 - * add quota pointers back anyway */ 1400 - for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1401 - if (inode->i_dquot[cnt]) 1402 - break; 1403 - if (cnt < MAXQUOTAS) 1404 - inode->i_sb->dq_op->drop(inode); 1405 - } 1406 - } 1407 - EXPORT_SYMBOL(vfs_dq_drop); 1408 1340 1409 1341 /* 1410 1342 * inode_reserved_space is managed internally by quota, and protected by ··· 1417 1351 return inode->i_sb->dq_op->get_reserved_space(inode); 1418 1352 } 1419 1353 1420 - static void inode_add_rsv_space(struct inode *inode, qsize_t number) 1354 + void inode_add_rsv_space(struct inode *inode, qsize_t number) 1421 1355 { 1422 1356 spin_lock(&inode->i_lock); 1423 1357 *inode_reserved_space(inode) += number; 1424 1358 spin_unlock(&inode->i_lock); 1425 1359 } 1360 + EXPORT_SYMBOL(inode_add_rsv_space); 1426 1361 1427 - 1428 - static void inode_claim_rsv_space(struct inode *inode, qsize_t number) 1362 + void inode_claim_rsv_space(struct inode *inode, qsize_t number) 1429 1363 { 1430 1364 spin_lock(&inode->i_lock); 1431 1365 *inode_reserved_space(inode) -= number; 1432 1366 __inode_add_bytes(inode, number); 1433 1367 spin_unlock(&inode->i_lock); 1434 1368 } 1369 + EXPORT_SYMBOL(inode_claim_rsv_space); 1435 1370 1436 - static void inode_sub_rsv_space(struct inode *inode, qsize_t number) 1371 + void inode_sub_rsv_space(struct inode *inode, qsize_t number) 1437 1372 { 1438 1373 spin_lock(&inode->i_lock); 1439 1374 *inode_reserved_space(inode) -= number; 1440 1375 spin_unlock(&inode->i_lock); 1441 1376 } 1377 + EXPORT_SYMBOL(inode_sub_rsv_space); 1442 1378 1443 1379 static qsize_t inode_get_rsv_space(struct inode *inode) 1444 1380 { ··· 1472 1404 } 1473 1405 1474 1406 /* 1475 - * Following four functions update i_blocks+i_bytes fields and 1476 - * quota information (together with appropriate checks) 1477 - * NOTE: We absolutely rely on the fact that caller dirties 1478 - * the inode (usually macros in quotaops.h care about this) and 1479 - * holds a handle for the current transaction so that dquot write and 1480 - * inode write go into the same transaction. 1407 + * This functions updates i_blocks+i_bytes fields and quota information 1408 + * (together with appropriate checks). 1409 + * 1410 + * NOTE: We absolutely rely on the fact that caller dirties the inode 1411 + * (usually helpers in quotaops.h care about this) and holds a handle for 1412 + * the current transaction so that dquot write and inode write go into the 1413 + * same transaction. 
1481 1414 */ 1482 1415 1483 1416 /* 1484 1417 * This operation can block, but only after everything is updated 1485 1418 */ 1486 1419 int __dquot_alloc_space(struct inode *inode, qsize_t number, 1487 - int warn, int reserve) 1420 + int warn, int reserve) 1488 1421 { 1489 - int cnt, ret = QUOTA_OK; 1422 + int cnt, ret = 0; 1490 1423 char warntype[MAXQUOTAS]; 1491 1424 1492 1425 /* 1493 1426 * First test before acquiring mutex - solves deadlocks when we 1494 1427 * re-enter the quota code and are already holding the mutex 1495 1428 */ 1496 - if (IS_NOQUOTA(inode)) { 1429 + if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) { 1497 1430 inode_incr_space(inode, number, reserve); 1498 1431 goto out; 1499 1432 } 1500 1433 1501 1434 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1502 - if (IS_NOQUOTA(inode)) { 1503 - inode_incr_space(inode, number, reserve); 1504 - goto out_unlock; 1505 - } 1506 - 1507 1435 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1508 1436 warntype[cnt] = QUOTA_NL_NOWARN; 1509 1437 ··· 1507 1443 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1508 1444 if (!inode->i_dquot[cnt]) 1509 1445 continue; 1510 - if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) 1511 - == NO_QUOTA) { 1512 - ret = NO_QUOTA; 1446 + ret = check_bdq(inode->i_dquot[cnt], number, !warn, 1447 + warntype+cnt); 1448 + if (ret) { 1513 1449 spin_unlock(&dq_data_lock); 1514 1450 goto out_flush_warn; 1515 1451 } ··· 1530 1466 mark_all_dquot_dirty(inode->i_dquot); 1531 1467 out_flush_warn: 1532 1468 flush_warnings(inode->i_dquot, warntype); 1533 - out_unlock: 1534 1469 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1535 1470 out: 1536 1471 return ret; 1537 1472 } 1538 - 1539 - int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) 1540 - { 1541 - return __dquot_alloc_space(inode, number, warn, 0); 1542 - } 1543 - EXPORT_SYMBOL(dquot_alloc_space); 1544 - 1545 - int dquot_reserve_space(struct inode *inode, qsize_t number, int warn) 1546 - { 1547 - return __dquot_alloc_space(inode, number, warn, 1); 1548 - } 1549 - EXPORT_SYMBOL(dquot_reserve_space); 1473 + EXPORT_SYMBOL(__dquot_alloc_space); 1550 1474 1551 1475 /* 1552 1476 * This operation can block, but only after everything is updated 1553 1477 */ 1554 - int dquot_alloc_inode(const struct inode *inode, qsize_t number) 1478 + int dquot_alloc_inode(const struct inode *inode) 1555 1479 { 1556 - int cnt, ret = NO_QUOTA; 1480 + int cnt, ret = 0; 1557 1481 char warntype[MAXQUOTAS]; 1558 1482 1559 1483 /* First test before acquiring mutex - solves deadlocks when we 1560 1484 * re-enter the quota code and are already holding the mutex */ 1561 - if (IS_NOQUOTA(inode)) 1562 - return QUOTA_OK; 1485 + if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) 1486 + return 0; 1563 1487 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1564 1488 warntype[cnt] = QUOTA_NL_NOWARN; 1565 1489 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1566 - if (IS_NOQUOTA(inode)) { 1567 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1568 - return QUOTA_OK; 1569 - } 1570 1490 spin_lock(&dq_data_lock); 1571 1491 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1572 1492 if (!inode->i_dquot[cnt]) 1573 1493 continue; 1574 - if (check_idq(inode->i_dquot[cnt], number, warntype+cnt) 1575 - == NO_QUOTA) 1494 + ret = check_idq(inode->i_dquot[cnt], 1, warntype + cnt); 1495 + if (ret) 1576 1496 goto warn_put_all; 1577 1497 } 1578 1498 1579 1499 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1580 1500 if (!inode->i_dquot[cnt]) 1581 1501 continue; 1582 - dquot_incr_inodes(inode->i_dquot[cnt], number); 1502 + 
dquot_incr_inodes(inode->i_dquot[cnt], 1); 1583 1503 } 1584 - ret = QUOTA_OK; 1504 + 1585 1505 warn_put_all: 1586 1506 spin_unlock(&dq_data_lock); 1587 - if (ret == QUOTA_OK) 1507 + if (ret == 0) 1588 1508 mark_all_dquot_dirty(inode->i_dquot); 1589 1509 flush_warnings(inode->i_dquot, warntype); 1590 1510 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); ··· 1576 1528 } 1577 1529 EXPORT_SYMBOL(dquot_alloc_inode); 1578 1530 1579 - int dquot_claim_space(struct inode *inode, qsize_t number) 1531 + /* 1532 + * Convert in-memory reserved quotas to real consumed quotas 1533 + */ 1534 + int dquot_claim_space_nodirty(struct inode *inode, qsize_t number) 1580 1535 { 1581 1536 int cnt; 1582 - int ret = QUOTA_OK; 1583 1537 1584 - if (IS_NOQUOTA(inode)) { 1538 + if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) { 1585 1539 inode_claim_rsv_space(inode, number); 1586 - goto out; 1540 + return 0; 1587 1541 } 1588 1542 1589 1543 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1590 - if (IS_NOQUOTA(inode)) { 1591 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1592 - inode_claim_rsv_space(inode, number); 1593 - goto out; 1594 - } 1595 - 1596 1544 spin_lock(&dq_data_lock); 1597 1545 /* Claim reserved quotas to allocated quotas */ 1598 1546 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { ··· 1601 1557 spin_unlock(&dq_data_lock); 1602 1558 mark_all_dquot_dirty(inode->i_dquot); 1603 1559 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1604 - out: 1605 - return ret; 1560 + return 0; 1606 1561 } 1607 - EXPORT_SYMBOL(dquot_claim_space); 1562 + EXPORT_SYMBOL(dquot_claim_space_nodirty); 1608 1563 1609 1564 /* 1610 1565 * This operation can block, but only after everything is updated 1611 1566 */ 1612 - int __dquot_free_space(struct inode *inode, qsize_t number, int reserve) 1567 + void __dquot_free_space(struct inode *inode, qsize_t number, int reserve) 1613 1568 { 1614 1569 unsigned int cnt; 1615 1570 char warntype[MAXQUOTAS]; 1616 1571 1617 1572 /* First test before acquiring mutex - solves deadlocks when we 1618 1573 * re-enter the quota code and are already holding the mutex */ 1619 - if (IS_NOQUOTA(inode)) { 1620 - out_sub: 1574 + if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) { 1621 1575 inode_decr_space(inode, number, reserve); 1622 - return QUOTA_OK; 1576 + return; 1623 1577 } 1624 1578 1625 1579 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1626 - /* Now recheck reliably when holding dqptr_sem */ 1627 - if (IS_NOQUOTA(inode)) { 1628 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1629 - goto out_sub; 1630 - } 1631 1580 spin_lock(&dq_data_lock); 1632 1581 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1633 1582 if (!inode->i_dquot[cnt]) ··· 1640 1603 out_unlock: 1641 1604 flush_warnings(inode->i_dquot, warntype); 1642 1605 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1643 - return QUOTA_OK; 1644 1606 } 1645 - 1646 - int dquot_free_space(struct inode *inode, qsize_t number) 1647 - { 1648 - return __dquot_free_space(inode, number, 0); 1649 - } 1650 - EXPORT_SYMBOL(dquot_free_space); 1651 - 1652 - /* 1653 - * Release reserved quota space 1654 - */ 1655 - void dquot_release_reserved_space(struct inode *inode, qsize_t number) 1656 - { 1657 - __dquot_free_space(inode, number, 1); 1658 - 1659 - } 1660 - EXPORT_SYMBOL(dquot_release_reserved_space); 1607 + EXPORT_SYMBOL(__dquot_free_space); 1661 1608 1662 1609 /* 1663 1610 * This operation can block, but only after everything is updated 1664 1611 */ 1665 - int dquot_free_inode(const struct inode *inode, qsize_t number) 1612 + void dquot_free_inode(const struct inode *inode) 
1666 1613 { 1667 1614 unsigned int cnt; 1668 1615 char warntype[MAXQUOTAS]; 1669 1616 1670 1617 /* First test before acquiring mutex - solves deadlocks when we 1671 1618 * re-enter the quota code and are already holding the mutex */ 1672 - if (IS_NOQUOTA(inode)) 1673 - return QUOTA_OK; 1619 + if (!sb_any_quota_active(inode->i_sb) || IS_NOQUOTA(inode)) 1620 + return; 1674 1621 1675 1622 down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1676 - /* Now recheck reliably when holding dqptr_sem */ 1677 - if (IS_NOQUOTA(inode)) { 1678 - up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1679 - return QUOTA_OK; 1680 - } 1681 1623 spin_lock(&dq_data_lock); 1682 1624 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1683 1625 if (!inode->i_dquot[cnt]) 1684 1626 continue; 1685 - warntype[cnt] = info_idq_free(inode->i_dquot[cnt], number); 1686 - dquot_decr_inodes(inode->i_dquot[cnt], number); 1627 + warntype[cnt] = info_idq_free(inode->i_dquot[cnt], 1); 1628 + dquot_decr_inodes(inode->i_dquot[cnt], 1); 1687 1629 } 1688 1630 spin_unlock(&dq_data_lock); 1689 1631 mark_all_dquot_dirty(inode->i_dquot); 1690 1632 flush_warnings(inode->i_dquot, warntype); 1691 1633 up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); 1692 - return QUOTA_OK; 1693 1634 } 1694 1635 EXPORT_SYMBOL(dquot_free_inode); 1695 1636 ··· 1677 1662 * This operation can block, but only after everything is updated 1678 1663 * A transaction must be started when entering this function. 1679 1664 */ 1680 - int dquot_transfer(struct inode *inode, struct iattr *iattr) 1665 + static int __dquot_transfer(struct inode *inode, qid_t *chid, unsigned long mask) 1681 1666 { 1682 1667 qsize_t space, cur_space; 1683 1668 qsize_t rsv_space = 0; 1684 1669 struct dquot *transfer_from[MAXQUOTAS]; 1685 1670 struct dquot *transfer_to[MAXQUOTAS]; 1686 - int cnt, ret = QUOTA_OK; 1687 - int chuid = iattr->ia_valid & ATTR_UID && inode->i_uid != iattr->ia_uid, 1688 - chgid = iattr->ia_valid & ATTR_GID && inode->i_gid != iattr->ia_gid; 1671 + int cnt, ret = 0; 1689 1672 char warntype_to[MAXQUOTAS]; 1690 1673 char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS]; 1691 1674 1692 1675 /* First test before acquiring mutex - solves deadlocks when we 1693 1676 * re-enter the quota code and are already holding the mutex */ 1694 1677 if (IS_NOQUOTA(inode)) 1695 - return QUOTA_OK; 1678 + return 0; 1696 1679 /* Initialize the arrays */ 1697 1680 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1698 1681 transfer_from[cnt] = NULL; 1699 1682 transfer_to[cnt] = NULL; 1700 1683 warntype_to[cnt] = QUOTA_NL_NOWARN; 1701 1684 } 1702 - if (chuid) 1703 - transfer_to[USRQUOTA] = dqget(inode->i_sb, iattr->ia_uid, 1704 - USRQUOTA); 1705 - if (chgid) 1706 - transfer_to[GRPQUOTA] = dqget(inode->i_sb, iattr->ia_gid, 1707 - GRPQUOTA); 1708 - 1685 + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 1686 + if (mask & (1 << cnt)) 1687 + transfer_to[cnt] = dqget(inode->i_sb, chid[cnt], cnt); 1688 + } 1709 1689 down_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1710 - /* Now recheck reliably when holding dqptr_sem */ 1711 1690 if (IS_NOQUOTA(inode)) { /* File without quota accounting? 
*/ 1712 1691 up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); 1713 1692 goto put_all; ··· 1715 1706 if (!transfer_to[cnt]) 1716 1707 continue; 1717 1708 transfer_from[cnt] = inode->i_dquot[cnt]; 1718 - if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) == 1719 - NO_QUOTA || check_bdq(transfer_to[cnt], space, 0, 1720 - warntype_to + cnt) == NO_QUOTA) 1709 + ret = check_idq(transfer_to[cnt], 1, warntype_to + cnt); 1710 + if (ret) 1711 + goto over_quota; 1712 + ret = check_bdq(transfer_to[cnt], space, 0, warntype_to + cnt); 1713 + if (ret) 1721 1714 goto over_quota; 1722 1715 } 1723 1716 ··· 1773 1762 /* Clear dquot pointers we don't want to dqput() */ 1774 1763 for (cnt = 0; cnt < MAXQUOTAS; cnt++) 1775 1764 transfer_from[cnt] = NULL; 1776 - ret = NO_QUOTA; 1777 1765 goto warn_put_all; 1778 1766 } 1779 - EXPORT_SYMBOL(dquot_transfer); 1780 1767 1781 - /* Wrapper for transferring ownership of an inode */ 1782 - int vfs_dq_transfer(struct inode *inode, struct iattr *iattr) 1768 + /* Wrapper for transferring ownership of an inode for uid/gid only 1769 + * Called from FSXXX_setattr() 1770 + */ 1771 + int dquot_transfer(struct inode *inode, struct iattr *iattr) 1783 1772 { 1773 + qid_t chid[MAXQUOTAS]; 1774 + unsigned long mask = 0; 1775 + 1776 + if (iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) { 1777 + mask |= 1 << USRQUOTA; 1778 + chid[USRQUOTA] = iattr->ia_uid; 1779 + } 1780 + if (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid) { 1781 + mask |= 1 << GRPQUOTA; 1782 + chid[GRPQUOTA] = iattr->ia_gid; 1783 + } 1784 1784 if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode)) { 1785 - vfs_dq_init(inode); 1786 - if (inode->i_sb->dq_op->transfer(inode, iattr) == NO_QUOTA) 1787 - return 1; 1785 + dquot_initialize(inode); 1786 + return __dquot_transfer(inode, chid, mask); 1788 1787 } 1789 1788 return 0; 1790 1789 } 1791 - EXPORT_SYMBOL(vfs_dq_transfer); 1790 + EXPORT_SYMBOL(dquot_transfer); 1792 1791 1793 1792 /* 1794 1793 * Write info of quota file to disk ··· 1819 1798 * Definitions of diskquota operations. 1820 1799 */ 1821 1800 const struct dquot_operations dquot_operations = { 1822 - .initialize = dquot_initialize, 1823 - .drop = dquot_drop, 1824 - .alloc_space = dquot_alloc_space, 1825 - .alloc_inode = dquot_alloc_inode, 1826 - .free_space = dquot_free_space, 1827 - .free_inode = dquot_free_inode, 1828 - .transfer = dquot_transfer, 1829 1801 .write_dquot = dquot_commit, 1830 1802 .acquire_dquot = dquot_acquire, 1831 1803 .release_dquot = dquot_release, ··· 1827 1813 .alloc_dquot = dquot_alloc, 1828 1814 .destroy_dquot = dquot_destroy, 1829 1815 }; 1816 + 1817 + /* 1818 + * Generic helper for ->open on filesystems supporting disk quotas. 1819 + */ 1820 + int dquot_file_open(struct inode *inode, struct file *file) 1821 + { 1822 + int error; 1823 + 1824 + error = generic_file_open(inode, file); 1825 + if (!error && (file->f_mode & FMODE_WRITE)) 1826 + dquot_initialize(inode); 1827 + return error; 1828 + } 1829 + EXPORT_SYMBOL(dquot_file_open); 1830 1830 1831 1831 /* 1832 1832 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount) ··· 2021 1993 } 2022 1994 2023 1995 if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) { 2024 - /* As we bypass the pagecache we must now flush the inode so 2025 - * that we see all the changes from userspace... 
*/ 2026 - write_inode_now(inode, 1); 2027 - /* And now flush the block cache so that kernel sees the 2028 - * changes */ 1996 + /* As we bypass the pagecache we must now flush all the 1997 + * dirty data and invalidate caches so that kernel sees 1998 + * changes from userspace. It is not enough to just flush 1999 + * the quota file since if blocksize < pagesize, invalidation 2000 + * of the cache could fail because of other unrelated dirty 2001 + * data */ 2002 + sync_filesystem(sb); 2029 2003 invalidate_bdev(sb->s_bdev); 2030 2004 } 2031 2005 mutex_lock(&dqopt->dqonoff_mutex); ··· 2040 2010 /* We don't want quota and atime on quota files (deadlocks 2041 2011 * possible) Also nobody should write to the file - we use 2042 2012 * special IO operations which ignore the immutable bit. */ 2043 - down_write(&dqopt->dqptr_sem); 2044 2013 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); 2045 2014 oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | 2046 2015 S_NOQUOTA); 2047 2016 inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE; 2048 2017 mutex_unlock(&inode->i_mutex); 2049 - up_write(&dqopt->dqptr_sem); 2050 - sb->dq_op->drop(inode); 2018 + /* 2019 + * When S_NOQUOTA is set, remove dquot references as no more 2020 + * references can be added 2021 + */ 2022 + __dquot_drop(inode); 2051 2023 } 2052 2024 2053 2025 error = -EIO; ··· 2085 2053 iput(inode); 2086 2054 out_lock: 2087 2055 if (oldflags != -1) { 2088 - down_write(&dqopt->dqptr_sem); 2089 2056 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); 2090 2057 /* Set the flags back (in the case of accidental quotaon() 2091 2058 * on a wrong file we don't want to mess up the flags) */ 2092 2059 inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE); 2093 2060 inode->i_flags |= oldflags; 2094 2061 mutex_unlock(&inode->i_mutex); 2095 - up_write(&dqopt->dqptr_sem); 2096 2062 } 2097 2063 mutex_unlock(&dqopt->dqonoff_mutex); 2098 2064 out_fmt:
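With QUOTA_OK and NO_QUOTA gone, the charging helpers above return 0 or a
negative errno that the filesystem passes straight up, and the freeing helpers
return void since freeing cannot fail. A minimal sketch of the post-patch
calling convention; the function names and the on-disk allocation step are
illustrative, not part of this series:

	static int example_alloc_blocks(struct inode *inode, qsize_t bytes)
	{
		/* charge quota first: 0 on success, -EDQUOT etc. on failure */
		int err = dquot_alloc_space_nodirty(inode, bytes);

		if (err)
			return err;
		/* ... perform the actual on-disk allocation here ... */
		mark_inode_dirty(inode);
		return 0;
	}

	static void example_free_blocks(struct inode *inode, qsize_t bytes)
	{
		/* the free side can no longer fail, so nothing to check */
		dquot_free_space_nodirty(inode, bytes);
	}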
+95
fs/quota/netlink.c
··· 1 + 
 2 + #include <linux/cred.h> 
 3 + #include <linux/init.h> 
 4 + #include <linux/module.h> 
 5 + #include <linux/kernel.h> 
 6 + #include <linux/quotaops.h> 
 7 + #include <linux/sched.h> 
 8 + #include <net/netlink.h> 
 9 + #include <net/genetlink.h> 
 10 + 
 11 + /* Netlink family structure for quota */ 
 12 + static struct genl_family quota_genl_family = { 
 13 + .id = GENL_ID_GENERATE, 
 14 + .hdrsize = 0, 
 15 + .name = "VFS_DQUOT", 
 16 + .version = 1, 
 17 + .maxattr = QUOTA_NL_A_MAX, 
 18 + }; 
 19 + 
 20 + /** 
 21 + * quota_send_warning - Send warning to userspace about exceeded quota 
 22 + * @type: The quota type: USRQUOTA, GRPQUOTA,... 
 23 + * @id: The user or group id of the quota that was exceeded 
 24 + * @dev: The device on which the fs is mounted (sb->s_dev) 
 25 + * @warntype: The type of the warning: QUOTA_NL_... 
 26 + * 
 27 + * This can be used by filesystems (including those which don't use 
 28 + * dquot) to send a message to userspace relating to quota limits. 
 29 + * 
 30 + */ 
 31 + 
 32 + void quota_send_warning(short type, unsigned int id, dev_t dev, 
 33 + const char warntype) 
 34 + { 
 35 + static atomic_t seq; 
 36 + struct sk_buff *skb; 
 37 + void *msg_head; 
 38 + int ret; 
 39 + int msg_size = 4 * nla_total_size(sizeof(u32)) + 
 40 + 2 * nla_total_size(sizeof(u64)); 
 41 + 
 42 + /* We have to allocate using GFP_NOFS as we are called from a 
 43 + * filesystem performing write and thus further recursion into 
 44 + * the fs to free some data could cause deadlocks. */ 
 45 + skb = genlmsg_new(msg_size, GFP_NOFS); 
 46 + if (!skb) { 
 47 + printk(KERN_ERR 
 48 + "VFS: Not enough memory to send quota warning.\n"); 
 49 + return; 
 50 + } 
 51 + msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq), 
 52 + &quota_genl_family, 0, QUOTA_NL_C_WARNING); 
 53 + if (!msg_head) { 
 54 + printk(KERN_ERR 
 55 + "VFS: Cannot store netlink header in quota warning.\n"); 
 56 + goto err_out; 
 57 + } 
 58 + ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, type); 
 59 + if (ret) 
 60 + goto attr_err_out; 
 61 + ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, id); 
 62 + if (ret) 
 63 + goto attr_err_out; 
 64 + ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype); 
 65 + if (ret) 
 66 + goto attr_err_out; 
 67 + ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev)); 
 68 + if (ret) 
 69 + goto attr_err_out; 
 70 + ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev)); 
 71 + if (ret) 
 72 + goto attr_err_out; 
 73 + ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid()); 
 74 + if (ret) 
 75 + goto attr_err_out; 
 76 + genlmsg_end(skb, msg_head); 
 77 + 
 78 + genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS); 
 79 + return; 
 80 + attr_err_out: 
 81 + printk(KERN_ERR "VFS: Not enough space to compose quota message!\n"); 
 82 + err_out: 
 83 + kfree_skb(skb); 
 84 + } 
 85 + EXPORT_SYMBOL(quota_send_warning); 
 86 + 
 87 + static int __init quota_init(void) 
 88 + { 
 89 + if (genl_register_family(&quota_genl_family) != 0) 
 90 + printk(KERN_ERR 
 91 + "VFS: Failed to create quota netlink interface.\n"); 
 92 + return 0; 
 93 + }; 
 94 + 
 95 + module_init(quota_init);
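The genetlink warning path moves into its own file unchanged, and
quota_send_warning() stays exported so that any filesystem, including one with
its own quota implementation, can notify userspace. A hedged call-site sketch;
the used_blocks/hard_limit check is hypothetical:

	/* in a filesystem that enforces its own block limits */
	if (used_blocks > hard_limit)
		quota_send_warning(USRQUOTA, inode->i_uid,
				   inode->i_sb->s_dev, QUOTA_NL_BHARDWARN);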
+234 -513
fs/quota/quota.c
··· 10 10 #include <linux/slab.h> 
 11 11 #include <asm/current.h> 
 12 12 #include <asm/uaccess.h> 
 13 - #include <linux/compat.h> 
 14 13 #include <linux/kernel.h> 
 15 14 #include <linux/security.h> 
 16 15 #include <linux/syscalls.h> 
··· 17 18 #include <linux/capability.h> 
 18 19 #include <linux/quotaops.h> 
 19 20 #include <linux/types.h> 
 20 - #include <net/netlink.h> 
 21 - #include <net/genetlink.h> 
 21 + #include <linux/writeback.h> 
 22 22 
 23 - /* Check validity of generic quotactl commands */ 
 24 - static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, 
 25 - qid_t id) 
 23 + static int check_quotactl_permission(struct super_block *sb, int type, int cmd, 
 24 + qid_t id) 
 26 25 { 
 27 - if (type >= MAXQUOTAS) 
 28 - return -EINVAL; 
 29 - if (!sb && cmd != Q_SYNC) 
 30 - return -ENODEV; 
 31 - /* Is operation supported? */ 
 32 - if (sb && !sb->s_qcop) 
 33 - return -ENOSYS; 
 34 - 
 35 26 switch (cmd) { 
 36 - case Q_GETFMT: 
 27 + /* these commands do not require any special privileges */ 
 28 + case Q_GETFMT: 
 29 + case Q_SYNC: 
 30 + case Q_GETINFO: 
 31 + case Q_XGETQSTAT: 
 32 + case Q_XQUOTASYNC: 
 33 + break; 
 34 + /* allow querying information for dquots we "own" */ 
 35 + case Q_GETQUOTA: 
 36 + case Q_XGETQUOTA: 
 37 + if ((type == USRQUOTA && current_euid() == id) || 
 38 + (type == GRPQUOTA && in_egroup_p(id))) 
 37 39 break; 
 38 - case Q_QUOTAON: 
 39 - if (!sb->s_qcop->quota_on) 
 40 - return -ENOSYS; 
 41 - break; 
 42 - case Q_QUOTAOFF: 
 43 - if (!sb->s_qcop->quota_off) 
 44 - return -ENOSYS; 
 45 - break; 
 46 - case Q_SETINFO: 
 47 - if (!sb->s_qcop->set_info) 
 48 - return -ENOSYS; 
 49 - break; 
 50 - case Q_GETINFO: 
 51 - if (!sb->s_qcop->get_info) 
 52 - return -ENOSYS; 
 53 - break; 
 54 - case Q_SETQUOTA: 
 55 - if (!sb->s_qcop->set_dqblk) 
 56 - return -ENOSYS; 
 57 - break; 
 58 - case Q_GETQUOTA: 
 59 - if (!sb->s_qcop->get_dqblk) 
 60 - return -ENOSYS; 
 61 - break; 
 62 - case Q_SYNC: 
 63 - if (sb && !sb->s_qcop->quota_sync) 
 64 - return -ENOSYS; 
 65 - break; 
 66 - default: 
 67 - return -EINVAL; 
 68 - } 
 69 - 
 70 - /* Is quota turned on for commands which need it? 
*/ 71 - switch (cmd) { 72 - case Q_GETFMT: 73 - case Q_GETINFO: 74 - case Q_SETINFO: 75 - case Q_SETQUOTA: 76 - case Q_GETQUOTA: 77 - /* This is just an informative test so we are satisfied 78 - * without the lock */ 79 - if (!sb_has_quota_active(sb, type)) 80 - return -ESRCH; 81 - } 82 - 83 - /* Check privileges */ 84 - if (cmd == Q_GETQUOTA) { 85 - if (((type == USRQUOTA && current_euid() != id) || 86 - (type == GRPQUOTA && !in_egroup_p(id))) && 87 - !capable(CAP_SYS_ADMIN)) 88 - return -EPERM; 89 - } 90 - else if (cmd != Q_GETFMT && cmd != Q_SYNC && cmd != Q_GETINFO) 91 - if (!capable(CAP_SYS_ADMIN)) 92 - return -EPERM; 93 - 94 - return 0; 95 - } 96 - 97 - /* Check validity of XFS Quota Manager commands */ 98 - static int xqm_quotactl_valid(struct super_block *sb, int type, int cmd, 99 - qid_t id) 100 - { 101 - if (type >= XQM_MAXQUOTAS) 102 - return -EINVAL; 103 - if (!sb) 104 - return -ENODEV; 105 - if (!sb->s_qcop) 106 - return -ENOSYS; 107 - 108 - switch (cmd) { 109 - case Q_XQUOTAON: 110 - case Q_XQUOTAOFF: 111 - case Q_XQUOTARM: 112 - if (!sb->s_qcop->set_xstate) 113 - return -ENOSYS; 114 - break; 115 - case Q_XGETQSTAT: 116 - if (!sb->s_qcop->get_xstate) 117 - return -ENOSYS; 118 - break; 119 - case Q_XSETQLIM: 120 - if (!sb->s_qcop->set_xquota) 121 - return -ENOSYS; 122 - break; 123 - case Q_XGETQUOTA: 124 - if (!sb->s_qcop->get_xquota) 125 - return -ENOSYS; 126 - break; 127 - case Q_XQUOTASYNC: 128 - if (!sb->s_qcop->quota_sync) 129 - return -ENOSYS; 130 - break; 131 - default: 132 - return -EINVAL; 133 - } 134 - 135 - /* Check privileges */ 136 - if (cmd == Q_XGETQUOTA) { 137 - if (((type == XQM_USRQUOTA && current_euid() != id) || 138 - (type == XQM_GRPQUOTA && !in_egroup_p(id))) && 139 - !capable(CAP_SYS_ADMIN)) 140 - return -EPERM; 141 - } else if (cmd != Q_XGETQSTAT && cmd != Q_XQUOTASYNC) { 40 + /*FALLTHROUGH*/ 41 + default: 142 42 if (!capable(CAP_SYS_ADMIN)) 143 43 return -EPERM; 144 44 } 145 45 146 - return 0; 46 + return security_quotactl(cmd, type, id, sb); 147 47 } 148 48 149 - static int check_quotactl_valid(struct super_block *sb, int type, int cmd, 150 - qid_t id) 151 - { 152 - int error; 153 - 154 - if (XQM_COMMAND(cmd)) 155 - error = xqm_quotactl_valid(sb, type, cmd, id); 156 - else 157 - error = generic_quotactl_valid(sb, type, cmd, id); 158 - if (!error) 159 - error = security_quotactl(cmd, type, id, sb); 160 - return error; 161 - } 162 - 163 - #ifdef CONFIG_QUOTA 164 - void sync_quota_sb(struct super_block *sb, int type) 165 - { 166 - int cnt; 167 - 168 - if (!sb->s_qcop->quota_sync) 169 - return; 170 - 171 - sb->s_qcop->quota_sync(sb, type); 172 - 173 - if (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE) 174 - return; 175 - /* This is not very clever (and fast) but currently I don't know about 176 - * any other simple way of getting quota data to disk and we must get 177 - * them there for userspace to be visible... */ 178 - if (sb->s_op->sync_fs) 179 - sb->s_op->sync_fs(sb, 1); 180 - sync_blockdev(sb->s_bdev); 181 - 182 - /* 183 - * Now when everything is written we can discard the pagecache so 184 - * that userspace sees the changes. 
185 - */ 186 - mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); 187 - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 188 - if (type != -1 && cnt != type) 189 - continue; 190 - if (!sb_has_quota_active(sb, cnt)) 191 - continue; 192 - mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, 193 - I_MUTEX_QUOTA); 194 - truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0); 195 - mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex); 196 - } 197 - mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); 198 - } 199 - #endif 200 - 201 - static void sync_dquots(int type) 49 + static int quota_sync_all(int type) 202 50 { 203 51 struct super_block *sb; 204 - int cnt; 52 + int ret; 53 + 54 + if (type >= MAXQUOTAS) 55 + return -EINVAL; 56 + ret = security_quotactl(Q_SYNC, type, 0, NULL); 57 + if (ret) 58 + return ret; 205 59 206 60 spin_lock(&sb_lock); 207 61 restart: 208 62 list_for_each_entry(sb, &super_blocks, s_list) { 209 - /* This test just improves performance so it needn't be 210 - * reliable... */ 211 - for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 212 - if (type != -1 && type != cnt) 213 - continue; 214 - if (!sb_has_quota_active(sb, cnt)) 215 - continue; 216 - if (!info_dirty(&sb_dqopt(sb)->info[cnt]) && 217 - list_empty(&sb_dqopt(sb)->info[cnt].dqi_dirty_list)) 218 - continue; 219 - break; 220 - } 221 - if (cnt == MAXQUOTAS) 63 + if (!sb->s_qcop || !sb->s_qcop->quota_sync) 222 64 continue; 65 + 223 66 sb->s_count++; 224 67 spin_unlock(&sb_lock); 225 68 down_read(&sb->s_umount); 226 69 if (sb->s_root) 227 - sync_quota_sb(sb, type); 70 + sb->s_qcop->quota_sync(sb, type, 1); 228 71 up_read(&sb->s_umount); 229 72 spin_lock(&sb_lock); 230 73 if (__put_super_and_need_restart(sb)) 231 74 goto restart; 232 75 } 233 76 spin_unlock(&sb_lock); 77 + 78 + return 0; 79 + } 80 + 81 + static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id, 82 + void __user *addr) 83 + { 84 + char *pathname; 85 + int ret = -ENOSYS; 86 + 87 + pathname = getname(addr); 88 + if (IS_ERR(pathname)) 89 + return PTR_ERR(pathname); 90 + if (sb->s_qcop->quota_on) 91 + ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0); 92 + putname(pathname); 93 + return ret; 94 + } 95 + 96 + static int quota_getfmt(struct super_block *sb, int type, void __user *addr) 97 + { 98 + __u32 fmt; 99 + 100 + down_read(&sb_dqopt(sb)->dqptr_sem); 101 + if (!sb_has_quota_active(sb, type)) { 102 + up_read(&sb_dqopt(sb)->dqptr_sem); 103 + return -ESRCH; 104 + } 105 + fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id; 106 + up_read(&sb_dqopt(sb)->dqptr_sem); 107 + if (copy_to_user(addr, &fmt, sizeof(fmt))) 108 + return -EFAULT; 109 + return 0; 110 + } 111 + 112 + static int quota_getinfo(struct super_block *sb, int type, void __user *addr) 113 + { 114 + struct if_dqinfo info; 115 + int ret; 116 + 117 + if (!sb_has_quota_active(sb, type)) 118 + return -ESRCH; 119 + if (!sb->s_qcop->get_info) 120 + return -ENOSYS; 121 + ret = sb->s_qcop->get_info(sb, type, &info); 122 + if (!ret && copy_to_user(addr, &info, sizeof(info))) 123 + return -EFAULT; 124 + return ret; 125 + } 126 + 127 + static int quota_setinfo(struct super_block *sb, int type, void __user *addr) 128 + { 129 + struct if_dqinfo info; 130 + 131 + if (copy_from_user(&info, addr, sizeof(info))) 132 + return -EFAULT; 133 + if (!sb_has_quota_active(sb, type)) 134 + return -ESRCH; 135 + if (!sb->s_qcop->set_info) 136 + return -ENOSYS; 137 + return sb->s_qcop->set_info(sb, type, &info); 138 + } 139 + 140 + static int quota_getquota(struct super_block *sb, int type, qid_t id, 141 + void __user *addr) 142 + { 143 + 
struct if_dqblk idq; 144 + int ret; 145 + 146 + if (!sb_has_quota_active(sb, type)) 147 + return -ESRCH; 148 + if (!sb->s_qcop->get_dqblk) 149 + return -ENOSYS; 150 + ret = sb->s_qcop->get_dqblk(sb, type, id, &idq); 151 + if (ret) 152 + return ret; 153 + if (copy_to_user(addr, &idq, sizeof(idq))) 154 + return -EFAULT; 155 + return 0; 156 + } 157 + 158 + static int quota_setquota(struct super_block *sb, int type, qid_t id, 159 + void __user *addr) 160 + { 161 + struct if_dqblk idq; 162 + 163 + if (copy_from_user(&idq, addr, sizeof(idq))) 164 + return -EFAULT; 165 + if (!sb_has_quota_active(sb, type)) 166 + return -ESRCH; 167 + if (!sb->s_qcop->set_dqblk) 168 + return -ENOSYS; 169 + return sb->s_qcop->set_dqblk(sb, type, id, &idq); 170 + } 171 + 172 + static int quota_setxstate(struct super_block *sb, int cmd, void __user *addr) 173 + { 174 + __u32 flags; 175 + 176 + if (copy_from_user(&flags, addr, sizeof(flags))) 177 + return -EFAULT; 178 + if (!sb->s_qcop->set_xstate) 179 + return -ENOSYS; 180 + return sb->s_qcop->set_xstate(sb, flags, cmd); 181 + } 182 + 183 + static int quota_getxstate(struct super_block *sb, void __user *addr) 184 + { 185 + struct fs_quota_stat fqs; 186 + int ret; 187 + 188 + if (!sb->s_qcop->get_xstate) 189 + return -ENOSYS; 190 + ret = sb->s_qcop->get_xstate(sb, &fqs); 191 + if (!ret && copy_to_user(addr, &fqs, sizeof(fqs))) 192 + return -EFAULT; 193 + return ret; 194 + } 195 + 196 + static int quota_setxquota(struct super_block *sb, int type, qid_t id, 197 + void __user *addr) 198 + { 199 + struct fs_disk_quota fdq; 200 + 201 + if (copy_from_user(&fdq, addr, sizeof(fdq))) 202 + return -EFAULT; 203 + if (!sb->s_qcop->set_xquota) 204 + return -ENOSYS; 205 + return sb->s_qcop->set_xquota(sb, type, id, &fdq); 206 + } 207 + 208 + static int quota_getxquota(struct super_block *sb, int type, qid_t id, 209 + void __user *addr) 210 + { 211 + struct fs_disk_quota fdq; 212 + int ret; 213 + 214 + if (!sb->s_qcop->get_xquota) 215 + return -ENOSYS; 216 + ret = sb->s_qcop->get_xquota(sb, type, id, &fdq); 217 + if (!ret && copy_to_user(addr, &fdq, sizeof(fdq))) 218 + return -EFAULT; 219 + return ret; 234 220 } 235 221 236 222 /* Copy parameters and call proper function */ ··· 224 240 { 225 241 int ret; 226 242 243 + if (type >= (XQM_COMMAND(cmd) ? 
XQM_MAXQUOTAS : MAXQUOTAS)) 244 + return -EINVAL; 245 + if (!sb->s_qcop) 246 + return -ENOSYS; 247 + 248 + ret = check_quotactl_permission(sb, type, cmd, id); 249 + if (ret < 0) 250 + return ret; 251 + 227 252 switch (cmd) { 228 - case Q_QUOTAON: { 229 - char *pathname; 230 - 231 - pathname = getname(addr); 232 - if (IS_ERR(pathname)) 233 - return PTR_ERR(pathname); 234 - ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0); 235 - putname(pathname); 236 - return ret; 237 - } 238 - case Q_QUOTAOFF: 239 - return sb->s_qcop->quota_off(sb, type, 0); 240 - 241 - case Q_GETFMT: { 242 - __u32 fmt; 243 - 244 - down_read(&sb_dqopt(sb)->dqptr_sem); 245 - if (!sb_has_quota_active(sb, type)) { 246 - up_read(&sb_dqopt(sb)->dqptr_sem); 247 - return -ESRCH; 248 - } 249 - fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id; 250 - up_read(&sb_dqopt(sb)->dqptr_sem); 251 - if (copy_to_user(addr, &fmt, sizeof(fmt))) 252 - return -EFAULT; 253 - return 0; 254 - } 255 - case Q_GETINFO: { 256 - struct if_dqinfo info; 257 - 258 - ret = sb->s_qcop->get_info(sb, type, &info); 259 - if (ret) 260 - return ret; 261 - if (copy_to_user(addr, &info, sizeof(info))) 262 - return -EFAULT; 263 - return 0; 264 - } 265 - case Q_SETINFO: { 266 - struct if_dqinfo info; 267 - 268 - if (copy_from_user(&info, addr, sizeof(info))) 269 - return -EFAULT; 270 - return sb->s_qcop->set_info(sb, type, &info); 271 - } 272 - case Q_GETQUOTA: { 273 - struct if_dqblk idq; 274 - 275 - ret = sb->s_qcop->get_dqblk(sb, type, id, &idq); 276 - if (ret) 277 - return ret; 278 - if (copy_to_user(addr, &idq, sizeof(idq))) 279 - return -EFAULT; 280 - return 0; 281 - } 282 - case Q_SETQUOTA: { 283 - struct if_dqblk idq; 284 - 285 - if (copy_from_user(&idq, addr, sizeof(idq))) 286 - return -EFAULT; 287 - return sb->s_qcop->set_dqblk(sb, type, id, &idq); 288 - } 289 - case Q_SYNC: 290 - if (sb) 291 - sync_quota_sb(sb, type); 292 - else 293 - sync_dquots(type); 294 - return 0; 295 - 296 - case Q_XQUOTAON: 297 - case Q_XQUOTAOFF: 298 - case Q_XQUOTARM: { 299 - __u32 flags; 300 - 301 - if (copy_from_user(&flags, addr, sizeof(flags))) 302 - return -EFAULT; 303 - return sb->s_qcop->set_xstate(sb, flags, cmd); 304 - } 305 - case Q_XGETQSTAT: { 306 - struct fs_quota_stat fqs; 307 - 308 - if ((ret = sb->s_qcop->get_xstate(sb, &fqs))) 309 - return ret; 310 - if (copy_to_user(addr, &fqs, sizeof(fqs))) 311 - return -EFAULT; 312 - return 0; 313 - } 314 - case Q_XSETQLIM: { 315 - struct fs_disk_quota fdq; 316 - 317 - if (copy_from_user(&fdq, addr, sizeof(fdq))) 318 - return -EFAULT; 319 - return sb->s_qcop->set_xquota(sb, type, id, &fdq); 320 - } 321 - case Q_XGETQUOTA: { 322 - struct fs_disk_quota fdq; 323 - 324 - ret = sb->s_qcop->get_xquota(sb, type, id, &fdq); 325 - if (ret) 326 - return ret; 327 - if (copy_to_user(addr, &fdq, sizeof(fdq))) 328 - return -EFAULT; 329 - return 0; 330 - } 331 - case Q_XQUOTASYNC: 332 - return sb->s_qcop->quota_sync(sb, type); 333 - /* We never reach here unless validity check is broken */ 334 - default: 335 - BUG(); 253 + case Q_QUOTAON: 254 + return quota_quotaon(sb, type, cmd, id, addr); 255 + case Q_QUOTAOFF: 256 + if (!sb->s_qcop->quota_off) 257 + return -ENOSYS; 258 + return sb->s_qcop->quota_off(sb, type, 0); 259 + case Q_GETFMT: 260 + return quota_getfmt(sb, type, addr); 261 + case Q_GETINFO: 262 + return quota_getinfo(sb, type, addr); 263 + case Q_SETINFO: 264 + return quota_setinfo(sb, type, addr); 265 + case Q_GETQUOTA: 266 + return quota_getquota(sb, type, id, addr); 267 + case Q_SETQUOTA: 268 + return quota_setquota(sb, 
type, id, addr); 269 + case Q_SYNC: 270 + if (!sb->s_qcop->quota_sync) 271 + return -ENOSYS; 272 + return sb->s_qcop->quota_sync(sb, type, 1); 273 + case Q_XQUOTAON: 274 + case Q_XQUOTAOFF: 275 + case Q_XQUOTARM: 276 + return quota_setxstate(sb, cmd, addr); 277 + case Q_XGETQSTAT: 278 + return quota_getxstate(sb, addr); 279 + case Q_XSETQLIM: 280 + return quota_setxquota(sb, type, id, addr); 281 + case Q_XGETQUOTA: 282 + return quota_getxquota(sb, type, id, addr); 283 + case Q_XQUOTASYNC: 284 + /* caller already holds s_umount */ 285 + if (sb->s_flags & MS_RDONLY) 286 + return -EROFS; 287 + writeback_inodes_sb(sb); 288 + return 0; 289 + default: 290 + return -EINVAL; 336 291 } 337 - return 0; 338 292 } 339 293 340 294 /* ··· 319 397 cmds = cmd >> SUBCMDSHIFT; 320 398 type = cmd & SUBCMDMASK; 321 399 322 - if (cmds != Q_SYNC || special) { 323 - sb = quotactl_block(special); 324 - if (IS_ERR(sb)) 325 - return PTR_ERR(sb); 400 + /* 401 + * As a special case Q_SYNC can be called without a specific device. 402 + * It will iterate all superblocks that have quota enabled and call 403 + * the sync action on each of them. 404 + */ 405 + if (!special) { 406 + if (cmds == Q_SYNC) 407 + return quota_sync_all(type); 408 + return -ENODEV; 326 409 } 327 410 328 - ret = check_quotactl_valid(sb, type, cmds, id); 329 - if (ret >= 0) 330 - ret = do_quotactl(sb, type, cmds, id, addr); 331 - if (sb) 332 - drop_super(sb); 411 + sb = quotactl_block(special); 412 + if (IS_ERR(sb)) 413 + return PTR_ERR(sb); 333 414 415 + ret = do_quotactl(sb, type, cmds, id, addr); 416 + 417 + drop_super(sb); 334 418 return ret; 335 419 } 336 - 337 - #if defined(CONFIG_COMPAT_FOR_U64_ALIGNMENT) 338 - /* 339 - * This code works only for 32 bit quota tools over 64 bit OS (x86_64, ia64) 340 - * and is necessary due to alignment problems. 
341 - */ 342 - struct compat_if_dqblk { 343 - compat_u64 dqb_bhardlimit; 344 - compat_u64 dqb_bsoftlimit; 345 - compat_u64 dqb_curspace; 346 - compat_u64 dqb_ihardlimit; 347 - compat_u64 dqb_isoftlimit; 348 - compat_u64 dqb_curinodes; 349 - compat_u64 dqb_btime; 350 - compat_u64 dqb_itime; 351 - compat_uint_t dqb_valid; 352 - }; 353 - 354 - /* XFS structures */ 355 - struct compat_fs_qfilestat { 356 - compat_u64 dqb_bhardlimit; 357 - compat_u64 qfs_nblks; 358 - compat_uint_t qfs_nextents; 359 - }; 360 - 361 - struct compat_fs_quota_stat { 362 - __s8 qs_version; 363 - __u16 qs_flags; 364 - __s8 qs_pad; 365 - struct compat_fs_qfilestat qs_uquota; 366 - struct compat_fs_qfilestat qs_gquota; 367 - compat_uint_t qs_incoredqs; 368 - compat_int_t qs_btimelimit; 369 - compat_int_t qs_itimelimit; 370 - compat_int_t qs_rtbtimelimit; 371 - __u16 qs_bwarnlimit; 372 - __u16 qs_iwarnlimit; 373 - }; 374 - 375 - asmlinkage long sys32_quotactl(unsigned int cmd, const char __user *special, 376 - qid_t id, void __user *addr) 377 - { 378 - unsigned int cmds; 379 - struct if_dqblk __user *dqblk; 380 - struct compat_if_dqblk __user *compat_dqblk; 381 - struct fs_quota_stat __user *fsqstat; 382 - struct compat_fs_quota_stat __user *compat_fsqstat; 383 - compat_uint_t data; 384 - u16 xdata; 385 - long ret; 386 - 387 - cmds = cmd >> SUBCMDSHIFT; 388 - 389 - switch (cmds) { 390 - case Q_GETQUOTA: 391 - dqblk = compat_alloc_user_space(sizeof(struct if_dqblk)); 392 - compat_dqblk = addr; 393 - ret = sys_quotactl(cmd, special, id, dqblk); 394 - if (ret) 395 - break; 396 - if (copy_in_user(compat_dqblk, dqblk, sizeof(*compat_dqblk)) || 397 - get_user(data, &dqblk->dqb_valid) || 398 - put_user(data, &compat_dqblk->dqb_valid)) 399 - ret = -EFAULT; 400 - break; 401 - case Q_SETQUOTA: 402 - dqblk = compat_alloc_user_space(sizeof(struct if_dqblk)); 403 - compat_dqblk = addr; 404 - ret = -EFAULT; 405 - if (copy_in_user(dqblk, compat_dqblk, sizeof(*compat_dqblk)) || 406 - get_user(data, &compat_dqblk->dqb_valid) || 407 - put_user(data, &dqblk->dqb_valid)) 408 - break; 409 - ret = sys_quotactl(cmd, special, id, dqblk); 410 - break; 411 - case Q_XGETQSTAT: 412 - fsqstat = compat_alloc_user_space(sizeof(struct fs_quota_stat)); 413 - compat_fsqstat = addr; 414 - ret = sys_quotactl(cmd, special, id, fsqstat); 415 - if (ret) 416 - break; 417 - ret = -EFAULT; 418 - /* Copying qs_version, qs_flags, qs_pad */ 419 - if (copy_in_user(compat_fsqstat, fsqstat, 420 - offsetof(struct compat_fs_quota_stat, qs_uquota))) 421 - break; 422 - /* Copying qs_uquota */ 423 - if (copy_in_user(&compat_fsqstat->qs_uquota, 424 - &fsqstat->qs_uquota, 425 - sizeof(compat_fsqstat->qs_uquota)) || 426 - get_user(data, &fsqstat->qs_uquota.qfs_nextents) || 427 - put_user(data, &compat_fsqstat->qs_uquota.qfs_nextents)) 428 - break; 429 - /* Copying qs_gquota */ 430 - if (copy_in_user(&compat_fsqstat->qs_gquota, 431 - &fsqstat->qs_gquota, 432 - sizeof(compat_fsqstat->qs_gquota)) || 433 - get_user(data, &fsqstat->qs_gquota.qfs_nextents) || 434 - put_user(data, &compat_fsqstat->qs_gquota.qfs_nextents)) 435 - break; 436 - /* Copying the rest */ 437 - if (copy_in_user(&compat_fsqstat->qs_incoredqs, 438 - &fsqstat->qs_incoredqs, 439 - sizeof(struct compat_fs_quota_stat) - 440 - offsetof(struct compat_fs_quota_stat, qs_incoredqs)) || 441 - get_user(xdata, &fsqstat->qs_iwarnlimit) || 442 - put_user(xdata, &compat_fsqstat->qs_iwarnlimit)) 443 - break; 444 - ret = 0; 445 - break; 446 - default: 447 - ret = sys_quotactl(cmd, special, id, addr); 448 - } 449 - return ret; 
450 - } 451 - #endif 452 - 453 - 454 - #ifdef CONFIG_QUOTA_NETLINK_INTERFACE 455 - 456 - /* Netlink family structure for quota */ 457 - static struct genl_family quota_genl_family = { 458 - .id = GENL_ID_GENERATE, 459 - .hdrsize = 0, 460 - .name = "VFS_DQUOT", 461 - .version = 1, 462 - .maxattr = QUOTA_NL_A_MAX, 463 - }; 464 - 465 - /** 466 - * quota_send_warning - Send warning to userspace about exceeded quota 467 - * @type: The quota type: USRQQUOTA, GRPQUOTA,... 468 - * @id: The user or group id of the quota that was exceeded 469 - * @dev: The device on which the fs is mounted (sb->s_dev) 470 - * @warntype: The type of the warning: QUOTA_NL_... 471 - * 472 - * This can be used by filesystems (including those which don't use 473 - * dquot) to send a message to userspace relating to quota limits. 474 - * 475 - */ 476 - 477 - void quota_send_warning(short type, unsigned int id, dev_t dev, 478 - const char warntype) 479 - { 480 - static atomic_t seq; 481 - struct sk_buff *skb; 482 - void *msg_head; 483 - int ret; 484 - int msg_size = 4 * nla_total_size(sizeof(u32)) + 485 - 2 * nla_total_size(sizeof(u64)); 486 - 487 - /* We have to allocate using GFP_NOFS as we are called from a 488 - * filesystem performing write and thus further recursion into 489 - * the fs to free some data could cause deadlocks. */ 490 - skb = genlmsg_new(msg_size, GFP_NOFS); 491 - if (!skb) { 492 - printk(KERN_ERR 493 - "VFS: Not enough memory to send quota warning.\n"); 494 - return; 495 - } 496 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq), 497 - &quota_genl_family, 0, QUOTA_NL_C_WARNING); 498 - if (!msg_head) { 499 - printk(KERN_ERR 500 - "VFS: Cannot store netlink header in quota warning.\n"); 501 - goto err_out; 502 - } 503 - ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, type); 504 - if (ret) 505 - goto attr_err_out; 506 - ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, id); 507 - if (ret) 508 - goto attr_err_out; 509 - ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype); 510 - if (ret) 511 - goto attr_err_out; 512 - ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MAJOR, MAJOR(dev)); 513 - if (ret) 514 - goto attr_err_out; 515 - ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev)); 516 - if (ret) 517 - goto attr_err_out; 518 - ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, current_uid()); 519 - if (ret) 520 - goto attr_err_out; 521 - genlmsg_end(skb, msg_head); 522 - 523 - genlmsg_multicast(skb, 0, quota_genl_family.id, GFP_NOFS); 524 - return; 525 - attr_err_out: 526 - printk(KERN_ERR "VFS: Not enough space to compose quota message!\n"); 527 - err_out: 528 - kfree_skb(skb); 529 - } 530 - EXPORT_SYMBOL(quota_send_warning); 531 - 532 - static int __init quota_init(void) 533 - { 534 - if (genl_register_family(&quota_genl_family) != 0) 535 - printk(KERN_ERR 536 - "VFS: Failed to create quota netlink interface.\n"); 537 - return 0; 538 - }; 539 - 540 - module_init(quota_init); 541 - #endif 542 -
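The old validity switch plus per-command switch collapses into one permission
check and small per-command helpers, and Q_SYNC with a NULL device now iterates
all superblocks via quota_sync_all(). Nothing changes for userspace; a sketch
of the syscall this dispatch serves, with a placeholder device path (userspace
C, not kernel code):

	#include <sys/types.h>
	#include <sys/quota.h>
	#include <stdio.h>

	static int print_usage(const char *dev, int uid)
	{
		struct dqblk dq;

		/* -EPERM now comes from check_quotactl_permission() */
		if (quotactl(QCMD(Q_GETQUOTA, USRQUOTA), dev, uid, (caddr_t)&dq))
			return -1;
		printf("%llu bytes charged\n", (unsigned long long)dq.dqb_curspace);
		return 0;
	}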
+5 -5
fs/reiserfs/bitmap.c
··· 425 425 426 426 journal_mark_dirty(th, s, sbh); 427 427 if (for_unformatted) 428 - vfs_dq_free_block_nodirty(inode, 1); 428 + dquot_free_block_nodirty(inode, 1); 429 429 } 430 430 431 431 void reiserfs_free_block(struct reiserfs_transaction_handle *th, ··· 1049 1049 amount_needed, hint->inode->i_uid); 1050 1050 #endif 1051 1051 quota_ret = 1052 - vfs_dq_alloc_block_nodirty(hint->inode, amount_needed); 1052 + dquot_alloc_block_nodirty(hint->inode, amount_needed); 1053 1053 if (quota_ret) /* Quota exceeded? */ 1054 1054 return QUOTA_EXCEEDED; 1055 1055 if (hint->preallocate && hint->prealloc_size) { ··· 1058 1058 "reiserquota: allocating (prealloc) %d blocks id=%u", 1059 1059 hint->prealloc_size, hint->inode->i_uid); 1060 1060 #endif 1061 - quota_ret = vfs_dq_prealloc_block_nodirty(hint->inode, 1061 + quota_ret = dquot_prealloc_block_nodirty(hint->inode, 1062 1062 hint->prealloc_size); 1063 1063 if (quota_ret) 1064 1064 hint->preallocate = hint->prealloc_size = 0; ··· 1092 1092 hint->inode->i_uid); 1093 1093 #endif 1094 1094 /* Free not allocated blocks */ 1095 - vfs_dq_free_block_nodirty(hint->inode, 1095 + dquot_free_block_nodirty(hint->inode, 1096 1096 amount_needed + hint->prealloc_size - 1097 1097 nr_allocated); 1098 1098 } ··· 1125 1125 REISERFS_I(hint->inode)->i_prealloc_count, 1126 1126 hint->inode->i_uid); 1127 1127 #endif 1128 - vfs_dq_free_block_nodirty(hint->inode, amount_needed + 1128 + dquot_free_block_nodirty(hint->inode, amount_needed + 1129 1129 hint->prealloc_size - nr_allocated - 1130 1130 REISERFS_I(hint->inode)-> 1131 1131 i_prealloc_count);
+1 -1
fs/reiserfs/file.c
··· 289 289 .compat_ioctl = reiserfs_compat_ioctl, 290 290 #endif 291 291 .mmap = reiserfs_file_mmap, 292 - .open = generic_file_open, 292 + .open = dquot_file_open, 293 293 .release = reiserfs_file_release, 294 294 .fsync = reiserfs_sync_file, 295 295 .aio_read = generic_file_aio_read,
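Filesystems using the generic quota code now opt in to quota initialization at
open time by pointing ->open at dquot_file_open(), which behaves exactly like
generic_file_open() plus a dquot_initialize() on writable opens. A sketch of
the wiring for a hypothetical filesystem:

	static const struct file_operations example_file_operations = {
		.read	= do_sync_read,
		.write	= do_sync_write,
		.mmap	= generic_file_mmap,
		.open	= dquot_file_open,	/* generic_file_open + quota init */
	};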
+12 -8
fs/reiserfs/inode.c
··· 34 34 int depth; 35 35 int err; 36 36 37 + if (!is_bad_inode(inode)) 38 + dquot_initialize(inode); 39 + 37 40 truncate_inode_pages(&inode->i_data, 0); 38 41 39 42 depth = reiserfs_write_lock_once(inode->i_sb); ··· 57 54 * after delete_object so that quota updates go into the same transaction as 58 55 * stat data deletion */ 59 56 if (!err) 60 - vfs_dq_free_inode(inode); 57 + dquot_free_inode(inode); 61 58 62 59 if (journal_end(&th, inode->i_sb, jbegin_count)) 63 60 goto out; ··· 1768 1765 1769 1766 BUG_ON(!th->t_trans_id); 1770 1767 1771 - if (vfs_dq_alloc_inode(inode)) { 1772 - err = -EDQUOT; 1768 + dquot_initialize(inode); 1769 + err = dquot_alloc_inode(inode); 1770 + if (err) 1773 1771 goto out_end_trans; 1774 - } 1775 1772 if (!dir->i_nlink) { 1776 1773 err = -EPERM; 1777 1774 goto out_bad_inode; ··· 1962 1959 INODE_PKEY(inode)->k_objectid = 0; 1963 1960 1964 1961 /* Quota change must be inside a transaction for journaling */ 1965 - vfs_dq_free_inode(inode); 1962 + dquot_free_inode(inode); 1966 1963 1967 1964 out_end_trans: 1968 1965 journal_end(th, th->t_super, th->t_blocks_allocated); 1969 1966 /* Drop can be outside and it needs more credits so it's better to have it outside */ 1970 - vfs_dq_drop(inode); 1967 + dquot_drop(inode); 1971 1968 inode->i_flags |= S_NOQUOTA; 1972 1969 make_bad_inode(inode); 1973 1970 ··· 3076 3073 3077 3074 depth = reiserfs_write_lock_once(inode->i_sb); 3078 3075 if (attr->ia_valid & ATTR_SIZE) { 3076 + dquot_initialize(inode); 3077 + 3079 3078 /* version 2 items will be caught by the s_maxbytes check 3080 3079 ** done for us in vmtruncate 3081 3080 */ ··· 3139 3134 jbegin_count); 3140 3135 if (error) 3141 3136 goto out; 3142 - error = 3143 - vfs_dq_transfer(inode, attr) ? -EDQUOT : 0; 3137 + error = dquot_transfer(inode, attr); 3144 3138 if (error) { 3145 3139 journal_end(&th, inode->i_sb, 3146 3140 jbegin_count);
+20 -3
fs/reiserfs/namei.c
··· 546 546 */ 547 547 static int drop_new_inode(struct inode *inode) 548 548 { 549 - vfs_dq_drop(inode); 549 + dquot_drop(inode); 550 550 make_bad_inode(inode); 551 551 inode->i_flags |= S_NOQUOTA; 552 552 iput(inode); ··· 554 554 } 555 555 556 556 /* utility function that does setup for reiserfs_new_inode. 557 - ** vfs_dq_init needs lots of credits so it's better to have it 557 + ** dquot_initialize needs lots of credits so it's better to have it 558 558 ** outside of a transaction, so we had to pull some bits of 559 559 ** reiserfs_new_inode out into this func. 560 560 */ ··· 577 577 } else { 578 578 inode->i_gid = current_fsgid(); 579 579 } 580 - vfs_dq_init(inode); 580 + dquot_initialize(inode); 581 581 return 0; 582 582 } 583 583 ··· 593 593 REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb)); 594 594 struct reiserfs_transaction_handle th; 595 595 struct reiserfs_security_handle security; 596 + 597 + dquot_initialize(dir); 596 598 597 599 if (!(inode = new_inode(dir->i_sb))) { 598 600 return -ENOMEM; ··· 668 666 if (!new_valid_dev(rdev)) 669 667 return -EINVAL; 670 668 669 + dquot_initialize(dir); 670 + 671 671 if (!(inode = new_inode(dir->i_sb))) { 672 672 return -ENOMEM; 673 673 } ··· 742 738 JOURNAL_PER_BALANCE_CNT * 3 + 743 739 2 * (REISERFS_QUOTA_INIT_BLOCKS(dir->i_sb) + 744 740 REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb)); 741 + 742 + dquot_initialize(dir); 745 743 746 744 #ifdef DISPLACE_NEW_PACKING_LOCALITIES 747 745 /* set flag that new packing locality created and new blocks for the content * of that directory are not displaced yet */ ··· 848 842 JOURNAL_PER_BALANCE_CNT * 2 + 2 + 849 843 4 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb); 850 844 845 + dquot_initialize(dir); 846 + 851 847 reiserfs_write_lock(dir->i_sb); 852 848 retval = journal_begin(&th, dir->i_sb, jbegin_count); 853 849 if (retval) ··· 930 922 int jbegin_count; 931 923 unsigned long savelink; 932 924 int depth; 925 + 926 + dquot_initialize(dir); 933 927 934 928 inode = dentry->d_inode; 935 929 ··· 1034 1024 2 * (REISERFS_QUOTA_INIT_BLOCKS(parent_dir->i_sb) + 1035 1025 REISERFS_QUOTA_TRANS_BLOCKS(parent_dir->i_sb)); 1036 1026 1027 + dquot_initialize(parent_dir); 1028 + 1037 1029 if (!(inode = new_inode(parent_dir->i_sb))) { 1038 1030 return -ENOMEM; 1039 1031 } ··· 1122 1110 int jbegin_count = 1123 1111 JOURNAL_PER_BALANCE_CNT * 3 + 1124 1112 2 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb); 1113 + 1114 + dquot_initialize(dir); 1125 1115 1126 1116 reiserfs_write_lock(dir->i_sb); 1127 1117 if (inode->i_nlink >= REISERFS_LINK_MAX) { ··· 1248 1234 jbegin_count = 1249 1235 JOURNAL_PER_BALANCE_CNT * 3 + 5 + 1250 1236 4 * REISERFS_QUOTA_TRANS_BLOCKS(old_dir->i_sb); 1237 + 1238 + dquot_initialize(old_dir); 1239 + dquot_initialize(new_dir); 1251 1240 1252 1241 old_inode = old_dentry->d_inode; 1253 1242 new_dentry_inode = new_dentry->d_inode;
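Since initializing dquots may itself read quota file blocks, every namespace
operation that can charge quota now calls dquot_initialize() on the
directories involved before the journal transaction starts, as the hunks above
do for each reiserfs operation. The recurring shape, condensed into a
hypothetical ->unlink:

	static int example_unlink(struct inode *dir, struct dentry *dentry)
	{
		/* may do I/O, so it runs before journal_begin() */
		dquot_initialize(dir);

		/* ... start transaction, remove entry, drop i_nlink, commit ... */
		return 0;	/* placeholder for the real directory work */
	}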
+11 -9
fs/reiserfs/stree.c
··· 1299 1299 "reiserquota delete_item(): freeing %u, id=%u type=%c", 1300 1300 quota_cut_bytes, inode->i_uid, head2type(&s_ih)); 1301 1301 #endif 1302 - vfs_dq_free_space_nodirty(inode, quota_cut_bytes); 1302 + dquot_free_space_nodirty(inode, quota_cut_bytes); 1303 1303 1304 1304 /* Return deleted body length */ 1305 1305 return ret_value; ··· 1383 1383 quota_cut_bytes, inode->i_uid, 1384 1384 key2type(key)); 1385 1385 #endif 1386 - vfs_dq_free_space_nodirty(inode, 1386 + dquot_free_space_nodirty(inode, 1387 1387 quota_cut_bytes); 1388 1388 } 1389 1389 break; ··· 1733 1733 "reiserquota cut_from_item(): freeing %u id=%u type=%c", 1734 1734 quota_cut_bytes, inode->i_uid, '?'); 1735 1735 #endif 1736 - vfs_dq_free_space_nodirty(inode, quota_cut_bytes); 1736 + dquot_free_space_nodirty(inode, quota_cut_bytes); 1737 1737 return ret_value; 1738 1738 } 1739 1739 ··· 1968 1968 key2type(&(key->on_disk_key))); 1969 1969 #endif 1970 1970 1971 - if (vfs_dq_alloc_space_nodirty(inode, pasted_size)) { 1971 + retval = dquot_alloc_space_nodirty(inode, pasted_size); 1972 + if (retval) { 1972 1973 pathrelse(search_path); 1973 - return -EDQUOT; 1974 + return retval; 1974 1975 } 1975 1976 init_tb_struct(th, &s_paste_balance, th->t_super, search_path, 1976 1977 pasted_size); ··· 2025 2024 pasted_size, inode->i_uid, 2026 2025 key2type(&(key->on_disk_key))); 2027 2026 #endif 2028 - vfs_dq_free_space_nodirty(inode, pasted_size); 2027 + dquot_free_space_nodirty(inode, pasted_size); 2029 2028 return retval; 2030 2029 } 2031 2030 ··· 2063 2062 #endif 2064 2063 /* We can't dirty inode here. It would be immediately written but 2065 2064 * appropriate stat item isn't inserted yet... */ 2066 - if (vfs_dq_alloc_space_nodirty(inode, quota_bytes)) { 2065 + retval = dquot_alloc_space_nodirty(inode, quota_bytes); 2066 + if (retval) { 2067 2067 pathrelse(path); 2068 - return -EDQUOT; 2068 + return retval; 2069 2069 } 2070 2070 } 2071 2071 init_tb_struct(th, &s_ins_balance, th->t_super, path, ··· 2115 2113 quota_bytes, inode->i_uid, head2type(ih)); 2116 2114 #endif 2117 2115 if (inode) 2118 - vfs_dq_free_space_nodirty(inode, quota_bytes); 2116 + dquot_free_space_nodirty(inode, quota_bytes); 2119 2117 return retval; 2120 2118 }
+7 -8
fs/reiserfs/super.c
··· 246 246 retval = remove_save_link_only(s, &save_link_key, 0); 247 247 continue; 248 248 } 249 - vfs_dq_init(inode); 249 + dquot_initialize(inode); 250 250 251 251 if (truncate && S_ISDIR(inode->i_mode)) { 252 252 /* We got a truncate request for a dir which is impossible. ··· 578 578 reiserfs_write_unlock_once(inode->i_sb, lock_depth); 579 579 } 580 580 581 + static void reiserfs_clear_inode(struct inode *inode) 582 + { 583 + dquot_drop(inode); 584 + } 585 + 581 586 #ifdef CONFIG_QUOTA 582 587 static ssize_t reiserfs_quota_write(struct super_block *, int, const char *, 583 588 size_t, loff_t); ··· 595 590 .destroy_inode = reiserfs_destroy_inode, 596 591 .write_inode = reiserfs_write_inode, 597 592 .dirty_inode = reiserfs_dirty_inode, 593 + .clear_inode = reiserfs_clear_inode, 598 594 .delete_inode = reiserfs_delete_inode, 599 595 .put_super = reiserfs_put_super, 600 596 .write_super = reiserfs_write_super, ··· 622 616 static int reiserfs_quota_on(struct super_block *, int, int, char *, int); 623 617 624 618 static const struct dquot_operations reiserfs_quota_operations = { 625 - .initialize = dquot_initialize, 626 - .drop = dquot_drop, 627 - .alloc_space = dquot_alloc_space, 628 - .alloc_inode = dquot_alloc_inode, 629 - .free_space = dquot_free_space, 630 - .free_inode = dquot_free_inode, 631 - .transfer = dquot_transfer, 632 619 .write_dquot = reiserfs_write_dquot, 633 620 .acquire_dquot = reiserfs_acquire_dquot, 634 621 .release_dquot = reiserfs_release_dquot,
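With the ->drop dquot operation removed, releasing the per-inode dquot
references becomes the filesystem's job, done from ->clear_inode; ufs gains
the identical hook later in this series. For a filesystem with no other
teardown work the whole hook is a single call:

	static void example_clear_inode(struct inode *inode)
	{
		dquot_drop(inode);	/* put the inode's i_dquot[] references */
	}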
-4
fs/reiserfs/xattr.c
··· 61 61 static int xattr_create(struct inode *dir, struct dentry *dentry, int mode) 62 62 { 63 63 BUG_ON(!mutex_is_locked(&dir->i_mutex)); 64 - vfs_dq_init(dir); 65 64 return dir->i_op->create(dir, dentry, mode, NULL); 66 65 } 67 66 #endif ··· 68 69 static int xattr_mkdir(struct inode *dir, struct dentry *dentry, int mode) 69 70 { 70 71 BUG_ON(!mutex_is_locked(&dir->i_mutex)); 71 - vfs_dq_init(dir); 72 72 return dir->i_op->mkdir(dir, dentry, mode); 73 73 } 74 74 ··· 79 81 { 80 82 int error; 81 83 BUG_ON(!mutex_is_locked(&dir->i_mutex)); 82 - vfs_dq_init(dir); 83 84 84 85 reiserfs_mutex_lock_nested_safe(&dentry->d_inode->i_mutex, 85 86 I_MUTEX_CHILD, dir->i_sb); ··· 94 97 { 95 98 int error; 96 99 BUG_ON(!mutex_is_locked(&dir->i_mutex)); 97 - vfs_dq_init(dir); 98 100 99 101 reiserfs_mutex_lock_nested_safe(&dentry->d_inode->i_mutex, 100 102 I_MUTEX_CHILD, dir->i_sb);
+7 -7
fs/sync.c
··· 34 34 if (!sb->s_bdi) 35 35 return 0; 36 36 37 - /* Avoid doing twice syncing and cache pruning for quota sync */ 38 - if (!wait) { 39 - writeout_quota_sb(sb, -1); 40 - writeback_inodes_sb(sb); 41 - } else { 42 - sync_quota_sb(sb, -1); 37 + if (sb->s_qcop && sb->s_qcop->quota_sync) 38 + sb->s_qcop->quota_sync(sb, -1, wait); 39 + 40 + if (wait) 43 41 sync_inodes_sb(sb); 44 - } 42 + else 43 + writeback_inodes_sb(sb); 44 + 45 45 if (sb->s_op->sync_fs) 46 46 sb->s_op->sync_fs(sb, wait); 47 47 return __sync_blockdev(sb->s_bdev, wait);
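__sync_filesystem now drives quota writeback through the generic
s_qcop->quota_sync(sb, type, wait) entry point instead of the removed
sync_quota_sb()/writeout_quota_sb() pair, so quota flushing follows the same
wait semantics as inode writeback. The same guard any other caller would use,
sketched:

	/* flush quotas of every type without waiting, if supported */
	if (sb->s_qcop && sb->s_qcop->quota_sync)
		sb->s_qcop->quota_sync(sb, -1 /* all types */, 0 /* no wait */);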
+20 -15
fs/udf/balloc.c
··· 208 208 ((char *)bh->b_data)[(bit + i) >> 3]); 
 209 209 } else { 
 210 210 if (inode) 
 211 - vfs_dq_free_block(inode, 1); 
 211 + dquot_free_block(inode, 1); 
 212 212 udf_add_free_space(sb, sbi->s_partition, 1); 
 213 213 } 
 214 214 } 
··· 260 260 while (bit < (sb->s_blocksize << 3) && block_count > 0) { 
 261 261 if (!udf_test_bit(bit, bh->b_data)) 
 262 262 goto out; 
 263 - else if (vfs_dq_prealloc_block(inode, 1)) 
 263 + else if (dquot_prealloc_block(inode, 1)) 
 264 264 goto out; 
 265 265 else if (!udf_clear_bit(bit, bh->b_data)) { 
 266 266 udf_debug("bit already cleared for block %d\n", bit); 
 267 - vfs_dq_free_block(inode, 1); 
 267 + dquot_free_block(inode, 1); 
 268 268 goto out; 
 269 269 } 
 270 270 block_count--; 
··· 390 390 /* 
 391 391 * Check quota for allocation of this block. 
 392 392 */ 
 393 - if (inode && vfs_dq_alloc_block(inode, 1)) { 
 394 - mutex_unlock(&sbi->s_alloc_mutex); 
 395 - *err = -EDQUOT; 
 396 - return 0; 
 393 + if (inode) { 
 394 + int ret = dquot_alloc_block(inode, 1); 
 395 + 
 396 + if (ret) { 
 397 + mutex_unlock(&sbi->s_alloc_mutex); 
 398 + *err = ret; 
 399 + return 0; 
 400 + } 
 397 401 } 
 398 402 
 399 403 newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) - 
··· 453 449 /* We do this up front - There are some error conditions that 
 454 450 could occur, but... oh well */ 
 455 451 if (inode) 
 456 - vfs_dq_free_block(inode, count); 
 452 + dquot_free_block(inode, count); 
 457 453 udf_add_free_space(sb, sbi->s_partition, count); 
 458 454 
 459 455 start = bloc->logicalBlockNum + offset; 
··· 698 694 epos.offset -= adsize; 
 699 695 
 700 696 alloc_count = (elen >> sb->s_blocksize_bits); 
 701 - if (inode && vfs_dq_prealloc_block(inode, 
 697 + if (inode && dquot_prealloc_block(inode, 
 702 698 alloc_count > block_count ? block_count : alloc_count)) 
 703 699 alloc_count = 0; 
 704 700 else if (alloc_count > block_count) { 
··· 801 797 newblock = goal_eloc.logicalBlockNum; 
 802 798 goal_eloc.logicalBlockNum++; 
 803 799 goal_elen -= sb->s_blocksize; 
 804 - 
 805 - if (inode && vfs_dq_alloc_block(inode, 1)) { 
 806 - brelse(goal_epos.bh); 
 807 - mutex_unlock(&sbi->s_alloc_mutex); 
 808 - *err = -EDQUOT; 
 809 - return 0; 
 800 + if (inode) { 
 801 + *err = dquot_alloc_block(inode, 1); 
 802 + if (*err) { 
 803 + brelse(goal_epos.bh); 
 804 + mutex_unlock(&sbi->s_alloc_mutex); 
 805 + return 0; 
 806 + } 
 810 807 } 
 811 808 
 812 809 if (goal_elen)
+26 -2
fs/udf/file.c
··· 34 34 #include <linux/errno.h> 35 35 #include <linux/smp_lock.h> 36 36 #include <linux/pagemap.h> 37 + #include <linux/quotaops.h> 37 38 #include <linux/buffer_head.h> 38 39 #include <linux/aio.h> 39 40 ··· 208 207 .read = do_sync_read, 209 208 .aio_read = generic_file_aio_read, 210 209 .ioctl = udf_ioctl, 211 - .open = generic_file_open, 210 + .open = dquot_file_open, 212 211 .mmap = generic_file_mmap, 213 212 .write = do_sync_write, 214 213 .aio_write = udf_file_aio_write, ··· 218 217 .llseek = generic_file_llseek, 219 218 }; 220 219 220 + static int udf_setattr(struct dentry *dentry, struct iattr *iattr) 221 + { 222 + struct inode *inode = dentry->d_inode; 223 + int error; 224 + 225 + error = inode_change_ok(inode, iattr); 226 + if (error) 227 + return error; 228 + 229 + if (iattr->ia_valid & ATTR_SIZE) 230 + dquot_initialize(inode); 231 + 232 + if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) || 233 + (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) { 234 + error = dquot_transfer(inode, iattr); 235 + if (error) 236 + return error; 237 + } 238 + 239 + return inode_setattr(inode, iattr); 240 + } 241 + 221 242 const struct inode_operations udf_file_inode_operations = { 222 - .truncate = udf_truncate, 243 + .truncate = udf_truncate, 244 + .setattr = udf_setattr, 223 245 };
+8 -6
fs/udf/ialloc.c
··· 36 36 * Note: we must free any quota before locking the superblock, 37 37 * as writing the quota to disk may need the lock as well. 38 38 */ 39 - vfs_dq_free_inode(inode); 40 - vfs_dq_drop(inode); 39 + dquot_free_inode(inode); 40 + dquot_drop(inode); 41 41 42 42 clear_inode(inode); 43 43 ··· 61 61 struct super_block *sb = dir->i_sb; 62 62 struct udf_sb_info *sbi = UDF_SB(sb); 63 63 struct inode *inode; 64 - int block; 64 + int block, ret; 65 65 uint32_t start = UDF_I(dir)->i_location.logicalBlockNum; 66 66 struct udf_inode_info *iinfo; 67 67 struct udf_inode_info *dinfo = UDF_I(dir); ··· 153 153 insert_inode_hash(inode); 154 154 mark_inode_dirty(inode); 155 155 156 - if (vfs_dq_alloc_inode(inode)) { 157 - vfs_dq_drop(inode); 156 + dquot_initialize(inode); 157 + ret = dquot_alloc_inode(inode); 158 + if (ret) { 159 + dquot_drop(inode); 158 160 inode->i_flags |= S_NOQUOTA; 159 161 inode->i_nlink = 0; 160 162 iput(inode); 161 - *err = -EDQUOT; 163 + *err = ret; 162 164 return NULL; 163 165 } 164 166
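Inode allocation now charges quota from inside the filesystem and can return
the real error instead of a hardcoded -EDQUOT. The pattern udf_new_inode()
follows above, condensed into a hypothetical allocator; the error handling
mirrors the hunk, the on-disk setup is elided:

	static struct inode *example_new_inode(struct super_block *sb, int *err)
	{
		struct inode *inode = new_inode(sb);

		if (!inode) {
			*err = -ENOMEM;
			return NULL;
		}
		/* ... pick an on-disk inode, set i_mode/i_uid/i_gid ... */
		dquot_initialize(inode);
		*err = dquot_alloc_inode(inode);
		if (*err) {
			dquot_drop(inode);
			inode->i_flags |= S_NOQUOTA;	/* skip quota at final iput */
			inode->i_nlink = 0;
			iput(inode);
			return NULL;
		}
		return inode;
	}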
+6
fs/udf/inode.c
··· 36 36 #include <linux/pagemap.h> 37 37 #include <linux/buffer_head.h> 38 38 #include <linux/writeback.h> 39 + #include <linux/quotaops.h> 39 40 #include <linux/slab.h> 40 41 #include <linux/crc-itu-t.h> 41 42 ··· 71 70 72 71 void udf_delete_inode(struct inode *inode) 73 72 { 73 + if (!is_bad_inode(inode)) 74 + dquot_initialize(inode); 75 + 74 76 truncate_inode_pages(&inode->i_data, 0); 75 77 76 78 if (is_bad_inode(inode)) ··· 112 108 (unsigned long long)inode->i_size, 113 109 (unsigned long long)iinfo->i_lenExtents); 114 110 } 111 + 112 + dquot_drop(inode); 115 113 kfree(iinfo->i_ext.i_data); 116 114 iinfo->i_ext.i_data = NULL; 117 115 }
+17
fs/udf/namei.c
··· 563 563 int err; 564 564 struct udf_inode_info *iinfo; 565 565 566 + dquot_initialize(dir); 567 + 566 568 lock_kernel(); 567 569 inode = udf_new_inode(dir, mode, &err); 568 570 if (!inode) { ··· 618 616 if (!old_valid_dev(rdev)) 619 617 return -EINVAL; 620 618 619 + dquot_initialize(dir); 620 + 621 621 lock_kernel(); 622 622 err = -EIO; 623 623 inode = udf_new_inode(dir, mode, &err); ··· 665 661 int err; 666 662 struct udf_inode_info *dinfo = UDF_I(dir); 667 663 struct udf_inode_info *iinfo; 664 + 665 + dquot_initialize(dir); 668 666 669 667 lock_kernel(); 670 668 err = -EMLINK; ··· 805 799 struct fileIdentDesc *fi, cfi; 806 800 struct kernel_lb_addr tloc; 807 801 802 + dquot_initialize(dir); 803 + 808 804 retval = -ENOENT; 809 805 lock_kernel(); 810 806 fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi); ··· 852 844 struct fileIdentDesc *fi; 853 845 struct fileIdentDesc cfi; 854 846 struct kernel_lb_addr tloc; 847 + 848 + dquot_initialize(dir); 855 849 856 850 retval = -ENOENT; 857 851 lock_kernel(); ··· 908 898 int namelen; 909 899 struct buffer_head *bh; 910 900 struct udf_inode_info *iinfo; 901 + 902 + dquot_initialize(dir); 911 903 912 904 lock_kernel(); 913 905 inode = udf_new_inode(dir, S_IFLNK, &err); ··· 1081 1069 int err; 1082 1070 struct buffer_head *bh; 1083 1071 1072 + dquot_initialize(dir); 1073 + 1084 1074 lock_kernel(); 1085 1075 if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) { 1086 1076 unlock_kernel(); ··· 1144 1130 int retval = -ENOENT; 1145 1131 struct kernel_lb_addr tloc; 1146 1132 struct udf_inode_info *old_iinfo = UDF_I(old_inode); 1133 + 1134 + dquot_initialize(old_dir); 1135 + dquot_initialize(new_dir); 1147 1136 1148 1137 lock_kernel(); 1149 1138 ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi);
+15 -9
fs/ufs/balloc.c
··· 85 85 "bit already cleared for fragment %u", i); 86 86 } 87 87 88 - vfs_dq_free_block(inode, count); 88 + dquot_free_block(inode, count); 89 89 90 90 91 91 fs32_add(sb, &ucg->cg_cs.cs_nffree, count); ··· 195 195 ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); 196 196 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) 197 197 ufs_clusteracct (sb, ucpi, blkno, 1); 198 - vfs_dq_free_block(inode, uspi->s_fpb); 198 + dquot_free_block(inode, uspi->s_fpb); 199 199 200 200 fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1); 201 201 uspi->cs_total.cs_nbfree++; ··· 511 511 struct ufs_cg_private_info * ucpi; 512 512 struct ufs_cylinder_group * ucg; 513 513 unsigned cgno, fragno, fragoff, count, fragsize, i; 514 + int ret; 514 515 515 516 UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n", 516 517 (unsigned long long)fragment, oldcount, newcount); ··· 557 556 fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1); 558 557 for (i = oldcount; i < newcount; i++) 559 558 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i); 560 - if (vfs_dq_alloc_block(inode, count)) { 561 - *err = -EDQUOT; 559 + ret = dquot_alloc_block(inode, count); 560 + if (ret) { 561 + *err = ret; 562 562 return 0; 563 563 } 564 564 ··· 598 596 struct ufs_cylinder_group * ucg; 599 597 unsigned oldcg, i, j, k, allocsize; 600 598 u64 result; 599 + int ret; 601 600 602 601 UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n", 603 602 inode->i_ino, cgno, (unsigned long long)goal, count); ··· 667 664 for (i = count; i < uspi->s_fpb; i++) 668 665 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i); 669 666 i = uspi->s_fpb - count; 670 - vfs_dq_free_block(inode, i); 667 + dquot_free_block(inode, i); 671 668 672 669 fs32_add(sb, &ucg->cg_cs.cs_nffree, i); 673 670 uspi->cs_total.cs_nffree += i; ··· 679 676 result = ufs_bitmap_search (sb, ucpi, goal, allocsize); 680 677 if (result == INVBLOCK) 681 678 return 0; 682 - if (vfs_dq_alloc_block(inode, count)) { 683 - *err = -EDQUOT; 679 + ret = dquot_alloc_block(inode, count); 680 + if (ret) { 681 + *err = ret; 684 682 return 0; 685 683 } 686 684 for (i = 0; i < count; i++) ··· 718 714 struct ufs_super_block_first * usb1; 719 715 struct ufs_cylinder_group * ucg; 720 716 u64 result, blkno; 717 + int ret; 721 718 722 719 UFSD("ENTER, goal %llu\n", (unsigned long long)goal); 723 720 ··· 752 747 ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); 753 748 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) 754 749 ufs_clusteracct (sb, ucpi, blkno, -1); 755 - if (vfs_dq_alloc_block(inode, uspi->s_fpb)) { 756 - *err = -EDQUOT; 750 + ret = dquot_alloc_block(inode, uspi->s_fpb); 751 + if (ret) { 752 + *err = ret; 757 753 return INVBLOCK; 758 754 } 759 755
+2 -1
fs/ufs/file.c
··· 24 24 */ 25 25 26 26 #include <linux/fs.h> 27 + #include <linux/quotaops.h> 27 28 28 29 #include "ufs_fs.h" 29 30 #include "ufs.h" ··· 41 40 .write = do_sync_write, 42 41 .aio_write = generic_file_aio_write, 43 42 .mmap = generic_file_mmap, 44 - .open = generic_file_open, 43 + .open = dquot_file_open, 45 44 .fsync = simple_fsync, 46 45 .splice_read = generic_file_splice_read, 47 46 };
+6 -5
fs/ufs/ialloc.c
··· 95 95 96 96 is_directory = S_ISDIR(inode->i_mode); 97 97 98 - vfs_dq_free_inode(inode); 99 - vfs_dq_drop(inode); 98 + dquot_free_inode(inode); 99 + dquot_drop(inode); 100 100 101 101 clear_inode (inode); 102 102 ··· 355 355 356 356 unlock_super (sb); 357 357 358 - if (vfs_dq_alloc_inode(inode)) { 359 - vfs_dq_drop(inode); 360 - err = -EDQUOT; 358 + dquot_initialize(inode); 359 + err = dquot_alloc_inode(inode); 360 + if (err) { 361 + dquot_drop(inode); 361 362 goto fail_without_unlock; 362 363 } 363 364
+4
fs/ufs/inode.c
··· 37 37 #include <linux/smp_lock.h> 38 38 #include <linux/buffer_head.h> 39 39 #include <linux/writeback.h> 40 + #include <linux/quotaops.h> 40 41 41 42 #include "ufs_fs.h" 42 43 #include "ufs.h" ··· 909 908 void ufs_delete_inode (struct inode * inode) 910 909 { 911 910 loff_t old_i_size; 911 + 912 + if (!is_bad_inode(inode)) 913 + dquot_initialize(inode); 912 914 913 915 truncate_inode_pages(&inode->i_data, 0); 914 916 if (is_bad_inode(inode))
+18
fs/ufs/namei.c
··· 30 30 #include <linux/time.h> 31 31 #include <linux/fs.h> 32 32 #include <linux/smp_lock.h> 33 + #include <linux/quotaops.h> 33 34 34 35 #include "ufs_fs.h" 35 36 #include "ufs.h" ··· 85 84 int err; 86 85 87 86 UFSD("BEGIN\n"); 87 + 88 + dquot_initialize(dir); 89 + 88 90 inode = ufs_new_inode(dir, mode); 89 91 err = PTR_ERR(inode); 90 92 ··· 111 107 112 108 if (!old_valid_dev(rdev)) 113 109 return -EINVAL; 110 + 111 + dquot_initialize(dir); 112 + 114 113 inode = ufs_new_inode(dir, mode); 115 114 err = PTR_ERR(inode); 116 115 if (!IS_ERR(inode)) { ··· 137 130 138 131 if (l > sb->s_blocksize) 139 132 goto out_notlocked; 133 + 134 + dquot_initialize(dir); 140 135 141 136 lock_kernel(); 142 137 inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO); ··· 185 176 return -EMLINK; 186 177 } 187 178 179 + dquot_initialize(dir); 180 + 188 181 inode->i_ctime = CURRENT_TIME_SEC; 189 182 inode_inc_link_count(inode); 190 183 atomic_inc(&inode->i_count); ··· 203 192 204 193 if (dir->i_nlink >= UFS_LINK_MAX) 205 194 goto out; 195 + 196 + dquot_initialize(dir); 206 197 207 198 lock_kernel(); 208 199 inode_inc_link_count(dir); ··· 250 237 struct page *page; 251 238 int err = -ENOENT; 252 239 240 + dquot_initialize(dir); 241 + 253 242 de = ufs_find_entry(dir, &dentry->d_name, &page); 254 243 if (!de) 255 244 goto out; ··· 295 280 struct page *old_page; 296 281 struct ufs_dir_entry *old_de; 297 282 int err = -ENOENT; 283 + 284 + dquot_initialize(old_dir); 285 + dquot_initialize(new_dir); 298 286 299 287 old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_page); 300 288 if (!old_de)
+6
fs/ufs/super.c
··· 1432 1432 kmem_cache_destroy(ufs_inode_cachep); 1433 1433 } 1434 1434 1435 + static void ufs_clear_inode(struct inode *inode) 1436 + { 1437 + dquot_drop(inode); 1438 + } 1439 + 1435 1440 #ifdef CONFIG_QUOTA 1436 1441 static ssize_t ufs_quota_read(struct super_block *, int, char *,size_t, loff_t); 1437 1442 static ssize_t ufs_quota_write(struct super_block *, int, const char *, size_t, loff_t); ··· 1447 1442 .destroy_inode = ufs_destroy_inode, 1448 1443 .write_inode = ufs_write_inode, 1449 1444 .delete_inode = ufs_delete_inode, 1445 + .clear_inode = ufs_clear_inode, 1450 1446 .put_super = ufs_put_super, 1451 1447 .write_super = ufs_write_super, 1452 1448 .sync_fs = ufs_sync_fs,
+10
fs/ufs/truncate.c
··· 44 44 #include <linux/buffer_head.h> 45 45 #include <linux/blkdev.h> 46 46 #include <linux/sched.h> 47 + #include <linux/quotaops.h> 47 48 48 49 #include "ufs_fs.h" 49 50 #include "ufs.h" ··· 518 517 if (error) 519 518 return error; 520 519 520 + if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) || 521 + (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) { 522 + error = dquot_transfer(inode, attr); 523 + if (error) 524 + return error; 525 + } 521 526 if (ia_valid & ATTR_SIZE && 522 527 attr->ia_size != i_size_read(inode)) { 523 528 loff_t old_i_size = inode->i_size; 529 + 530 + dquot_initialize(inode); 531 + 524 532 error = vmtruncate(inode, attr->ia_size); 525 533 if (error) 526 534 return error;
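Both udf and ufs now open-code the ownership transfer that used to happen
behind notify_change(). In its minimal form, a ->setattr that only handles the
quota transfer looks like this (example_setattr is illustrative;
inode_change_ok() and inode_setattr() are the generic helpers of this era):

	static int example_setattr(struct dentry *dentry, struct iattr *attr)
	{
		struct inode *inode = dentry->d_inode;
		int error = inode_change_ok(inode, attr);

		if (error)
			return error;
		if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
		    (attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
			/* charge the new owner; may fail with -EDQUOT */
			error = dquot_transfer(inode, attr);
			if (error)
				return error;
		}
		return inode_setattr(inode, attr);
	}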
-19
fs/xfs/linux-2.6/xfs_quotaops.c
··· 44 44 } 45 45 46 46 STATIC int 47 - xfs_fs_quota_sync( 48 - struct super_block *sb, 49 - int type) 50 - { 51 - struct xfs_mount *mp = XFS_M(sb); 52 - 53 - if (sb->s_flags & MS_RDONLY) 54 - return -EROFS; 55 - if (!XFS_IS_QUOTA_RUNNING(mp)) 56 - return -ENOSYS; 57 - return -xfs_sync_data(mp, 0); 58 - } 59 - 60 - STATIC int 61 47 xfs_fs_get_xstate( 62 48 struct super_block *sb, 63 49 struct fs_quota_stat *fqs) ··· 68 82 return -EROFS; 69 83 if (op != Q_XQUOTARM && !XFS_IS_QUOTA_RUNNING(mp)) 70 84 return -ENOSYS; 71 - if (!capable(CAP_SYS_ADMIN)) 72 - return -EPERM; 73 85 74 86 if (uflags & XFS_QUOTA_UDQ_ACCT) 75 87 flags |= XFS_UQUOTA_ACCT; ··· 128 144 return -ENOSYS; 129 145 if (!XFS_IS_QUOTA_ON(mp)) 130 146 return -ESRCH; 131 - if (!capable(CAP_SYS_ADMIN)) 132 - return -EPERM; 133 147 134 148 return -xfs_qm_scall_setqlim(mp, id, xfs_quota_type(type), fdq); 135 149 } 136 150 137 151 const struct quotactl_ops xfs_quotactl_operations = { 138 - .quota_sync = xfs_fs_quota_sync, 139 152 .get_xstate = xfs_fs_get_xstate, 140 153 .set_xstate = xfs_fs_set_xstate, 141 154 .get_xquota = xfs_fs_get_xquota,
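Two things vanish from the XFS glue: its private quota_sync, which did nothing quota-specific beyond forcing data writeback, and the CAP_SYS_ADMIN checks, which become redundant once the generic quotactl entry point performs them before dispatching to the filesystem. A hedged sketch of what such a centralized check looks like; the function name and exact command list are assumptions, not the merged fs/quota/quota.c:

    #include <linux/capability.h>
    #include <linux/quota.h>
    #include <linux/dqblk_xfs.h>

    /* illustrative only: one privilege check in the dispatcher instead
     * of a copy in every filesystem's ->set_xstate/->set_xquota */
    static int example_quotactl_perm(unsigned int cmd)
    {
        switch (cmd) {
        case Q_XQUOTAON:
        case Q_XQUOTAOFF:
        case Q_XSETQLIM:
            if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        }
        return 0;
    }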
+25 -8
include/linux/ext3_fs.h
··· 202 202 return flags & EXT3_OTHER_FLMASK; 203 203 } 204 204 205 - /* 206 - * Inode dynamic state flags 207 - */ 208 - #define EXT3_STATE_JDATA 0x00000001 /* journaled data exists */ 209 - #define EXT3_STATE_NEW 0x00000002 /* inode is newly created */ 210 - #define EXT3_STATE_XATTR 0x00000004 /* has in-inode xattrs */ 211 - #define EXT3_STATE_FLUSH_ON_CLOSE 0x00000008 212 - 213 205 /* Used to pass group descriptor data when online resize is done */ 214 206 struct ext3_new_group_input { 215 207 __u32 group; /* Group number for this data */ ··· 551 559 ino == EXT3_RESIZE_INO || 552 560 (ino >= EXT3_FIRST_INO(sb) && 553 561 ino <= le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count)); 562 + } 563 + 564 + /* 565 + * Inode dynamic state flags 566 + */ 567 + enum { 568 + EXT3_STATE_JDATA, /* journaled data exists */ 569 + EXT3_STATE_NEW, /* inode is newly created */ 570 + EXT3_STATE_XATTR, /* has in-inode xattrs */ 571 + EXT3_STATE_FLUSH_ON_CLOSE, /* flush dirty pages on close */ 572 + }; 573 + 574 + static inline int ext3_test_inode_state(struct inode *inode, int bit) 575 + { 576 + return test_bit(bit, &EXT3_I(inode)->i_state); 577 + } 578 + 579 + static inline void ext3_set_inode_state(struct inode *inode, int bit) 580 + { 581 + set_bit(bit, &EXT3_I(inode)->i_state); 582 + } 583 + 584 + static inline void ext3_clear_inode_state(struct inode *inode, int bit) 585 + { 586 + clear_bit(bit, &EXT3_I(inode)->i_state); 554 587 } 555 588 #else 556 589 /* Assume that user mode programs are passing in an ext3fs superblock, not
+1 -1
include/linux/ext3_fs_i.h
··· 87 87 * near to their parent directory's inode. 88 88 */ 89 89 __u32 i_block_group; 90 - __u32 i_state; /* Dynamic state flags for ext3 */ 90 + unsigned long i_state; /* Dynamic state flags for ext3 */ 91 91 92 92 /* block reservation info */ 93 93 struct ext3_block_alloc_info *i_block_alloc_info;
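These two ext3 hunks go together: the state flags turn from bit masks into bit numbers, and i_state widens from __u32 to unsigned long because that is the only type the test_bit()/set_bit() family operates on. Call sites convert mechanically from open-coded read-modify-write to atomic bitops; a sketch (the surrounding function is hypothetical):

    #include <linux/ext3_fs.h>

    static void example_mark_journaled(struct inode *inode)
    {
        /* was: EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
         * a non-atomic RMW that could lose a concurrent update
         * to another flag in the same word */
        ext3_set_inode_state(inode, EXT3_STATE_JDATA);

        if (ext3_test_inode_state(inode, EXT3_STATE_NEW))
            ext3_clear_inode_state(inode, EXT3_STATE_NEW);
    }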
-11
include/linux/jbd.h
··· 246 246 247 247 #define J_ASSERT(assert) BUG_ON(!(assert)) 248 248 249 - #if defined(CONFIG_BUFFER_DEBUG) 250 - void buffer_assertion_failure(struct buffer_head *bh); 251 - #define J_ASSERT_BH(bh, expr) \ 252 - do { \ 253 - if (!(expr)) \ 254 - buffer_assertion_failure(bh); \ 255 - J_ASSERT(expr); \ 256 - } while (0) 257 - #define J_ASSERT_JH(jh, expr) J_ASSERT_BH(jh2bh(jh), expr) 258 - #else 259 249 #define J_ASSERT_BH(bh, expr) J_ASSERT(expr) 260 250 #define J_ASSERT_JH(jh, expr) J_ASSERT(expr) 261 - #endif 262 251 263 252 #if defined(JBD_PARANOID_IOFAIL) 264 253 #define J_EXPECT(expr, why...) J_ASSERT(expr)
-11
include/linux/jbd2.h
··· 277 277 278 278 #define J_ASSERT(assert) BUG_ON(!(assert)) 279 279 280 - #if defined(CONFIG_BUFFER_DEBUG) 281 - void buffer_assertion_failure(struct buffer_head *bh); 282 - #define J_ASSERT_BH(bh, expr) \ 283 - do { \ 284 - if (!(expr)) \ 285 - buffer_assertion_failure(bh); \ 286 - J_ASSERT(expr); \ 287 - } while (0) 288 - #define J_ASSERT_JH(jh, expr) J_ASSERT_BH(jh2bh(jh), expr) 289 - #else 290 280 #define J_ASSERT_BH(bh, expr) J_ASSERT(expr) 291 281 #define J_ASSERT_JH(jh, expr) J_ASSERT(expr) 292 - #endif 293 282 294 283 #if defined(JBD2_PARANOID_IOFAIL) 295 284 #define J_EXPECT(expr, why...) J_ASSERT(expr)
+8 -25
include/linux/quota.h
··· 279 279 struct mem_dqblk dq_dqb; /* Diskquota usage */
280 280 };
281 281 
282 - #define QUOTA_OK 0
283 - #define NO_QUOTA 1
284 - 
285 282 /* Operations which must be implemented by each quota format */
286 283 struct quota_format_ops {
287 284 int (*check_quota_file)(struct super_block *sb, int type); /* Detect whether file is in our format */
··· 292 295 
293 296 /* Operations working with dquots */
294 297 struct dquot_operations {
295 - int (*initialize) (struct inode *, int);
296 - int (*drop) (struct inode *);
297 - int (*alloc_space) (struct inode *, qsize_t, int);
298 - int (*alloc_inode) (const struct inode *, qsize_t);
299 - int (*free_space) (struct inode *, qsize_t);
300 - int (*free_inode) (const struct inode *, qsize_t);
301 - int (*transfer) (struct inode *, struct iattr *);
302 298 int (*write_dquot) (struct dquot *); /* Ordinary dquot write */
303 299 struct dquot *(*alloc_dquot)(struct super_block *, int); /* Allocate memory for new dquot */
304 300 void (*destroy_dquot)(struct dquot *); /* Free memory for dquot */
··· 299 309 int (*release_dquot) (struct dquot *); /* Quota is going to be deleted from disk */
300 310 int (*mark_dirty) (struct dquot *); /* Dquot is marked dirty */
301 311 int (*write_info) (struct super_block *, int); /* Write of quota "superblock" */
302 - /* reserve quota for delayed block allocation */
303 - int (*reserve_space) (struct inode *, qsize_t, int);
304 - /* claim reserved quota for delayed alloc */
305 - int (*claim_space) (struct inode *, qsize_t);
306 - /* release rsved quota for delayed alloc */
307 - void (*release_rsv) (struct inode *, qsize_t);
308 312 /* get reserved quota for delayed alloc, value returned is managed by
309 313 * quota code only */
310 314 qsize_t *(*get_reserved_space) (struct inode *);
··· 308 324 struct quotactl_ops {
309 325 int (*quota_on)(struct super_block *, int, int, char *, int);
310 326 int (*quota_off)(struct super_block *, int, int);
311 - int (*quota_sync)(struct super_block *, int);
327 + int (*quota_sync)(struct super_block *, int, int);
312 328 int (*get_info)(struct super_block *, int, struct if_dqinfo *);
313 329 int (*set_info)(struct super_block *, int, struct if_dqinfo *);
314 330 int (*get_dqblk)(struct super_block *, int, qid_t, struct if_dqblk *);
··· 341 357 #define DQUOT_STATE_FLAGS (DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED | \
342 358 DQUOT_SUSPENDED)
343 359 /* Other quota flags */
344 - #define DQUOT_QUOTA_SYS_FILE (1 << 6) /* Quota file is a special
360 + #define DQUOT_STATE_LAST (_DQUOT_STATE_FLAGS * MAXQUOTAS)
361 + #define DQUOT_QUOTA_SYS_FILE (1 << DQUOT_STATE_LAST)
362 + /* Quota file is a special
345 363 * system file and user cannot
346 364 * touch it. Filesystem is
347 365 * responsible for setting
348 366 * S_NOQUOTA, S_NOATIME flags
349 367 */
350 - #define DQUOT_NEGATIVE_USAGE (1 << 7) /* Allow negative quota usage */
368 + #define DQUOT_NEGATIVE_USAGE (1 << (DQUOT_STATE_LAST + 1))
369 + /* Allow negative quota usage */
351 370 
352 371 static inline unsigned int dquot_state_flag(unsigned int flags, int type)
353 372 {
354 - if (type == USRQUOTA)
355 - return flags;
356 - return flags << _DQUOT_STATE_FLAGS;
373 + return flags << _DQUOT_STATE_FLAGS * type;
357 374 }
358 375 
359 376 static inline unsigned int dquot_generic_flag(unsigned int flags, int type)
360 377 {
361 - if (type == USRQUOTA)
362 - return flags;
363 - return flags >> _DQUOT_STATE_FLAGS;
378 + return (flags >> _DQUOT_STATE_FLAGS * type) & DQUOT_STATE_FLAGS;
364 379 }
365 380 
366 381 #ifdef CONFIG_QUOTA_NETLINK_INTERFACE
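The per-type state flags are now packed arithmetically: each quota type owns a contiguous run of _DQUOT_STATE_FLAGS bits, and the two non-state flags sit above DQUOT_STATE_LAST so they cannot collide however large MAXQUOTAS grows. Worked through with the in-tree values (_DQUOT_STATE_FLAGS == 3, MAXQUOTAS == 2):

    /*
     * dquot_state_flag(DQUOT_USAGE_ENABLED, USRQUOTA) == 1 << 0
     * dquot_state_flag(DQUOT_USAGE_ENABLED, GRPQUOTA) == 1 << 3
     * dquot_state_flag(DQUOT_SUSPENDED,     GRPQUOTA) == 4 << 3 == 1 << 5
     *
     * DQUOT_STATE_LAST     == 3 * 2 == 6
     * DQUOT_QUOTA_SYS_FILE == 1 << 6   (same value as before, now derived)
     * DQUOT_NEGATIVE_USAGE == 1 << 7
     *
     * dquot_generic_flag() inverts the shift and masks with
     * DQUOT_STATE_FLAGS, so the other type's bits never leak through.
     */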
+106 -226
include/linux/quotaops.h
··· 19 19 /*
20 20 * declaration of quota_function calls in kernel.
21 21 */
22 - void sync_quota_sb(struct super_block *sb, int type);
23 - static inline void writeout_quota_sb(struct super_block *sb, int type)
24 - {
25 - if (sb->s_qcop->quota_sync)
26 - sb->s_qcop->quota_sync(sb, type);
27 - }
22 + void inode_add_rsv_space(struct inode *inode, qsize_t number);
23 + void inode_claim_rsv_space(struct inode *inode, qsize_t number);
24 + void inode_sub_rsv_space(struct inode *inode, qsize_t number);
28 25 
29 - int dquot_initialize(struct inode *inode, int type);
30 - int dquot_drop(struct inode *inode);
26 + void dquot_initialize(struct inode *inode);
27 + void dquot_drop(struct inode *inode);
31 28 struct dquot *dqget(struct super_block *sb, unsigned int id, int type);
32 29 void dqput(struct dquot *dquot);
33 30 int dquot_scan_active(struct super_block *sb,
··· 33 36 struct dquot *dquot_alloc(struct super_block *sb, int type);
34 37 void dquot_destroy(struct dquot *dquot);
35 38 
36 - int dquot_alloc_space(struct inode *inode, qsize_t number, int prealloc);
37 - int dquot_alloc_inode(const struct inode *inode, qsize_t number);
39 + int __dquot_alloc_space(struct inode *inode, qsize_t number,
40 + int warn, int reserve);
41 + void __dquot_free_space(struct inode *inode, qsize_t number, int reserve);
38 42 
39 - int dquot_reserve_space(struct inode *inode, qsize_t number, int prealloc);
40 - int dquot_claim_space(struct inode *inode, qsize_t number);
41 - void dquot_release_reserved_space(struct inode *inode, qsize_t number);
42 - qsize_t dquot_get_reserved_space(struct inode *inode);
43 + int dquot_alloc_inode(const struct inode *inode);
43 44 
44 - int dquot_free_space(struct inode *inode, qsize_t number);
45 - int dquot_free_inode(const struct inode *inode, qsize_t number);
45 + int dquot_claim_space_nodirty(struct inode *inode, qsize_t number);
46 + void dquot_free_inode(const struct inode *inode);
46 47 
47 - int dquot_transfer(struct inode *inode, struct iattr *iattr);
48 48 int dquot_commit(struct dquot *dquot);
49 49 int dquot_acquire(struct dquot *dquot);
50 50 int dquot_release(struct dquot *dquot);
51 51 int dquot_commit_info(struct super_block *sb, int type);
52 52 int dquot_mark_dquot_dirty(struct dquot *dquot);
53 + 
54 + int dquot_file_open(struct inode *inode, struct file *file);
53 55 
54 56 int vfs_quota_on(struct super_block *sb, int type, int format_id,
55 57 char *path, int remount);
··· 60 64 int format_id, int type);
61 65 int vfs_quota_off(struct super_block *sb, int type, int remount);
62 66 int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags);
63 - int vfs_quota_sync(struct super_block *sb, int type);
67 + int vfs_quota_sync(struct super_block *sb, int type, int wait);
64 68 int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
65 69 int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
66 70 int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di);
67 71 int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di);
68 72 
69 - void vfs_dq_drop(struct inode *inode);
70 - int vfs_dq_transfer(struct inode *inode, struct iattr *iattr);
73 + int dquot_transfer(struct inode *inode, struct iattr *iattr);
71 74 int vfs_dq_quota_on_remount(struct super_block *sb);
72 75 
73 76 static inline struct mem_dqinfo *sb_dqinfo(struct super_block *sb, int type)
··· 78 83 * Functions for checking status of quota
79 84 */
80 85 
81 - static inline int sb_has_quota_usage_enabled(struct super_block *sb, int type)
86 + static inline bool sb_has_quota_usage_enabled(struct super_block *sb, int type)
82 87 {
83 88 return sb_dqopt(sb)->flags &
84 89 dquot_state_flag(DQUOT_USAGE_ENABLED, type);
85 90 }
86 91 
87 - static inline int sb_has_quota_limits_enabled(struct super_block *sb, int type)
92 + static inline bool sb_has_quota_limits_enabled(struct super_block *sb, int type)
88 93 {
89 94 return sb_dqopt(sb)->flags &
90 95 dquot_state_flag(DQUOT_LIMITS_ENABLED, type);
91 96 }
92 97 
93 - static inline int sb_has_quota_suspended(struct super_block *sb, int type)
98 + static inline bool sb_has_quota_suspended(struct super_block *sb, int type)
94 99 {
95 100 return sb_dqopt(sb)->flags &
96 101 dquot_state_flag(DQUOT_SUSPENDED, type);
97 102 }
98 103 
99 - static inline int sb_any_quota_suspended(struct super_block *sb)
104 + static inline unsigned sb_any_quota_suspended(struct super_block *sb)
100 105 {
101 - return sb_has_quota_suspended(sb, USRQUOTA) ||
102 - sb_has_quota_suspended(sb, GRPQUOTA);
106 + unsigned type, tmsk = 0;
107 + for (type = 0; type < MAXQUOTAS; type++)
108 + tmsk |= sb_has_quota_suspended(sb, type) << type;
109 + return tmsk;
103 110 }
104 111 
105 112 /* Does kernel know about any quota information for given sb + type? */
106 - static inline int sb_has_quota_loaded(struct super_block *sb, int type)
113 + static inline bool sb_has_quota_loaded(struct super_block *sb, int type)
107 114 {
108 115 /* Currently if anything is on, then quota usage is on as well */
109 116 return sb_has_quota_usage_enabled(sb, type);
110 117 }
111 118 
112 - static inline int sb_any_quota_loaded(struct super_block *sb)
119 + static inline unsigned sb_any_quota_loaded(struct super_block *sb)
113 120 {
114 - return sb_has_quota_loaded(sb, USRQUOTA) ||
115 - sb_has_quota_loaded(sb, GRPQUOTA);
121 + unsigned type, tmsk = 0;
122 + for (type = 0; type < MAXQUOTAS; type++)
123 + tmsk |= sb_has_quota_loaded(sb, type) << type;
124 + return tmsk;
116 125 }
117 126 
118 - static inline int sb_has_quota_active(struct super_block *sb, int type)
127 + static inline bool sb_has_quota_active(struct super_block *sb, int type)
119 128 {
120 129 return sb_has_quota_loaded(sb, type) &&
121 130 !sb_has_quota_suspended(sb, type);
122 131 }
123 132 
124 - static inline int sb_any_quota_active(struct super_block *sb)
133 + static inline unsigned sb_any_quota_active(struct super_block *sb)
125 134 {
126 - return sb_has_quota_active(sb, USRQUOTA) ||
127 - sb_has_quota_active(sb, GRPQUOTA);
135 + return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
128 136 }
129 137 
130 138 /*
··· 138 140 
139 141 #define sb_dquot_ops (&dquot_operations)
140 142 #define sb_quotactl_ops (&vfs_quotactl_ops)
141 - 
142 - /* It is better to call this function outside of any transaction as it might
143 - * need a lot of space in journal for dquot structure allocation. */
144 - static inline void vfs_dq_init(struct inode *inode)
145 - {
146 - BUG_ON(!inode->i_sb);
147 - if (sb_any_quota_active(inode->i_sb) && !IS_NOQUOTA(inode))
148 - inode->i_sb->dq_op->initialize(inode, -1);
149 - }
150 - 
151 - /* The following allocation/freeing/transfer functions *must* be called inside
152 - * a transaction (deadlocks possible otherwise) */
153 - static inline int vfs_dq_prealloc_space_nodirty(struct inode *inode, qsize_t nr)
154 - {
155 - if (sb_any_quota_active(inode->i_sb)) {
156 - /* Used space is updated in alloc_space() */
157 - if (inode->i_sb->dq_op->alloc_space(inode, nr, 1) == NO_QUOTA)
158 - return 1;
159 - }
160 - else
161 - inode_add_bytes(inode, nr);
162 - return 0;
163 - }
164 - 
165 - static inline int vfs_dq_prealloc_space(struct inode *inode, qsize_t nr)
166 - {
167 - int ret;
168 - if (!(ret = vfs_dq_prealloc_space_nodirty(inode, nr)))
169 - mark_inode_dirty(inode);
170 - return ret;
171 - }
172 - 
173 - static inline int vfs_dq_alloc_space_nodirty(struct inode *inode, qsize_t nr)
174 - {
175 - if (sb_any_quota_active(inode->i_sb)) {
176 - /* Used space is updated in alloc_space() */
177 - if (inode->i_sb->dq_op->alloc_space(inode, nr, 0) == NO_QUOTA)
178 - return 1;
179 - }
180 - else
181 - inode_add_bytes(inode, nr);
182 - return 0;
183 - }
184 - 
185 - static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr)
186 - {
187 - int ret;
188 - if (!(ret = vfs_dq_alloc_space_nodirty(inode, nr)))
189 - mark_inode_dirty(inode);
190 - return ret;
191 - }
192 - 
193 - static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
194 - {
195 - if (sb_any_quota_active(inode->i_sb)) {
196 - /* Used space is updated in alloc_space() */
197 - if (inode->i_sb->dq_op->reserve_space(inode, nr, 0) == NO_QUOTA)
198 - return 1;
199 - }
200 - return 0;
201 - }
202 - 
203 - static inline int vfs_dq_alloc_inode(struct inode *inode)
204 - {
205 - if (sb_any_quota_active(inode->i_sb)) {
206 - vfs_dq_init(inode);
207 - if (inode->i_sb->dq_op->alloc_inode(inode, 1) == NO_QUOTA)
208 - return 1;
209 - }
210 - return 0;
211 - }
212 - 
213 - /*
214 - * Convert in-memory reserved quotas to real consumed quotas
215 - */
216 - static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
217 - {
218 - if (sb_any_quota_active(inode->i_sb)) {
219 - if (inode->i_sb->dq_op->claim_space(inode, nr) == NO_QUOTA)
220 - return 1;
221 - } else
222 - inode_add_bytes(inode, nr);
223 - 
224 - mark_inode_dirty(inode);
225 - return 0;
226 - }
227 - 
228 - /*
229 - * Release reserved (in-memory) quotas
230 - */
231 - static inline
232 - void vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
233 - {
234 - if (sb_any_quota_active(inode->i_sb))
235 - inode->i_sb->dq_op->release_rsv(inode, nr);
236 - }
237 - 
238 - static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
239 - {
240 - if (sb_any_quota_active(inode->i_sb))
241 - inode->i_sb->dq_op->free_space(inode, nr);
242 - else
243 - inode_sub_bytes(inode, nr);
244 - }
245 - 
246 - static inline void vfs_dq_free_space(struct inode *inode, qsize_t nr)
247 - {
248 - vfs_dq_free_space_nodirty(inode, nr);
249 - mark_inode_dirty(inode);
250 - }
251 - 
252 - static inline void vfs_dq_free_inode(struct inode *inode)
253 - {
254 - if (sb_any_quota_active(inode->i_sb))
255 - inode->i_sb->dq_op->free_inode(inode, 1);
256 - }
257 143 
258 144 /* Cannot be called inside a transaction */
259 145 static inline int vfs_dq_off(struct super_block *sb, int remount)
··· 198 316 #define sb_dquot_ops (NULL)
199 317 #define sb_quotactl_ops (NULL)
200 318 
201 - static inline void vfs_dq_init(struct inode *inode)
319 + static inline void dquot_initialize(struct inode *inode)
202 320 {
203 321 }
204 322 
205 - static inline void vfs_dq_drop(struct inode *inode)
323 + static inline void dquot_drop(struct inode *inode)
206 324 {
207 325 }
208 326 
209 - static inline int vfs_dq_alloc_inode(struct inode *inode)
327 + static inline int dquot_alloc_inode(const struct inode *inode)
210 328 {
211 329 return 0;
212 330 }
213 331 
214 - static inline void vfs_dq_free_inode(struct inode *inode)
215 - {
216 - }
217 - 
218 - static inline void sync_quota_sb(struct super_block *sb, int type)
219 - {
220 - }
221 - 
222 - static inline void writeout_quota_sb(struct super_block *sb, int type)
332 + static inline void dquot_free_inode(const struct inode *inode)
223 333 {
224 334 }
225 335 
··· 225 351 return 0;
226 352 }
227 353 
228 - static inline int vfs_dq_transfer(struct inode *inode, struct iattr *iattr)
354 + static inline int dquot_transfer(struct inode *inode, struct iattr *iattr)
229 355 {
230 356 return 0;
231 357 }
232 358 
233 - static inline int vfs_dq_prealloc_space_nodirty(struct inode *inode, qsize_t nr)
359 + static inline int __dquot_alloc_space(struct inode *inode, qsize_t number,
360 + int warn, int reserve)
234 361 {
235 - inode_add_bytes(inode, nr);
362 + if (!reserve)
363 + inode_add_bytes(inode, number);
236 364 return 0;
237 365 }
238 366 
239 - static inline int vfs_dq_prealloc_space(struct inode *inode, qsize_t nr)
367 + static inline void __dquot_free_space(struct inode *inode, qsize_t number,
368 + int reserve)
240 369 {
241 - vfs_dq_prealloc_space_nodirty(inode, nr);
242 - mark_inode_dirty(inode);
370 + if (!reserve)
371 + inode_sub_bytes(inode, number);
372 + }
373 + 
374 + static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
375 + {
376 + inode_add_bytes(inode, number);
243 377 return 0;
244 378 }
245 379 
246 - static inline int vfs_dq_alloc_space_nodirty(struct inode *inode, qsize_t nr)
247 - {
248 - inode_add_bytes(inode, nr);
249 - return 0;
250 - }
251 - 
252 - static inline int vfs_dq_alloc_space(struct inode *inode, qsize_t nr)
253 - {
254 - vfs_dq_alloc_space_nodirty(inode, nr);
255 - mark_inode_dirty(inode);
256 - return 0;
257 - }
258 - 
259 - static inline int vfs_dq_reserve_space(struct inode *inode, qsize_t nr)
260 - {
261 - return 0;
262 - }
263 - 
264 - static inline int vfs_dq_claim_space(struct inode *inode, qsize_t nr)
265 - {
266 - return vfs_dq_alloc_space(inode, nr);
267 - }
268 - 
269 - static inline
270 - int vfs_dq_release_reservation_space(struct inode *inode, qsize_t nr)
271 - {
272 - return 0;
273 - }
274 - 
275 - static inline void vfs_dq_free_space_nodirty(struct inode *inode, qsize_t nr)
276 - {
277 - inode_sub_bytes(inode, nr);
278 - }
279 - 
280 - static inline void vfs_dq_free_space(struct inode *inode, qsize_t nr)
281 - {
282 - vfs_dq_free_space_nodirty(inode, nr);
283 - mark_inode_dirty(inode);
284 - }
380 + #define dquot_file_open generic_file_open
285 381 
286 382 #endif /* CONFIG_QUOTA */
287 383 
288 - static inline int vfs_dq_prealloc_block_nodirty(struct inode *inode, qsize_t nr)
384 + static inline int dquot_alloc_space_nodirty(struct inode *inode, qsize_t nr)
289 385 {
290 - return vfs_dq_prealloc_space_nodirty(inode, nr << inode->i_blkbits);
386 + return __dquot_alloc_space(inode, nr, 1, 0);
291 387 }
292 388 
293 - static inline int vfs_dq_prealloc_block(struct inode *inode, qsize_t nr)
389 + static inline int dquot_alloc_space(struct inode *inode, qsize_t nr)
294 390 {
295 - return vfs_dq_prealloc_space(inode, nr << inode->i_blkbits);
391 + int ret;
392 + 
393 + ret = dquot_alloc_space_nodirty(inode, nr);
394 + if (!ret)
395 + mark_inode_dirty(inode);
396 + return ret;
296 397 }
297 398 
298 - static inline int vfs_dq_alloc_block_nodirty(struct inode *inode, qsize_t nr)
399 + static inline int dquot_alloc_block_nodirty(struct inode *inode, qsize_t nr)
299 400 {
300 - return vfs_dq_alloc_space_nodirty(inode, nr << inode->i_blkbits);
401 + return dquot_alloc_space_nodirty(inode, nr << inode->i_blkbits);
301 402 }
302 403 
303 - static inline int vfs_dq_alloc_block(struct inode *inode, qsize_t nr)
404 + static inline int dquot_alloc_block(struct inode *inode, qsize_t nr)
304 405 {
305 - return vfs_dq_alloc_space(inode, nr << inode->i_blkbits);
406 + return dquot_alloc_space(inode, nr << inode->i_blkbits);
306 407 }
307 408 
308 - static inline int vfs_dq_reserve_block(struct inode *inode, qsize_t nr)
409 + static inline int dquot_prealloc_block_nodirty(struct inode *inode, qsize_t nr)
309 410 {
310 - return vfs_dq_reserve_space(inode, nr << inode->i_blkbits);
411 + return __dquot_alloc_space(inode, nr << inode->i_blkbits, 0, 0);
311 412 }
312 413 
313 - static inline int vfs_dq_claim_block(struct inode *inode, qsize_t nr)
414 + static inline int dquot_prealloc_block(struct inode *inode, qsize_t nr)
314 415 {
315 - return vfs_dq_claim_space(inode, nr << inode->i_blkbits);
416 + int ret;
417 + 
418 + ret = dquot_prealloc_block_nodirty(inode, nr);
419 + if (!ret)
420 + mark_inode_dirty(inode);
421 + return ret;
316 422 }
317 423 
318 - static inline
319 - void vfs_dq_release_reservation_block(struct inode *inode, qsize_t nr)
424 + static inline int dquot_reserve_block(struct inode *inode, qsize_t nr)
320 425 {
321 - vfs_dq_release_reservation_space(inode, nr << inode->i_blkbits);
426 + return __dquot_alloc_space(inode, nr << inode->i_blkbits, 1, 1);
322 427 }
323 428 
324 - static inline void vfs_dq_free_block_nodirty(struct inode *inode, qsize_t nr)
429 + static inline int dquot_claim_block(struct inode *inode, qsize_t nr)
325 430 {
326 - vfs_dq_free_space_nodirty(inode, nr << inode->i_blkbits);
431 + int ret;
432 + 
433 + ret = dquot_claim_space_nodirty(inode, nr << inode->i_blkbits);
434 + if (!ret)
435 + mark_inode_dirty(inode);
436 + return ret;
327 437 }
328 438 
329 - static inline void vfs_dq_free_block(struct inode *inode, qsize_t nr)
439 + static inline void dquot_free_space_nodirty(struct inode *inode, qsize_t nr)
330 440 {
331 - vfs_dq_free_space(inode, nr << inode->i_blkbits);
441 + __dquot_free_space(inode, nr, 0);
442 + }
443 + 
444 + static inline void dquot_free_space(struct inode *inode, qsize_t nr)
445 + {
446 + dquot_free_space_nodirty(inode, nr);
447 + mark_inode_dirty(inode);
448 + }
449 + 
450 + static inline void dquot_free_block_nodirty(struct inode *inode, qsize_t nr)
451 + {
452 + dquot_free_space_nodirty(inode, nr << inode->i_blkbits);
453 + }
454 + 
455 + static inline void dquot_free_block(struct inode *inode, qsize_t nr)
456 + {
457 + dquot_free_space(inode, nr << inode->i_blkbits);
458 + }
459 + 
460 + static inline void dquot_release_reservation_block(struct inode *inode,
461 + qsize_t nr)
462 + {
463 + __dquot_free_space(inode, nr << inode->i_blkbits, 1);
332 464 }
333 465 
334 466 #endif /* _LINUX_QUOTAOPS_ */
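What remains inline is deliberately thin: every space operation funnels into __dquot_alloc_space(inode, bytes, warn, reserve) or __dquot_free_space(inode, bytes, reserve), the _block variants shift by i_blkbits, and the variants without _nodirty mark the inode dirty on success. Delayed allocation, which previously needed the reserve_space/claim_space/release_rsv dq_op hooks, is now just the reserve/claim/release triple. A sketch of a write path using it (the myfs_* wrappers are hypothetical):

    #include <linux/quotaops.h>

    /* reserve at write time; may fail with -EDQUOT */
    static int myfs_reserve_blocks(struct inode *inode, qsize_t nr)
    {
        return dquot_reserve_block(inode, nr);
    }

    /* when blocks are really allocated: convert part of the
     * reservation into usage and give back the rest */
    static int myfs_commit_blocks(struct inode *inode, qsize_t used,
                                  qsize_t reserved)
    {
        int err = dquot_claim_block(inode, used);

        if (err)
            return err;
        dquot_release_reservation_block(inode, reserved - used);
        return 0;
    }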