Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'gfs2-merge-window' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull GFS2 updates from Bob Peterson:
"Here are the patches we've accumulated for GFS2 for the current
upstream merge window. We have a good mixture this time. Here are
some of the features:

- Fix a problem with RO mounts writing to the journal.

- Further improvements to quotas on GFS2.

- Added support for rename2 and RENAME_EXCHANGE on GFS2.

- Increase performance by making glock lru_list less of a bottleneck.

- Increase performance by avoiding unnecessary buffer_head releases.

- Increase performance by using average glock round trip time from all CPUs.

- Fixes for some compiler warnings and minor white space issues.

- Other misc bug fixes"

* tag 'gfs2-merge-window' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2:
GFS2: Don't brelse rgrp buffer_heads every allocation
GFS2: Don't add all glocks to the lru
gfs2: Don't support fallocate on jdata files
gfs2: s64 cast for negative quota value
gfs2: limit quota log messages
gfs2: fix quota updates on block boundaries
gfs2: fix shadow warning in gfs2_rbm_find()
gfs2: kerneldoc warning fixes
gfs2: convert simple_str to kstr
GFS2: make sure S_NOSEC flag isn't overwritten
GFS2: add support for rename2 and RENAME_EXCHANGE
gfs2: handle NULL rgd in set_rgrp_preferences
GFS2: inode.c: indent with TABs, not spaces
GFS2: mark the journal idle to fix ro mounts
GFS2: Average in only non-zero round-trip times for congestion stats
GFS2: Use average srttb value in congestion calculations

+435 -145
+8 -4
fs/gfs2/aops.c
··· 171 171 /** 172 172 * gfs2_jdata_writepage - Write complete page 173 173 * @page: Page to write 174 + * @wbc: The writeback control 174 175 * 175 176 * Returns: errno 176 177 * ··· 222 221 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages 223 222 * @mapping: The mapping 224 223 * @wbc: The writeback control 225 - * @writepage: The writepage function to call for each page 226 224 * @pvec: The vector of pages 227 225 * @nr_pages: The number of pages to write 226 + * @end: End position 227 + * @done_index: Page index 228 228 * 229 229 * Returns: non-zero if loop should terminate, zero otherwise 230 230 */ ··· 335 333 * gfs2_write_cache_jdata - Like write_cache_pages but different 336 334 * @mapping: The mapping to write 337 335 * @wbc: The writeback control 338 - * @writepage: The writepage function to call 339 - * @data: The data to pass to writepage 340 336 * 341 337 * The reason that we use our own function here is that we need to 342 338 * start transactions before we grab page locks. This allows us ··· 588 588 589 589 /** 590 590 * gfs2_readpages - Read a bunch of pages at once 591 + * @file: The file to read from 592 + * @mapping: Address space info 593 + * @pages: List of pages to read 594 + * @nr_pages: Number of pages to read 591 595 * 592 596 * Some notes: 593 597 * 1. This is only for readahead, so we can simply ignore any things ··· 857 853 * @mapping: The address space to write to 858 854 * @pos: The file position 859 855 * @len: The length of the data 860 - * @copied: 856 + * @copied: How much was actually copied by the VFS 861 857 * @page: The page that has been written 862 858 * @fsdata: The fsdata (unused in GFS2) 863 859 *
+2 -2
fs/gfs2/file.c
··· 180 180 181 181 flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC); 182 182 if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode)) 183 - inode->i_flags |= S_NOSEC; 183 + flags |= S_NOSEC; 184 184 if (ip->i_diskflags & GFS2_DIF_IMMUTABLE) 185 185 flags |= S_IMMUTABLE; 186 186 if (ip->i_diskflags & GFS2_DIF_APPENDONLY) ··· 917 917 struct gfs2_holder gh; 918 918 int ret; 919 919 920 - if (mode & ~FALLOC_FL_KEEP_SIZE) 920 + if ((mode & ~FALLOC_FL_KEEP_SIZE) || gfs2_is_jdata(ip)) 921 921 return -EOPNOTSUPP; 922 922 923 923 mutex_lock(&inode->i_mutex);
+2 -1
fs/gfs2/glock.c
··· 1076 1076 !test_bit(GLF_DEMOTE, &gl->gl_flags)) 1077 1077 fast_path = 1; 1078 1078 } 1079 - if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl)) 1079 + if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl) && 1080 + (glops->go_flags & GLOF_LRU)) 1080 1081 gfs2_glock_add_to_lru(gl); 1081 1082 1082 1083 trace_gfs2_glock_queue(gh, 0);
+15 -5
fs/gfs2/glops.c
··· 144 144 struct gfs2_rgrpd *rgd; 145 145 int error; 146 146 147 + spin_lock(&gl->gl_spin); 148 + rgd = gl->gl_object; 149 + if (rgd) 150 + gfs2_rgrp_brelse(rgd); 151 + spin_unlock(&gl->gl_spin); 152 + 147 153 if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) 148 154 return; 149 155 GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE); ··· 181 175 { 182 176 struct gfs2_sbd *sdp = gl->gl_sbd; 183 177 struct address_space *mapping = &sdp->sd_aspace; 178 + struct gfs2_rgrpd *rgd = gl->gl_object; 179 + 180 + if (rgd) 181 + gfs2_rgrp_brelse(rgd); 184 182 185 183 WARN_ON_ONCE(!(flags & DIO_METADATA)); 186 184 gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count)); 187 185 truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end); 188 186 189 - if (gl->gl_object) { 190 - struct gfs2_rgrpd *rgd = (struct gfs2_rgrpd *)gl->gl_object; 187 + if (rgd) 191 188 rgd->rd_flags &= ~GFS2_RDF_UPTODATE; 192 - } 193 189 } 194 190 195 191 /** ··· 569 561 .go_lock = inode_go_lock, 570 562 .go_dump = inode_go_dump, 571 563 .go_type = LM_TYPE_INODE, 572 - .go_flags = GLOF_ASPACE, 564 + .go_flags = GLOF_ASPACE | GLOF_LRU, 573 565 }; 574 566 575 567 const struct gfs2_glock_operations gfs2_rgrp_glops = { ··· 592 584 const struct gfs2_glock_operations gfs2_iopen_glops = { 593 585 .go_type = LM_TYPE_IOPEN, 594 586 .go_callback = iopen_go_callback, 587 + .go_flags = GLOF_LRU, 595 588 }; 596 589 597 590 const struct gfs2_glock_operations gfs2_flock_glops = { 598 591 .go_type = LM_TYPE_FLOCK, 592 + .go_flags = GLOF_LRU, 599 593 }; 600 594 601 595 const struct gfs2_glock_operations gfs2_nondisk_glops = { ··· 606 596 607 597 const struct gfs2_glock_operations gfs2_quota_glops = { 608 598 .go_type = LM_TYPE_QUOTA, 609 - .go_flags = GLOF_LVB, 599 + .go_flags = GLOF_LVB | GLOF_LRU, 610 600 }; 611 601 612 602 const struct gfs2_glock_operations gfs2_journal_glops = {
+2
fs/gfs2/incore.h
··· 225 225 const unsigned long go_flags; 226 226 #define GLOF_ASPACE 1 227 227 #define GLOF_LVB 2 228 + #define GLOF_LRU 4 228 229 }; 229 230 230 231 enum { ··· 433 432 QDF_CHANGE = 1, 434 433 QDF_LOCKED = 2, 435 434 QDF_REFRESH = 3, 435 + QDF_QMSG_QUIET = 4, 436 436 }; 437 437 438 438 struct gfs2_quota_data {
+192 -19
fs/gfs2/inode.c
··· 1227 1227 */ 1228 1228 1229 1229 static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry, 1230 - struct file *file, unsigned flags, 1231 - umode_t mode, int *opened) 1230 + struct file *file, unsigned flags, 1231 + umode_t mode, int *opened) 1232 1232 { 1233 1233 struct dentry *d; 1234 1234 bool excl = !!(flags & O_EXCL); ··· 1307 1307 } 1308 1308 1309 1309 /** 1310 + * update_moved_ino - Update an inode that's being moved 1311 + * @ip: The inode being moved 1312 + * @ndip: The parent directory of the new filename 1313 + * @dir_rename: True of ip is a directory 1314 + * 1315 + * Returns: errno 1316 + */ 1317 + 1318 + static int update_moved_ino(struct gfs2_inode *ip, struct gfs2_inode *ndip, 1319 + int dir_rename) 1320 + { 1321 + int error; 1322 + struct buffer_head *dibh; 1323 + 1324 + if (dir_rename) 1325 + return gfs2_dir_mvino(ip, &gfs2_qdotdot, ndip, DT_DIR); 1326 + 1327 + error = gfs2_meta_inode_buffer(ip, &dibh); 1328 + if (error) 1329 + return error; 1330 + ip->i_inode.i_ctime = CURRENT_TIME; 1331 + gfs2_trans_add_meta(ip->i_gl, dibh); 1332 + gfs2_dinode_out(ip, dibh->b_data); 1333 + brelse(dibh); 1334 + return 0; 1335 + } 1336 + 1337 + 1338 + /** 1310 1339 * gfs2_rename - Rename a file 1311 1340 * @odir: Parent directory of old file name 1312 1341 * @odentry: The old dentry of the file ··· 1383 1354 1384 1355 if (S_ISDIR(ip->i_inode.i_mode)) { 1385 1356 dir_rename = 1; 1386 - /* don't move a dirctory into it's subdir */ 1357 + /* don't move a directory into its subdir */ 1387 1358 error = gfs2_ok_to_move(ip, ndip); 1388 1359 if (error) 1389 1360 goto out_gunlock_r; ··· 1523 1494 if (nip) 1524 1495 error = gfs2_unlink_inode(ndip, ndentry); 1525 1496 1526 - if (dir_rename) { 1527 - error = gfs2_dir_mvino(ip, &gfs2_qdotdot, ndip, DT_DIR); 1528 - if (error) 1529 - goto out_end_trans; 1530 - } else { 1531 - struct buffer_head *dibh; 1532 - error = gfs2_meta_inode_buffer(ip, &dibh); 1533 - if (error) 1534 - goto out_end_trans; 1535 - 
ip->i_inode.i_ctime = CURRENT_TIME; 1536 - gfs2_trans_add_meta(ip->i_gl, dibh); 1537 - gfs2_dinode_out(ip, dibh->b_data); 1538 - brelse(dibh); 1539 - } 1497 + error = update_moved_ino(ip, ndip, dir_rename); 1498 + if (error) 1499 + goto out_end_trans; 1540 1500 1541 1501 error = gfs2_dir_del(odip, odentry); 1542 1502 if (error) ··· 1554 1536 gfs2_glock_dq_uninit(&r_gh); 1555 1537 out: 1556 1538 return error; 1539 + } 1540 + 1541 + /** 1542 + * gfs2_exchange - exchange two files 1543 + * @odir: Parent directory of old file name 1544 + * @odentry: The old dentry of the file 1545 + * @ndir: Parent directory of new file name 1546 + * @ndentry: The new dentry of the file 1547 + * @flags: The rename flags 1548 + * 1549 + * Returns: errno 1550 + */ 1551 + 1552 + static int gfs2_exchange(struct inode *odir, struct dentry *odentry, 1553 + struct inode *ndir, struct dentry *ndentry, 1554 + unsigned int flags) 1555 + { 1556 + struct gfs2_inode *odip = GFS2_I(odir); 1557 + struct gfs2_inode *ndip = GFS2_I(ndir); 1558 + struct gfs2_inode *oip = GFS2_I(odentry->d_inode); 1559 + struct gfs2_inode *nip = GFS2_I(ndentry->d_inode); 1560 + struct gfs2_sbd *sdp = GFS2_SB(odir); 1561 + struct gfs2_holder ghs[5], r_gh = { .gh_gl = NULL, }; 1562 + unsigned int num_gh; 1563 + unsigned int x; 1564 + umode_t old_mode = oip->i_inode.i_mode; 1565 + umode_t new_mode = nip->i_inode.i_mode; 1566 + int error; 1567 + 1568 + error = gfs2_rindex_update(sdp); 1569 + if (error) 1570 + return error; 1571 + 1572 + if (odip != ndip) { 1573 + error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE, 1574 + 0, &r_gh); 1575 + if (error) 1576 + goto out; 1577 + 1578 + if (S_ISDIR(old_mode)) { 1579 + /* don't move a directory into its subdir */ 1580 + error = gfs2_ok_to_move(oip, ndip); 1581 + if (error) 1582 + goto out_gunlock_r; 1583 + } 1584 + 1585 + if (S_ISDIR(new_mode)) { 1586 + /* don't move a directory into its subdir */ 1587 + error = gfs2_ok_to_move(nip, odip); 1588 + if (error) 1589 + goto 
out_gunlock_r; 1590 + } 1591 + } 1592 + 1593 + num_gh = 1; 1594 + gfs2_holder_init(odip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); 1595 + if (odip != ndip) { 1596 + gfs2_holder_init(ndip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh); 1597 + num_gh++; 1598 + } 1599 + gfs2_holder_init(oip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh); 1600 + num_gh++; 1601 + 1602 + gfs2_holder_init(nip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + num_gh); 1603 + num_gh++; 1604 + 1605 + for (x = 0; x < num_gh; x++) { 1606 + error = gfs2_glock_nq(ghs + x); 1607 + if (error) 1608 + goto out_gunlock; 1609 + } 1610 + 1611 + error = -ENOENT; 1612 + if (oip->i_inode.i_nlink == 0 || nip->i_inode.i_nlink == 0) 1613 + goto out_gunlock; 1614 + 1615 + error = gfs2_unlink_ok(odip, &odentry->d_name, oip); 1616 + if (error) 1617 + goto out_gunlock; 1618 + error = gfs2_unlink_ok(ndip, &ndentry->d_name, nip); 1619 + if (error) 1620 + goto out_gunlock; 1621 + 1622 + if (S_ISDIR(old_mode)) { 1623 + error = gfs2_permission(odentry->d_inode, MAY_WRITE); 1624 + if (error) 1625 + goto out_gunlock; 1626 + } 1627 + if (S_ISDIR(new_mode)) { 1628 + error = gfs2_permission(ndentry->d_inode, MAY_WRITE); 1629 + if (error) 1630 + goto out_gunlock; 1631 + } 1632 + error = gfs2_trans_begin(sdp, 4 * RES_DINODE + 4 * RES_LEAF, 0); 1633 + if (error) 1634 + goto out_gunlock; 1635 + 1636 + error = update_moved_ino(oip, ndip, S_ISDIR(old_mode)); 1637 + if (error) 1638 + goto out_end_trans; 1639 + 1640 + error = update_moved_ino(nip, odip, S_ISDIR(new_mode)); 1641 + if (error) 1642 + goto out_end_trans; 1643 + 1644 + error = gfs2_dir_mvino(ndip, &ndentry->d_name, oip, 1645 + IF2DT(old_mode)); 1646 + if (error) 1647 + goto out_end_trans; 1648 + 1649 + error = gfs2_dir_mvino(odip, &odentry->d_name, nip, 1650 + IF2DT(new_mode)); 1651 + if (error) 1652 + goto out_end_trans; 1653 + 1654 + if (odip != ndip) { 1655 + if (S_ISDIR(new_mode) && !S_ISDIR(old_mode)) { 1656 + inc_nlink(&odip->i_inode); 1657 + drop_nlink(&ndip->i_inode); 1658 + } else if 
(S_ISDIR(old_mode) && !S_ISDIR(new_mode)) { 1659 + inc_nlink(&ndip->i_inode); 1660 + drop_nlink(&odip->i_inode); 1661 + } 1662 + } 1663 + mark_inode_dirty(&ndip->i_inode); 1664 + if (odip != ndip) 1665 + mark_inode_dirty(&odip->i_inode); 1666 + 1667 + out_end_trans: 1668 + gfs2_trans_end(sdp); 1669 + out_gunlock: 1670 + while (x--) { 1671 + gfs2_glock_dq(ghs + x); 1672 + gfs2_holder_uninit(ghs + x); 1673 + } 1674 + out_gunlock_r: 1675 + if (r_gh.gh_gl) 1676 + gfs2_glock_dq_uninit(&r_gh); 1677 + out: 1678 + return error; 1679 + } 1680 + 1681 + static int gfs2_rename2(struct inode *odir, struct dentry *odentry, 1682 + struct inode *ndir, struct dentry *ndentry, 1683 + unsigned int flags) 1684 + { 1685 + flags &= ~RENAME_NOREPLACE; 1686 + 1687 + if (flags & ~RENAME_EXCHANGE) 1688 + return -EINVAL; 1689 + 1690 + if (flags & RENAME_EXCHANGE) 1691 + return gfs2_exchange(odir, odentry, ndir, ndentry, flags); 1692 + 1693 + return gfs2_rename(odir, odentry, ndir, ndentry); 1557 1694 } 1558 1695 1559 1696 /** ··· 1889 1716 1890 1717 if (!uid_eq(ouid, NO_UID_QUOTA_CHANGE) || 1891 1718 !gid_eq(ogid, NO_GID_QUOTA_CHANGE)) { 1892 - gfs2_quota_change(ip, -ap.target, ouid, ogid); 1719 + gfs2_quota_change(ip, -(s64)ap.target, ouid, ogid); 1893 1720 gfs2_quota_change(ip, ap.target, nuid, ngid); 1894 1721 } 1895 1722 ··· 2116 1943 .mkdir = gfs2_mkdir, 2117 1944 .rmdir = gfs2_unlink, 2118 1945 .mknod = gfs2_mknod, 2119 - .rename = gfs2_rename, 1946 + .rename2 = gfs2_rename2, 2120 1947 .permission = gfs2_permission, 2121 1948 .setattr = gfs2_setattr, 2122 1949 .getattr = gfs2_getattr,
+1
fs/gfs2/ops_fstype.c
··· 756 756 } 757 757 } 758 758 759 + sdp->sd_log_idle = 1; 759 760 set_bit(SDF_JOURNAL_CHECKED, &sdp->sd_flags); 760 761 gfs2_glock_dq_uninit(&ji_gh); 761 762 jindex = 0;
+129 -83
fs/gfs2/quota.c
··· 649 649 slot_hold(qd); 650 650 } 651 651 652 + if (change < 0) /* Reset quiet flag if we freed some blocks */ 653 + clear_bit(QDF_QMSG_QUIET, &qd->qd_flags); 652 654 mutex_unlock(&sdp->sd_quota_mutex); 655 + } 656 + 657 + static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index, 658 + unsigned off, void *buf, unsigned bytes) 659 + { 660 + struct inode *inode = &ip->i_inode; 661 + struct gfs2_sbd *sdp = GFS2_SB(inode); 662 + struct address_space *mapping = inode->i_mapping; 663 + struct page *page; 664 + struct buffer_head *bh; 665 + void *kaddr; 666 + u64 blk; 667 + unsigned bsize = sdp->sd_sb.sb_bsize, bnum = 0, boff = 0; 668 + unsigned to_write = bytes, pg_off = off; 669 + int done = 0; 670 + 671 + blk = index << (PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift); 672 + boff = off % bsize; 673 + 674 + page = find_or_create_page(mapping, index, GFP_NOFS); 675 + if (!page) 676 + return -ENOMEM; 677 + if (!page_has_buffers(page)) 678 + create_empty_buffers(page, bsize, 0); 679 + 680 + bh = page_buffers(page); 681 + while (!done) { 682 + /* Find the beginning block within the page */ 683 + if (pg_off >= ((bnum * bsize) + bsize)) { 684 + bh = bh->b_this_page; 685 + bnum++; 686 + blk++; 687 + continue; 688 + } 689 + if (!buffer_mapped(bh)) { 690 + gfs2_block_map(inode, blk, bh, 1); 691 + if (!buffer_mapped(bh)) 692 + goto unlock_out; 693 + /* If it's a newly allocated disk block, zero it */ 694 + if (buffer_new(bh)) 695 + zero_user(page, bnum * bsize, bh->b_size); 696 + } 697 + if (PageUptodate(page)) 698 + set_buffer_uptodate(bh); 699 + if (!buffer_uptodate(bh)) { 700 + ll_rw_block(READ | REQ_META, 1, &bh); 701 + wait_on_buffer(bh); 702 + if (!buffer_uptodate(bh)) 703 + goto unlock_out; 704 + } 705 + gfs2_trans_add_data(ip->i_gl, bh); 706 + 707 + /* If we need to write to the next block as well */ 708 + if (to_write > (bsize - boff)) { 709 + pg_off += (bsize - boff); 710 + to_write -= (bsize - boff); 711 + boff = pg_off % bsize; 712 + continue; 713 
+ } 714 + done = 1; 715 + } 716 + 717 + /* Write to the page, now that we have setup the buffer(s) */ 718 + kaddr = kmap_atomic(page); 719 + memcpy(kaddr + off, buf, bytes); 720 + flush_dcache_page(page); 721 + kunmap_atomic(kaddr); 722 + unlock_page(page); 723 + page_cache_release(page); 724 + 725 + return 0; 726 + 727 + unlock_out: 728 + unlock_page(page); 729 + page_cache_release(page); 730 + return -EIO; 731 + } 732 + 733 + static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp, 734 + loff_t loc) 735 + { 736 + unsigned long pg_beg; 737 + unsigned pg_off, nbytes, overflow = 0; 738 + int pg_oflow = 0, error; 739 + void *ptr; 740 + 741 + nbytes = sizeof(struct gfs2_quota); 742 + 743 + pg_beg = loc >> PAGE_CACHE_SHIFT; 744 + pg_off = loc % PAGE_CACHE_SIZE; 745 + 746 + /* If the quota straddles a page boundary, split the write in two */ 747 + if ((pg_off + nbytes) > PAGE_CACHE_SIZE) { 748 + pg_oflow = 1; 749 + overflow = (pg_off + nbytes) - PAGE_CACHE_SIZE; 750 + } 751 + 752 + ptr = qp; 753 + error = gfs2_write_buf_to_page(ip, pg_beg, pg_off, ptr, 754 + nbytes - overflow); 755 + /* If there's an overflow, write the remaining bytes to the next page */ 756 + if (!error && pg_oflow) 757 + error = gfs2_write_buf_to_page(ip, pg_beg + 1, 0, 758 + ptr + nbytes - overflow, 759 + overflow); 760 + return error; 653 761 } 654 762 655 763 /** ··· 780 672 { 781 673 struct inode *inode = &ip->i_inode; 782 674 struct gfs2_sbd *sdp = GFS2_SB(inode); 783 - struct address_space *mapping = inode->i_mapping; 784 - unsigned long index = loc >> PAGE_CACHE_SHIFT; 785 - unsigned offset = loc & (PAGE_CACHE_SIZE - 1); 786 - unsigned blocksize, iblock, pos; 787 - struct buffer_head *bh; 788 - struct page *page; 789 - void *kaddr, *ptr; 790 675 struct gfs2_quota q; 791 - int err, nbytes; 676 + int err; 792 677 u64 size; 793 678 794 679 if (gfs2_is_stuffed(ip)) { ··· 795 694 if (err < 0) 796 695 return err; 797 696 697 + loc -= sizeof(q); /* gfs2_internal_read would've 
advanced the loc ptr */ 798 698 err = -EIO; 799 699 be64_add_cpu(&q.qu_value, change); 700 + if (((s64)be64_to_cpu(q.qu_value)) < 0) 701 + q.qu_value = 0; /* Never go negative on quota usage */ 800 702 qd->qd_qb.qb_value = q.qu_value; 801 703 if (fdq) { 802 704 if (fdq->d_fieldmask & QC_SPC_SOFT) { ··· 816 712 } 817 713 } 818 714 819 - /* Write the quota into the quota file on disk */ 820 - ptr = &q; 821 - nbytes = sizeof(struct gfs2_quota); 822 - get_a_page: 823 - page = find_or_create_page(mapping, index, GFP_NOFS); 824 - if (!page) 825 - return -ENOMEM; 826 - 827 - blocksize = inode->i_sb->s_blocksize; 828 - iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); 829 - 830 - if (!page_has_buffers(page)) 831 - create_empty_buffers(page, blocksize, 0); 832 - 833 - bh = page_buffers(page); 834 - pos = blocksize; 835 - while (offset >= pos) { 836 - bh = bh->b_this_page; 837 - iblock++; 838 - pos += blocksize; 715 + err = gfs2_write_disk_quota(ip, &q, loc); 716 + if (!err) { 717 + size = loc + sizeof(struct gfs2_quota); 718 + if (size > inode->i_size) 719 + i_size_write(inode, size); 720 + inode->i_mtime = inode->i_atime = CURRENT_TIME; 721 + mark_inode_dirty(inode); 722 + set_bit(QDF_REFRESH, &qd->qd_flags); 839 723 } 840 724 841 - if (!buffer_mapped(bh)) { 842 - gfs2_block_map(inode, iblock, bh, 1); 843 - if (!buffer_mapped(bh)) 844 - goto unlock_out; 845 - /* If it's a newly allocated disk block for quota, zero it */ 846 - if (buffer_new(bh)) 847 - zero_user(page, pos - blocksize, bh->b_size); 848 - } 849 - 850 - if (PageUptodate(page)) 851 - set_buffer_uptodate(bh); 852 - 853 - if (!buffer_uptodate(bh)) { 854 - ll_rw_block(READ | REQ_META, 1, &bh); 855 - wait_on_buffer(bh); 856 - if (!buffer_uptodate(bh)) 857 - goto unlock_out; 858 - } 859 - 860 - gfs2_trans_add_data(ip->i_gl, bh); 861 - 862 - kaddr = kmap_atomic(page); 863 - if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE) 864 - nbytes = PAGE_CACHE_SIZE - offset; 865 - memcpy(kaddr + 
offset, ptr, nbytes); 866 - flush_dcache_page(page); 867 - kunmap_atomic(kaddr); 868 - unlock_page(page); 869 - page_cache_release(page); 870 - 871 - /* If quota straddles page boundary, we need to update the rest of the 872 - * quota at the beginning of the next page */ 873 - if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) { 874 - ptr = ptr + nbytes; 875 - nbytes = sizeof(struct gfs2_quota) - nbytes; 876 - offset = 0; 877 - index++; 878 - goto get_a_page; 879 - } 880 - 881 - size = loc + sizeof(struct gfs2_quota); 882 - if (size > inode->i_size) 883 - i_size_write(inode, size); 884 - inode->i_mtime = inode->i_atime = CURRENT_TIME; 885 - mark_inode_dirty(inode); 886 - set_bit(QDF_REFRESH, &qd->qd_flags); 887 - return 0; 888 - 889 - unlock_out: 890 - unlock_page(page); 891 - page_cache_release(page); 892 725 return err; 893 726 } 894 727 ··· 1189 1148 /* If no min_target specified or we don't meet 1190 1149 * min_target, return -EDQUOT */ 1191 1150 if (!ap->min_target || ap->min_target > ap->allowed) { 1192 - print_message(qd, "exceeded"); 1193 - quota_send_warning(qd->qd_id, 1194 - sdp->sd_vfs->s_dev, 1195 - QUOTA_NL_BHARDWARN); 1151 + if (!test_and_set_bit(QDF_QMSG_QUIET, 1152 + &qd->qd_flags)) { 1153 + print_message(qd, "exceeded"); 1154 + quota_send_warning(qd->qd_id, 1155 + sdp->sd_vfs->s_dev, 1156 + QUOTA_NL_BHARDWARN); 1157 + } 1196 1158 error = -EDQUOT; 1197 1159 break; 1198 1160 } ··· 1692 1648 1693 1649 /* Apply changes */ 1694 1650 error = gfs2_adjust_quota(ip, offset, 0, qd, fdq); 1651 + if (!error) 1652 + clear_bit(QDF_QMSG_QUIET, &qd->qd_flags); 1695 1653 1696 1654 gfs2_trans_end(sdp); 1697 1655 out_release:
+35 -13
fs/gfs2/rgrp.c
··· 978 978 rgd->rd_flags |= GFS2_RDF_PREFERRED; 979 979 for (i = 0; i < sdp->sd_journals; i++) { 980 980 rgd = gfs2_rgrpd_get_next(rgd); 981 - if (rgd == first) 981 + if (!rgd || rgd == first) 982 982 break; 983 983 } 984 - } while (rgd != first); 984 + } while (rgd && rgd != first); 985 985 } 986 986 987 987 /** ··· 1244 1244 } 1245 1245 1246 1246 /** 1247 - * gfs2_rgrp_go_unlock - Release RG bitmaps read in with gfs2_rgrp_bh_get() 1248 - * @gh: The glock holder for the resource group 1247 + * gfs2_rgrp_brelse - Release RG bitmaps read in with gfs2_rgrp_bh_get() 1248 + * @rgd: The resource group 1249 1249 * 1250 1250 */ 1251 1251 1252 - void gfs2_rgrp_go_unlock(struct gfs2_holder *gh) 1252 + void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd) 1253 1253 { 1254 - struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object; 1255 1254 int x, length = rgd->rd_length; 1256 1255 1257 1256 for (x = 0; x < length; x++) { ··· 1261 1262 } 1262 1263 } 1263 1264 1265 + } 1266 + 1267 + /** 1268 + * gfs2_rgrp_go_unlock - Unlock a rgrp glock 1269 + * @gh: The glock holder for the resource group 1270 + * 1271 + */ 1272 + 1273 + void gfs2_rgrp_go_unlock(struct gfs2_holder *gh) 1274 + { 1275 + struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object; 1276 + int demote_requested = test_bit(GLF_DEMOTE, &gh->gh_gl->gl_flags) | 1277 + test_bit(GLF_PENDING_DEMOTE, &gh->gh_gl->gl_flags); 1278 + 1279 + if (rgd && demote_requested) 1280 + gfs2_rgrp_brelse(rgd); 1264 1281 } 1265 1282 1266 1283 int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset, ··· 1726 1711 return ret; 1727 1712 1728 1713 bitmap_full: /* Mark bitmap as full and fall through */ 1729 - if ((state == GFS2_BLKST_FREE) && initial_offset == 0) { 1730 - struct gfs2_bitmap *bi = rbm_bi(rbm); 1714 + if ((state == GFS2_BLKST_FREE) && initial_offset == 0) 1731 1715 set_bit(GBF_FULL, &bi->bi_flags); 1732 - } 1733 1716 1734 1717 next_bitmap: /* Find next bitmap in the rgrp */ 1735 1718 rbm->offset = 0; ··· 1863 1850 const struct gfs2_sbd *sdp = gl->gl_sbd; 
1864 1851 struct gfs2_lkstats *st; 1865 1852 s64 r_dcount, l_dcount; 1866 - s64 r_srttb, l_srttb; 1853 + s64 l_srttb, a_srttb = 0; 1867 1854 s64 srttb_diff; 1868 1855 s64 sqr_diff; 1869 1856 s64 var; 1857 + int cpu, nonzero = 0; 1870 1858 1871 1859 preempt_disable(); 1860 + for_each_present_cpu(cpu) { 1861 + st = &per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[LM_TYPE_RGRP]; 1862 + if (st->stats[GFS2_LKS_SRTTB]) { 1863 + a_srttb += st->stats[GFS2_LKS_SRTTB]; 1864 + nonzero++; 1865 + } 1866 + } 1872 1867 st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP]; 1873 - r_srttb = st->stats[GFS2_LKS_SRTTB]; 1868 + if (nonzero) 1869 + do_div(a_srttb, nonzero); 1874 1870 r_dcount = st->stats[GFS2_LKS_DCOUNT]; 1875 1871 var = st->stats[GFS2_LKS_SRTTVARB] + 1876 1872 gl->gl_stats.stats[GFS2_LKS_SRTTVARB]; ··· 1888 1866 l_srttb = gl->gl_stats.stats[GFS2_LKS_SRTTB]; 1889 1867 l_dcount = gl->gl_stats.stats[GFS2_LKS_DCOUNT]; 1890 1868 1891 - if ((l_dcount < 1) || (r_dcount < 1) || (r_srttb == 0)) 1869 + if ((l_dcount < 1) || (r_dcount < 1) || (a_srttb == 0)) 1892 1870 return false; 1893 1871 1894 - srttb_diff = r_srttb - l_srttb; 1872 + srttb_diff = a_srttb - l_srttb; 1895 1873 sqr_diff = srttb_diff * srttb_diff; 1896 1874 1897 1875 var *= 2;
+1
fs/gfs2/rgrp.h
··· 36 36 extern int gfs2_rindex_update(struct gfs2_sbd *sdp); 37 37 extern void gfs2_free_clones(struct gfs2_rgrpd *rgd); 38 38 extern int gfs2_rgrp_go_lock(struct gfs2_holder *gh); 39 + extern void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd); 39 40 extern void gfs2_rgrp_go_unlock(struct gfs2_holder *gh); 40 41 41 42 extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
+48 -18
fs/gfs2/sys.c
··· 101 101 102 102 static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len) 103 103 { 104 - int error; 105 - int n = simple_strtol(buf, NULL, 0); 104 + int error, n; 105 + 106 + error = kstrtoint(buf, 0, &n); 107 + if (error) 108 + return error; 106 109 107 110 if (!capable(CAP_SYS_ADMIN)) 108 111 return -EPERM; ··· 137 134 138 135 static ssize_t withdraw_store(struct gfs2_sbd *sdp, const char *buf, size_t len) 139 136 { 137 + int error, val; 138 + 140 139 if (!capable(CAP_SYS_ADMIN)) 141 140 return -EPERM; 142 141 143 - if (simple_strtol(buf, NULL, 0) != 1) 142 + error = kstrtoint(buf, 0, &val); 143 + if (error) 144 + return error; 145 + 146 + if (val != 1) 144 147 return -EINVAL; 145 148 146 149 gfs2_lm_withdraw(sdp, "withdrawing from cluster at user's request\n"); ··· 157 148 static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf, 158 149 size_t len) 159 150 { 151 + int error, val; 152 + 160 153 if (!capable(CAP_SYS_ADMIN)) 161 154 return -EPERM; 162 155 163 - if (simple_strtol(buf, NULL, 0) != 1) 156 + error = kstrtoint(buf, 0, &val); 157 + if (error) 158 + return error; 159 + 160 + if (val != 1) 164 161 return -EINVAL; 165 162 166 163 gfs2_statfs_sync(sdp->sd_vfs, 0); ··· 176 161 static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf, 177 162 size_t len) 178 163 { 164 + int error, val; 165 + 179 166 if (!capable(CAP_SYS_ADMIN)) 180 167 return -EPERM; 181 168 182 - if (simple_strtol(buf, NULL, 0) != 1) 169 + error = kstrtoint(buf, 0, &val); 170 + if (error) 171 + return error; 172 + 173 + if (val != 1) 183 174 return -EINVAL; 184 175 185 176 gfs2_quota_sync(sdp->sd_vfs, 0); ··· 202 181 if (!capable(CAP_SYS_ADMIN)) 203 182 return -EPERM; 204 183 205 - id = simple_strtoul(buf, NULL, 0); 184 + error = kstrtou32(buf, 0, &id); 185 + if (error) 186 + return error; 206 187 207 188 qid = make_kqid(current_user_ns(), USRQUOTA, id); 208 189 if (!qid_valid(qid)) ··· 224 201 if (!capable(CAP_SYS_ADMIN)) 225 202 return 
-EPERM; 226 203 227 - id = simple_strtoul(buf, NULL, 0); 204 + error = kstrtou32(buf, 0, &id); 205 + if (error) 206 + return error; 228 207 229 208 qid = make_kqid(current_user_ns(), GRPQUOTA, id); 230 209 if (!qid_valid(qid)) ··· 349 324 static ssize_t block_store(struct gfs2_sbd *sdp, const char *buf, size_t len) 350 325 { 351 326 struct lm_lockstruct *ls = &sdp->sd_lockstruct; 352 - ssize_t ret = len; 353 - int val; 327 + int ret, val; 354 328 355 - val = simple_strtol(buf, NULL, 0); 329 + ret = kstrtoint(buf, 0, &val); 330 + if (ret) 331 + return ret; 356 332 357 333 if (val == 1) 358 334 set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags); ··· 362 336 smp_mb__after_atomic(); 363 337 gfs2_glock_thaw(sdp); 364 338 } else { 365 - ret = -EINVAL; 339 + return -EINVAL; 366 340 } 367 - return ret; 341 + return len; 368 342 } 369 343 370 344 static ssize_t wdack_show(struct gfs2_sbd *sdp, char *buf) ··· 376 350 377 351 static ssize_t wdack_store(struct gfs2_sbd *sdp, const char *buf, size_t len) 378 352 { 379 - ssize_t ret = len; 380 - int val; 353 + int ret, val; 381 354 382 - val = simple_strtol(buf, NULL, 0); 355 + ret = kstrtoint(buf, 0, &val); 356 + if (ret) 357 + return ret; 383 358 384 359 if ((val == 1) && 385 360 !strcmp(sdp->sd_lockstruct.ls_ops->lm_proto_name, "lock_dlm")) 386 361 complete(&sdp->sd_wdack); 387 362 else 388 - ret = -EINVAL; 389 - return ret; 363 + return -EINVAL; 364 + return len; 390 365 } 391 366 392 367 static ssize_t lkfirst_show(struct gfs2_sbd *sdp, char *buf) ··· 580 553 { 581 554 struct gfs2_tune *gt = &sdp->sd_tune; 582 555 unsigned int x; 556 + int error; 583 557 584 558 if (!capable(CAP_SYS_ADMIN)) 585 559 return -EPERM; 586 560 587 - x = simple_strtoul(buf, NULL, 0); 561 + error = kstrtouint(buf, 0, &x); 562 + if (error) 563 + return error; 588 564 589 565 if (check_zero && !x) 590 566 return -EINVAL;