Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'gfs2-4.15.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull gfs2 updates from Bob Peterson:
"We've got a total of 17 GFS2 patches for this merge window. The
patches are basically in three categories: (1) patches related to
broken xfstest cases, (2) patches related to improving iomap and start
using it in GFS2, and (3) general typos and clarifications.

Please note that one of the iomap patches extends beyond GFS2 and
affects other file systems, but it was publicly reviewed by a
variety of file system people in the community.

From Andreas Gruenbacher:

- rename variable 'bsize' to 'factor' to clarify the logic related to
gfs2_block_map.

- correctly set ctime in the setflags ioctl, which fixes broken
xfstests test 277.

- fix broken xfstest 258, due to an atime initialization problem.

- fix broken xfstest 307, in which GFS2 was not setting ctime when
setting acls.

- switch general iomap code from blkno to disk offset for a variety
of file systems.

- add a new IOMAP_F_DATA_INLINE flag for iomap to indicate blocks
that have data mixed with metadata.

- implement SEEK_HOLE and SEEK_DATA via iomap in GFS2.

- fix failing xfstest case 066, which was due to not properly syncing
dirty inodes when changing extended attributes.

- fix a minor typo in a comment.

- partially fix xfstest 424, which involved GET_FLAGS and SET_FLAGS
ioctl. This is also a cleanup and simplification of the translation
of flags from fs flags to gfs2 flags.

- add support for STATX_ATTR_ flags in statx, which fixed broken
xfstest 424.

- fix for failing xfstest 093, which fixes a recursive glock problem
with gfs2_xattr_get and _set.

From me:

- make inode height info part of the 'metapath' data structure to
facilitate using iomap in GFS2.

- start using iomap inside GFS2 and switch GFS2's block_map functions
to use iomap under the covers.

- switch GFS2's fiemap implementation from using block_map to using
iomap under the covers.

- fix journaled data pages not being properly synced to media when
writing inodes. This was caught with xfstests.

- fix another failing xfstest case in which switching a file from
ordered_write to journaled data via set_flags caused a deadlock"

* tag 'gfs2-4.15.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2:
gfs2: Allow gfs2_xattr_set to be called with the glock held
gfs2: Add support for statx inode flags
gfs2: Fix and clean up {GET,SET}FLAGS ioctl
gfs2: Fix a harmless typo
gfs2: Fix xattr fsync
GFS2: Take inode off order_write list when setting jdata flag
GFS2: flush the log and all pages for jdata as we do for WB_SYNC_ALL
gfs2: Implement SEEK_HOLE / SEEK_DATA via iomap
GFS2: Switch fiemap implementation to use iomap
GFS2: Implement iomap for block_map
GFS2: Make height info part of metapath
gfs2: Always update inode ctime in set_acl
gfs2: Support negative atimes
gfs2: Update ctime in setflags ioctl
gfs2: Clarify gfs2_block_map

+489 -228
+1
fs/gfs2/Kconfig
··· 4 4 select FS_POSIX_ACL 5 5 select CRC32 6 6 select QUOTACTL 7 + select FS_IOMAP 7 8 help 8 9 A cluster filesystem. 9 10
+1
fs/gfs2/acl.c
··· 141 141 142 142 ret = __gfs2_set_acl(inode, acl, type); 143 143 if (!ret && mode != inode->i_mode) { 144 + inode->i_ctime = current_time(inode); 144 145 inode->i_mode = mode; 145 146 mark_inode_dirty(inode); 146 147 }
+258 -100
fs/gfs2/bmap.c
··· 13 13 #include <linux/blkdev.h> 14 14 #include <linux/gfs2_ondisk.h> 15 15 #include <linux/crc32.h> 16 + #include <linux/iomap.h> 16 17 17 18 #include "gfs2.h" 18 19 #include "incore.h" ··· 37 36 struct metapath { 38 37 struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT]; 39 38 __u16 mp_list[GFS2_MAX_META_HEIGHT]; 39 + int mp_fheight; /* find_metapath height */ 40 + int mp_aheight; /* actual height (lookup height) */ 40 41 }; 41 42 42 43 /** ··· 238 235 { 239 236 unsigned int i; 240 237 238 + mp->mp_fheight = height; 241 239 for (i = height; i--;) 242 240 mp->mp_list[i] = do_div(block, sdp->sd_inptrs); 243 - 244 241 } 245 242 246 243 static inline unsigned int metapath_branch_start(const struct metapath *mp) ··· 251 248 } 252 249 253 250 /** 254 - * metaptr1 - Return the first possible metadata pointer in a metaath buffer 251 + * metaptr1 - Return the first possible metadata pointer in a metapath buffer 255 252 * @height: The metadata height (0 = dinode) 256 253 * @mp: The metapath 257 254 */ ··· 348 345 for (x = 0; x < end_of_metadata; x++) { 349 346 ret = lookup_mp_height(ip, mp, x); 350 347 if (ret) 351 - return ret; 348 + goto out; 352 349 } 353 350 354 - return ip->i_height; 351 + ret = ip->i_height; 352 + out: 353 + mp->mp_aheight = ret; 354 + return ret; 355 355 } 356 356 357 357 /** ··· 486 480 * @inode: The GFS2 inode 487 481 * @lblock: The logical starting block of the extent 488 482 * @bh_map: This is used to return the mapping details 489 - * @mp: The metapath 490 - * @sheight: The starting height (i.e. 
whats already mapped) 491 - * @height: The height to build to 483 + * @zero_new: True if newly allocated blocks should be zeroed 484 + * @mp: The metapath, with proper height information calculated 492 485 * @maxlen: The max number of data blocks to alloc 486 + * @dblock: Pointer to return the resulting new block 487 + * @dblks: Pointer to return the number of blocks allocated 493 488 * 494 489 * In this routine we may have to alloc: 495 490 * i) Indirect blocks to grow the metadata tree height ··· 506 499 * Returns: errno on error 507 500 */ 508 501 509 - static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock, 510 - struct buffer_head *bh_map, struct metapath *mp, 511 - const unsigned int sheight, 512 - const unsigned int height, 513 - const size_t maxlen) 502 + static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap, 503 + unsigned flags, struct metapath *mp) 514 504 { 515 505 struct gfs2_inode *ip = GFS2_I(inode); 516 506 struct gfs2_sbd *sdp = GFS2_SB(inode); 517 507 struct super_block *sb = sdp->sd_vfs; 518 508 struct buffer_head *dibh = mp->mp_bh[0]; 519 - u64 bn, dblock = 0; 509 + u64 bn; 520 510 unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0; 521 511 unsigned dblks = 0; 522 512 unsigned ptrs_per_blk; 523 - const unsigned end_of_metadata = height - 1; 513 + const unsigned end_of_metadata = mp->mp_fheight - 1; 524 514 int ret; 525 - int eob = 0; 526 515 enum alloc_state state; 527 516 __be64 *ptr; 528 517 __be64 zero_bn = 0; 518 + size_t maxlen = iomap->length >> inode->i_blkbits; 529 519 530 - BUG_ON(sheight < 1); 520 + BUG_ON(mp->mp_aheight < 1); 531 521 BUG_ON(dibh == NULL); 532 522 533 523 gfs2_trans_add_meta(ip->i_gl, dibh); 534 524 535 - if (height == sheight) { 525 + if (mp->mp_fheight == mp->mp_aheight) { 536 526 struct buffer_head *bh; 527 + int eob; 528 + 537 529 /* Bottom indirect block exists, find unalloced extent size */ 538 530 ptr = metapointer(end_of_metadata, mp); 539 531 bh = 
mp->mp_bh[end_of_metadata]; 540 - dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen, 541 - &eob); 532 + dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr, 533 + maxlen, &eob); 542 534 BUG_ON(dblks < 1); 543 535 state = ALLOC_DATA; 544 536 } else { 545 537 /* Need to allocate indirect blocks */ 546 - ptrs_per_blk = height > 1 ? sdp->sd_inptrs : sdp->sd_diptrs; 538 + ptrs_per_blk = mp->mp_fheight > 1 ? sdp->sd_inptrs : 539 + sdp->sd_diptrs; 547 540 dblks = min(maxlen, (size_t)(ptrs_per_blk - 548 541 mp->mp_list[end_of_metadata])); 549 - if (height == ip->i_height) { 542 + if (mp->mp_fheight == ip->i_height) { 550 543 /* Writing into existing tree, extend tree down */ 551 - iblks = height - sheight; 544 + iblks = mp->mp_fheight - mp->mp_aheight; 552 545 state = ALLOC_GROW_DEPTH; 553 546 } else { 554 547 /* Building up tree height */ 555 548 state = ALLOC_GROW_HEIGHT; 556 - iblks = height - ip->i_height; 549 + iblks = mp->mp_fheight - ip->i_height; 557 550 branch_start = metapath_branch_start(mp); 558 - iblks += (height - branch_start); 551 + iblks += (mp->mp_fheight - branch_start); 559 552 } 560 553 } 561 554 562 555 /* start of the second part of the function (state machine) */ 563 556 564 557 blks = dblks + iblks; 565 - i = sheight; 558 + i = mp->mp_aheight; 566 559 do { 567 560 int error; 568 561 n = blks - alloced; ··· 580 573 sizeof(struct gfs2_dinode)); 581 574 zero_bn = *ptr; 582 575 } 583 - for (; i - 1 < height - ip->i_height && n > 0; i++, n--) 576 + for (; i - 1 < mp->mp_fheight - ip->i_height && n > 0; 577 + i++, n--) 584 578 gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++); 585 - if (i - 1 == height - ip->i_height) { 579 + if (i - 1 == mp->mp_fheight - ip->i_height) { 586 580 i--; 587 581 gfs2_buffer_copy_tail(mp->mp_bh[i], 588 582 sizeof(struct gfs2_meta_header), ··· 595 587 sizeof(struct gfs2_meta_header)); 596 588 *ptr = zero_bn; 597 589 state = ALLOC_GROW_DEPTH; 598 - for(i = branch_start; i < height; i++) { 590 + for(i = branch_start; 
i < mp->mp_fheight; i++) { 599 591 if (mp->mp_bh[i] == NULL) 600 592 break; 601 593 brelse(mp->mp_bh[i]); ··· 607 599 break; 608 600 /* Branching from existing tree */ 609 601 case ALLOC_GROW_DEPTH: 610 - if (i > 1 && i < height) 602 + if (i > 1 && i < mp->mp_fheight) 611 603 gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]); 612 - for (; i < height && n > 0; i++, n--) 604 + for (; i < mp->mp_fheight && n > 0; i++, n--) 613 605 gfs2_indirect_init(mp, ip->i_gl, i, 614 606 mp->mp_list[i-1], bn++); 615 - if (i == height) 607 + if (i == mp->mp_fheight) 616 608 state = ALLOC_DATA; 617 609 if (n == 0) 618 610 break; ··· 623 615 gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]); 624 616 dblks = n; 625 617 ptr = metapointer(end_of_metadata, mp); 626 - dblock = bn; 618 + iomap->addr = bn << inode->i_blkbits; 619 + iomap->flags |= IOMAP_F_NEW; 627 620 while (n-- > 0) 628 621 *ptr++ = cpu_to_be64(bn++); 629 - if (buffer_zeronew(bh_map)) { 630 - ret = sb_issue_zeroout(sb, dblock, dblks, 631 - GFP_NOFS); 622 + if (flags & IOMAP_ZERO) { 623 + ret = sb_issue_zeroout(sb, iomap->addr >> inode->i_blkbits, 624 + dblks, GFP_NOFS); 632 625 if (ret) { 633 626 fs_err(sdp, 634 627 "Failed to zero data buffers\n"); 635 - clear_buffer_zeronew(bh_map); 628 + flags &= ~IOMAP_ZERO; 636 629 } 637 630 } 638 631 break; 639 632 } 640 - } while ((state != ALLOC_DATA) || !dblock); 633 + } while (iomap->addr == IOMAP_NULL_ADDR); 641 634 642 - ip->i_height = height; 635 + iomap->length = (u64)dblks << inode->i_blkbits; 636 + ip->i_height = mp->mp_fheight; 643 637 gfs2_add_inode_blocks(&ip->i_inode, alloced); 644 638 gfs2_dinode_out(ip, mp->mp_bh[0]->b_data); 645 - map_bh(bh_map, inode->i_sb, dblock); 646 - bh_map->b_size = dblks << inode->i_blkbits; 647 - set_buffer_new(bh_map); 648 639 return 0; 640 + } 641 + 642 + /** 643 + * hole_size - figure out the size of a hole 644 + * @inode: The inode 645 + * @lblock: The logical starting block number 646 + * @mp: The metapath 647 + * 648 + * Returns: 
The hole size in bytes 649 + * 650 + */ 651 + static u64 hole_size(struct inode *inode, sector_t lblock, struct metapath *mp) 652 + { 653 + struct gfs2_inode *ip = GFS2_I(inode); 654 + struct gfs2_sbd *sdp = GFS2_SB(inode); 655 + struct metapath mp_eof; 656 + u64 factor = 1; 657 + int hgt; 658 + u64 holesz = 0; 659 + const __be64 *first, *end, *ptr; 660 + const struct buffer_head *bh; 661 + u64 lblock_stop = (i_size_read(inode) - 1) >> inode->i_blkbits; 662 + int zeroptrs; 663 + bool done = false; 664 + 665 + /* Get another metapath, to the very last byte */ 666 + find_metapath(sdp, lblock_stop, &mp_eof, ip->i_height); 667 + for (hgt = ip->i_height - 1; hgt >= 0 && !done; hgt--) { 668 + bh = mp->mp_bh[hgt]; 669 + if (bh) { 670 + zeroptrs = 0; 671 + first = metapointer(hgt, mp); 672 + end = (const __be64 *)(bh->b_data + bh->b_size); 673 + 674 + for (ptr = first; ptr < end; ptr++) { 675 + if (*ptr) { 676 + done = true; 677 + break; 678 + } else { 679 + zeroptrs++; 680 + } 681 + } 682 + } else { 683 + zeroptrs = sdp->sd_inptrs; 684 + } 685 + if (factor * zeroptrs >= lblock_stop - lblock + 1) { 686 + holesz = lblock_stop - lblock + 1; 687 + break; 688 + } 689 + holesz += factor * zeroptrs; 690 + 691 + factor *= sdp->sd_inptrs; 692 + if (hgt && (mp->mp_list[hgt - 1] < mp_eof.mp_list[hgt - 1])) 693 + (mp->mp_list[hgt - 1])++; 694 + } 695 + return holesz << inode->i_blkbits; 696 + } 697 + 698 + static void gfs2_stuffed_iomap(struct inode *inode, struct iomap *iomap) 699 + { 700 + struct gfs2_inode *ip = GFS2_I(inode); 701 + 702 + iomap->addr = (ip->i_no_addr << inode->i_blkbits) + 703 + sizeof(struct gfs2_dinode); 704 + iomap->offset = 0; 705 + iomap->length = i_size_read(inode); 706 + iomap->type = IOMAP_MAPPED; 707 + iomap->flags = IOMAP_F_DATA_INLINE; 708 + } 709 + 710 + /** 711 + * gfs2_iomap_begin - Map blocks from an inode to disk blocks 712 + * @inode: The inode 713 + * @pos: Starting position in bytes 714 + * @length: Length to map, in bytes 715 + * @flags: iomap 
flags 716 + * @iomap: The iomap structure 717 + * 718 + * Returns: errno 719 + */ 720 + int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length, 721 + unsigned flags, struct iomap *iomap) 722 + { 723 + struct gfs2_inode *ip = GFS2_I(inode); 724 + struct gfs2_sbd *sdp = GFS2_SB(inode); 725 + struct metapath mp = { .mp_aheight = 1, }; 726 + unsigned int factor = sdp->sd_sb.sb_bsize; 727 + const u64 *arr = sdp->sd_heightsize; 728 + __be64 *ptr; 729 + sector_t lblock; 730 + sector_t lend; 731 + int ret; 732 + int eob; 733 + unsigned int len; 734 + struct buffer_head *bh; 735 + u8 height; 736 + 737 + trace_gfs2_iomap_start(ip, pos, length, flags); 738 + if (!length) { 739 + ret = -EINVAL; 740 + goto out; 741 + } 742 + 743 + if ((flags & IOMAP_REPORT) && gfs2_is_stuffed(ip)) { 744 + gfs2_stuffed_iomap(inode, iomap); 745 + if (pos >= iomap->length) 746 + return -ENOENT; 747 + ret = 0; 748 + goto out; 749 + } 750 + 751 + lblock = pos >> inode->i_blkbits; 752 + lend = (pos + length + sdp->sd_sb.sb_bsize - 1) >> inode->i_blkbits; 753 + 754 + iomap->offset = lblock << inode->i_blkbits; 755 + iomap->addr = IOMAP_NULL_ADDR; 756 + iomap->type = IOMAP_HOLE; 757 + iomap->length = (u64)(lend - lblock) << inode->i_blkbits; 758 + iomap->flags = IOMAP_F_MERGED; 759 + bmap_lock(ip, 0); 760 + 761 + /* 762 + * Directory data blocks have a struct gfs2_meta_header header, so the 763 + * remaining size is smaller than the filesystem block size. Logical 764 + * block numbers for directories are in units of this remaining size! 
765 + */ 766 + if (gfs2_is_dir(ip)) { 767 + factor = sdp->sd_jbsize; 768 + arr = sdp->sd_jheightsize; 769 + } 770 + 771 + ret = gfs2_meta_inode_buffer(ip, &mp.mp_bh[0]); 772 + if (ret) 773 + goto out_release; 774 + 775 + height = ip->i_height; 776 + while ((lblock + 1) * factor > arr[height]) 777 + height++; 778 + find_metapath(sdp, lblock, &mp, height); 779 + if (height > ip->i_height || gfs2_is_stuffed(ip)) 780 + goto do_alloc; 781 + 782 + ret = lookup_metapath(ip, &mp); 783 + if (ret < 0) 784 + goto out_release; 785 + 786 + if (mp.mp_aheight != ip->i_height) 787 + goto do_alloc; 788 + 789 + ptr = metapointer(ip->i_height - 1, &mp); 790 + if (*ptr == 0) 791 + goto do_alloc; 792 + 793 + iomap->type = IOMAP_MAPPED; 794 + iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits; 795 + 796 + bh = mp.mp_bh[ip->i_height - 1]; 797 + len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, lend - lblock, &eob); 798 + if (eob) 799 + iomap->flags |= IOMAP_F_BOUNDARY; 800 + iomap->length = (u64)len << inode->i_blkbits; 801 + 802 + ret = 0; 803 + 804 + out_release: 805 + release_metapath(&mp); 806 + bmap_unlock(ip, 0); 807 + out: 808 + trace_gfs2_iomap_end(ip, iomap, ret); 809 + return ret; 810 + 811 + do_alloc: 812 + if (!(flags & IOMAP_WRITE)) { 813 + if (pos >= i_size_read(inode)) { 814 + ret = -ENOENT; 815 + goto out_release; 816 + } 817 + ret = 0; 818 + iomap->length = hole_size(inode, lblock, &mp); 819 + goto out_release; 820 + } 821 + 822 + ret = gfs2_iomap_alloc(inode, iomap, flags, &mp); 823 + goto out_release; 649 824 } 650 825 651 826 /** ··· 849 658 struct buffer_head *bh_map, int create) 850 659 { 851 660 struct gfs2_inode *ip = GFS2_I(inode); 852 - struct gfs2_sbd *sdp = GFS2_SB(inode); 853 - unsigned int bsize = sdp->sd_sb.sb_bsize; 854 - const size_t maxlen = bh_map->b_size >> inode->i_blkbits; 855 - const u64 *arr = sdp->sd_heightsize; 856 - __be64 *ptr; 857 - u64 size; 858 - struct metapath mp; 859 - int ret; 860 - int eob; 861 - unsigned int len; 862 - struct 
buffer_head *bh; 863 - u8 height; 661 + struct iomap iomap; 662 + int ret, flags = 0; 864 663 865 - BUG_ON(maxlen == 0); 866 - 867 - memset(&mp, 0, sizeof(mp)); 868 - bmap_lock(ip, create); 869 664 clear_buffer_mapped(bh_map); 870 665 clear_buffer_new(bh_map); 871 666 clear_buffer_boundary(bh_map); 872 667 trace_gfs2_bmap(ip, bh_map, lblock, create, 1); 873 - if (gfs2_is_dir(ip)) { 874 - bsize = sdp->sd_jbsize; 875 - arr = sdp->sd_jheightsize; 668 + 669 + if (create) 670 + flags |= IOMAP_WRITE; 671 + if (buffer_zeronew(bh_map)) 672 + flags |= IOMAP_ZERO; 673 + ret = gfs2_iomap_begin(inode, (loff_t)lblock << inode->i_blkbits, 674 + bh_map->b_size, flags, &iomap); 675 + if (ret) { 676 + if (!create && ret == -ENOENT) { 677 + /* Return unmapped buffer beyond the end of file. */ 678 + ret = 0; 679 + } 680 + goto out; 876 681 } 877 682 878 - ret = gfs2_meta_inode_buffer(ip, &mp.mp_bh[0]); 879 - if (ret) 880 - goto out; 881 - 882 - height = ip->i_height; 883 - size = (lblock + 1) * bsize; 884 - while (size > arr[height]) 885 - height++; 886 - find_metapath(sdp, lblock, &mp, height); 887 - ret = 1; 888 - if (height > ip->i_height || gfs2_is_stuffed(ip)) 889 - goto do_alloc; 890 - ret = lookup_metapath(ip, &mp); 891 - if (ret < 0) 892 - goto out; 893 - if (ret != ip->i_height) 894 - goto do_alloc; 895 - ptr = metapointer(ip->i_height - 1, &mp); 896 - if (*ptr == 0) 897 - goto do_alloc; 898 - map_bh(bh_map, inode->i_sb, be64_to_cpu(*ptr)); 899 - bh = mp.mp_bh[ip->i_height - 1]; 900 - len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen, &eob); 901 - bh_map->b_size = (len << inode->i_blkbits); 902 - if (eob) 683 + if (iomap.length > bh_map->b_size) { 684 + iomap.length = bh_map->b_size; 685 + iomap.flags &= ~IOMAP_F_BOUNDARY; 686 + } 687 + if (iomap.addr != IOMAP_NULL_ADDR) 688 + map_bh(bh_map, inode->i_sb, iomap.addr >> inode->i_blkbits); 689 + bh_map->b_size = iomap.length; 690 + if (iomap.flags & IOMAP_F_BOUNDARY) 903 691 set_buffer_boundary(bh_map); 904 - ret = 
0; 692 + if (iomap.flags & IOMAP_F_NEW) 693 + set_buffer_new(bh_map); 694 + 905 695 out: 906 - release_metapath(&mp); 907 696 trace_gfs2_bmap(ip, bh_map, lblock, create, ret); 908 - bmap_unlock(ip, create); 909 697 return ret; 910 - 911 - do_alloc: 912 - /* All allocations are done here, firstly check create flag */ 913 - if (!create) { 914 - BUG_ON(gfs2_is_stuffed(ip)); 915 - ret = 0; 916 - goto out; 917 - } 918 - 919 - /* At this point ret is the tree depth of already allocated blocks */ 920 - ret = gfs2_bmap_alloc(inode, lblock, bh_map, &mp, ret, height, maxlen); 921 - goto out; 922 698 } 923 699 924 700 /*
+4
fs/gfs2/bmap.h
··· 10 10 #ifndef __BMAP_DOT_H__ 11 11 #define __BMAP_DOT_H__ 12 12 13 + #include <linux/iomap.h> 14 + 13 15 #include "inode.h" 14 16 15 17 struct inode; ··· 49 47 extern int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page); 50 48 extern int gfs2_block_map(struct inode *inode, sector_t lblock, 51 49 struct buffer_head *bh, int create); 50 + extern int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length, 51 + unsigned flags, struct iomap *iomap); 52 52 extern int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, 53 53 u64 *dblock, unsigned *extlen); 54 54 extern int gfs2_setattr_size(struct inode *inode, u64 size);
+65 -59
fs/gfs2/file.c
··· 60 60 loff_t error; 61 61 62 62 switch (whence) { 63 - case SEEK_END: /* These reference inode->i_size */ 64 - case SEEK_DATA: 65 - case SEEK_HOLE: 63 + case SEEK_END: 66 64 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, 67 65 &i_gh); 68 66 if (!error) { ··· 68 70 gfs2_glock_dq_uninit(&i_gh); 69 71 } 70 72 break; 73 + 74 + case SEEK_DATA: 75 + error = gfs2_seek_data(file, offset); 76 + break; 77 + 78 + case SEEK_HOLE: 79 + error = gfs2_seek_hole(file, offset); 80 + break; 81 + 71 82 case SEEK_CUR: 72 83 case SEEK_SET: 84 + /* 85 + * These don't reference inode->i_size and don't depend on the 86 + * block mapping, so we don't need the glock. 87 + */ 73 88 error = generic_file_llseek(file, offset, whence); 74 89 break; 75 90 default: ··· 119 108 } 120 109 121 110 /** 122 - * fsflags_cvt 123 - * @table: A table of 32 u32 flags 124 - * @val: a 32 bit value to convert 111 + * fsflag_gfs2flag 125 112 * 126 - * This function can be used to convert between fsflags values and 127 - * GFS2's own flags values. 128 - * 129 - * Returns: the converted flags 113 + * The FS_JOURNAL_DATA_FL flag maps to GFS2_DIF_INHERIT_JDATA for directories, 114 + * and to GFS2_DIF_JDATA for non-directories. 
130 115 */ 131 - static u32 fsflags_cvt(const u32 *table, u32 val) 132 - { 133 - u32 res = 0; 134 - while(val) { 135 - if (val & 1) 136 - res |= *table; 137 - table++; 138 - val >>= 1; 139 - } 140 - return res; 141 - } 142 - 143 - static const u32 fsflags_to_gfs2[32] = { 144 - [3] = GFS2_DIF_SYNC, 145 - [4] = GFS2_DIF_IMMUTABLE, 146 - [5] = GFS2_DIF_APPENDONLY, 147 - [7] = GFS2_DIF_NOATIME, 148 - [12] = GFS2_DIF_EXHASH, 149 - [14] = GFS2_DIF_INHERIT_JDATA, 150 - [17] = GFS2_DIF_TOPDIR, 151 - }; 152 - 153 - static const u32 gfs2_to_fsflags[32] = { 154 - [gfs2fl_Sync] = FS_SYNC_FL, 155 - [gfs2fl_Immutable] = FS_IMMUTABLE_FL, 156 - [gfs2fl_AppendOnly] = FS_APPEND_FL, 157 - [gfs2fl_NoAtime] = FS_NOATIME_FL, 158 - [gfs2fl_ExHash] = FS_INDEX_FL, 159 - [gfs2fl_TopLevel] = FS_TOPDIR_FL, 160 - [gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL, 116 + static struct { 117 + u32 fsflag; 118 + u32 gfsflag; 119 + } fsflag_gfs2flag[] = { 120 + {FS_SYNC_FL, GFS2_DIF_SYNC}, 121 + {FS_IMMUTABLE_FL, GFS2_DIF_IMMUTABLE}, 122 + {FS_APPEND_FL, GFS2_DIF_APPENDONLY}, 123 + {FS_NOATIME_FL, GFS2_DIF_NOATIME}, 124 + {FS_INDEX_FL, GFS2_DIF_EXHASH}, 125 + {FS_TOPDIR_FL, GFS2_DIF_TOPDIR}, 126 + {FS_JOURNAL_DATA_FL, GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA}, 161 127 }; 162 128 163 129 static int gfs2_get_flags(struct file *filp, u32 __user *ptr) ··· 142 154 struct inode *inode = file_inode(filp); 143 155 struct gfs2_inode *ip = GFS2_I(inode); 144 156 struct gfs2_holder gh; 145 - int error; 146 - u32 fsflags; 157 + int i, error; 158 + u32 gfsflags, fsflags = 0; 147 159 148 160 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); 149 161 error = gfs2_glock_nq(&gh); 150 162 if (error) 151 163 goto out_uninit; 152 164 153 - fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags); 154 - if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA) 155 - fsflags |= FS_JOURNAL_DATA_FL; 165 + gfsflags = ip->i_diskflags; 166 + if (S_ISDIR(inode->i_mode)) 167 + gfsflags &= ~GFS2_DIF_JDATA; 168 + else 169 + 
gfsflags &= ~GFS2_DIF_INHERIT_JDATA; 170 + for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++) 171 + if (gfsflags & fsflag_gfs2flag[i].gfsflag) 172 + fsflags |= fsflag_gfs2flag[i].fsflag; 173 + 156 174 if (put_user(fsflags, ptr)) 157 175 error = -EFAULT; 158 176 ··· 193 199 GFS2_DIF_APPENDONLY| \ 194 200 GFS2_DIF_NOATIME| \ 195 201 GFS2_DIF_SYNC| \ 196 - GFS2_DIF_SYSTEM| \ 197 202 GFS2_DIF_TOPDIR| \ 198 203 GFS2_DIF_INHERIT_JDATA) 199 204 ··· 231 238 if ((new_flags ^ flags) == 0) 232 239 goto out; 233 240 234 - error = -EINVAL; 235 - if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET) 236 - goto out; 237 - 238 241 error = -EPERM; 239 242 if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE)) 240 243 goto out; ··· 245 256 goto out; 246 257 } 247 258 if ((flags ^ new_flags) & GFS2_DIF_JDATA) { 248 - if (flags & GFS2_DIF_JDATA) 259 + if (new_flags & GFS2_DIF_JDATA) 249 260 gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH); 250 261 error = filemap_fdatawrite(inode->i_mapping); 251 262 if (error) ··· 253 264 error = filemap_fdatawait(inode->i_mapping); 254 265 if (error) 255 266 goto out; 267 + if (new_flags & GFS2_DIF_JDATA) 268 + gfs2_ordered_del_inode(ip); 256 269 } 257 270 error = gfs2_trans_begin(sdp, RES_DINODE, 0); 258 271 if (error) ··· 262 271 error = gfs2_meta_inode_buffer(ip, &bh); 263 272 if (error) 264 273 goto out_trans_end; 274 + inode->i_ctime = current_time(inode); 265 275 gfs2_trans_add_meta(ip->i_gl, bh); 266 276 ip->i_diskflags = new_flags; 267 277 gfs2_dinode_out(ip, bh->b_data); ··· 281 289 static int gfs2_set_flags(struct file *filp, u32 __user *ptr) 282 290 { 283 291 struct inode *inode = file_inode(filp); 284 - u32 fsflags, gfsflags; 292 + u32 fsflags, gfsflags = 0; 293 + u32 mask; 294 + int i; 285 295 286 296 if (get_user(fsflags, ptr)) 287 297 return -EFAULT; 288 298 289 - gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags); 290 - if (!S_ISDIR(inode->i_mode)) { 291 - gfsflags &= ~GFS2_DIF_TOPDIR; 292 - if (gfsflags & GFS2_DIF_INHERIT_JDATA) 293 - 
gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA); 294 - return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_SYSTEM); 299 + for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++) { 300 + if (fsflags & fsflag_gfs2flag[i].fsflag) { 301 + fsflags &= ~fsflag_gfs2flag[i].fsflag; 302 + gfsflags |= fsflag_gfs2flag[i].gfsflag; 303 + } 295 304 } 296 - return do_gfs2_set_flags(filp, gfsflags, ~(GFS2_DIF_SYSTEM | GFS2_DIF_JDATA)); 305 + if (fsflags || gfsflags & ~GFS2_FLAGS_USER_SET) 306 + return -EINVAL; 307 + 308 + mask = GFS2_FLAGS_USER_SET; 309 + if (S_ISDIR(inode->i_mode)) { 310 + mask &= ~GFS2_DIF_JDATA; 311 + } else { 312 + /* The GFS2_DIF_TOPDIR flag is only valid for directories. */ 313 + if (gfsflags & GFS2_DIF_TOPDIR) 314 + return -EINVAL; 315 + mask &= ~(GFS2_DIF_TOPDIR | GFS2_DIF_INHERIT_JDATA); 316 + } 317 + 318 + return do_gfs2_set_flags(filp, gfsflags, mask); 297 319 } 298 320 299 321 static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+63 -26
fs/gfs2/inode.c
··· 18 18 #include <linux/posix_acl.h> 19 19 #include <linux/gfs2_ondisk.h> 20 20 #include <linux/crc32.h> 21 - #include <linux/fiemap.h> 21 + #include <linux/iomap.h> 22 22 #include <linux/security.h> 23 23 #include <linux/uaccess.h> 24 24 ··· 189 189 190 190 gfs2_set_iop(inode); 191 191 192 - inode->i_atime.tv_sec = 0; 192 + /* Lowest possible timestamp; will be overwritten in gfs2_dinode_in. */ 193 + inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1); 193 194 inode->i_atime.tv_nsec = 0; 194 195 195 196 unlock_new_inode(inode); ··· 1987 1986 struct inode *inode = d_inode(path->dentry); 1988 1987 struct gfs2_inode *ip = GFS2_I(inode); 1989 1988 struct gfs2_holder gh; 1989 + u32 gfsflags; 1990 1990 int error; 1991 1991 1992 1992 gfs2_holder_mark_uninitialized(&gh); ··· 1997 1995 return error; 1998 1996 } 1999 1997 1998 + gfsflags = ip->i_diskflags; 1999 + if (gfsflags & GFS2_DIF_APPENDONLY) 2000 + stat->attributes |= STATX_ATTR_APPEND; 2001 + if (gfsflags & GFS2_DIF_IMMUTABLE) 2002 + stat->attributes |= STATX_ATTR_IMMUTABLE; 2003 + 2004 + stat->attributes_mask |= (STATX_ATTR_APPEND | 2005 + STATX_ATTR_COMPRESSED | 2006 + STATX_ATTR_ENCRYPTED | 2007 + STATX_ATTR_IMMUTABLE | 2008 + STATX_ATTR_NODUMP); 2009 + 2000 2010 generic_fillattr(inode, stat); 2011 + 2001 2012 if (gfs2_holder_initialized(&gh)) 2002 2013 gfs2_glock_dq_uninit(&gh); 2003 2014 2004 2015 return 0; 2005 2016 } 2017 + 2018 + const struct iomap_ops gfs2_iomap_ops = { 2019 + .iomap_begin = gfs2_iomap_begin, 2020 + }; 2006 2021 2007 2022 static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 2008 2023 u64 start, u64 len) ··· 2028 2009 struct gfs2_holder gh; 2029 2010 int ret; 2030 2011 2031 - ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC); 2032 - if (ret) 2033 - return ret; 2034 - 2035 - inode_lock(inode); 2012 + inode_lock_shared(inode); 2036 2013 2037 2014 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh); 2038 2015 if (ret) 2039 2016 goto out; 
2040 2017 2041 - if (gfs2_is_stuffed(ip)) { 2042 - u64 phys = ip->i_no_addr << inode->i_blkbits; 2043 - u64 size = i_size_read(inode); 2044 - u32 flags = FIEMAP_EXTENT_LAST|FIEMAP_EXTENT_NOT_ALIGNED| 2045 - FIEMAP_EXTENT_DATA_INLINE; 2046 - phys += sizeof(struct gfs2_dinode); 2047 - phys += start; 2048 - if (start + len > size) 2049 - len = size - start; 2050 - if (start < size) 2051 - ret = fiemap_fill_next_extent(fieinfo, start, phys, 2052 - len, flags); 2053 - if (ret == 1) 2054 - ret = 0; 2055 - } else { 2056 - ret = __generic_block_fiemap(inode, fieinfo, start, len, 2057 - gfs2_block_map); 2058 - } 2018 + ret = iomap_fiemap(inode, fieinfo, start, len, &gfs2_iomap_ops); 2059 2019 2060 2020 gfs2_glock_dq_uninit(&gh); 2021 + 2061 2022 out: 2062 - inode_unlock(inode); 2023 + inode_unlock_shared(inode); 2063 2024 return ret; 2025 + } 2026 + 2027 + loff_t gfs2_seek_data(struct file *file, loff_t offset) 2028 + { 2029 + struct inode *inode = file->f_mapping->host; 2030 + struct gfs2_inode *ip = GFS2_I(inode); 2031 + struct gfs2_holder gh; 2032 + loff_t ret; 2033 + 2034 + inode_lock_shared(inode); 2035 + ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh); 2036 + if (!ret) 2037 + ret = iomap_seek_data(inode, offset, &gfs2_iomap_ops); 2038 + gfs2_glock_dq_uninit(&gh); 2039 + inode_unlock_shared(inode); 2040 + 2041 + if (ret < 0) 2042 + return ret; 2043 + return vfs_setpos(file, ret, inode->i_sb->s_maxbytes); 2044 + } 2045 + 2046 + loff_t gfs2_seek_hole(struct file *file, loff_t offset) 2047 + { 2048 + struct inode *inode = file->f_mapping->host; 2049 + struct gfs2_inode *ip = GFS2_I(inode); 2050 + struct gfs2_holder gh; 2051 + loff_t ret; 2052 + 2053 + inode_lock_shared(inode); 2054 + ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh); 2055 + if (!ret) 2056 + ret = iomap_seek_hole(inode, offset, &gfs2_iomap_ops); 2057 + gfs2_glock_dq_uninit(&gh); 2058 + inode_unlock_shared(inode); 2059 + 2060 + if (ret < 0) 2061 + return ret; 2062 + return vfs_setpos(file, 
ret, inode->i_sb->s_maxbytes); 2064 2063 } 2065 2064 2066 2065 const struct inode_operations gfs2_file_iops = {
+2
fs/gfs2/inode.h
··· 109 109 extern struct inode *gfs2_lookup_simple(struct inode *dip, const char *name); 110 110 extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf); 111 111 extern int gfs2_open_common(struct inode *inode, struct file *file); 112 + extern loff_t gfs2_seek_data(struct file *file, loff_t offset); 113 + extern loff_t gfs2_seek_hole(struct file *file, loff_t offset); 112 114 113 115 extern const struct inode_operations gfs2_file_iops; 114 116 extern const struct inode_operations gfs2_dir_iops;
+3 -2
fs/gfs2/super.c
··· 754 754 struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl); 755 755 struct backing_dev_info *bdi = inode_to_bdi(metamapping->host); 756 756 int ret = 0; 757 + bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip)); 757 758 758 - if (wbc->sync_mode == WB_SYNC_ALL) 759 + if (flush_all) 759 760 gfs2_log_flush(GFS2_SB(inode), ip->i_gl, NORMAL_FLUSH); 760 761 if (bdi->wb.dirty_exceeded) 761 762 gfs2_ail1_flush(sdp, wbc); 762 763 else 763 764 filemap_fdatawrite(metamapping); 764 - if (wbc->sync_mode == WB_SYNC_ALL) 765 + if (flush_all) 765 766 ret = filemap_fdatawait(metamapping); 766 767 if (ret) 767 768 mark_inode_dirty_sync(inode);
+65
fs/gfs2/trace_gfs2.h
··· 13 13 #include <linux/gfs2_ondisk.h> 14 14 #include <linux/writeback.h> 15 15 #include <linux/ktime.h> 16 + #include <linux/iomap.h> 16 17 #include "incore.h" 17 18 #include "glock.h" 18 19 #include "rgrp.h" ··· 469 468 (unsigned long long)__entry->pblock, 470 469 __entry->state, __entry->create ? "create " : "nocreate", 471 470 __entry->errno) 471 + ); 472 + 473 + TRACE_EVENT(gfs2_iomap_start, 474 + 475 + TP_PROTO(const struct gfs2_inode *ip, loff_t pos, ssize_t length, 476 + u16 flags), 477 + 478 + TP_ARGS(ip, pos, length, flags), 479 + 480 + TP_STRUCT__entry( 481 + __field( dev_t, dev ) 482 + __field( u64, inum ) 483 + __field( loff_t, pos ) 484 + __field( ssize_t, length ) 485 + __field( u16, flags ) 486 + ), 487 + 488 + TP_fast_assign( 489 + __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev; 490 + __entry->inum = ip->i_no_addr; 491 + __entry->pos = pos; 492 + __entry->length = length; 493 + __entry->flags = flags; 494 + ), 495 + 496 + TP_printk("%u,%u bmap %llu iomap start %llu/%lu flags:%08x", 497 + MAJOR(__entry->dev), MINOR(__entry->dev), 498 + (unsigned long long)__entry->inum, 499 + (unsigned long long)__entry->pos, 500 + (unsigned long)__entry->length, (u16)__entry->flags) 501 + ); 502 + 503 + TRACE_EVENT(gfs2_iomap_end, 504 + 505 + TP_PROTO(const struct gfs2_inode *ip, struct iomap *iomap, int ret), 506 + 507 + TP_ARGS(ip, iomap, ret), 508 + 509 + TP_STRUCT__entry( 510 + __field( dev_t, dev ) 511 + __field( u64, inum ) 512 + __field( loff_t, offset ) 513 + __field( ssize_t, length ) 514 + __field( u16, flags ) 515 + __field( u16, type ) 516 + __field( int, ret ) 517 + ), 518 + 519 + TP_fast_assign( 520 + __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev; 521 + __entry->inum = ip->i_no_addr; 522 + __entry->offset = iomap->offset; 523 + __entry->length = iomap->length; 524 + __entry->flags = iomap->flags; 525 + __entry->type = iomap->type; 526 + __entry->ret = ret; 527 + ), 528 + 529 + TP_printk("%u,%u bmap %llu iomap end %llu/%lu ty:%d 
flags:%08x rc:%d", 530 + MAJOR(__entry->dev), MINOR(__entry->dev), 531 + (unsigned long long)__entry->inum, 532 + (unsigned long long)__entry->offset, 533 + (unsigned long)__entry->length, (u16)__entry->type, 534 + (u16)__entry->flags, __entry->ret) 472 535 ); 473 536 474 537 /* Keep track of blocks as they are allocated/freed */
+1 -1
fs/gfs2/trans.c
··· 145 145 * 146 146 * This is used in two distinct cases: 147 147 * i) In ordered write mode 148 - * We put the data buffer on a list so that we can ensure that its 148 + * We put the data buffer on a list so that we can ensure that it's 149 149 * synced to disk at the right time 150 150 * ii) In journaled data mode 151 151 * We need to journal the data block in the same way as metadata in
+24 -39
fs/gfs2/xattr.c
··· 231 231 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 232 232 struct gfs2_rgrpd *rgd; 233 233 struct gfs2_holder rg_gh; 234 - struct buffer_head *dibh; 235 234 __be64 *dataptrs; 236 235 u64 bn = 0; 237 236 u64 bstart = 0; ··· 307 308 ea->ea_num_ptrs = 0; 308 309 } 309 310 310 - error = gfs2_meta_inode_buffer(ip, &dibh); 311 - if (!error) { 312 - ip->i_inode.i_ctime = current_time(&ip->i_inode); 313 - gfs2_trans_add_meta(ip->i_gl, dibh); 314 - gfs2_dinode_out(ip, dibh->b_data); 315 - brelse(dibh); 316 - } 311 + ip->i_inode.i_ctime = current_time(&ip->i_inode); 312 + __mark_inode_dirty(&ip->i_inode, I_DIRTY_SYNC | I_DIRTY_DATASYNC); 317 313 318 314 gfs2_trans_end(sdp); 319 315 ··· 610 616 { 611 617 struct gfs2_inode *ip = GFS2_I(inode); 612 618 struct gfs2_holder gh; 613 - bool need_unlock = false; 614 619 int ret; 615 620 616 621 /* During lookup, SELinux calls this function with the glock locked. */ ··· 618 625 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); 619 626 if (ret) 620 627 return ret; 621 - need_unlock = true; 628 + } else { 629 + gfs2_holder_mark_uninitialized(&gh); 622 630 } 623 631 ret = __gfs2_xattr_get(inode, name, buffer, size, handler->flags); 624 - if (need_unlock) 632 + if (gfs2_holder_initialized(&gh)) 625 633 gfs2_glock_dq_uninit(&gh); 626 634 return ret; 627 635 } ··· 743 749 ea_skeleton_call_t skeleton_call, void *private) 744 750 { 745 751 struct gfs2_alloc_parms ap = { .target = blks }; 746 - struct buffer_head *dibh; 747 752 int error; 748 753 749 754 error = gfs2_rindex_update(GFS2_SB(&ip->i_inode)); ··· 767 774 if (error) 768 775 goto out_end_trans; 769 776 770 - error = gfs2_meta_inode_buffer(ip, &dibh); 771 - if (!error) { 772 - ip->i_inode.i_ctime = current_time(&ip->i_inode); 773 - gfs2_trans_add_meta(ip->i_gl, dibh); 774 - gfs2_dinode_out(ip, dibh->b_data); 775 - brelse(dibh); 776 - } 777 + ip->i_inode.i_ctime = current_time(&ip->i_inode); 778 + __mark_inode_dirty(&ip->i_inode, I_DIRTY_SYNC | 
I_DIRTY_DATASYNC); 777 779 778 780 out_end_trans: 779 781 gfs2_trans_end(GFS2_SB(&ip->i_inode)); ··· 879 891 struct gfs2_ea_header *ea, struct ea_set *es) 880 892 { 881 893 struct gfs2_ea_request *er = es->es_er; 882 - struct buffer_head *dibh; 883 894 int error; 884 895 885 896 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0); ··· 895 908 if (es->es_el) 896 909 ea_set_remove_stuffed(ip, es->es_el); 897 910 898 - error = gfs2_meta_inode_buffer(ip, &dibh); 899 - if (error) 900 - goto out; 901 911 ip->i_inode.i_ctime = current_time(&ip->i_inode); 902 - gfs2_trans_add_meta(ip->i_gl, dibh); 903 - gfs2_dinode_out(ip, dibh->b_data); 904 - brelse(dibh); 905 - out: 912 + __mark_inode_dirty(&ip->i_inode, I_DIRTY_SYNC | I_DIRTY_DATASYNC); 913 + 906 914 gfs2_trans_end(GFS2_SB(&ip->i_inode)); 907 915 return error; 908 916 } ··· 1093 1111 { 1094 1112 struct gfs2_ea_header *ea = el->el_ea; 1095 1113 struct gfs2_ea_header *prev = el->el_prev; 1096 - struct buffer_head *dibh; 1097 1114 int error; 1098 1115 1099 1116 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0); ··· 1113 1132 ea->ea_type = GFS2_EATYPE_UNUSED; 1114 1133 } 1115 1134 1116 - error = gfs2_meta_inode_buffer(ip, &dibh); 1117 - if (!error) { 1118 - ip->i_inode.i_ctime = current_time(&ip->i_inode); 1119 - gfs2_trans_add_meta(ip->i_gl, dibh); 1120 - gfs2_dinode_out(ip, dibh->b_data); 1121 - brelse(dibh); 1122 - } 1135 + ip->i_inode.i_ctime = current_time(&ip->i_inode); 1136 + __mark_inode_dirty(&ip->i_inode, I_DIRTY_SYNC | I_DIRTY_DATASYNC); 1123 1137 1124 1138 gfs2_trans_end(GFS2_SB(&ip->i_inode)); 1125 1139 ··· 1244 1268 if (ret) 1245 1269 return ret; 1246 1270 1247 - ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); 1248 - if (ret) 1249 - return ret; 1271 + /* May be called from gfs_setattr with the glock locked. 
*/ 1272 + 1273 + if (!gfs2_glock_is_locked_by_me(ip->i_gl)) { 1274 + ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); 1275 + if (ret) 1276 + return ret; 1277 + } else { 1278 + if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE)) 1279 + return -EIO; 1280 + gfs2_holder_mark_uninitialized(&gh); 1281 + } 1250 1282 ret = __gfs2_xattr_set(inode, name, value, size, flags, handler->flags); 1251 - gfs2_glock_dq_uninit(&gh); 1283 + if (gfs2_holder_initialized(&gh)) 1284 + gfs2_glock_dq_uninit(&gh); 1252 1285 return ret; 1253 1286 } 1254 1287
+2 -1
include/linux/iomap.h
··· 22 22 /* 23 23 * Flags for all iomap mappings: 24 24 */ 25 - #define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */ 25 + #define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */ 26 + #define IOMAP_F_BOUNDARY 0x02 /* mapping ends at metadata boundary */ 26 27 27 28 /* 28 29 * Flags that only need to be reported for IOMAP_REPORT requests: