Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'master' into for-linus

+741 -803
-5
Documentation/filesystems/xfs-delayed-logging-design.txt
··· 794 794 795 795 Roadmap: 796 796 797 - 2.6.35 Inclusion in mainline as an experimental mount option 798 - => approximately 2-3 months to merge window 799 - => needs to be in xfs-dev tree in 4-6 weeks 800 - => code is nearing readiness for review 801 - 802 797 2.6.37 Remove experimental tag from mount option 803 798 => should be roughly 6 months after initial merge 804 799 => enough time to:
+15
fs/xfs/linux-2.6/xfs_aops.c
··· 1333 1333 trace_xfs_writepage(inode, page, 0); 1334 1334 1335 1335 /* 1336 + * Refuse to write the page out if we are called from reclaim context. 1337 + * 1338 + * This is primarily to avoid stack overflows when called from deep 1339 + * used stacks in random callers for direct reclaim, but disabling 1340 + * reclaim for kswapd is a nice side-effect as kswapd causes rather 1341 + * suboptimal I/O patterns, too. 1342 + * 1343 + * This should really be done by the core VM, but until that happens 1344 + * filesystems like XFS, btrfs and ext4 have to take care of this 1345 + * by themselves. 1346 + */ 1347 + if (current->flags & PF_MEMALLOC) 1348 + goto out_fail; 1349 + 1350 + /* 1336 1351 * We need a transaction if: 1337 1352 * 1. There are delalloc buffers on the page 1338 1353 * 2. The page is uptodate and we have unmapped buffers
+13 -3
fs/xfs/linux-2.6/xfs_iops.c
··· 585 585 bf.l_len = len; 586 586 587 587 xfs_ilock(ip, XFS_IOLOCK_EXCL); 588 + 589 + /* check the new inode size is valid before allocating */ 590 + if (!(mode & FALLOC_FL_KEEP_SIZE) && 591 + offset + len > i_size_read(inode)) { 592 + new_size = offset + len; 593 + error = inode_newsize_ok(inode, new_size); 594 + if (error) 595 + goto out_unlock; 596 + } 597 + 588 598 error = -xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf, 589 599 0, XFS_ATTR_NOLOCK); 590 - if (!error && !(mode & FALLOC_FL_KEEP_SIZE) && 591 - offset + len > i_size_read(inode)) 592 - new_size = offset + len; 600 + if (error) 601 + goto out_unlock; 593 602 594 603 /* Change file size if needed */ 595 604 if (new_size) { ··· 609 600 error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK); 610 601 } 611 602 603 + out_unlock: 612 604 xfs_iunlock(ip, XFS_IOLOCK_EXCL); 613 605 out_error: 614 606 return error;
-1
fs/xfs/linux-2.6/xfs_quotaops.c
··· 23 23 #include "xfs_ag.h" 24 24 #include "xfs_mount.h" 25 25 #include "xfs_quota.h" 26 - #include "xfs_log.h" 27 26 #include "xfs_trans.h" 28 27 #include "xfs_bmap_btree.h" 29 28 #include "xfs_inode.h"
-9
fs/xfs/linux-2.6/xfs_sync.c
··· 164 164 struct xfs_perag *pag; 165 165 166 166 pag = xfs_perag_get(mp, ag); 167 - if (!pag->pag_ici_init) { 168 - xfs_perag_put(pag); 169 - continue; 170 - } 171 167 error = xfs_inode_ag_walk(mp, pag, execute, flags, tag, 172 168 exclusive, &nr); 173 169 xfs_perag_put(pag); ··· 863 867 down_read(&xfs_mount_list_lock); 864 868 list_for_each_entry(mp, &xfs_mount_list, m_mplist) { 865 869 for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) { 866 - 867 870 pag = xfs_perag_get(mp, ag); 868 - if (!pag->pag_ici_init) { 869 - xfs_perag_put(pag); 870 - continue; 871 - } 872 871 reclaimable += pag->pag_ici_reclaimable; 873 872 xfs_perag_put(pag); 874 873 }
-1
fs/xfs/linux-2.6/xfs_trace.c
··· 50 50 #include "quota/xfs_dquot_item.h" 51 51 #include "quota/xfs_dquot.h" 52 52 #include "xfs_log_recover.h" 53 - #include "xfs_buf_item.h" 54 53 #include "xfs_inode_item.h" 55 54 56 55 /*
+187 -167
fs/xfs/linux-2.6/xfs_trace.h
··· 82 82 ) 83 83 ) 84 84 85 - #define DEFINE_PERAG_REF_EVENT(name) \ 86 - TRACE_EVENT(name, \ 87 - TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount, \ 88 - unsigned long caller_ip), \ 89 - TP_ARGS(mp, agno, refcount, caller_ip), \ 90 - TP_STRUCT__entry( \ 91 - __field(dev_t, dev) \ 92 - __field(xfs_agnumber_t, agno) \ 93 - __field(int, refcount) \ 94 - __field(unsigned long, caller_ip) \ 95 - ), \ 96 - TP_fast_assign( \ 97 - __entry->dev = mp->m_super->s_dev; \ 98 - __entry->agno = agno; \ 99 - __entry->refcount = refcount; \ 100 - __entry->caller_ip = caller_ip; \ 101 - ), \ 102 - TP_printk("dev %d:%d agno %u refcount %d caller %pf", \ 103 - MAJOR(__entry->dev), MINOR(__entry->dev), \ 104 - __entry->agno, \ 105 - __entry->refcount, \ 106 - (char *)__entry->caller_ip) \ 107 - ); 108 - 109 - DEFINE_PERAG_REF_EVENT(xfs_perag_get) 110 - DEFINE_PERAG_REF_EVENT(xfs_perag_put) 111 - 112 85 #define DEFINE_ATTR_LIST_EVENT(name) \ 113 86 DEFINE_EVENT(xfs_attr_list_class, name, \ 114 87 TP_PROTO(struct xfs_attr_list_context *ctx), \ ··· 94 121 DEFINE_ATTR_LIST_EVENT(xfs_attr_list_add); 95 122 DEFINE_ATTR_LIST_EVENT(xfs_attr_list_wrong_blk); 96 123 DEFINE_ATTR_LIST_EVENT(xfs_attr_list_notfound); 124 + 125 + DECLARE_EVENT_CLASS(xfs_perag_class, 126 + TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount, 127 + unsigned long caller_ip), 128 + TP_ARGS(mp, agno, refcount, caller_ip), 129 + TP_STRUCT__entry( 130 + __field(dev_t, dev) 131 + __field(xfs_agnumber_t, agno) 132 + __field(int, refcount) 133 + __field(unsigned long, caller_ip) 134 + ), 135 + TP_fast_assign( 136 + __entry->dev = mp->m_super->s_dev; 137 + __entry->agno = agno; 138 + __entry->refcount = refcount; 139 + __entry->caller_ip = caller_ip; 140 + ), 141 + TP_printk("dev %d:%d agno %u refcount %d caller %pf", 142 + MAJOR(__entry->dev), MINOR(__entry->dev), 143 + __entry->agno, 144 + __entry->refcount, 145 + (char *)__entry->caller_ip) 146 + ); 147 + 148 + #define 
DEFINE_PERAG_REF_EVENT(name) \ 149 + DEFINE_EVENT(xfs_perag_class, name, \ 150 + TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount, \ 151 + unsigned long caller_ip), \ 152 + TP_ARGS(mp, agno, refcount, caller_ip)) 153 + DEFINE_PERAG_REF_EVENT(xfs_perag_get); 154 + DEFINE_PERAG_REF_EVENT(xfs_perag_put); 97 155 98 156 TRACE_EVENT(xfs_attr_list_node_descend, 99 157 TP_PROTO(struct xfs_attr_list_context *ctx, ··· 779 775 DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_exit); 780 776 DEFINE_LOGGRANT_EVENT(xfs_log_ungrant_sub); 781 777 782 - #define DEFINE_RW_EVENT(name) \ 783 - TRACE_EVENT(name, \ 784 - TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags), \ 785 - TP_ARGS(ip, count, offset, flags), \ 786 - TP_STRUCT__entry( \ 787 - __field(dev_t, dev) \ 788 - __field(xfs_ino_t, ino) \ 789 - __field(xfs_fsize_t, size) \ 790 - __field(xfs_fsize_t, new_size) \ 791 - __field(loff_t, offset) \ 792 - __field(size_t, count) \ 793 - __field(int, flags) \ 794 - ), \ 795 - TP_fast_assign( \ 796 - __entry->dev = VFS_I(ip)->i_sb->s_dev; \ 797 - __entry->ino = ip->i_ino; \ 798 - __entry->size = ip->i_d.di_size; \ 799 - __entry->new_size = ip->i_new_size; \ 800 - __entry->offset = offset; \ 801 - __entry->count = count; \ 802 - __entry->flags = flags; \ 803 - ), \ 804 - TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \ 805 - "offset 0x%llx count 0x%zx ioflags %s", \ 806 - MAJOR(__entry->dev), MINOR(__entry->dev), \ 807 - __entry->ino, \ 808 - __entry->size, \ 809 - __entry->new_size, \ 810 - __entry->offset, \ 811 - __entry->count, \ 812 - __print_flags(__entry->flags, "|", XFS_IO_FLAGS)) \ 778 + DECLARE_EVENT_CLASS(xfs_file_class, 779 + TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags), 780 + TP_ARGS(ip, count, offset, flags), 781 + TP_STRUCT__entry( 782 + __field(dev_t, dev) 783 + __field(xfs_ino_t, ino) 784 + __field(xfs_fsize_t, size) 785 + __field(xfs_fsize_t, new_size) 786 + __field(loff_t, offset) 787 + __field(size_t, 
count) 788 + __field(int, flags) 789 + ), 790 + TP_fast_assign( 791 + __entry->dev = VFS_I(ip)->i_sb->s_dev; 792 + __entry->ino = ip->i_ino; 793 + __entry->size = ip->i_d.di_size; 794 + __entry->new_size = ip->i_new_size; 795 + __entry->offset = offset; 796 + __entry->count = count; 797 + __entry->flags = flags; 798 + ), 799 + TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " 800 + "offset 0x%llx count 0x%zx ioflags %s", 801 + MAJOR(__entry->dev), MINOR(__entry->dev), 802 + __entry->ino, 803 + __entry->size, 804 + __entry->new_size, 805 + __entry->offset, 806 + __entry->count, 807 + __print_flags(__entry->flags, "|", XFS_IO_FLAGS)) 813 808 ) 809 + 810 + #define DEFINE_RW_EVENT(name) \ 811 + DEFINE_EVENT(xfs_file_class, name, \ 812 + TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags), \ 813 + TP_ARGS(ip, count, offset, flags)) 814 814 DEFINE_RW_EVENT(xfs_file_read); 815 815 DEFINE_RW_EVENT(xfs_file_buffered_write); 816 816 DEFINE_RW_EVENT(xfs_file_direct_write); 817 817 DEFINE_RW_EVENT(xfs_file_splice_read); 818 818 DEFINE_RW_EVENT(xfs_file_splice_write); 819 819 820 + DECLARE_EVENT_CLASS(xfs_page_class, 821 + TP_PROTO(struct inode *inode, struct page *page, unsigned long off), 822 + TP_ARGS(inode, page, off), 823 + TP_STRUCT__entry( 824 + __field(dev_t, dev) 825 + __field(xfs_ino_t, ino) 826 + __field(pgoff_t, pgoff) 827 + __field(loff_t, size) 828 + __field(unsigned long, offset) 829 + __field(int, delalloc) 830 + __field(int, unmapped) 831 + __field(int, unwritten) 832 + ), 833 + TP_fast_assign( 834 + int delalloc = -1, unmapped = -1, unwritten = -1; 820 835 821 - #define DEFINE_PAGE_EVENT(name) \ 822 - TRACE_EVENT(name, \ 823 - TP_PROTO(struct inode *inode, struct page *page, unsigned long off), \ 824 - TP_ARGS(inode, page, off), \ 825 - TP_STRUCT__entry( \ 826 - __field(dev_t, dev) \ 827 - __field(xfs_ino_t, ino) \ 828 - __field(pgoff_t, pgoff) \ 829 - __field(loff_t, size) \ 830 - __field(unsigned long, offset) \ 831 - 
__field(int, delalloc) \ 832 - __field(int, unmapped) \ 833 - __field(int, unwritten) \ 834 - ), \ 835 - TP_fast_assign( \ 836 - int delalloc = -1, unmapped = -1, unwritten = -1; \ 837 - \ 838 - if (page_has_buffers(page)) \ 839 - xfs_count_page_state(page, &delalloc, \ 840 - &unmapped, &unwritten); \ 841 - __entry->dev = inode->i_sb->s_dev; \ 842 - __entry->ino = XFS_I(inode)->i_ino; \ 843 - __entry->pgoff = page_offset(page); \ 844 - __entry->size = i_size_read(inode); \ 845 - __entry->offset = off; \ 846 - __entry->delalloc = delalloc; \ 847 - __entry->unmapped = unmapped; \ 848 - __entry->unwritten = unwritten; \ 849 - ), \ 850 - TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx " \ 851 - "delalloc %d unmapped %d unwritten %d", \ 852 - MAJOR(__entry->dev), MINOR(__entry->dev), \ 853 - __entry->ino, \ 854 - __entry->pgoff, \ 855 - __entry->size, \ 856 - __entry->offset, \ 857 - __entry->delalloc, \ 858 - __entry->unmapped, \ 859 - __entry->unwritten) \ 836 + if (page_has_buffers(page)) 837 + xfs_count_page_state(page, &delalloc, 838 + &unmapped, &unwritten); 839 + __entry->dev = inode->i_sb->s_dev; 840 + __entry->ino = XFS_I(inode)->i_ino; 841 + __entry->pgoff = page_offset(page); 842 + __entry->size = i_size_read(inode); 843 + __entry->offset = off; 844 + __entry->delalloc = delalloc; 845 + __entry->unmapped = unmapped; 846 + __entry->unwritten = unwritten; 847 + ), 848 + TP_printk("dev %d:%d ino 0x%llx pgoff 0x%lx size 0x%llx offset %lx " 849 + "delalloc %d unmapped %d unwritten %d", 850 + MAJOR(__entry->dev), MINOR(__entry->dev), 851 + __entry->ino, 852 + __entry->pgoff, 853 + __entry->size, 854 + __entry->offset, 855 + __entry->delalloc, 856 + __entry->unmapped, 857 + __entry->unwritten) 860 858 ) 859 + 860 + #define DEFINE_PAGE_EVENT(name) \ 861 + DEFINE_EVENT(xfs_page_class, name, \ 862 + TP_PROTO(struct inode *inode, struct page *page, unsigned long off), \ 863 + TP_ARGS(inode, page, off)) 861 864 DEFINE_PAGE_EVENT(xfs_writepage); 862 865 
DEFINE_PAGE_EVENT(xfs_releasepage); 863 866 DEFINE_PAGE_EVENT(xfs_invalidatepage); 864 867 865 - #define DEFINE_IOMAP_EVENT(name) \ 866 - TRACE_EVENT(name, \ 867 - TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \ 868 - int flags, struct xfs_bmbt_irec *irec), \ 869 - TP_ARGS(ip, offset, count, flags, irec), \ 870 - TP_STRUCT__entry( \ 871 - __field(dev_t, dev) \ 872 - __field(xfs_ino_t, ino) \ 873 - __field(loff_t, size) \ 874 - __field(loff_t, new_size) \ 875 - __field(loff_t, offset) \ 876 - __field(size_t, count) \ 877 - __field(int, flags) \ 878 - __field(xfs_fileoff_t, startoff) \ 879 - __field(xfs_fsblock_t, startblock) \ 880 - __field(xfs_filblks_t, blockcount) \ 881 - ), \ 882 - TP_fast_assign( \ 883 - __entry->dev = VFS_I(ip)->i_sb->s_dev; \ 884 - __entry->ino = ip->i_ino; \ 885 - __entry->size = ip->i_d.di_size; \ 886 - __entry->new_size = ip->i_new_size; \ 887 - __entry->offset = offset; \ 888 - __entry->count = count; \ 889 - __entry->flags = flags; \ 890 - __entry->startoff = irec ? irec->br_startoff : 0; \ 891 - __entry->startblock = irec ? irec->br_startblock : 0; \ 892 - __entry->blockcount = irec ? 
irec->br_blockcount : 0; \ 893 - ), \ 894 - TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \ 895 - "offset 0x%llx count %zd flags %s " \ 896 - "startoff 0x%llx startblock %lld blockcount 0x%llx", \ 897 - MAJOR(__entry->dev), MINOR(__entry->dev), \ 898 - __entry->ino, \ 899 - __entry->size, \ 900 - __entry->new_size, \ 901 - __entry->offset, \ 902 - __entry->count, \ 903 - __print_flags(__entry->flags, "|", BMAPI_FLAGS), \ 904 - __entry->startoff, \ 905 - (__int64_t)__entry->startblock, \ 906 - __entry->blockcount) \ 868 + DECLARE_EVENT_CLASS(xfs_iomap_class, 869 + TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, 870 + int flags, struct xfs_bmbt_irec *irec), 871 + TP_ARGS(ip, offset, count, flags, irec), 872 + TP_STRUCT__entry( 873 + __field(dev_t, dev) 874 + __field(xfs_ino_t, ino) 875 + __field(loff_t, size) 876 + __field(loff_t, new_size) 877 + __field(loff_t, offset) 878 + __field(size_t, count) 879 + __field(int, flags) 880 + __field(xfs_fileoff_t, startoff) 881 + __field(xfs_fsblock_t, startblock) 882 + __field(xfs_filblks_t, blockcount) 883 + ), 884 + TP_fast_assign( 885 + __entry->dev = VFS_I(ip)->i_sb->s_dev; 886 + __entry->ino = ip->i_ino; 887 + __entry->size = ip->i_d.di_size; 888 + __entry->new_size = ip->i_new_size; 889 + __entry->offset = offset; 890 + __entry->count = count; 891 + __entry->flags = flags; 892 + __entry->startoff = irec ? irec->br_startoff : 0; 893 + __entry->startblock = irec ? irec->br_startblock : 0; 894 + __entry->blockcount = irec ? 
irec->br_blockcount : 0; 895 + ), 896 + TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " 897 + "offset 0x%llx count %zd flags %s " 898 + "startoff 0x%llx startblock %lld blockcount 0x%llx", 899 + MAJOR(__entry->dev), MINOR(__entry->dev), 900 + __entry->ino, 901 + __entry->size, 902 + __entry->new_size, 903 + __entry->offset, 904 + __entry->count, 905 + __print_flags(__entry->flags, "|", BMAPI_FLAGS), 906 + __entry->startoff, 907 + (__int64_t)__entry->startblock, 908 + __entry->blockcount) 907 909 ) 910 + 911 + #define DEFINE_IOMAP_EVENT(name) \ 912 + DEFINE_EVENT(xfs_iomap_class, name, \ 913 + TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count, \ 914 + int flags, struct xfs_bmbt_irec *irec), \ 915 + TP_ARGS(ip, offset, count, flags, irec)) 908 916 DEFINE_IOMAP_EVENT(xfs_iomap_enter); 909 917 DEFINE_IOMAP_EVENT(xfs_iomap_found); 910 918 DEFINE_IOMAP_EVENT(xfs_iomap_alloc); 911 919 912 - #define DEFINE_SIMPLE_IO_EVENT(name) \ 913 - TRACE_EVENT(name, \ 914 - TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), \ 915 - TP_ARGS(ip, offset, count), \ 916 - TP_STRUCT__entry( \ 917 - __field(dev_t, dev) \ 918 - __field(xfs_ino_t, ino) \ 919 - __field(loff_t, size) \ 920 - __field(loff_t, new_size) \ 921 - __field(loff_t, offset) \ 922 - __field(size_t, count) \ 923 - ), \ 924 - TP_fast_assign( \ 925 - __entry->dev = VFS_I(ip)->i_sb->s_dev; \ 926 - __entry->ino = ip->i_ino; \ 927 - __entry->size = ip->i_d.di_size; \ 928 - __entry->new_size = ip->i_new_size; \ 929 - __entry->offset = offset; \ 930 - __entry->count = count; \ 931 - ), \ 932 - TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \ 933 - "offset 0x%llx count %zd", \ 934 - MAJOR(__entry->dev), MINOR(__entry->dev), \ 935 - __entry->ino, \ 936 - __entry->size, \ 937 - __entry->new_size, \ 938 - __entry->offset, \ 939 - __entry->count) \ 920 + DECLARE_EVENT_CLASS(xfs_simple_io_class, 921 + TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), 922 + 
TP_ARGS(ip, offset, count), 923 + TP_STRUCT__entry( 924 + __field(dev_t, dev) 925 + __field(xfs_ino_t, ino) 926 + __field(loff_t, size) 927 + __field(loff_t, new_size) 928 + __field(loff_t, offset) 929 + __field(size_t, count) 930 + ), 931 + TP_fast_assign( 932 + __entry->dev = VFS_I(ip)->i_sb->s_dev; 933 + __entry->ino = ip->i_ino; 934 + __entry->size = ip->i_d.di_size; 935 + __entry->new_size = ip->i_new_size; 936 + __entry->offset = offset; 937 + __entry->count = count; 938 + ), 939 + TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " 940 + "offset 0x%llx count %zd", 941 + MAJOR(__entry->dev), MINOR(__entry->dev), 942 + __entry->ino, 943 + __entry->size, 944 + __entry->new_size, 945 + __entry->offset, 946 + __entry->count) 940 947 ); 948 + 949 + #define DEFINE_SIMPLE_IO_EVENT(name) \ 950 + DEFINE_EVENT(xfs_simple_io_class, name, \ 951 + TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count), \ 952 + TP_ARGS(ip, offset, count)) 941 953 DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc); 942 954 DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert); 943 955
+3 -1
fs/xfs/quota/xfs_qm.c
··· 249 249 250 250 if (!xfs_Gqm) { 251 251 xfs_Gqm = xfs_Gqm_init(); 252 - if (!xfs_Gqm) 252 + if (!xfs_Gqm) { 253 + mutex_unlock(&xfs_Gqm_lock); 253 254 return ENOMEM; 255 + } 254 256 } 255 257 256 258 /*
-1
fs/xfs/xfs_ag.h
··· 227 227 228 228 atomic_t pagf_fstrms; /* # of filestreams active in this AG */ 229 229 230 - int pag_ici_init; /* incore inode cache initialised */ 231 230 rwlock_t pag_ici_lock; /* incore inode lock */ 232 231 struct radix_tree_root pag_ici_root; /* incore inode cache root */ 233 232 int pag_ici_reclaimable; /* reclaimable inodes */
+10 -19
fs/xfs/xfs_iget.c
··· 382 382 383 383 /* get the perag structure and ensure that it's inode capable */ 384 384 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino)); 385 - if (!pag->pagi_inodeok) 386 - return EINVAL; 387 - ASSERT(pag->pag_ici_init); 388 385 agino = XFS_INO_TO_AGINO(mp, ino); 389 386 390 387 again: ··· 741 744 } 742 745 743 746 #ifdef DEBUG 744 - /* 745 - * Debug-only routine, without additional rw_semaphore APIs, we can 746 - * now only answer requests regarding whether we hold the lock for write 747 - * (reader state is outside our visibility, we only track writer state). 748 - * 749 - * Note: this means !xfs_isilocked would give false positives, so don't do that. 750 - */ 751 747 int 752 748 xfs_isilocked( 753 749 xfs_inode_t *ip, 754 750 uint lock_flags) 755 751 { 756 - if ((lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) == 757 - XFS_ILOCK_EXCL) { 758 - if (!ip->i_lock.mr_writer) 759 - return 0; 752 + if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) { 753 + if (!(lock_flags & XFS_ILOCK_SHARED)) 754 + return !!ip->i_lock.mr_writer; 755 + return rwsem_is_locked(&ip->i_lock.mr_lock); 760 756 } 761 757 762 - if ((lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) == 763 - XFS_IOLOCK_EXCL) { 764 - if (!ip->i_iolock.mr_writer) 765 - return 0; 758 + if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) { 759 + if (!(lock_flags & XFS_IOLOCK_SHARED)) 760 + return !!ip->i_iolock.mr_writer; 761 + return rwsem_is_locked(&ip->i_iolock.mr_lock); 766 762 } 767 763 768 - return 1; 764 + ASSERT(0); 765 + return 0; 769 766 } 770 767 #endif
+66 -86
fs/xfs/xfs_inode.c
··· 1940 1940 int blks_per_cluster; 1941 1941 int nbufs; 1942 1942 int ninodes; 1943 - int i, j, found, pre_flushed; 1943 + int i, j; 1944 1944 xfs_daddr_t blkno; 1945 1945 xfs_buf_t *bp; 1946 - xfs_inode_t *ip, **ip_found; 1946 + xfs_inode_t *ip; 1947 1947 xfs_inode_log_item_t *iip; 1948 1948 xfs_log_item_t *lip; 1949 1949 struct xfs_perag *pag; ··· 1960 1960 nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster; 1961 1961 } 1962 1962 1963 - ip_found = kmem_alloc(ninodes * sizeof(xfs_inode_t *), KM_NOFS); 1964 - 1965 1963 for (j = 0; j < nbufs; j++, inum += ninodes) { 1964 + int found = 0; 1965 + 1966 1966 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum), 1967 1967 XFS_INO_TO_AGBNO(mp, inum)); 1968 1968 1969 - 1970 1969 /* 1971 - * Look for each inode in memory and attempt to lock it, 1972 - * we can be racing with flush and tail pushing here. 1973 - * any inode we get the locks on, add to an array of 1974 - * inode items to process later. 1975 - * 1976 - * The get the buffer lock, we could beat a flush 1977 - * or tail pushing thread to the lock here, in which 1978 - * case they will go looking for the inode buffer 1979 - * and fail, we need some other form of interlock 1980 - * here. 1970 + * We obtain and lock the backing buffer first in the process 1971 + * here, as we have to ensure that any dirty inode that we 1972 + * can't get the flush lock on is attached to the buffer. 1973 + * If we scan the in-memory inodes first, then buffer IO can 1974 + * complete before we get a lock on it, and hence we may fail 1975 + * to mark all the active inodes on the buffer stale. 
1981 1976 */ 1982 - found = 0; 1983 - for (i = 0; i < ninodes; i++) { 1984 - read_lock(&pag->pag_ici_lock); 1985 - ip = radix_tree_lookup(&pag->pag_ici_root, 1986 - XFS_INO_TO_AGINO(mp, (inum + i))); 1987 - 1988 - /* Inode not in memory or we found it already, 1989 - * nothing to do 1990 - */ 1991 - if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) { 1992 - read_unlock(&pag->pag_ici_lock); 1993 - continue; 1994 - } 1995 - 1996 - if (xfs_inode_clean(ip)) { 1997 - read_unlock(&pag->pag_ici_lock); 1998 - continue; 1999 - } 2000 - 2001 - /* If we can get the locks then add it to the 2002 - * list, otherwise by the time we get the bp lock 2003 - * below it will already be attached to the 2004 - * inode buffer. 2005 - */ 2006 - 2007 - /* This inode will already be locked - by us, lets 2008 - * keep it that way. 2009 - */ 2010 - 2011 - if (ip == free_ip) { 2012 - if (xfs_iflock_nowait(ip)) { 2013 - xfs_iflags_set(ip, XFS_ISTALE); 2014 - if (xfs_inode_clean(ip)) { 2015 - xfs_ifunlock(ip); 2016 - } else { 2017 - ip_found[found++] = ip; 2018 - } 2019 - } 2020 - read_unlock(&pag->pag_ici_lock); 2021 - continue; 2022 - } 2023 - 2024 - if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { 2025 - if (xfs_iflock_nowait(ip)) { 2026 - xfs_iflags_set(ip, XFS_ISTALE); 2027 - 2028 - if (xfs_inode_clean(ip)) { 2029 - xfs_ifunlock(ip); 2030 - xfs_iunlock(ip, XFS_ILOCK_EXCL); 2031 - } else { 2032 - ip_found[found++] = ip; 2033 - } 2034 - } else { 2035 - xfs_iunlock(ip, XFS_ILOCK_EXCL); 2036 - } 2037 - } 2038 - read_unlock(&pag->pag_ici_lock); 2039 - } 2040 - 2041 - bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, 1977 + bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, 2042 1978 mp->m_bsize * blks_per_cluster, 2043 1979 XBF_LOCK); 2044 1980 2045 - pre_flushed = 0; 1981 + /* 1982 + * Walk the inodes already attached to the buffer and mark them 1983 + * stale. These will all have the flush locks held, so an 1984 + * in-memory inode walk can't lock them. 
1985 + */ 2046 1986 lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *); 2047 1987 while (lip) { 2048 1988 if (lip->li_type == XFS_LI_INODE) { ··· 1993 2053 &iip->ili_flush_lsn, 1994 2054 &iip->ili_item.li_lsn); 1995 2055 xfs_iflags_set(iip->ili_inode, XFS_ISTALE); 1996 - pre_flushed++; 2056 + found++; 1997 2057 } 1998 2058 lip = lip->li_bio_list; 1999 2059 } 2000 2060 2001 - for (i = 0; i < found; i++) { 2002 - ip = ip_found[i]; 2003 - iip = ip->i_itemp; 2061 + /* 2062 + * For each inode in memory attempt to add it to the inode 2063 + * buffer and set it up for being staled on buffer IO 2064 + * completion. This is safe as we've locked out tail pushing 2065 + * and flushing by locking the buffer. 2066 + * 2067 + * We have already marked every inode that was part of a 2068 + * transaction stale above, which means there is no point in 2069 + * even trying to lock them. 2070 + */ 2071 + for (i = 0; i < ninodes; i++) { 2072 + read_lock(&pag->pag_ici_lock); 2073 + ip = radix_tree_lookup(&pag->pag_ici_root, 2074 + XFS_INO_TO_AGINO(mp, (inum + i))); 2004 2075 2076 + /* Inode not in memory or stale, nothing to do */ 2077 + if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) { 2078 + read_unlock(&pag->pag_ici_lock); 2079 + continue; 2080 + } 2081 + 2082 + /* don't try to lock/unlock the current inode */ 2083 + if (ip != free_ip && 2084 + !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { 2085 + read_unlock(&pag->pag_ici_lock); 2086 + continue; 2087 + } 2088 + read_unlock(&pag->pag_ici_lock); 2089 + 2090 + if (!xfs_iflock_nowait(ip)) { 2091 + if (ip != free_ip) 2092 + xfs_iunlock(ip, XFS_ILOCK_EXCL); 2093 + continue; 2094 + } 2095 + 2096 + xfs_iflags_set(ip, XFS_ISTALE); 2097 + if (xfs_inode_clean(ip)) { 2098 + ASSERT(ip != free_ip); 2099 + xfs_ifunlock(ip); 2100 + xfs_iunlock(ip, XFS_ILOCK_EXCL); 2101 + continue; 2102 + } 2103 + 2104 + iip = ip->i_itemp; 2005 2105 if (!iip) { 2106 + /* inode with unlogged changes only */ 2107 + ASSERT(ip != free_ip); 2006 2108 ip->i_update_core = 0; 2007 2109 
xfs_ifunlock(ip); 2008 2110 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2009 2111 continue; 2010 2112 } 2113 + found++; 2011 2114 2012 2115 iip->ili_last_fields = iip->ili_format.ilf_fields; 2013 2116 iip->ili_format.ilf_fields = 0; ··· 2061 2078 xfs_buf_attach_iodone(bp, 2062 2079 (void(*)(xfs_buf_t*,xfs_log_item_t*)) 2063 2080 xfs_istale_done, (xfs_log_item_t *)iip); 2064 - if (ip != free_ip) { 2081 + 2082 + if (ip != free_ip) 2065 2083 xfs_iunlock(ip, XFS_ILOCK_EXCL); 2066 - } 2067 2084 } 2068 2085 2069 - if (found || pre_flushed) 2086 + if (found) 2070 2087 xfs_trans_stale_inode_buf(tp, bp); 2071 2088 xfs_trans_binval(tp, bp); 2072 2089 } 2073 2090 2074 - kmem_free(ip_found); 2075 2091 xfs_perag_put(pag); 2076 2092 } 2077 2093 ··· 2631 2649 int i; 2632 2650 2633 2651 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); 2634 - ASSERT(pag->pagi_inodeok); 2635 - ASSERT(pag->pag_ici_init); 2636 2652 2637 2653 inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog; 2638 2654 ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
+3 -8
fs/xfs/xfs_log_recover.c
··· 132 132 int nbblks, 133 133 xfs_buf_t *bp) 134 134 { 135 - xfs_daddr_t offset; 136 - xfs_caddr_t ptr; 135 + xfs_daddr_t offset = blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1); 137 136 138 - offset = blk_no & ((xfs_daddr_t) log->l_sectBBsize - 1); 139 - ptr = XFS_BUF_PTR(bp) + BBTOB(offset); 140 - 141 - ASSERT(ptr + BBTOB(nbblks) <= XFS_BUF_PTR(bp) + XFS_BUF_SIZE(bp)); 142 - 143 - return ptr; 137 + ASSERT(BBTOB(offset + nbblks) <= XFS_BUF_SIZE(bp)); 138 + return XFS_BUF_PTR(bp) + BBTOB(offset); 144 139 } 145 140 146 141
+31 -41
fs/xfs/xfs_mount.c
··· 268 268 269 269 #if XFS_BIG_BLKNOS /* Limited by ULONG_MAX of page cache index */ 270 270 if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX) 271 - return E2BIG; 271 + return EFBIG; 272 272 #else /* Limited by UINT_MAX of sectors */ 273 273 if (nblocks << (sbp->sb_blocklog - BBSHIFT) > UINT_MAX) 274 - return E2BIG; 274 + return EFBIG; 275 275 #endif 276 276 return 0; 277 277 } ··· 393 393 xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) { 394 394 xfs_fs_mount_cmn_err(flags, 395 395 "file system too large to be mounted on this system."); 396 - return XFS_ERROR(E2BIG); 396 + return XFS_ERROR(EFBIG); 397 397 } 398 398 399 399 if (unlikely(sbp->sb_inprogress)) { ··· 413 413 return 0; 414 414 } 415 415 416 - STATIC void 417 - xfs_initialize_perag_icache( 418 - xfs_perag_t *pag) 419 - { 420 - if (!pag->pag_ici_init) { 421 - rwlock_init(&pag->pag_ici_lock); 422 - INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC); 423 - pag->pag_ici_init = 1; 424 - } 425 - } 426 - 427 416 int 428 417 xfs_initialize_perag( 429 418 xfs_mount_t *mp, ··· 425 436 xfs_agino_t agino; 426 437 xfs_ino_t ino; 427 438 xfs_sb_t *sbp = &mp->m_sb; 428 - xfs_ino_t max_inum = XFS_MAXINUMBER_32; 429 439 int error = -ENOMEM; 430 - 431 - /* Check to see if the filesystem can overflow 32 bit inodes */ 432 - agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0); 433 - ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino); 434 440 435 441 /* 436 442 * Walk the current per-ag tree so we don't try to initialise AGs ··· 440 456 } 441 457 if (!first_initialised) 442 458 first_initialised = index; 459 + 443 460 pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL); 444 461 if (!pag) 445 462 goto out_unwind; 463 + pag->pag_agno = index; 464 + pag->pag_mount = mp; 465 + rwlock_init(&pag->pag_ici_lock); 466 + INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC); 467 + 446 468 if (radix_tree_preload(GFP_NOFS)) 447 469 goto out_unwind; 470 + 448 471 spin_lock(&mp->m_perag_lock); 449 472 if 
(radix_tree_insert(&mp->m_perag_tree, index, pag)) { 450 473 BUG(); ··· 460 469 error = -EEXIST; 461 470 goto out_unwind; 462 471 } 463 - pag->pag_agno = index; 464 - pag->pag_mount = mp; 465 472 spin_unlock(&mp->m_perag_lock); 466 473 radix_tree_preload_end(); 467 474 } 468 475 469 - /* Clear the mount flag if no inode can overflow 32 bits 470 - * on this filesystem, or if specifically requested.. 476 + /* 477 + * If we mount with the inode64 option, or no inode overflows 478 + * the legacy 32-bit address space clear the inode32 option. 471 479 */ 472 - if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > max_inum) { 473 - mp->m_flags |= XFS_MOUNT_32BITINODES; 474 - } else { 475 - mp->m_flags &= ~XFS_MOUNT_32BITINODES; 476 - } 480 + agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0); 481 + ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino); 477 482 478 - /* If we can overflow then setup the ag headers accordingly */ 483 + if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32) 484 + mp->m_flags |= XFS_MOUNT_32BITINODES; 485 + else 486 + mp->m_flags &= ~XFS_MOUNT_32BITINODES; 487 + 479 488 if (mp->m_flags & XFS_MOUNT_32BITINODES) { 480 - /* Calculate how much should be reserved for inodes to 481 - * meet the max inode percentage. 489 + /* 490 + * Calculate how much should be reserved for inodes to meet 491 + * the max inode percentage. 
482 492 */ 483 493 if (mp->m_maxicount) { 484 494 __uint64_t icount; ··· 492 500 } else { 493 501 max_metadata = agcount; 494 502 } 503 + 495 504 for (index = 0; index < agcount; index++) { 496 505 ino = XFS_AGINO_TO_INO(mp, index, agino); 497 - if (ino > max_inum) { 506 + if (ino > XFS_MAXINUMBER_32) { 498 507 index++; 499 508 break; 500 509 } 501 510 502 - /* This ag is preferred for inodes */ 503 511 pag = xfs_perag_get(mp, index); 504 512 pag->pagi_inodeok = 1; 505 513 if (index < max_metadata) 506 514 pag->pagf_metadata = 1; 507 - xfs_initialize_perag_icache(pag); 508 515 xfs_perag_put(pag); 509 516 } 510 517 } else { 511 - /* Setup default behavior for smaller filesystems */ 512 518 for (index = 0; index < agcount; index++) { 513 519 pag = xfs_perag_get(mp, index); 514 520 pag->pagi_inodeok = 1; 515 - xfs_initialize_perag_icache(pag); 516 521 xfs_perag_put(pag); 517 522 } 518 523 } 524 + 519 525 if (maxagi) 520 526 *maxagi = index; 521 527 return 0; ··· 999 1009 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks); 1000 1010 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) { 1001 1011 cmn_err(CE_WARN, "XFS: size check 1 failed"); 1002 - return XFS_ERROR(E2BIG); 1012 + return XFS_ERROR(EFBIG); 1003 1013 } 1004 1014 error = xfs_read_buf(mp, mp->m_ddev_targp, 1005 1015 d - XFS_FSS_TO_BB(mp, 1), ··· 1009 1019 } else { 1010 1020 cmn_err(CE_WARN, "XFS: size check 2 failed"); 1011 1021 if (error == ENOSPC) 1012 - error = XFS_ERROR(E2BIG); 1022 + error = XFS_ERROR(EFBIG); 1013 1023 return error; 1014 1024 } 1015 1025 ··· 1017 1027 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks); 1018 1028 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) { 1019 1029 cmn_err(CE_WARN, "XFS: size check 3 failed"); 1020 - return XFS_ERROR(E2BIG); 1030 + return XFS_ERROR(EFBIG); 1021 1031 } 1022 1032 error = xfs_read_buf(mp, mp->m_logdev_targp, 1023 1033 d - XFS_FSB_TO_BB(mp, 1), ··· 1027 1037 } else { 1028 1038 cmn_err(CE_WARN, "XFS: size check 3 failed"); 1029 1039 if 
(error == ENOSPC) 1030 - error = XFS_ERROR(E2BIG); 1040 + error = XFS_ERROR(EFBIG); 1031 1041 return error; 1032 1042 } 1033 1043 } ··· 1244 1254 * Allocate and initialize the per-ag data. 1245 1255 */ 1246 1256 spin_lock_init(&mp->m_perag_lock); 1247 - INIT_RADIX_TREE(&mp->m_perag_tree, GFP_NOFS); 1257 + INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC); 1248 1258 error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi); 1249 1259 if (error) { 1250 1260 cmn_err(CE_WARN, "XFS: Failed per-ag init: %d", error);
+2 -2
fs/xfs/xfs_rtalloc.c
··· 2247 2247 cmn_err(CE_WARN, "XFS: realtime mount -- %llu != %llu", 2248 2248 (unsigned long long) XFS_BB_TO_FSB(mp, d), 2249 2249 (unsigned long long) mp->m_sb.sb_rblocks); 2250 - return XFS_ERROR(E2BIG); 2250 + return XFS_ERROR(EFBIG); 2251 2251 } 2252 2252 error = xfs_read_buf(mp, mp->m_rtdev_targp, 2253 2253 d - XFS_FSB_TO_BB(mp, 1), ··· 2256 2256 cmn_err(CE_WARN, 2257 2257 "XFS: realtime mount -- xfs_read_buf failed, returned %d", error); 2258 2258 if (error == ENOSPC) 2259 - return XFS_ERROR(E2BIG); 2259 + return XFS_ERROR(EFBIG); 2260 2260 return error; 2261 2261 } 2262 2262 xfs_buf_relse(bp);
+10 -1
fs/xfs/xfs_rtalloc.h
··· 147 147 # define xfs_rtfree_extent(t,b,l) (ENOSYS) 148 148 # define xfs_rtpick_extent(m,t,l,rb) (ENOSYS) 149 149 # define xfs_growfs_rt(mp,in) (ENOSYS) 150 - # define xfs_rtmount_init(m) (((mp)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS)) 150 + static inline int /* error */ 151 + xfs_rtmount_init( 152 + xfs_mount_t *mp) /* file system mount structure */ 153 + { 154 + if (mp->m_sb.sb_rblocks == 0) 155 + return 0; 156 + 157 + cmn_err(CE_WARN, "XFS: Not built with CONFIG_XFS_RT"); 158 + return ENOSYS; 159 + } 151 160 # define xfs_rtmount_inodes(m) (((mp)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS)) 152 161 # define xfs_rtunmount_inodes(m) 153 162 #endif /* CONFIG_XFS_RT */
+400 -46
fs/xfs/xfs_trans.c
··· 48 48 49 49 kmem_zone_t *xfs_trans_zone; 50 50 51 + 51 52 /* 52 - * Reservation functions here avoid a huge stack in xfs_trans_init 53 - * due to register overflow from temporaries in the calculations. 53 + * Various log reservation values. 54 + * 55 + * These are based on the size of the file system block because that is what 56 + * most transactions manipulate. Each adds in an additional 128 bytes per 57 + * item logged to try to account for the overhead of the transaction mechanism. 58 + * 59 + * Note: Most of the reservations underestimate the number of allocation 60 + * groups into which they could free extents in the xfs_bmap_finish() call. 61 + * This is because the number in the worst case is quite high and quite 62 + * unusual. In order to fix this we need to change xfs_bmap_finish() to free 63 + * extents in only a single AG at a time. This will require changes to the 64 + * EFI code as well, however, so that the EFI for the extents not freed is 65 + * logged again in each transaction. See SGI PV #261917. 66 + * 67 + * Reservation functions here avoid a huge stack in xfs_trans_init due to 68 + * register overflow from temporaries in the calculations. 69 + */ 70 + 71 + 72 + /* 73 + * In a write transaction we can allocate a maximum of 2 74 + * extents. 
This gives: 75 + * the inode getting the new extents: inode size 76 + * the inode's bmap btree: max depth * block size 77 + * the agfs of the ags from which the extents are allocated: 2 * sector 78 + * the superblock free block counter: sector size 79 + * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size 80 + * And the bmap_finish transaction can free bmap blocks in a join: 81 + * the agfs of the ags containing the blocks: 2 * sector size 82 + * the agfls of the ags containing the blocks: 2 * sector size 83 + * the super block free block counter: sector size 84 + * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size 54 85 */ 55 86 STATIC uint 56 - xfs_calc_write_reservation(xfs_mount_t *mp) 87 + xfs_calc_write_reservation( 88 + struct xfs_mount *mp) 57 89 { 58 - return XFS_CALC_WRITE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); 90 + return XFS_DQUOT_LOGRES(mp) + 91 + MAX((mp->m_sb.sb_inodesize + 92 + XFS_FSB_TO_B(mp, XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)) + 93 + 2 * mp->m_sb.sb_sectsize + 94 + mp->m_sb.sb_sectsize + 95 + XFS_ALLOCFREE_LOG_RES(mp, 2) + 96 + 128 * (4 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 97 + XFS_ALLOCFREE_LOG_COUNT(mp, 2))), 98 + (2 * mp->m_sb.sb_sectsize + 99 + 2 * mp->m_sb.sb_sectsize + 100 + mp->m_sb.sb_sectsize + 101 + XFS_ALLOCFREE_LOG_RES(mp, 2) + 102 + 128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2)))); 59 103 } 60 104 105 + /* 106 + * In truncating a file we free up to two extents at once. 
We can modify: 107 + * the inode being truncated: inode size 108 + * the inode's bmap btree: (max depth + 1) * block size 109 + * And the bmap_finish transaction can free the blocks and bmap blocks: 110 + * the agf for each of the ags: 4 * sector size 111 + * the agfl for each of the ags: 4 * sector size 112 + * the super block to reflect the freed blocks: sector size 113 + * worst case split in allocation btrees per extent assuming 4 extents: 114 + * 4 exts * 2 trees * (2 * max depth - 1) * block size 115 + * the inode btree: max depth * blocksize 116 + * the allocation btrees: 2 trees * (max depth - 1) * block size 117 + */ 61 118 STATIC uint 62 - xfs_calc_itruncate_reservation(xfs_mount_t *mp) 119 + xfs_calc_itruncate_reservation( 120 + struct xfs_mount *mp) 63 121 { 64 - return XFS_CALC_ITRUNCATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); 122 + return XFS_DQUOT_LOGRES(mp) + 123 + MAX((mp->m_sb.sb_inodesize + 124 + XFS_FSB_TO_B(mp, XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1) + 125 + 128 * (2 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK))), 126 + (4 * mp->m_sb.sb_sectsize + 127 + 4 * mp->m_sb.sb_sectsize + 128 + mp->m_sb.sb_sectsize + 129 + XFS_ALLOCFREE_LOG_RES(mp, 4) + 130 + 128 * (9 + XFS_ALLOCFREE_LOG_COUNT(mp, 4)) + 131 + 128 * 5 + 132 + XFS_ALLOCFREE_LOG_RES(mp, 1) + 133 + 128 * (2 + XFS_IALLOC_BLOCKS(mp) + mp->m_in_maxlevels + 134 + XFS_ALLOCFREE_LOG_COUNT(mp, 1)))); 65 135 } 66 136 137 + /* 138 + * In renaming a files we can modify: 139 + * the four inodes involved: 4 * inode size 140 + * the two directory btrees: 2 * (max depth + v2) * dir block size 141 + * the two directory bmap btrees: 2 * max depth * block size 142 + * And the bmap_finish transaction can free dir and bmap blocks (two sets 143 + * of bmap blocks) giving: 144 + * the agf for the ags in which the blocks live: 3 * sector size 145 + * the agfl for the ags in which the blocks live: 3 * sector size 146 + * the superblock for the free block count: sector size 147 + * the allocation btrees: 3 exts * 2 trees * 
(2 * max depth - 1) * block size 148 + */ 67 149 STATIC uint 68 - xfs_calc_rename_reservation(xfs_mount_t *mp) 150 + xfs_calc_rename_reservation( 151 + struct xfs_mount *mp) 69 152 { 70 - return XFS_CALC_RENAME_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); 153 + return XFS_DQUOT_LOGRES(mp) + 154 + MAX((4 * mp->m_sb.sb_inodesize + 155 + 2 * XFS_DIROP_LOG_RES(mp) + 156 + 128 * (4 + 2 * XFS_DIROP_LOG_COUNT(mp))), 157 + (3 * mp->m_sb.sb_sectsize + 158 + 3 * mp->m_sb.sb_sectsize + 159 + mp->m_sb.sb_sectsize + 160 + XFS_ALLOCFREE_LOG_RES(mp, 3) + 161 + 128 * (7 + XFS_ALLOCFREE_LOG_COUNT(mp, 3)))); 71 162 } 72 163 164 + /* 165 + * For creating a link to an inode: 166 + * the parent directory inode: inode size 167 + * the linked inode: inode size 168 + * the directory btree could split: (max depth + v2) * dir block size 169 + * the directory bmap btree could join or split: (max depth + v2) * blocksize 170 + * And the bmap_finish transaction can free some bmap blocks giving: 171 + * the agf for the ag in which the blocks live: sector size 172 + * the agfl for the ag in which the blocks live: sector size 173 + * the superblock for the free block count: sector size 174 + * the allocation btrees: 2 trees * (2 * max depth - 1) * block size 175 + */ 73 176 STATIC uint 74 - xfs_calc_link_reservation(xfs_mount_t *mp) 177 + xfs_calc_link_reservation( 178 + struct xfs_mount *mp) 75 179 { 76 - return XFS_CALC_LINK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); 180 + return XFS_DQUOT_LOGRES(mp) + 181 + MAX((mp->m_sb.sb_inodesize + 182 + mp->m_sb.sb_inodesize + 183 + XFS_DIROP_LOG_RES(mp) + 184 + 128 * (2 + XFS_DIROP_LOG_COUNT(mp))), 185 + (mp->m_sb.sb_sectsize + 186 + mp->m_sb.sb_sectsize + 187 + mp->m_sb.sb_sectsize + 188 + XFS_ALLOCFREE_LOG_RES(mp, 1) + 189 + 128 * (3 + XFS_ALLOCFREE_LOG_COUNT(mp, 1)))); 77 190 } 78 191 192 + /* 193 + * For removing a directory entry we can modify: 194 + * the parent directory inode: inode size 195 + * the removed inode: inode size 196 + * the directory btree could 
join: (max depth + v2) * dir block size 197 + * the directory bmap btree could join or split: (max depth + v2) * blocksize 198 + * And the bmap_finish transaction can free the dir and bmap blocks giving: 199 + * the agf for the ag in which the blocks live: 2 * sector size 200 + * the agfl for the ag in which the blocks live: 2 * sector size 201 + * the superblock for the free block count: sector size 202 + * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size 203 + */ 79 204 STATIC uint 80 - xfs_calc_remove_reservation(xfs_mount_t *mp) 205 + xfs_calc_remove_reservation( 206 + struct xfs_mount *mp) 81 207 { 82 - return XFS_CALC_REMOVE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); 208 + return XFS_DQUOT_LOGRES(mp) + 209 + MAX((mp->m_sb.sb_inodesize + 210 + mp->m_sb.sb_inodesize + 211 + XFS_DIROP_LOG_RES(mp) + 212 + 128 * (2 + XFS_DIROP_LOG_COUNT(mp))), 213 + (2 * mp->m_sb.sb_sectsize + 214 + 2 * mp->m_sb.sb_sectsize + 215 + mp->m_sb.sb_sectsize + 216 + XFS_ALLOCFREE_LOG_RES(mp, 2) + 217 + 128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2)))); 83 218 } 84 219 220 + /* 221 + * For symlink we can modify: 222 + * the parent directory inode: inode size 223 + * the new inode: inode size 224 + * the inode btree entry: 1 block 225 + * the directory btree: (max depth + v2) * dir block size 226 + * the directory inode's bmap btree: (max depth + v2) * block size 227 + * the blocks for the symlink: 1 kB 228 + * Or in the first xact we allocate some inodes giving: 229 + * the agi and agf of the ag getting the new inodes: 2 * sectorsize 230 + * the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize 231 + * the inode btree: max depth * blocksize 232 + * the allocation btrees: 2 trees * (2 * max depth - 1) * block size 233 + */ 85 234 STATIC uint 86 - xfs_calc_symlink_reservation(xfs_mount_t *mp) 235 + xfs_calc_symlink_reservation( 236 + struct xfs_mount *mp) 87 237 { 88 - return XFS_CALC_SYMLINK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); 238 + return XFS_DQUOT_LOGRES(mp) + 239 + 
MAX((mp->m_sb.sb_inodesize + 240 + mp->m_sb.sb_inodesize + 241 + XFS_FSB_TO_B(mp, 1) + 242 + XFS_DIROP_LOG_RES(mp) + 243 + 1024 + 244 + 128 * (4 + XFS_DIROP_LOG_COUNT(mp))), 245 + (2 * mp->m_sb.sb_sectsize + 246 + XFS_FSB_TO_B(mp, XFS_IALLOC_BLOCKS(mp)) + 247 + XFS_FSB_TO_B(mp, mp->m_in_maxlevels) + 248 + XFS_ALLOCFREE_LOG_RES(mp, 1) + 249 + 128 * (2 + XFS_IALLOC_BLOCKS(mp) + mp->m_in_maxlevels + 250 + XFS_ALLOCFREE_LOG_COUNT(mp, 1)))); 89 251 } 90 252 253 + /* 254 + * For create we can modify: 255 + * the parent directory inode: inode size 256 + * the new inode: inode size 257 + * the inode btree entry: block size 258 + * the superblock for the nlink flag: sector size 259 + * the directory btree: (max depth + v2) * dir block size 260 + * the directory inode's bmap btree: (max depth + v2) * block size 261 + * Or in the first xact we allocate some inodes giving: 262 + * the agi and agf of the ag getting the new inodes: 2 * sectorsize 263 + * the superblock for the nlink flag: sector size 264 + * the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize 265 + * the inode btree: max depth * blocksize 266 + * the allocation btrees: 2 trees * (max depth - 1) * block size 267 + */ 91 268 STATIC uint 92 - xfs_calc_create_reservation(xfs_mount_t *mp) 269 + xfs_calc_create_reservation( 270 + struct xfs_mount *mp) 93 271 { 94 - return XFS_CALC_CREATE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); 272 + return XFS_DQUOT_LOGRES(mp) + 273 + MAX((mp->m_sb.sb_inodesize + 274 + mp->m_sb.sb_inodesize + 275 + mp->m_sb.sb_sectsize + 276 + XFS_FSB_TO_B(mp, 1) + 277 + XFS_DIROP_LOG_RES(mp) + 278 + 128 * (3 + XFS_DIROP_LOG_COUNT(mp))), 279 + (3 * mp->m_sb.sb_sectsize + 280 + XFS_FSB_TO_B(mp, XFS_IALLOC_BLOCKS(mp)) + 281 + XFS_FSB_TO_B(mp, mp->m_in_maxlevels) + 282 + XFS_ALLOCFREE_LOG_RES(mp, 1) + 283 + 128 * (2 + XFS_IALLOC_BLOCKS(mp) + mp->m_in_maxlevels + 284 + XFS_ALLOCFREE_LOG_COUNT(mp, 1)))); 95 285 } 96 286 287 + /* 288 + * Making a new directory is the same as creating a new file. 
289 + */ 97 290 STATIC uint 98 - xfs_calc_mkdir_reservation(xfs_mount_t *mp) 291 + xfs_calc_mkdir_reservation( 292 + struct xfs_mount *mp) 99 293 { 100 - return XFS_CALC_MKDIR_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); 294 + return xfs_calc_create_reservation(mp); 101 295 } 102 296 297 + /* 298 + * In freeing an inode we can modify: 299 + * the inode being freed: inode size 300 + * the super block free inode counter: sector size 301 + * the agi hash list and counters: sector size 302 + * the inode btree entry: block size 303 + * the on disk inode before ours in the agi hash list: inode cluster size 304 + * the inode btree: max depth * blocksize 305 + * the allocation btrees: 2 trees * (max depth - 1) * block size 306 + */ 103 307 STATIC uint 104 - xfs_calc_ifree_reservation(xfs_mount_t *mp) 308 + xfs_calc_ifree_reservation( 309 + struct xfs_mount *mp) 105 310 { 106 - return XFS_CALC_IFREE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); 311 + return XFS_DQUOT_LOGRES(mp) + 312 + mp->m_sb.sb_inodesize + 313 + mp->m_sb.sb_sectsize + 314 + mp->m_sb.sb_sectsize + 315 + XFS_FSB_TO_B(mp, 1) + 316 + MAX((__uint16_t)XFS_FSB_TO_B(mp, 1), 317 + XFS_INODE_CLUSTER_SIZE(mp)) + 318 + 128 * 5 + 319 + XFS_ALLOCFREE_LOG_RES(mp, 1) + 320 + 128 * (2 + XFS_IALLOC_BLOCKS(mp) + mp->m_in_maxlevels + 321 + XFS_ALLOCFREE_LOG_COUNT(mp, 1)); 107 322 } 108 323 324 + /* 325 + * When only changing the inode we log the inode and possibly the superblock 326 + * We also add a bit of slop for the transaction stuff. 327 + */ 109 328 STATIC uint 110 - xfs_calc_ichange_reservation(xfs_mount_t *mp) 329 + xfs_calc_ichange_reservation( 330 + struct xfs_mount *mp) 111 331 { 112 - return XFS_CALC_ICHANGE_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); 332 + return XFS_DQUOT_LOGRES(mp) + 333 + mp->m_sb.sb_inodesize + 334 + mp->m_sb.sb_sectsize + 335 + 512; 336 + 113 337 } 114 338 339 + /* 340 + * Growing the data section of the filesystem. 
341 + * superblock 342 + * agi and agf 343 + * allocation btrees 344 + */ 115 345 STATIC uint 116 - xfs_calc_growdata_reservation(xfs_mount_t *mp) 346 + xfs_calc_growdata_reservation( 347 + struct xfs_mount *mp) 117 348 { 118 - return XFS_CALC_GROWDATA_LOG_RES(mp); 349 + return mp->m_sb.sb_sectsize * 3 + 350 + XFS_ALLOCFREE_LOG_RES(mp, 1) + 351 + 128 * (3 + XFS_ALLOCFREE_LOG_COUNT(mp, 1)); 119 352 } 120 353 354 + /* 355 + * Growing the rt section of the filesystem. 356 + * In the first set of transactions (ALLOC) we allocate space to the 357 + * bitmap or summary files. 358 + * superblock: sector size 359 + * agf of the ag from which the extent is allocated: sector size 360 + * bmap btree for bitmap/summary inode: max depth * blocksize 361 + * bitmap/summary inode: inode size 362 + * allocation btrees for 1 block alloc: 2 * (2 * maxdepth - 1) * blocksize 363 + */ 121 364 STATIC uint 122 - xfs_calc_growrtalloc_reservation(xfs_mount_t *mp) 365 + xfs_calc_growrtalloc_reservation( 366 + struct xfs_mount *mp) 123 367 { 124 - return XFS_CALC_GROWRTALLOC_LOG_RES(mp); 368 + return 2 * mp->m_sb.sb_sectsize + 369 + XFS_FSB_TO_B(mp, XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)) + 370 + mp->m_sb.sb_inodesize + 371 + XFS_ALLOCFREE_LOG_RES(mp, 1) + 372 + 128 * (3 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 373 + XFS_ALLOCFREE_LOG_COUNT(mp, 1)); 125 374 } 126 375 376 + /* 377 + * Growing the rt section of the filesystem. 378 + * In the second set of transactions (ZERO) we zero the new metadata blocks. 379 + * one bitmap/summary block: blocksize 380 + */ 127 381 STATIC uint 128 - xfs_calc_growrtzero_reservation(xfs_mount_t *mp) 382 + xfs_calc_growrtzero_reservation( 383 + struct xfs_mount *mp) 129 384 { 130 - return XFS_CALC_GROWRTZERO_LOG_RES(mp); 385 + return mp->m_sb.sb_blocksize + 128; 131 386 } 132 387 388 + /* 389 + * Growing the rt section of the filesystem. 390 + * In the third set of transactions (FREE) we update metadata without 391 + * allocating any new blocks. 
392 + * superblock: sector size 393 + * bitmap inode: inode size 394 + * summary inode: inode size 395 + * one bitmap block: blocksize 396 + * summary blocks: new summary size 397 + */ 133 398 STATIC uint 134 - xfs_calc_growrtfree_reservation(xfs_mount_t *mp) 399 + xfs_calc_growrtfree_reservation( 400 + struct xfs_mount *mp) 135 401 { 136 - return XFS_CALC_GROWRTFREE_LOG_RES(mp); 402 + return mp->m_sb.sb_sectsize + 403 + 2 * mp->m_sb.sb_inodesize + 404 + mp->m_sb.sb_blocksize + 405 + mp->m_rsumsize + 406 + 128 * 5; 137 407 } 138 408 409 + /* 410 + * Logging the inode modification timestamp on a synchronous write. 411 + * inode 412 + */ 139 413 STATIC uint 140 - xfs_calc_swrite_reservation(xfs_mount_t *mp) 414 + xfs_calc_swrite_reservation( 415 + struct xfs_mount *mp) 141 416 { 142 - return XFS_CALC_SWRITE_LOG_RES(mp); 417 + return mp->m_sb.sb_inodesize + 128; 143 418 } 144 419 420 + /* 421 + * Logging the inode mode bits when writing a setuid/setgid file 422 + * inode 423 + */ 145 424 STATIC uint 146 425 xfs_calc_writeid_reservation(xfs_mount_t *mp) 147 426 { 148 - return XFS_CALC_WRITEID_LOG_RES(mp); 427 + return mp->m_sb.sb_inodesize + 128; 149 428 } 150 429 430 + /* 431 + * Converting the inode from non-attributed to attributed. 
432 + * the inode being converted: inode size 433 + * agf block and superblock (for block allocation) 434 + * the new block (directory sized) 435 + * bmap blocks for the new directory block 436 + * allocation btrees 437 + */ 151 438 STATIC uint 152 - xfs_calc_addafork_reservation(xfs_mount_t *mp) 439 + xfs_calc_addafork_reservation( 440 + struct xfs_mount *mp) 153 441 { 154 - return XFS_CALC_ADDAFORK_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); 442 + return XFS_DQUOT_LOGRES(mp) + 443 + mp->m_sb.sb_inodesize + 444 + mp->m_sb.sb_sectsize * 2 + 445 + mp->m_dirblksize + 446 + XFS_FSB_TO_B(mp, XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1) + 447 + XFS_ALLOCFREE_LOG_RES(mp, 1) + 448 + 128 * (4 + XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1 + 449 + XFS_ALLOCFREE_LOG_COUNT(mp, 1)); 155 450 } 156 451 452 + /* 453 + * Removing the attribute fork of a file 454 + * the inode being truncated: inode size 455 + * the inode's bmap btree: max depth * block size 456 + * And the bmap_finish transaction can free the blocks and bmap blocks: 457 + * the agf for each of the ags: 4 * sector size 458 + * the agfl for each of the ags: 4 * sector size 459 + * the super block to reflect the freed blocks: sector size 460 + * worst case split in allocation btrees per extent assuming 4 extents: 461 + * 4 exts * 2 trees * (2 * max depth - 1) * block size 462 + */ 157 463 STATIC uint 158 - xfs_calc_attrinval_reservation(xfs_mount_t *mp) 464 + xfs_calc_attrinval_reservation( 465 + struct xfs_mount *mp) 159 466 { 160 - return XFS_CALC_ATTRINVAL_LOG_RES(mp); 467 + return MAX((mp->m_sb.sb_inodesize + 468 + XFS_FSB_TO_B(mp, XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) + 469 + 128 * (1 + XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK))), 470 + (4 * mp->m_sb.sb_sectsize + 471 + 4 * mp->m_sb.sb_sectsize + 472 + mp->m_sb.sb_sectsize + 473 + XFS_ALLOCFREE_LOG_RES(mp, 4) + 474 + 128 * (9 + XFS_ALLOCFREE_LOG_COUNT(mp, 4)))); 161 475 } 162 476 477 + /* 478 + * Setting an attribute. 
479 + * the inode getting the attribute 480 + * the superblock for allocations 481 + * the agfs extents are allocated from 482 + * the attribute btree * max depth 483 + * the inode allocation btree 484 + * Since attribute transaction space is dependent on the size of the attribute, 485 + * the calculation is done partially at mount time and partially at runtime. 486 + */ 163 487 STATIC uint 164 - xfs_calc_attrset_reservation(xfs_mount_t *mp) 488 + xfs_calc_attrset_reservation( 489 + struct xfs_mount *mp) 165 490 { 166 - return XFS_CALC_ATTRSET_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); 491 + return XFS_DQUOT_LOGRES(mp) + 492 + mp->m_sb.sb_inodesize + 493 + mp->m_sb.sb_sectsize + 494 + XFS_FSB_TO_B(mp, XFS_DA_NODE_MAXDEPTH) + 495 + 128 * (2 + XFS_DA_NODE_MAXDEPTH); 167 496 } 168 497 498 + /* 499 + * Removing an attribute. 500 + * the inode: inode size 501 + * the attribute btree could join: max depth * block size 502 + * the inode bmap btree could join or split: max depth * block size 503 + * And the bmap_finish transaction can free the attr blocks freed giving: 504 + * the agf for the ag in which the blocks live: 2 * sector size 505 + * the agfl for the ag in which the blocks live: 2 * sector size 506 + * the superblock for the free block count: sector size 507 + * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size 508 + */ 169 509 STATIC uint 170 - xfs_calc_attrrm_reservation(xfs_mount_t *mp) 510 + xfs_calc_attrrm_reservation( 511 + struct xfs_mount *mp) 171 512 { 172 - return XFS_CALC_ATTRRM_LOG_RES(mp) + XFS_DQUOT_LOGRES(mp); 513 + return XFS_DQUOT_LOGRES(mp) + 514 + MAX((mp->m_sb.sb_inodesize + 515 + XFS_FSB_TO_B(mp, XFS_DA_NODE_MAXDEPTH) + 516 + XFS_FSB_TO_B(mp, XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) + 517 + 128 * (1 + XFS_DA_NODE_MAXDEPTH + 518 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK))), 519 + (2 * mp->m_sb.sb_sectsize + 520 + 2 * mp->m_sb.sb_sectsize + 521 + mp->m_sb.sb_sectsize + 522 + XFS_ALLOCFREE_LOG_RES(mp, 2) + 523 + 128 * (5 + 
XFS_ALLOCFREE_LOG_COUNT(mp, 2)))); 173 524 } 174 525 526 + /* 527 + * Clearing a bad agino number in an agi hash bucket. 528 + */ 175 529 STATIC uint 176 - xfs_calc_clear_agi_bucket_reservation(xfs_mount_t *mp) 530 + xfs_calc_clear_agi_bucket_reservation( 531 + struct xfs_mount *mp) 177 532 { 178 - return XFS_CALC_CLEAR_AGI_BUCKET_LOG_RES(mp); 533 + return mp->m_sb.sb_sectsize + 128; 179 534 } 180 535 181 536 /* ··· 539 184 */ 540 185 void 541 186 xfs_trans_init( 542 - xfs_mount_t *mp) 187 + struct xfs_mount *mp) 543 188 { 544 - xfs_trans_reservations_t *resp; 189 + struct xfs_trans_reservations *resp = &mp->m_reservations; 545 190 546 - resp = &(mp->m_reservations); 547 191 resp->tr_write = xfs_calc_write_reservation(mp); 548 192 resp->tr_itruncate = xfs_calc_itruncate_reservation(mp); 549 193 resp->tr_rename = xfs_calc_rename_reservation(mp);
-411
fs/xfs/xfs_trans.h
··· 300 300 301 301 302 302 /* 303 - * Various log reservation values. 304 - * These are based on the size of the file system block 305 - * because that is what most transactions manipulate. 306 - * Each adds in an additional 128 bytes per item logged to 307 - * try to account for the overhead of the transaction mechanism. 308 - * 309 - * Note: 310 - * Most of the reservations underestimate the number of allocation 311 - * groups into which they could free extents in the xfs_bmap_finish() 312 - * call. This is because the number in the worst case is quite high 313 - * and quite unusual. In order to fix this we need to change 314 - * xfs_bmap_finish() to free extents in only a single AG at a time. 315 - * This will require changes to the EFI code as well, however, so that 316 - * the EFI for the extents not freed is logged again in each transaction. 317 - * See bug 261917. 318 - */ 319 - 320 - /* 321 303 * Per-extent log reservation for the allocation btree changes 322 304 * involved in freeing or allocating an extent. 323 305 * 2 trees * (2 blocks/level * max depth - 1) * block size ··· 323 341 (XFS_DAENTER_BLOCKS(mp, XFS_DATA_FORK) + \ 324 342 XFS_DAENTER_BMAPS(mp, XFS_DATA_FORK) + 1) 325 343 326 - /* 327 - * In a write transaction we can allocate a maximum of 2 328 - * extents. 
This gives: 329 - * the inode getting the new extents: inode size 330 - * the inode's bmap btree: max depth * block size 331 - * the agfs of the ags from which the extents are allocated: 2 * sector 332 - * the superblock free block counter: sector size 333 - * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size 334 - * And the bmap_finish transaction can free bmap blocks in a join: 335 - * the agfs of the ags containing the blocks: 2 * sector size 336 - * the agfls of the ags containing the blocks: 2 * sector size 337 - * the super block free block counter: sector size 338 - * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size 339 - */ 340 - #define XFS_CALC_WRITE_LOG_RES(mp) \ 341 - (MAX( \ 342 - ((mp)->m_sb.sb_inodesize + \ 343 - XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)) + \ 344 - (2 * (mp)->m_sb.sb_sectsize) + \ 345 - (mp)->m_sb.sb_sectsize + \ 346 - XFS_ALLOCFREE_LOG_RES(mp, 2) + \ 347 - (128 * (4 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + XFS_ALLOCFREE_LOG_COUNT(mp, 2)))),\ 348 - ((2 * (mp)->m_sb.sb_sectsize) + \ 349 - (2 * (mp)->m_sb.sb_sectsize) + \ 350 - (mp)->m_sb.sb_sectsize + \ 351 - XFS_ALLOCFREE_LOG_RES(mp, 2) + \ 352 - (128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2)))))) 353 344 354 345 #define XFS_WRITE_LOG_RES(mp) ((mp)->m_reservations.tr_write) 355 - 356 - /* 357 - * In truncating a file we free up to two extents at once. 
We can modify: 358 - * the inode being truncated: inode size 359 - * the inode's bmap btree: (max depth + 1) * block size 360 - * And the bmap_finish transaction can free the blocks and bmap blocks: 361 - * the agf for each of the ags: 4 * sector size 362 - * the agfl for each of the ags: 4 * sector size 363 - * the super block to reflect the freed blocks: sector size 364 - * worst case split in allocation btrees per extent assuming 4 extents: 365 - * 4 exts * 2 trees * (2 * max depth - 1) * block size 366 - * the inode btree: max depth * blocksize 367 - * the allocation btrees: 2 trees * (max depth - 1) * block size 368 - */ 369 - #define XFS_CALC_ITRUNCATE_LOG_RES(mp) \ 370 - (MAX( \ 371 - ((mp)->m_sb.sb_inodesize + \ 372 - XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1) + \ 373 - (128 * (2 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)))), \ 374 - ((4 * (mp)->m_sb.sb_sectsize) + \ 375 - (4 * (mp)->m_sb.sb_sectsize) + \ 376 - (mp)->m_sb.sb_sectsize + \ 377 - XFS_ALLOCFREE_LOG_RES(mp, 4) + \ 378 - (128 * (9 + XFS_ALLOCFREE_LOG_COUNT(mp, 4))) + \ 379 - (128 * 5) + \ 380 - XFS_ALLOCFREE_LOG_RES(mp, 1) + \ 381 - (128 * (2 + XFS_IALLOC_BLOCKS(mp) + (mp)->m_in_maxlevels + \ 382 - XFS_ALLOCFREE_LOG_COUNT(mp, 1)))))) 383 - 384 346 #define XFS_ITRUNCATE_LOG_RES(mp) ((mp)->m_reservations.tr_itruncate) 385 - 386 - /* 387 - * In renaming a files we can modify: 388 - * the four inodes involved: 4 * inode size 389 - * the two directory btrees: 2 * (max depth + v2) * dir block size 390 - * the two directory bmap btrees: 2 * max depth * block size 391 - * And the bmap_finish transaction can free dir and bmap blocks (two sets 392 - * of bmap blocks) giving: 393 - * the agf for the ags in which the blocks live: 3 * sector size 394 - * the agfl for the ags in which the blocks live: 3 * sector size 395 - * the superblock for the free block count: sector size 396 - * the allocation btrees: 3 exts * 2 trees * (2 * max depth - 1) * block size 397 - */ 398 - #define 
XFS_CALC_RENAME_LOG_RES(mp) \ 399 - (MAX( \ 400 - ((4 * (mp)->m_sb.sb_inodesize) + \ 401 - (2 * XFS_DIROP_LOG_RES(mp)) + \ 402 - (128 * (4 + 2 * XFS_DIROP_LOG_COUNT(mp)))), \ 403 - ((3 * (mp)->m_sb.sb_sectsize) + \ 404 - (3 * (mp)->m_sb.sb_sectsize) + \ 405 - (mp)->m_sb.sb_sectsize + \ 406 - XFS_ALLOCFREE_LOG_RES(mp, 3) + \ 407 - (128 * (7 + XFS_ALLOCFREE_LOG_COUNT(mp, 3)))))) 408 - 409 347 #define XFS_RENAME_LOG_RES(mp) ((mp)->m_reservations.tr_rename) 410 - 411 - /* 412 - * For creating a link to an inode: 413 - * the parent directory inode: inode size 414 - * the linked inode: inode size 415 - * the directory btree could split: (max depth + v2) * dir block size 416 - * the directory bmap btree could join or split: (max depth + v2) * blocksize 417 - * And the bmap_finish transaction can free some bmap blocks giving: 418 - * the agf for the ag in which the blocks live: sector size 419 - * the agfl for the ag in which the blocks live: sector size 420 - * the superblock for the free block count: sector size 421 - * the allocation btrees: 2 trees * (2 * max depth - 1) * block size 422 - */ 423 - #define XFS_CALC_LINK_LOG_RES(mp) \ 424 - (MAX( \ 425 - ((mp)->m_sb.sb_inodesize + \ 426 - (mp)->m_sb.sb_inodesize + \ 427 - XFS_DIROP_LOG_RES(mp) + \ 428 - (128 * (2 + XFS_DIROP_LOG_COUNT(mp)))), \ 429 - ((mp)->m_sb.sb_sectsize + \ 430 - (mp)->m_sb.sb_sectsize + \ 431 - (mp)->m_sb.sb_sectsize + \ 432 - XFS_ALLOCFREE_LOG_RES(mp, 1) + \ 433 - (128 * (3 + XFS_ALLOCFREE_LOG_COUNT(mp, 1)))))) 434 - 435 348 #define XFS_LINK_LOG_RES(mp) ((mp)->m_reservations.tr_link) 436 - 437 - /* 438 - * For removing a directory entry we can modify: 439 - * the parent directory inode: inode size 440 - * the removed inode: inode size 441 - * the directory btree could join: (max depth + v2) * dir block size 442 - * the directory bmap btree could join or split: (max depth + v2) * blocksize 443 - * And the bmap_finish transaction can free the dir and bmap blocks giving: 444 - * the agf for the ag in 
which the blocks live: 2 * sector size 445 - * the agfl for the ag in which the blocks live: 2 * sector size 446 - * the superblock for the free block count: sector size 447 - * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size 448 - */ 449 - #define XFS_CALC_REMOVE_LOG_RES(mp) \ 450 - (MAX( \ 451 - ((mp)->m_sb.sb_inodesize + \ 452 - (mp)->m_sb.sb_inodesize + \ 453 - XFS_DIROP_LOG_RES(mp) + \ 454 - (128 * (2 + XFS_DIROP_LOG_COUNT(mp)))), \ 455 - ((2 * (mp)->m_sb.sb_sectsize) + \ 456 - (2 * (mp)->m_sb.sb_sectsize) + \ 457 - (mp)->m_sb.sb_sectsize + \ 458 - XFS_ALLOCFREE_LOG_RES(mp, 2) + \ 459 - (128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2)))))) 460 - 461 349 #define XFS_REMOVE_LOG_RES(mp) ((mp)->m_reservations.tr_remove) 462 - 463 - /* 464 - * For symlink we can modify: 465 - * the parent directory inode: inode size 466 - * the new inode: inode size 467 - * the inode btree entry: 1 block 468 - * the directory btree: (max depth + v2) * dir block size 469 - * the directory inode's bmap btree: (max depth + v2) * block size 470 - * the blocks for the symlink: 1 kB 471 - * Or in the first xact we allocate some inodes giving: 472 - * the agi and agf of the ag getting the new inodes: 2 * sectorsize 473 - * the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize 474 - * the inode btree: max depth * blocksize 475 - * the allocation btrees: 2 trees * (2 * max depth - 1) * block size 476 - */ 477 - #define XFS_CALC_SYMLINK_LOG_RES(mp) \ 478 - (MAX( \ 479 - ((mp)->m_sb.sb_inodesize + \ 480 - (mp)->m_sb.sb_inodesize + \ 481 - XFS_FSB_TO_B(mp, 1) + \ 482 - XFS_DIROP_LOG_RES(mp) + \ 483 - 1024 + \ 484 - (128 * (4 + XFS_DIROP_LOG_COUNT(mp)))), \ 485 - (2 * (mp)->m_sb.sb_sectsize + \ 486 - XFS_FSB_TO_B((mp), XFS_IALLOC_BLOCKS((mp))) + \ 487 - XFS_FSB_TO_B((mp), (mp)->m_in_maxlevels) + \ 488 - XFS_ALLOCFREE_LOG_RES(mp, 1) + \ 489 - (128 * (2 + XFS_IALLOC_BLOCKS(mp) + (mp)->m_in_maxlevels + \ 490 - XFS_ALLOCFREE_LOG_COUNT(mp, 1)))))) 491 - 492 350 #define 
XFS_SYMLINK_LOG_RES(mp) ((mp)->m_reservations.tr_symlink) 493 - 494 - /* 495 - * For create we can modify: 496 - * the parent directory inode: inode size 497 - * the new inode: inode size 498 - * the inode btree entry: block size 499 - * the superblock for the nlink flag: sector size 500 - * the directory btree: (max depth + v2) * dir block size 501 - * the directory inode's bmap btree: (max depth + v2) * block size 502 - * Or in the first xact we allocate some inodes giving: 503 - * the agi and agf of the ag getting the new inodes: 2 * sectorsize 504 - * the superblock for the nlink flag: sector size 505 - * the inode blocks allocated: XFS_IALLOC_BLOCKS * blocksize 506 - * the inode btree: max depth * blocksize 507 - * the allocation btrees: 2 trees * (max depth - 1) * block size 508 - */ 509 - #define XFS_CALC_CREATE_LOG_RES(mp) \ 510 - (MAX( \ 511 - ((mp)->m_sb.sb_inodesize + \ 512 - (mp)->m_sb.sb_inodesize + \ 513 - (mp)->m_sb.sb_sectsize + \ 514 - XFS_FSB_TO_B(mp, 1) + \ 515 - XFS_DIROP_LOG_RES(mp) + \ 516 - (128 * (3 + XFS_DIROP_LOG_COUNT(mp)))), \ 517 - (3 * (mp)->m_sb.sb_sectsize + \ 518 - XFS_FSB_TO_B((mp), XFS_IALLOC_BLOCKS((mp))) + \ 519 - XFS_FSB_TO_B((mp), (mp)->m_in_maxlevels) + \ 520 - XFS_ALLOCFREE_LOG_RES(mp, 1) + \ 521 - (128 * (2 + XFS_IALLOC_BLOCKS(mp) + (mp)->m_in_maxlevels + \ 522 - XFS_ALLOCFREE_LOG_COUNT(mp, 1)))))) 523 - 524 351 #define XFS_CREATE_LOG_RES(mp) ((mp)->m_reservations.tr_create) 525 - 526 - /* 527 - * Making a new directory is the same as creating a new file. 
528 - */ 529 - #define XFS_CALC_MKDIR_LOG_RES(mp) XFS_CALC_CREATE_LOG_RES(mp) 530 - 531 352 #define XFS_MKDIR_LOG_RES(mp) ((mp)->m_reservations.tr_mkdir) 532 - 533 - /* 534 - * In freeing an inode we can modify: 535 - * the inode being freed: inode size 536 - * the super block free inode counter: sector size 537 - * the agi hash list and counters: sector size 538 - * the inode btree entry: block size 539 - * the on disk inode before ours in the agi hash list: inode cluster size 540 - * the inode btree: max depth * blocksize 541 - * the allocation btrees: 2 trees * (max depth - 1) * block size 542 - */ 543 - #define XFS_CALC_IFREE_LOG_RES(mp) \ 544 - ((mp)->m_sb.sb_inodesize + \ 545 - (mp)->m_sb.sb_sectsize + \ 546 - (mp)->m_sb.sb_sectsize + \ 547 - XFS_FSB_TO_B((mp), 1) + \ 548 - MAX((__uint16_t)XFS_FSB_TO_B((mp), 1), XFS_INODE_CLUSTER_SIZE(mp)) + \ 549 - (128 * 5) + \ 550 - XFS_ALLOCFREE_LOG_RES(mp, 1) + \ 551 - (128 * (2 + XFS_IALLOC_BLOCKS(mp) + (mp)->m_in_maxlevels + \ 552 - XFS_ALLOCFREE_LOG_COUNT(mp, 1)))) 553 - 554 - 555 353 #define XFS_IFREE_LOG_RES(mp) ((mp)->m_reservations.tr_ifree) 556 - 557 - /* 558 - * When only changing the inode we log the inode and possibly the superblock 559 - * We also add a bit of slop for the transaction stuff. 560 - */ 561 - #define XFS_CALC_ICHANGE_LOG_RES(mp) ((mp)->m_sb.sb_inodesize + \ 562 - (mp)->m_sb.sb_sectsize + 512) 563 - 564 354 #define XFS_ICHANGE_LOG_RES(mp) ((mp)->m_reservations.tr_ichange) 565 - 566 - /* 567 - * Growing the data section of the filesystem. 568 - * superblock 569 - * agi and agf 570 - * allocation btrees 571 - */ 572 - #define XFS_CALC_GROWDATA_LOG_RES(mp) \ 573 - ((mp)->m_sb.sb_sectsize * 3 + \ 574 - XFS_ALLOCFREE_LOG_RES(mp, 1) + \ 575 - (128 * (3 + XFS_ALLOCFREE_LOG_COUNT(mp, 1)))) 576 - 577 355 #define XFS_GROWDATA_LOG_RES(mp) ((mp)->m_reservations.tr_growdata) 578 - 579 - /* 580 - * Growing the rt section of the filesystem. 
581 - * In the first set of transactions (ALLOC) we allocate space to the 582 - * bitmap or summary files. 583 - * superblock: sector size 584 - * agf of the ag from which the extent is allocated: sector size 585 - * bmap btree for bitmap/summary inode: max depth * blocksize 586 - * bitmap/summary inode: inode size 587 - * allocation btrees for 1 block alloc: 2 * (2 * maxdepth - 1) * blocksize 588 - */ 589 - #define XFS_CALC_GROWRTALLOC_LOG_RES(mp) \ 590 - (2 * (mp)->m_sb.sb_sectsize + \ 591 - XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)) + \ 592 - (mp)->m_sb.sb_inodesize + \ 593 - XFS_ALLOCFREE_LOG_RES(mp, 1) + \ 594 - (128 * \ 595 - (3 + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + \ 596 - XFS_ALLOCFREE_LOG_COUNT(mp, 1)))) 597 - 598 356 #define XFS_GROWRTALLOC_LOG_RES(mp) ((mp)->m_reservations.tr_growrtalloc) 599 - 600 - /* 601 - * Growing the rt section of the filesystem. 602 - * In the second set of transactions (ZERO) we zero the new metadata blocks. 603 - * one bitmap/summary block: blocksize 604 - */ 605 - #define XFS_CALC_GROWRTZERO_LOG_RES(mp) \ 606 - ((mp)->m_sb.sb_blocksize + 128) 607 - 608 357 #define XFS_GROWRTZERO_LOG_RES(mp) ((mp)->m_reservations.tr_growrtzero) 609 - 610 - /* 611 - * Growing the rt section of the filesystem. 612 - * In the third set of transactions (FREE) we update metadata without 613 - * allocating any new blocks. 614 - * superblock: sector size 615 - * bitmap inode: inode size 616 - * summary inode: inode size 617 - * one bitmap block: blocksize 618 - * summary blocks: new summary size 619 - */ 620 - #define XFS_CALC_GROWRTFREE_LOG_RES(mp) \ 621 - ((mp)->m_sb.sb_sectsize + \ 622 - 2 * (mp)->m_sb.sb_inodesize + \ 623 - (mp)->m_sb.sb_blocksize + \ 624 - (mp)->m_rsumsize + \ 625 - (128 * 5)) 626 - 627 358 #define XFS_GROWRTFREE_LOG_RES(mp) ((mp)->m_reservations.tr_growrtfree) 628 - 629 - /* 630 - * Logging the inode modification timestamp on a synchronous write. 
631 - * inode 632 - */ 633 - #define XFS_CALC_SWRITE_LOG_RES(mp) \ 634 - ((mp)->m_sb.sb_inodesize + 128) 635 - 636 359 #define XFS_SWRITE_LOG_RES(mp) ((mp)->m_reservations.tr_swrite) 637 - 638 360 /* 639 361 * Logging the inode timestamps on an fsync -- same as SWRITE 640 362 * as long as SWRITE logs the entire inode core 641 363 */ 642 364 #define XFS_FSYNC_TS_LOG_RES(mp) ((mp)->m_reservations.tr_swrite) 643 - 644 - /* 645 - * Logging the inode mode bits when writing a setuid/setgid file 646 - * inode 647 - */ 648 - #define XFS_CALC_WRITEID_LOG_RES(mp) \ 649 - ((mp)->m_sb.sb_inodesize + 128) 650 - 651 365 #define XFS_WRITEID_LOG_RES(mp) ((mp)->m_reservations.tr_swrite) 652 - 653 - /* 654 - * Converting the inode from non-attributed to attributed. 655 - * the inode being converted: inode size 656 - * agf block and superblock (for block allocation) 657 - * the new block (directory sized) 658 - * bmap blocks for the new directory block 659 - * allocation btrees 660 - */ 661 - #define XFS_CALC_ADDAFORK_LOG_RES(mp) \ 662 - ((mp)->m_sb.sb_inodesize + \ 663 - (mp)->m_sb.sb_sectsize * 2 + \ 664 - (mp)->m_dirblksize + \ 665 - XFS_FSB_TO_B(mp, (XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1)) + \ 666 - XFS_ALLOCFREE_LOG_RES(mp, 1) + \ 667 - (128 * (4 + (XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1) + \ 668 - XFS_ALLOCFREE_LOG_COUNT(mp, 1)))) 669 - 670 366 #define XFS_ADDAFORK_LOG_RES(mp) ((mp)->m_reservations.tr_addafork) 671 - 672 - /* 673 - * Removing the attribute fork of a file 674 - * the inode being truncated: inode size 675 - * the inode's bmap btree: max depth * block size 676 - * And the bmap_finish transaction can free the blocks and bmap blocks: 677 - * the agf for each of the ags: 4 * sector size 678 - * the agfl for each of the ags: 4 * sector size 679 - * the super block to reflect the freed blocks: sector size 680 - * worst case split in allocation btrees per extent assuming 4 extents: 681 - * 4 exts * 2 trees * (2 * max depth - 1) * block size 682 - */ 683 - #define 
XFS_CALC_ATTRINVAL_LOG_RES(mp) \ 684 - (MAX( \ 685 - ((mp)->m_sb.sb_inodesize + \ 686 - XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) + \ 687 - (128 * (1 + XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)))), \ 688 - ((4 * (mp)->m_sb.sb_sectsize) + \ 689 - (4 * (mp)->m_sb.sb_sectsize) + \ 690 - (mp)->m_sb.sb_sectsize + \ 691 - XFS_ALLOCFREE_LOG_RES(mp, 4) + \ 692 - (128 * (9 + XFS_ALLOCFREE_LOG_COUNT(mp, 4)))))) 693 - 694 367 #define XFS_ATTRINVAL_LOG_RES(mp) ((mp)->m_reservations.tr_attrinval) 695 - 696 - /* 697 - * Setting an attribute. 698 - * the inode getting the attribute 699 - * the superblock for allocations 700 - * the agfs extents are allocated from 701 - * the attribute btree * max depth 702 - * the inode allocation btree 703 - * Since attribute transaction space is dependent on the size of the attribute, 704 - * the calculation is done partially at mount time and partially at runtime. 705 - */ 706 - #define XFS_CALC_ATTRSET_LOG_RES(mp) \ 707 - ((mp)->m_sb.sb_inodesize + \ 708 - (mp)->m_sb.sb_sectsize + \ 709 - XFS_FSB_TO_B((mp), XFS_DA_NODE_MAXDEPTH) + \ 710 - (128 * (2 + XFS_DA_NODE_MAXDEPTH))) 711 - 712 368 #define XFS_ATTRSET_LOG_RES(mp, ext) \ 713 369 ((mp)->m_reservations.tr_attrset + \ 714 370 (ext * (mp)->m_sb.sb_sectsize) + \ 715 371 (ext * XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK))) + \ 716 372 (128 * (ext + (ext * XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK))))) 717 - 718 - /* 719 - * Removing an attribute. 
720 - * the inode: inode size 721 - * the attribute btree could join: max depth * block size 722 - * the inode bmap btree could join or split: max depth * block size 723 - * And the bmap_finish transaction can free the attr blocks freed giving: 724 - * the agf for the ag in which the blocks live: 2 * sector size 725 - * the agfl for the ag in which the blocks live: 2 * sector size 726 - * the superblock for the free block count: sector size 727 - * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size 728 - */ 729 - #define XFS_CALC_ATTRRM_LOG_RES(mp) \ 730 - (MAX( \ 731 - ((mp)->m_sb.sb_inodesize + \ 732 - XFS_FSB_TO_B((mp), XFS_DA_NODE_MAXDEPTH) + \ 733 - XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) + \ 734 - (128 * (1 + XFS_DA_NODE_MAXDEPTH + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK)))), \ 735 - ((2 * (mp)->m_sb.sb_sectsize) + \ 736 - (2 * (mp)->m_sb.sb_sectsize) + \ 737 - (mp)->m_sb.sb_sectsize + \ 738 - XFS_ALLOCFREE_LOG_RES(mp, 2) + \ 739 - (128 * (5 + XFS_ALLOCFREE_LOG_COUNT(mp, 2)))))) 740 - 741 373 #define XFS_ATTRRM_LOG_RES(mp) ((mp)->m_reservations.tr_attrrm) 742 - 743 - /* 744 - * Clearing a bad agino number in an agi hash bucket. 745 - */ 746 - #define XFS_CALC_CLEAR_AGI_BUCKET_LOG_RES(mp) \ 747 - ((mp)->m_sb.sb_sectsize + 128) 748 - 749 374 #define XFS_CLEAR_AGI_BUCKET_LOG_RES(mp) ((mp)->m_reservations.tr_clearagi) 750 375 751 376
+1 -1
fs/xfs/xfs_vnodeops.c
··· 267 267 if (code) { 268 268 ASSERT(tp == NULL); 269 269 lock_flags &= ~XFS_ILOCK_EXCL; 270 - ASSERT(lock_flags == XFS_IOLOCK_EXCL); 270 + ASSERT(lock_flags == XFS_IOLOCK_EXCL || !need_iolock); 271 271 goto error_return; 272 272 } 273 273 tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);