Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull UFS fixes from Al Viro:
"This is just the obvious backport fodder; I'm pretty sure that there
will be more - definitely so wrt performance and quite possibly
correctness as well"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
ufs: we need to sync inode before freeing it
excessive checks in ufs_write_failed() and ufs_evict_inode()
ufs_getfrag_block(): we only grab ->truncate_mutex on block creation path
ufs_extend_tail(): fix the braino in calling conventions of ufs_new_fragments()
ufs: set correct ->s_maxbytes
ufs: restore maintaining ->i_blocks
fix ufs_isblockset()
ufs: restore proper tail allocation

+63 -20
+1
fs/stat.c
··· 672 inode->i_bytes -= 512; 673 } 674 } 675 676 void inode_add_bytes(struct inode *inode, loff_t bytes) 677 {
··· 672 inode->i_bytes -= 512; 673 } 674 } 675 + EXPORT_SYMBOL(__inode_add_bytes); 676 677 void inode_add_bytes(struct inode *inode, loff_t bytes) 678 {
+25 -1
fs/ufs/balloc.c
··· 82 ufs_error (sb, "ufs_free_fragments", 83 "bit already cleared for fragment %u", i); 84 } 85 - 86 fs32_add(sb, &ucg->cg_cs.cs_nffree, count); 87 uspi->cs_total.cs_nffree += count; 88 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count); ··· 185 ufs_error(sb, "ufs_free_blocks", "freeing free fragment"); 186 } 187 ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); 188 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) 189 ufs_clusteracct (sb, ucpi, blkno, 1); 190 ··· 496 return 0; 497 } 498 499 static u64 ufs_add_fragments(struct inode *inode, u64 fragment, 500 unsigned oldcount, unsigned newcount) 501 { ··· 546 for (i = oldcount; i < newcount; i++) 547 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i)) 548 return 0; 549 /* 550 * Block can be extended 551 */ ··· 666 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i); 667 i = uspi->s_fpb - count; 668 669 fs32_add(sb, &ucg->cg_cs.cs_nffree, i); 670 uspi->cs_total.cs_nffree += i; 671 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i); ··· 676 677 result = ufs_bitmap_search (sb, ucpi, goal, allocsize); 678 if (result == INVBLOCK) 679 return 0; 680 for (i = 0; i < count; i++) 681 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i); ··· 738 return INVBLOCK; 739 ucpi->c_rotor = result; 740 gotit: 741 blkno = ufs_fragstoblks(result); 742 ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); 743 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
··· 82 ufs_error (sb, "ufs_free_fragments", 83 "bit already cleared for fragment %u", i); 84 } 85 + 86 + inode_sub_bytes(inode, count << uspi->s_fshift); 87 fs32_add(sb, &ucg->cg_cs.cs_nffree, count); 88 uspi->cs_total.cs_nffree += count; 89 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count); ··· 184 ufs_error(sb, "ufs_free_blocks", "freeing free fragment"); 185 } 186 ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); 187 + inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift); 188 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD) 189 ufs_clusteracct (sb, ucpi, blkno, 1); 190 ··· 494 return 0; 495 } 496 497 + static bool try_add_frags(struct inode *inode, unsigned frags) 498 + { 499 + unsigned size = frags * i_blocksize(inode); 500 + spin_lock(&inode->i_lock); 501 + __inode_add_bytes(inode, size); 502 + if (unlikely((u32)inode->i_blocks != inode->i_blocks)) { 503 + __inode_sub_bytes(inode, size); 504 + spin_unlock(&inode->i_lock); 505 + return false; 506 + } 507 + spin_unlock(&inode->i_lock); 508 + return true; 509 + } 510 + 511 static u64 ufs_add_fragments(struct inode *inode, u64 fragment, 512 unsigned oldcount, unsigned newcount) 513 { ··· 530 for (i = oldcount; i < newcount; i++) 531 if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i)) 532 return 0; 533 + 534 + if (!try_add_frags(inode, count)) 535 + return 0; 536 /* 537 * Block can be extended 538 */ ··· 647 ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i); 648 i = uspi->s_fpb - count; 649 650 + inode_sub_bytes(inode, i << uspi->s_fshift); 651 fs32_add(sb, &ucg->cg_cs.cs_nffree, i); 652 uspi->cs_total.cs_nffree += i; 653 fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i); ··· 656 657 result = ufs_bitmap_search (sb, ucpi, goal, allocsize); 658 if (result == INVBLOCK) 659 + return 0; 660 + if (!try_add_frags(inode, count)) 661 return 0; 662 for (i = 0; i < count; i++) 663 ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i); ··· 716 return INVBLOCK; 717 ucpi->c_rotor = result; 
718 gotit: 719 + if (!try_add_frags(inode, uspi->s_fpb)) 720 + return 0; 721 blkno = ufs_fragstoblks(result); 722 ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno); 723 if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
+12 -16
fs/ufs/inode.c
··· 235 236 p = ufs_get_direct_data_ptr(uspi, ufsi, block); 237 tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p), 238 - new_size, err, locked_page); 239 return tmp != 0; 240 } 241 ··· 285 goal += uspi->s_fpb; 286 } 287 tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), 288 - goal, uspi->s_fpb, err, locked_page); 289 290 if (!tmp) { 291 *err = -ENOSPC; ··· 403 404 if (!create) { 405 phys64 = ufs_frag_map(inode, offsets, depth); 406 - goto out; 407 } 408 409 /* This code entered only while writing ....? */ ··· 844 truncate_inode_pages_final(&inode->i_data); 845 if (want_delete) { 846 inode->i_size = 0; 847 - if (inode->i_blocks) 848 ufs_truncate_blocks(inode); 849 } 850 851 invalidate_inode_buffers(inode); ··· 1106 return err; 1107 } 1108 1109 - static void __ufs_truncate_blocks(struct inode *inode) 1110 { 1111 struct ufs_inode_info *ufsi = UFS_I(inode); 1112 struct super_block *sb = inode->i_sb; ··· 1189 1190 truncate_setsize(inode, size); 1191 1192 - __ufs_truncate_blocks(inode); 1193 inode->i_mtime = inode->i_ctime = current_time(inode); 1194 mark_inode_dirty(inode); 1195 out: 1196 UFSD("EXIT: err %d\n", err); 1197 return err; 1198 - } 1199 - 1200 - static void ufs_truncate_blocks(struct inode *inode) 1201 - { 1202 - if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 1203 - S_ISLNK(inode->i_mode))) 1204 - return; 1205 - if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 1206 - return; 1207 - __ufs_truncate_blocks(inode); 1208 } 1209 1210 int ufs_setattr(struct dentry *dentry, struct iattr *attr)
··· 235 236 p = ufs_get_direct_data_ptr(uspi, ufsi, block); 237 tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p), 238 + new_size - (lastfrag & uspi->s_fpbmask), err, 239 + locked_page); 240 return tmp != 0; 241 } 242 ··· 284 goal += uspi->s_fpb; 285 } 286 tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), 287 + goal, nfrags, err, locked_page); 288 289 if (!tmp) { 290 *err = -ENOSPC; ··· 402 403 if (!create) { 404 phys64 = ufs_frag_map(inode, offsets, depth); 405 + if (phys64) 406 + map_bh(bh_result, sb, phys64 + frag); 407 + return 0; 408 } 409 410 /* This code entered only while writing ....? */ ··· 841 truncate_inode_pages_final(&inode->i_data); 842 if (want_delete) { 843 inode->i_size = 0; 844 + if (inode->i_blocks && 845 + (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 846 + S_ISLNK(inode->i_mode))) 847 ufs_truncate_blocks(inode); 848 + ufs_update_inode(inode, inode_needs_sync(inode)); 849 } 850 851 invalidate_inode_buffers(inode); ··· 1100 return err; 1101 } 1102 1103 + static void ufs_truncate_blocks(struct inode *inode) 1104 { 1105 struct ufs_inode_info *ufsi = UFS_I(inode); 1106 struct super_block *sb = inode->i_sb; ··· 1183 1184 truncate_setsize(inode, size); 1185 1186 + ufs_truncate_blocks(inode); 1187 inode->i_mtime = inode->i_ctime = current_time(inode); 1188 mark_inode_dirty(inode); 1189 out: 1190 UFSD("EXIT: err %d\n", err); 1191 return err; 1192 } 1193 1194 int ufs_setattr(struct dentry *dentry, struct iattr *attr)
+18
fs/ufs/super.c
··· 746 return; 747 } 748 749 static int ufs_fill_super(struct super_block *sb, void *data, int silent) 750 { 751 struct ufs_sb_info * sbi; ··· 1228 "fast symlink size (%u)\n", uspi->s_maxsymlinklen); 1229 uspi->s_maxsymlinklen = maxsymlen; 1230 } 1231 sb->s_max_links = UFS_LINK_MAX; 1232 1233 inode = ufs_iget(sb, UFS_ROOTINO);
··· 746 return; 747 } 748 749 + static u64 ufs_max_bytes(struct super_block *sb) 750 + { 751 + struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 752 + int bits = uspi->s_apbshift; 753 + u64 res; 754 + 755 + if (bits > 21) 756 + res = ~0ULL; 757 + else 758 + res = UFS_NDADDR + (1LL << bits) + (1LL << (2*bits)) + 759 + (1LL << (3*bits)); 760 + 761 + if (res >= (MAX_LFS_FILESIZE >> uspi->s_bshift)) 762 + return MAX_LFS_FILESIZE; 763 + return res << uspi->s_bshift; 764 + } 765 + 766 static int ufs_fill_super(struct super_block *sb, void *data, int silent) 767 { 768 struct ufs_sb_info * sbi; ··· 1211 "fast symlink size (%u)\n", uspi->s_maxsymlinklen); 1212 uspi->s_maxsymlinklen = maxsymlen; 1213 } 1214 + sb->s_maxbytes = ufs_max_bytes(sb); 1215 sb->s_max_links = UFS_LINK_MAX; 1216 1217 inode = ufs_iget(sb, UFS_ROOTINO);
+7 -3
fs/ufs/util.h
··· 473 static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi, 474 struct ufs_buffer_head * ubh, unsigned begin, unsigned block) 475 { 476 switch (uspi->s_fpb) { 477 case 8: 478 return (*ubh_get_addr (ubh, begin + block) == 0xff); 479 case 4: 480 - return (*ubh_get_addr (ubh, begin + (block >> 1)) == (0x0f << ((block & 0x01) << 2))); 481 case 2: 482 - return (*ubh_get_addr (ubh, begin + (block >> 2)) == (0x03 << ((block & 0x03) << 1))); 483 case 1: 484 - return (*ubh_get_addr (ubh, begin + (block >> 3)) == (0x01 << (block & 0x07))); 485 } 486 return 0; 487 }
··· 473 static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi, 474 struct ufs_buffer_head * ubh, unsigned begin, unsigned block) 475 { 476 + u8 mask; 477 switch (uspi->s_fpb) { 478 case 8: 479 return (*ubh_get_addr (ubh, begin + block) == 0xff); 480 case 4: 481 + mask = 0x0f << ((block & 0x01) << 2); 482 + return (*ubh_get_addr (ubh, begin + (block >> 1)) & mask) == mask; 483 case 2: 484 + mask = 0x03 << ((block & 0x03) << 1); 485 + return (*ubh_get_addr (ubh, begin + (block >> 2)) & mask) == mask; 486 case 1: 487 + mask = 0x01 << (block & 0x07); 488 + return (*ubh_get_addr (ubh, begin + (block >> 3)) & mask) == mask; 489 } 490 return 0; 491 }