Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

fs: add i_blocksize()

Replace all open-coded 1 << inode->i_blkbits and (1 << inode->i_blkbits)
instances in the fs branch with the new i_blocksize(inode) helper.

This patch also fixes multiple checkpatch warnings: WARNING: Prefer
'unsigned int' to bare use of 'unsigned'

Thanks to Andrew Morton for suggesting more appropriate function instead
of macro.

[geliangtang@gmail.com: truncate: use i_blocksize()]
Link: http://lkml.kernel.org/r/9c8b2cd83c8f5653805d43debde9fa8817e02fc4.1484895804.git.geliangtang@gmail.com
Link: http://lkml.kernel.org/r/1481319905-10126-1-git-send-email-fabf@skynet.be
Signed-off-by: Fabian Frederick <fabf@skynet.be>
Signed-off-by: Geliang Tang <geliangtang@gmail.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Fabian Frederick and committed by
Linus Torvalds
93407472 d3171200

+56 -51
+1 -1
fs/block_dev.c
··· 989 989 bdev->bd_super = NULL; 990 990 bdev->bd_inode = inode; 991 991 bdev->bd_bdi = &noop_backing_dev_info; 992 - bdev->bd_block_size = (1 << inode->i_blkbits); 992 + bdev->bd_block_size = i_blocksize(inode); 993 993 bdev->bd_part_count = 0; 994 994 bdev->bd_invalidated = 0; 995 995 inode->i_mode = S_IFBLK;
+1 -1
fs/btrfs/file.c
··· 2875 2875 if (!ret) 2876 2876 ret = btrfs_prealloc_file_range(inode, mode, 2877 2877 range->start, 2878 - range->len, 1 << inode->i_blkbits, 2878 + range->len, i_blocksize(inode), 2879 2879 offset + len, &alloc_hint); 2880 2880 else 2881 2881 btrfs_free_reserved_data_space(inode, range->start,
+6 -6
fs/buffer.c
··· 2395 2395 loff_t pos, loff_t *bytes) 2396 2396 { 2397 2397 struct inode *inode = mapping->host; 2398 - unsigned blocksize = 1 << inode->i_blkbits; 2398 + unsigned int blocksize = i_blocksize(inode); 2399 2399 struct page *page; 2400 2400 void *fsdata; 2401 2401 pgoff_t index, curidx; ··· 2475 2475 get_block_t *get_block, loff_t *bytes) 2476 2476 { 2477 2477 struct inode *inode = mapping->host; 2478 - unsigned blocksize = 1 << inode->i_blkbits; 2479 - unsigned zerofrom; 2478 + unsigned int blocksize = i_blocksize(inode); 2479 + unsigned int zerofrom; 2480 2480 int err; 2481 2481 2482 2482 err = cont_expand_zero(file, mapping, pos, bytes); ··· 2838 2838 struct buffer_head map_bh; 2839 2839 int err; 2840 2840 2841 - blocksize = 1 << inode->i_blkbits; 2841 + blocksize = i_blocksize(inode); 2842 2842 length = offset & (blocksize - 1); 2843 2843 2844 2844 /* Block boundary? Nothing to do */ ··· 2916 2916 struct buffer_head *bh; 2917 2917 int err; 2918 2918 2919 - blocksize = 1 << inode->i_blkbits; 2919 + blocksize = i_blocksize(inode); 2920 2920 length = offset & (blocksize - 1); 2921 2921 2922 2922 /* Block boundary? Nothing to do */ ··· 3028 3028 struct inode *inode = mapping->host; 3029 3029 tmp.b_state = 0; 3030 3030 tmp.b_blocknr = 0; 3031 - tmp.b_size = 1 << inode->i_blkbits; 3031 + tmp.b_size = i_blocksize(inode); 3032 3032 get_block(inode, block, &tmp, 0); 3033 3033 return tmp.b_blocknr; 3034 3034 }
+1 -1
fs/ceph/addr.c
··· 751 751 struct pagevec pvec; 752 752 int done = 0; 753 753 int rc = 0; 754 - unsigned wsize = 1 << inode->i_blkbits; 754 + unsigned int wsize = i_blocksize(inode); 755 755 struct ceph_osd_request *req = NULL; 756 756 int do_sync = 0; 757 757 loff_t snap_size, i_size;
+1 -1
fs/direct-io.c
··· 587 587 /* 588 588 * Call into the fs to map some more disk blocks. We record the current number 589 589 * of available blocks at sdio->blocks_available. These are in units of the 590 - * fs blocksize, (1 << inode->i_blkbits). 590 + * fs blocksize, i_blocksize(inode). 591 591 * 592 592 * The fs is allowed to map lots of blocks at once. If it wants to do that, 593 593 * it uses the passed inode-relative block number as the file offset, as usual.
+3 -3
fs/ext4/inode.c
··· 2221 2221 { 2222 2222 struct inode *inode = mpd->inode; 2223 2223 int err; 2224 - ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1) 2224 + ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1) 2225 2225 >> inode->i_blkbits; 2226 2226 2227 2227 do { ··· 3577 3577 if (overwrite) 3578 3578 get_block_func = ext4_dio_get_block_overwrite; 3579 3579 else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) || 3580 - round_down(offset, 1 << inode->i_blkbits) >= inode->i_size) { 3580 + round_down(offset, i_blocksize(inode)) >= inode->i_size) { 3581 3581 get_block_func = ext4_dio_get_block; 3582 3582 dio_flags = DIO_LOCKING | DIO_SKIP_HOLES; 3583 3583 } else if (is_sync_kiocb(iocb)) { ··· 5179 5179 * do. We do the check mainly to optimize the common PAGE_SIZE == 5180 5180 * blocksize case 5181 5181 */ 5182 - if (offset > PAGE_SIZE - (1 << inode->i_blkbits)) 5182 + if (offset > PAGE_SIZE - i_blocksize(inode)) 5183 5183 return; 5184 5184 while (1) { 5185 5185 page = find_lock_page(inode->i_mapping,
+1 -1
fs/ext4/mballoc.c
··· 838 838 inode = page->mapping->host; 839 839 sb = inode->i_sb; 840 840 ngroups = ext4_get_groups_count(sb); 841 - blocksize = 1 << inode->i_blkbits; 841 + blocksize = i_blocksize(inode); 842 842 blocks_per_page = PAGE_SIZE / blocksize; 843 843 844 844 groups_per_page = blocks_per_page >> 1;
+1 -1
fs/ext4/move_extent.c
··· 187 187 if (PageUptodate(page)) 188 188 return 0; 189 189 190 - blocksize = 1 << inode->i_blkbits; 190 + blocksize = i_blocksize(inode); 191 191 if (!page_has_buffers(page)) 192 192 create_empty_buffers(page, blocksize, 0); 193 193
+5 -5
fs/iomap.c
··· 420 420 iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, 421 421 const struct iomap_ops *ops) 422 422 { 423 - unsigned blocksize = (1 << inode->i_blkbits); 424 - unsigned off = pos & (blocksize - 1); 423 + unsigned int blocksize = i_blocksize(inode); 424 + unsigned int off = pos & (blocksize - 1); 425 425 426 426 /* Block boundary? Nothing to do */ 427 427 if (!off) ··· 735 735 void *data, struct iomap *iomap) 736 736 { 737 737 struct iomap_dio *dio = data; 738 - unsigned blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev)); 739 - unsigned fs_block_size = (1 << inode->i_blkbits), pad; 740 - unsigned align = iov_iter_alignment(dio->submit.iter); 738 + unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev)); 739 + unsigned int fs_block_size = i_blocksize(inode), pad; 740 + unsigned int align = iov_iter_alignment(dio->submit.iter); 741 741 struct iov_iter iter; 742 742 struct bio *bio; 743 743 bool need_zeroout = false;
+2 -2
fs/jfs/super.c
··· 758 758 sb->s_blocksize - offset : toread; 759 759 760 760 tmp_bh.b_state = 0; 761 - tmp_bh.b_size = 1 << inode->i_blkbits; 761 + tmp_bh.b_size = i_blocksize(inode); 762 762 err = jfs_get_block(inode, blk, &tmp_bh, 0); 763 763 if (err) 764 764 return err; ··· 798 798 sb->s_blocksize - offset : towrite; 799 799 800 800 tmp_bh.b_state = 0; 801 - tmp_bh.b_size = 1 << inode->i_blkbits; 801 + tmp_bh.b_size = i_blocksize(inode); 802 802 err = jfs_get_block(inode, blk, &tmp_bh, 1); 803 803 if (err) 804 804 goto out;
+1 -1
fs/mpage.c
··· 115 115 SetPageUptodate(page); 116 116 return; 117 117 } 118 - create_empty_buffers(page, 1 << inode->i_blkbits, 0); 118 + create_empty_buffers(page, i_blocksize(inode), 0); 119 119 } 120 120 head = page_buffers(page); 121 121 page_bh = head;
+3 -3
fs/nfsd/blocklayout.c
··· 24 24 { 25 25 struct nfsd4_layout_seg *seg = &args->lg_seg; 26 26 struct super_block *sb = inode->i_sb; 27 - u32 block_size = (1 << inode->i_blkbits); 27 + u32 block_size = i_blocksize(inode); 28 28 struct pnfs_block_extent *bex; 29 29 struct iomap iomap; 30 30 u32 device_generation = 0; ··· 181 181 int nr_iomaps; 182 182 183 183 nr_iomaps = nfsd4_block_decode_layoutupdate(lcp->lc_up_layout, 184 - lcp->lc_up_len, &iomaps, 1 << inode->i_blkbits); 184 + lcp->lc_up_len, &iomaps, i_blocksize(inode)); 185 185 if (nr_iomaps < 0) 186 186 return nfserrno(nr_iomaps); 187 187 ··· 375 375 int nr_iomaps; 376 376 377 377 nr_iomaps = nfsd4_scsi_decode_layoutupdate(lcp->lc_up_layout, 378 - lcp->lc_up_len, &iomaps, 1 << inode->i_blkbits); 378 + lcp->lc_up_len, &iomaps, i_blocksize(inode)); 379 379 if (nr_iomaps < 0) 380 380 return nfserrno(nr_iomaps); 381 381
+1 -1
fs/nilfs2/btnode.c
··· 50 50 brelse(bh); 51 51 BUG(); 52 52 } 53 - memset(bh->b_data, 0, 1 << inode->i_blkbits); 53 + memset(bh->b_data, 0, i_blocksize(inode)); 54 54 bh->b_bdev = inode->i_sb->s_bdev; 55 55 bh->b_blocknr = blocknr; 56 56 set_buffer_mapped(bh);
+2 -2
fs/nilfs2/inode.c
··· 51 51 { 52 52 struct nilfs_root *root = NILFS_I(inode)->i_root; 53 53 54 - inode_add_bytes(inode, (1 << inode->i_blkbits) * n); 54 + inode_add_bytes(inode, i_blocksize(inode) * n); 55 55 if (root) 56 56 atomic64_add(n, &root->blocks_count); 57 57 } ··· 60 60 { 61 61 struct nilfs_root *root = NILFS_I(inode)->i_root; 62 62 63 - inode_sub_bytes(inode, (1 << inode->i_blkbits) * n); 63 + inode_sub_bytes(inode, i_blocksize(inode) * n); 64 64 if (root) 65 65 atomic64_sub(n, &root->blocks_count); 66 66 }
+2 -2
fs/nilfs2/mdt.c
··· 57 57 set_buffer_mapped(bh); 58 58 59 59 kaddr = kmap_atomic(bh->b_page); 60 - memset(kaddr + bh_offset(bh), 0, 1 << inode->i_blkbits); 60 + memset(kaddr + bh_offset(bh), 0, i_blocksize(inode)); 61 61 if (init_block) 62 62 init_block(inode, bh, kaddr); 63 63 flush_dcache_page(bh->b_page); ··· 501 501 struct nilfs_mdt_info *mi = NILFS_MDT(inode); 502 502 503 503 mi->mi_entry_size = entry_size; 504 - mi->mi_entries_per_block = (1 << inode->i_blkbits) / entry_size; 504 + mi->mi_entries_per_block = i_blocksize(inode) / entry_size; 505 505 mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size); 506 506 } 507 507
+1 -1
fs/nilfs2/segment.c
··· 723 723 724 724 lock_page(page); 725 725 if (!page_has_buffers(page)) 726 - create_empty_buffers(page, 1 << inode->i_blkbits, 0); 726 + create_empty_buffers(page, i_blocksize(inode), 0); 727 727 unlock_page(page); 728 728 729 729 bh = head = page_buffers(page);
+1 -1
fs/ocfs2/aops.c
··· 608 608 int ret = 0; 609 609 struct buffer_head *head, *bh, *wait[2], **wait_bh = wait; 610 610 unsigned int block_end, block_start; 611 - unsigned int bsize = 1 << inode->i_blkbits; 611 + unsigned int bsize = i_blocksize(inode); 612 612 613 613 if (!page_has_buffers(page)) 614 614 create_empty_buffers(page, bsize, 0);
+1 -1
fs/ocfs2/file.c
··· 808 808 /* We know that zero_from is block aligned */ 809 809 for (block_start = zero_from; block_start < zero_to; 810 810 block_start = block_end) { 811 - block_end = block_start + (1 << inode->i_blkbits); 811 + block_end = block_start + i_blocksize(inode); 812 812 813 813 /* 814 814 * block_start is block-aligned. Bump it by one to force
+2 -2
fs/orangefs/orangefs-utils.c
··· 306 306 break; 307 307 case S_IFDIR: 308 308 inode->i_size = PAGE_SIZE; 309 - orangefs_inode->blksize = (1 << inode->i_blkbits); 309 + orangefs_inode->blksize = i_blocksize(inode); 310 310 spin_lock(&inode->i_lock); 311 311 inode_set_bytes(inode, inode->i_size); 312 312 spin_unlock(&inode->i_lock); ··· 316 316 if (new) { 317 317 inode->i_size = (loff_t)strlen(new_op-> 318 318 downcall.resp.getattr.link_target); 319 - orangefs_inode->blksize = (1 << inode->i_blkbits); 319 + orangefs_inode->blksize = i_blocksize(inode); 320 320 ret = strscpy(orangefs_inode->link_target, 321 321 new_op->downcall.resp.getattr.link_target, 322 322 ORANGEFS_NAME_MAX);
+1 -1
fs/reiserfs/file.c
··· 189 189 int ret = 0; 190 190 191 191 th.t_trans_id = 0; 192 - blocksize = 1 << inode->i_blkbits; 192 + blocksize = i_blocksize(inode); 193 193 194 194 if (logit) { 195 195 reiserfs_write_lock(s);
+1 -1
fs/reiserfs/inode.c
··· 525 525 * referenced in convert_tail_for_hole() that may be called from 526 526 * reiserfs_get_block() 527 527 */ 528 - bh_result->b_size = (1 << inode->i_blkbits); 528 + bh_result->b_size = i_blocksize(inode); 529 529 530 530 ret = reiserfs_get_block(inode, iblock, bh_result, 531 531 create | GET_BLOCK_NO_DANGLE);
+1 -1
fs/stat.c
··· 31 31 stat->atime = inode->i_atime; 32 32 stat->mtime = inode->i_mtime; 33 33 stat->ctime = inode->i_ctime; 34 - stat->blksize = (1 << inode->i_blkbits); 34 + stat->blksize = i_blocksize(inode); 35 35 stat->blocks = inode->i_blocks; 36 36 } 37 37
+1 -1
fs/udf/inode.c
··· 1193 1193 { 1194 1194 int err; 1195 1195 struct udf_inode_info *iinfo; 1196 - int bsize = 1 << inode->i_blkbits; 1196 + int bsize = i_blocksize(inode); 1197 1197 1198 1198 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 1199 1199 S_ISLNK(inode->i_mode)))
+8 -8
fs/xfs/xfs_aops.c
··· 103 103 unsigned int bsize; 104 104 105 105 ASSERT(bvec->bv_offset < PAGE_SIZE); 106 - ASSERT((bvec->bv_offset & ((1 << inode->i_blkbits) - 1)) == 0); 106 + ASSERT((bvec->bv_offset & (i_blocksize(inode) - 1)) == 0); 107 107 ASSERT(end < PAGE_SIZE); 108 - ASSERT((bvec->bv_len & ((1 << inode->i_blkbits) - 1)) == 0); 108 + ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0); 109 109 110 110 bh = head = page_buffers(bvec->bv_page); 111 111 ··· 349 349 { 350 350 struct xfs_inode *ip = XFS_I(inode); 351 351 struct xfs_mount *mp = ip->i_mount; 352 - ssize_t count = 1 << inode->i_blkbits; 352 + ssize_t count = i_blocksize(inode); 353 353 xfs_fileoff_t offset_fsb, end_fsb; 354 354 int error = 0; 355 355 int bmapi_flags = XFS_BMAPI_ENTIRE; ··· 758 758 break; 759 759 } 760 760 next_buffer: 761 - offset += 1 << inode->i_blkbits; 761 + offset += i_blocksize(inode); 762 762 763 763 } while ((bh = bh->b_this_page) != head); 764 764 ··· 846 846 LIST_HEAD(submit_list); 847 847 struct xfs_ioend *ioend, *next; 848 848 struct buffer_head *bh, *head; 849 - ssize_t len = 1 << inode->i_blkbits; 849 + ssize_t len = i_blocksize(inode); 850 850 int error = 0; 851 851 int count = 0; 852 852 int uptodate = 1; ··· 1210 1210 offset + mapping_size >= i_size_read(inode)) { 1211 1211 /* limit mapping to block that spans EOF */ 1212 1212 mapping_size = roundup_64(i_size_read(inode) - offset, 1213 - 1 << inode->i_blkbits); 1213 + i_blocksize(inode)); 1214 1214 } 1215 1215 if (mapping_size > LONG_MAX) 1216 1216 mapping_size = LONG_MAX; ··· 1241 1241 return -EIO; 1242 1242 1243 1243 offset = (xfs_off_t)iblock << inode->i_blkbits; 1244 - ASSERT(bh_result->b_size >= (1 << inode->i_blkbits)); 1244 + ASSERT(bh_result->b_size >= i_blocksize(inode)); 1245 1245 size = bh_result->b_size; 1246 1246 1247 1247 if (offset >= i_size_read(inode)) ··· 1389 1389 if (offset < end_offset) 1390 1390 set_buffer_dirty(bh); 1391 1391 bh = bh->b_this_page; 1392 - offset += 1 << inode->i_blkbits; 1392 + offset += i_blocksize(inode); 1393 1393 } while (bh != head); 1394 1394 } 1395 1395 /*
+2 -2
fs/xfs/xfs_file.c
··· 754 754 if (error) 755 755 goto out_unlock; 756 756 } else if (mode & FALLOC_FL_COLLAPSE_RANGE) { 757 - unsigned blksize_mask = (1 << inode->i_blkbits) - 1; 757 + unsigned int blksize_mask = i_blocksize(inode) - 1; 758 758 759 759 if (offset & blksize_mask || len & blksize_mask) { 760 760 error = -EINVAL; ··· 776 776 if (error) 777 777 goto out_unlock; 778 778 } else if (mode & FALLOC_FL_INSERT_RANGE) { 779 - unsigned blksize_mask = (1 << inode->i_blkbits) - 1; 779 + unsigned int blksize_mask = i_blocksize(inode) - 1; 780 780 781 781 new_size = i_size_read(inode) + len; 782 782 if (offset & blksize_mask || len & blksize_mask) {
+5
include/linux/fs.h
··· 655 655 void *i_private; /* fs or device private pointer */ 656 656 }; 657 657 658 + static inline unsigned int i_blocksize(const struct inode *node) 659 + { 660 + return (1 << node->i_blkbits); 661 + } 662 + 658 663 static inline int inode_unhashed(struct inode *inode) 659 664 { 660 665 return hlist_unhashed(&inode->i_hash);
+1 -1
mm/truncate.c
··· 786 786 */ 787 787 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to) 788 788 { 789 - int bsize = 1 << inode->i_blkbits; 789 + int bsize = i_blocksize(inode); 790 790 loff_t rounded_from; 791 791 struct page *page; 792 792 pgoff_t index;