Merge branch 'for-linus' of git://oss.sgi.com:8090/xfs/xfs-2.6

* 'for-linus' of git://oss.sgi.com:8090/xfs/xfs-2.6: (37 commits)
[XFS] Fix lockdep annotations for xfs_lock_inodes
[LIB]: export radix_tree_preload()
[XFS] Fix XFS_IOC_FSBULKSTAT{,_SINGLE} & XFS_IOC_FSINUMBERS in compat mode
[XFS] Compat ioctl handler for handle operations
[XFS] Compat ioctl handler for XFS_IOC_FSGEOMETRY_V1.
[XFS] Clean up function name handling in tracing code
[XFS] Quota inode has no parent.
[XFS] Concurrent Multi-File Data Streams
[XFS] Use uninitialized_var macro to stop warning about rtx
[XFS] XFS should not be looking at filp reference counts
[XFS] Use is_power_of_2 instead of open coding checks
[XFS] Reduce shouting by removing unnecessary macros from dir2 code.
[XFS] Simplify XFS min/max macros.
[XFS] Kill off xfs_count_bits
[XFS] Cancel transactions on xfs_itruncate_start error.
[XFS] Use do_div() on 64 bit types.
[XFS] Fix remount,readonly path to flush everything correctly.
[XFS] Cleanup inode extent size hint extraction
[XFS] Prevent ENOSPC from aborting transactions that need to succeed
[XFS] Prevent deadlock when flushing inodes on unmount
...

+3263 -1140
+2
fs/xfs/Makefile-linux-2.6
···
 	xfs_dir2_sf.o \
 	xfs_error.o \
 	xfs_extfree_item.o \
+	xfs_filestream.o \
 	xfs_fsops.o \
 	xfs_ialloc.o \
 	xfs_ialloc_btree.o \
···
 	xfs_log.o \
 	xfs_log_recover.o \
 	xfs_mount.o \
+	xfs_mru_cache.o \
 	xfs_rename.o \
 	xfs_trans.o \
 	xfs_trans_ail.o \
-19
fs/xfs/linux-2.6/kmem.h
···
 extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
 extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
 
-/*
- * Low memory cache shrinkers
- */
-
-typedef struct shrinker *kmem_shaker_t;
-typedef int (*kmem_shake_func_t)(int, gfp_t);
-
-static inline kmem_shaker_t
-kmem_shake_register(kmem_shake_func_t sfunc)
-{
-	return set_shrinker(DEFAULT_SEEKS, sfunc);
-}
-
-static inline void
-kmem_shake_deregister(kmem_shaker_t shrinker)
-{
-	remove_shrinker(shrinker);
-}
-
 static inline int
 kmem_shake_allow(gfp_t gfp_mask)
 {
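
Note: the removed kmem_shake_* wrappers were one-line veneers over the generic shrinker interface of this kernel generation, so callers now register with set_shrinker()/remove_shrinker() directly (see the xfs_buf.c and xfs_qm.c hunks below). A minimal sketch of direct use — the my_* names are illustrative, not from this merge:

    /* Shrinker callback: told how many entries to scan and under what
     * allocation context; returns the number of reclaimable entries. */
    static int my_shake(int nr_to_scan, gfp_t gfp_mask)
    {
            return 0;               /* nothing cached in this sketch */
    }

    static struct shrinker *my_shaker;

    static int __init my_init(void)
    {
            my_shaker = set_shrinker(DEFAULT_SEEKS, my_shake);
            return my_shaker ? 0 : -ENOMEM;
    }

    static void my_exit(void)
    {
            remove_shrinker(my_shaker);
    }
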
+34 -9
fs/xfs/linux-2.6/xfs_aops.c
···
 
 /*
  * Schedule IO completion handling on a xfsdatad if this was
- * the final hold on this ioend.
+ * the final hold on this ioend. If we are asked to wait,
+ * flush the workqueue.
  */
 STATIC void
 xfs_finish_ioend(
-	xfs_ioend_t	*ioend)
+	xfs_ioend_t	*ioend,
+	int		wait)
 {
-	if (atomic_dec_and_test(&ioend->io_remaining))
+	if (atomic_dec_and_test(&ioend->io_remaining)) {
 		queue_work(xfsdatad_workqueue, &ioend->io_work);
+		if (wait)
+			flush_workqueue(xfsdatad_workqueue);
+	}
 }
 
 /*
···
 	xfs_fsize_t		bsize;
 
 	ip = xfs_vtoi(ioend->io_vnode);
+	if (!ip)
+		return;
 
 	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
 	ASSERT(ioend->io_type != IOMAP_READ);
···
 	bio->bi_end_io = NULL;
 	bio_put(bio);
 
-	xfs_finish_ioend(ioend);
+	xfs_finish_ioend(ioend, 0);
 	return 0;
 }
 
···
 	}
 	if (bio)
 		xfs_submit_ioend_bio(ioend, bio);
-	xfs_finish_ioend(ioend);
+	xfs_finish_ioend(ioend, 0);
 	} while ((ioend = next) != NULL);
 }
 
···
 		if (buffer_unwritten(bh) || buffer_delay(bh) ||
 		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
 		     !buffer_mapped(bh) && (unmapped || startio))) {
+			int new_ioend = 0;
+
 			/*
 			 * Make sure we don't use a read-only iomap
 			 */
···
 			}
 
 			if (!iomap_valid) {
+				/*
+				 * if we didn't have a valid mapping then we
+				 * need to ensure that we put the new mapping
+				 * in a new ioend structure. This needs to be
+				 * done to ensure that the ioends correctly
+				 * reflect the block mappings at io completion
+				 * for unwritten extent conversion.
+				 */
+				new_ioend = 1;
 				if (type == IOMAP_NEW) {
 					size = xfs_probe_cluster(inode,
 							page, bh, head, 0);
···
 				if (startio) {
 					xfs_add_to_ioend(inode, bh, offset,
 							type, &ioend,
-							!iomap_valid);
+							new_ioend);
 				} else {
 					set_buffer_dirty(bh);
 					unlock_buffer(bh);
···
  * This is not necessary for synchronous direct I/O, but we do
  * it anyway to keep the code uniform and simpler.
  *
+ * Well, if only it were that simple. Because synchronous direct I/O
+ * requires extent conversion to occur *before* we return to userspace,
+ * we have to wait for extent conversion to complete. Look at the
+ * iocb that has been passed to us to determine if this is AIO or
+ * not. If it is synchronous, tell xfs_finish_ioend() to kick the
+ * workqueue and wait for it to complete.
+ *
  * The core direct I/O code might be changed to always call the
  * completion handler in the future, in which case all this can
  * go away.
···
 	ioend->io_offset = offset;
 	ioend->io_size = size;
 	if (ioend->io_type == IOMAP_READ) {
-		xfs_finish_ioend(ioend);
+		xfs_finish_ioend(ioend, 0);
 	} else if (private && size > 0) {
-		xfs_finish_ioend(ioend);
+		xfs_finish_ioend(ioend, is_sync_kiocb(iocb));
 	} else {
 		/*
 		 * A direct I/O write ioend starts it's life in unwritten
···
 		 * handler.
 		 */
 		INIT_WORK(&ioend->io_work, xfs_end_bio_written);
-		xfs_finish_ioend(ioend);
+		xfs_finish_ioend(ioend, 0);
 	}
 
 	/*
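
The wait-or-not decision above hangs on is_sync_kiocb(), which distinguishes a plain synchronous read()/write() from an io_submit()-style AIO request. Roughly what that predicate looks like in kernels of this vintage (paraphrased from memory of include/linux/aio.h; verify against the tree):

    /* synchronous iocbs are stack-allocated with a sentinel key;
     * AIO iocbs submitted via io_submit() carry a real key */
    #define KIOCB_SYNC_KEY          (~0U)
    #define is_sync_kiocb(iocb)     ((iocb)->ki_key == KIOCB_SYNC_KEY)
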
+27 -32
fs/xfs/linux-2.6/xfs_buf.c
···
 #include <linux/freezer.h>
 
 static kmem_zone_t *xfs_buf_zone;
-static kmem_shaker_t xfs_buf_shake;
+static struct shrinker *xfs_buf_shake;
 STATIC int xfsbufd(void *);
 STATIC int xfsbufd_wakeup(int, gfp_t);
 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
···
 
 	ASSERT(list_empty(&bp->b_hash_list));
 
-	if (bp->b_flags & _XBF_PAGE_CACHE) {
+	if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
 		uint		i;
 
 		if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
···
 		for (i = 0; i < bp->b_page_count; i++) {
 			struct page	*page = bp->b_pages[i];
 
-			ASSERT(!PagePrivate(page));
+			if (bp->b_flags & _XBF_PAGE_CACHE)
+				ASSERT(!PagePrivate(page));
 			page_cache_release(page);
 		}
-		_xfs_buf_free_pages(bp);
-	} else if (bp->b_flags & _XBF_KMEM_ALLOC) {
-		/*
-		 * XXX(hch): bp->b_count_desired might be incorrect (see
-		 * xfs_buf_associate_memory for details), but fortunately
-		 * the Linux version of kmem_free ignores the len argument..
-		 */
-		kmem_free(bp->b_addr, bp->b_count_desired);
 		_xfs_buf_free_pages(bp);
 	}
 
···
 	size_t			len,
 	xfs_buftarg_t		*target)
 {
-	size_t			malloc_len = len;
+	unsigned long		page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
+	int			error, i;
 	xfs_buf_t		*bp;
-	void			*data;
-	int			error;
 
 	bp = xfs_buf_allocate(0);
 	if (unlikely(bp == NULL))
 		goto fail;
 	_xfs_buf_initialize(bp, target, 0, len, 0);
 
- try_again:
-	data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL | KM_LARGE);
-	if (unlikely(data == NULL))
+	error = _xfs_buf_get_pages(bp, page_count, 0);
+	if (error)
 		goto fail_free_buf;
 
-	/* check whether alignment matches.. */
-	if ((__psunsigned_t)data !=
-	    ((__psunsigned_t)data & ~target->bt_smask)) {
-		/* .. else double the size and try again */
-		kmem_free(data, malloc_len);
-		malloc_len <<= 1;
-		goto try_again;
+	for (i = 0; i < page_count; i++) {
+		bp->b_pages[i] = alloc_page(GFP_KERNEL);
+		if (!bp->b_pages[i])
+			goto fail_free_mem;
 	}
+	bp->b_flags |= _XBF_PAGES;
 
-	error = xfs_buf_associate_memory(bp, data, len);
-	if (error)
+	error = _xfs_buf_map_pages(bp, XBF_MAPPED);
+	if (unlikely(error)) {
+		printk(KERN_WARNING "%s: failed to map pages\n",
+				__FUNCTION__);
 		goto fail_free_mem;
-	bp->b_flags |= _XBF_KMEM_ALLOC;
+	}
 
 	xfs_buf_unlock(bp);
 
-	XB_TRACE(bp, "no_daddr", data);
+	XB_TRACE(bp, "no_daddr", len);
 	return bp;
+
  fail_free_mem:
-	kmem_free(data, malloc_len);
+	while (--i >= 0)
+		__free_page(bp->b_pages[i]);
+	_xfs_buf_free_pages(bp);
  fail_free_buf:
-	xfs_buf_free(bp);
+	xfs_buf_deallocate(bp);
  fail:
 	return NULL;
 }
···
 	int			external)
 {
 	xfs_flush_buftarg(btp, 1);
+	xfs_blkdev_issue_flush(btp);
 	if (external)
 		xfs_blkdev_put(btp->bt_bdev);
 	xfs_free_bufhash(btp);
···
 	if (!xfsdatad_workqueue)
 		goto out_destroy_xfslogd_workqueue;
 
-	xfs_buf_shake = kmem_shake_register(xfsbufd_wakeup);
+	xfs_buf_shake = set_shrinker(DEFAULT_SEEKS, xfsbufd_wakeup);
 	if (!xfs_buf_shake)
 		goto out_destroy_xfsdatad_workqueue;
 
···
 void
 xfs_buf_terminate(void)
 {
-	kmem_shake_deregister(xfs_buf_shake);
+	remove_shrinker(xfs_buf_shake);
 	destroy_workqueue(xfsdatad_workqueue);
 	destroy_workqueue(xfslogd_workqueue);
 	kmem_zone_destroy(xfs_buf_zone);
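
The new failure path in xfs_buf_get_noaddr() is the usual allocate-then-unwind idiom: since i holds the index of the first page that failed, "while (--i >= 0)" frees exactly the pages already obtained, in reverse order. A generic sketch of the idiom (alloc_pages_array is an illustrative name, not part of this merge):

    static int alloc_pages_array(struct page **pages, int page_count)
    {
            int i;

            for (i = 0; i < page_count; i++) {
                    pages[i] = alloc_page(GFP_KERNEL);
                    if (!pages[i])
                            goto unwind;
            }
            return 0;

    unwind:
            while (--i >= 0)        /* free only what we actually got */
                    __free_page(pages[i]);
            return -ENOMEM;
    }
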
+1 -1
fs/xfs/linux-2.6/xfs_buf.h
···
 
 	/* flags used only internally */
 	_XBF_PAGE_CACHE = (1 << 17),/* backed by pagecache		   */
-	_XBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc()		   */
+	_XBF_PAGES	= (1 << 18),/* backed by refcounted pages	   */
 	_XBF_RUN_QUEUES = (1 << 19),/* run block device task queue	   */
 	_XBF_DELWRI_Q	= (1 << 21),/* buffer on delwri queue		   */
 } xfs_buf_flags_t;
-11
fs/xfs/linux-2.6/xfs_file.c
···
 }
 
 STATIC int
-xfs_file_close(
-	struct file	*filp,
-	fl_owner_t	id)
-{
-	return -bhv_vop_close(vn_from_inode(filp->f_path.dentry->d_inode), 0,
-				file_count(filp) > 1 ? L_FALSE : L_TRUE, NULL);
-}
-
-STATIC int
 xfs_file_release(
 	struct inode	*inode,
 	struct file	*filp)
···
 #endif
 	.mmap		= xfs_file_mmap,
 	.open		= xfs_file_open,
-	.flush		= xfs_file_close,
 	.release	= xfs_file_release,
 	.fsync		= xfs_file_fsync,
 #ifdef HAVE_FOP_OPEN_EXEC
···
 #endif
 	.mmap		= xfs_file_mmap,
 	.open		= xfs_file_open,
-	.flush		= xfs_file_close,
 	.release	= xfs_file_release,
 	.fsync		= xfs_file_fsync,
 };
+1
fs/xfs/linux-2.6/xfs_globals.c
···
 	.inherit_nosym	= {	0,		0,		1	},
 	.rotorstep	= {	1,		1,		255	},
 	.inherit_nodfrg	= {	0,		1,		1	},
+	.fstrm_timer	= {	1,		50,		3600*100},
 };
 
 /*
+1 -1
fs/xfs/linux-2.6/xfs_ioctl.c
···
 
 	if (cmd == XFS_IOC_FSINUMBERS)
 		error = xfs_inumbers(mp, &inlast, &count,
-					bulkreq.ubuffer);
+					bulkreq.ubuffer, xfs_inumbers_fmt);
 	else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE)
 		error = xfs_bulkstat_single(mp, &inlast,
 						bulkreq.ubuffer, &done);
+293 -28
fs/xfs/linux-2.6/xfs_ioctl32.c
···
 #include <linux/fs.h>
 #include <asm/uaccess.h>
 #include "xfs.h"
-#include "xfs_types.h"
 #include "xfs_fs.h"
+#include "xfs_bit.h"
+#include "xfs_log.h"
+#include "xfs_inum.h"
+#include "xfs_trans.h"
+#include "xfs_sb.h"
+#include "xfs_ag.h"
+#include "xfs_dir2.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+#include "xfs_bmap_btree.h"
+#include "xfs_attr_sf.h"
+#include "xfs_dir2_sf.h"
 #include "xfs_vfs.h"
 #include "xfs_vnode.h"
+#include "xfs_dinode.h"
+#include "xfs_inode.h"
+#include "xfs_itable.h"
+#include "xfs_error.h"
 #include "xfs_dfrag.h"
 
 #define _NATIVE_IOC(cmd, type) \
···
 
 #if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
 #define BROKEN_X86_ALIGNMENT
+#define _PACKED __attribute__((packed))
 /* on ia32 l_start is on a 32-bit boundary */
 typedef struct xfs_flock64_32 {
 	__s16		l_type;
···
 	return (unsigned long)p;
 }
 
+typedef struct compat_xfs_fsop_geom_v1 {
+	__u32		blocksize;	/* filesystem (data) block size */
+	__u32		rtextsize;	/* realtime extent size		*/
+	__u32		agblocks;	/* fsblocks in an AG		*/
+	__u32		agcount;	/* number of allocation groups	*/
+	__u32		logblocks;	/* fsblocks in the log		*/
+	__u32		sectsize;	/* (data) sector size, bytes	*/
+	__u32		inodesize;	/* inode size in bytes		*/
+	__u32		imaxpct;	/* max allowed inode space(%)	*/
+	__u64		datablocks;	/* fsblocks in data subvolume	*/
+	__u64		rtblocks;	/* fsblocks in realtime subvol	*/
+	__u64		rtextents;	/* rt extents in realtime subvol*/
+	__u64		logstart;	/* starting fsblock of the log	*/
+	unsigned char	uuid[16];	/* unique id of the filesystem	*/
+	__u32		sunit;		/* stripe unit, fsblocks	*/
+	__u32		swidth;		/* stripe width, fsblocks	*/
+	__s32		version;	/* structure version		*/
+	__u32		flags;		/* superblock version flags	*/
+	__u32		logsectsize;	/* log sector size, bytes	*/
+	__u32		rtsectsize;	/* realtime sector size, bytes	*/
+	__u32		dirblocksize;	/* directory block size, bytes	*/
+} __attribute__((packed)) compat_xfs_fsop_geom_v1_t;
+
+#define XFS_IOC_FSGEOMETRY_V1_32  \
+	_IOR ('X', 100, struct compat_xfs_fsop_geom_v1)
+
+STATIC unsigned long xfs_ioctl32_geom_v1(unsigned long arg)
+{
+	compat_xfs_fsop_geom_v1_t __user *p32 = (void __user *)arg;
+	xfs_fsop_geom_v1_t __user *p = compat_alloc_user_space(sizeof(*p));
+
+	if (copy_in_user(p, p32, sizeof(*p32)))
+		return -EFAULT;
+	return (unsigned long)p;
+}
+
+typedef struct compat_xfs_inogrp {
+	__u64		xi_startino;	/* starting inode number	*/
+	__s32		xi_alloccount;	/* # bits set in allocmask	*/
+	__u64		xi_allocmask;	/* mask of allocated inodes	*/
+} __attribute__((packed)) compat_xfs_inogrp_t;
+
+STATIC int xfs_inumbers_fmt_compat(
+	void __user *ubuffer,
+	const xfs_inogrp_t *buffer,
+	long count,
+	long *written)
+{
+	compat_xfs_inogrp_t *p32 = ubuffer;
+	long i;
+
+	for (i = 0; i < count; i++) {
+		if (put_user(buffer[i].xi_startino, &p32[i].xi_startino) ||
+		    put_user(buffer[i].xi_alloccount, &p32[i].xi_alloccount) ||
+		    put_user(buffer[i].xi_allocmask, &p32[i].xi_allocmask))
+			return -EFAULT;
+	}
+	*written = count * sizeof(*p32);
+	return 0;
+}
+
 #else
 
-typedef struct xfs_fsop_bulkreq32 {
+#define xfs_inumbers_fmt_compat xfs_inumbers_fmt
+#define _PACKED
+
+#endif
+
+/* XFS_IOC_FSBULKSTAT and friends */
+
+typedef struct compat_xfs_bstime {
+	__s32		tv_sec;		/* seconds		*/
+	__s32		tv_nsec;	/* and nanoseconds	*/
+} compat_xfs_bstime_t;
+
+STATIC int xfs_bstime_store_compat(
+	compat_xfs_bstime_t __user *p32,
+	const xfs_bstime_t *p)
+{
+	__s32 sec32;
+
+	sec32 = p->tv_sec;
+	if (put_user(sec32, &p32->tv_sec) ||
+	    put_user(p->tv_nsec, &p32->tv_nsec))
+		return -EFAULT;
+	return 0;
+}
+
+typedef struct compat_xfs_bstat {
+	__u64		bs_ino;		/* inode number			*/
+	__u16		bs_mode;	/* type and mode		*/
+	__u16		bs_nlink;	/* number of links		*/
+	__u32		bs_uid;		/* user id			*/
+	__u32		bs_gid;		/* group id			*/
+	__u32		bs_rdev;	/* device value			*/
+	__s32		bs_blksize;	/* block size			*/
+	__s64		bs_size;	/* file size			*/
+	compat_xfs_bstime_t bs_atime;	/* access time			*/
+	compat_xfs_bstime_t bs_mtime;	/* modify time			*/
+	compat_xfs_bstime_t bs_ctime;	/* inode change time		*/
+	int64_t		bs_blocks;	/* number of blocks		*/
+	__u32		bs_xflags;	/* extended flags		*/
+	__s32		bs_extsize;	/* extent size			*/
+	__s32		bs_extents;	/* number of extents		*/
+	__u32		bs_gen;		/* generation count		*/
+	__u16		bs_projid;	/* project id			*/
+	unsigned char	bs_pad[14];	/* pad space, unused		*/
+	__u32		bs_dmevmask;	/* DMIG event mask		*/
+	__u16		bs_dmstate;	/* DMIG state info		*/
+	__u16		bs_aextents;	/* attribute number of extents	*/
+} _PACKED compat_xfs_bstat_t;
+
+STATIC int xfs_bulkstat_one_fmt_compat(
+	void __user *ubuffer,
+	const xfs_bstat_t *buffer)
+{
+	compat_xfs_bstat_t __user *p32 = ubuffer;
+
+	if (put_user(buffer->bs_ino, &p32->bs_ino) ||
+	    put_user(buffer->bs_mode, &p32->bs_mode) ||
+	    put_user(buffer->bs_nlink, &p32->bs_nlink) ||
+	    put_user(buffer->bs_uid, &p32->bs_uid) ||
+	    put_user(buffer->bs_gid, &p32->bs_gid) ||
+	    put_user(buffer->bs_rdev, &p32->bs_rdev) ||
+	    put_user(buffer->bs_blksize, &p32->bs_blksize) ||
+	    put_user(buffer->bs_size, &p32->bs_size) ||
+	    xfs_bstime_store_compat(&p32->bs_atime, &buffer->bs_atime) ||
+	    xfs_bstime_store_compat(&p32->bs_mtime, &buffer->bs_mtime) ||
+	    xfs_bstime_store_compat(&p32->bs_ctime, &buffer->bs_ctime) ||
+	    put_user(buffer->bs_blocks, &p32->bs_blocks) ||
+	    put_user(buffer->bs_xflags, &p32->bs_xflags) ||
+	    put_user(buffer->bs_extsize, &p32->bs_extsize) ||
+	    put_user(buffer->bs_extents, &p32->bs_extents) ||
+	    put_user(buffer->bs_gen, &p32->bs_gen) ||
+	    put_user(buffer->bs_projid, &p32->bs_projid) ||
+	    put_user(buffer->bs_dmevmask, &p32->bs_dmevmask) ||
+	    put_user(buffer->bs_dmstate, &p32->bs_dmstate) ||
+	    put_user(buffer->bs_aextents, &p32->bs_aextents))
+		return -EFAULT;
+	return sizeof(*p32);
+}
+
+typedef struct compat_xfs_fsop_bulkreq {
 	compat_uptr_t	lastip;		/* last inode # pointer		*/
 	__s32		icount;		/* count of entries in buffer	*/
 	compat_uptr_t	ubuffer;	/* user buffer for inode desc.	*/
-	__s32		ocount;		/* output count pointer		*/
-} xfs_fsop_bulkreq32_t;
+	compat_uptr_t	ocount;		/* output count pointer	*/
+} compat_xfs_fsop_bulkreq_t;
 
-STATIC unsigned long
-xfs_ioctl32_bulkstat(
-	unsigned long		arg)
+#define XFS_IOC_FSBULKSTAT_32 \
+	_IOWR('X', 101, struct compat_xfs_fsop_bulkreq)
+#define XFS_IOC_FSBULKSTAT_SINGLE_32 \
+	_IOWR('X', 102, struct compat_xfs_fsop_bulkreq)
+#define XFS_IOC_FSINUMBERS_32 \
+	_IOWR('X', 103, struct compat_xfs_fsop_bulkreq)
+
+/* copied from xfs_ioctl.c */
+STATIC int
+xfs_ioc_bulkstat_compat(
+	xfs_mount_t		*mp,
+	unsigned int		cmd,
+	void			__user *arg)
 {
-	xfs_fsop_bulkreq32_t	__user *p32 = (void __user *)arg;
-	xfs_fsop_bulkreq_t	__user *p = compat_alloc_user_space(sizeof(*p));
+	compat_xfs_fsop_bulkreq_t __user *p32 = (void __user *)arg;
 	u32			addr;
+	xfs_fsop_bulkreq_t	bulkreq;
+	int			count;	/* # of records returned */
+	xfs_ino_t		inlast;	/* last inode number */
+	int			done;
+	int			error;
 
-	if (get_user(addr, &p32->lastip) ||
-	    put_user(compat_ptr(addr), &p->lastip) ||
-	    copy_in_user(&p->icount, &p32->icount, sizeof(s32)) ||
-	    get_user(addr, &p32->ubuffer) ||
-	    put_user(compat_ptr(addr), &p->ubuffer) ||
-	    get_user(addr, &p32->ocount) ||
-	    put_user(compat_ptr(addr), &p->ocount))
+	/* done = 1 if there are more stats to get and if bulkstat */
+	/* should be called again (unused here, but used in dmapi) */
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return -XFS_ERROR(EIO);
+
+	if (get_user(addr, &p32->lastip))
+		return -EFAULT;
+	bulkreq.lastip = compat_ptr(addr);
+	if (get_user(bulkreq.icount, &p32->icount) ||
+	    get_user(addr, &p32->ubuffer))
+		return -EFAULT;
+	bulkreq.ubuffer = compat_ptr(addr);
+	if (get_user(addr, &p32->ocount))
+		return -EFAULT;
+	bulkreq.ocount = compat_ptr(addr);
+
+	if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64)))
+		return -XFS_ERROR(EFAULT);
+
+	if ((count = bulkreq.icount) <= 0)
+		return -XFS_ERROR(EINVAL);
+
+	if (cmd == XFS_IOC_FSINUMBERS)
+		error = xfs_inumbers(mp, &inlast, &count,
+				bulkreq.ubuffer, xfs_inumbers_fmt_compat);
+	else {
+		/* declare a var to get a warning in case the type changes */
+		bulkstat_one_fmt_pf formatter = xfs_bulkstat_one_fmt_compat;
+		error = xfs_bulkstat(mp, &inlast, &count,
+			xfs_bulkstat_one, formatter,
+			sizeof(compat_xfs_bstat_t), bulkreq.ubuffer,
+			BULKSTAT_FG_QUICK, &done);
+	}
+	if (error)
+		return -error;
+
+	if (bulkreq.ocount != NULL) {
+		if (copy_to_user(bulkreq.lastip, &inlast,
+				sizeof(xfs_ino_t)))
+			return -XFS_ERROR(EFAULT);
+
+		if (copy_to_user(bulkreq.ocount, &count, sizeof(count)))
+			return -XFS_ERROR(EFAULT);
+	}
+
+	return 0;
+}
+
+typedef struct compat_xfs_fsop_handlereq {
+	__u32		fd;		/* fd for FD_TO_HANDLE		*/
+	compat_uptr_t	path;		/* user pathname		*/
+	__u32		oflags;		/* open flags			*/
+	compat_uptr_t	ihandle;	/* user supplied handle		*/
+	__u32		ihandlen;	/* user supplied length		*/
+	compat_uptr_t	ohandle;	/* user buffer for handle	*/
+	compat_uptr_t	ohandlen;	/* user buffer length		*/
+} compat_xfs_fsop_handlereq_t;
+
+#define XFS_IOC_PATH_TO_FSHANDLE_32 \
+	_IOWR('X', 104, struct compat_xfs_fsop_handlereq)
+#define XFS_IOC_PATH_TO_HANDLE_32 \
+	_IOWR('X', 105, struct compat_xfs_fsop_handlereq)
+#define XFS_IOC_FD_TO_HANDLE_32 \
+	_IOWR('X', 106, struct compat_xfs_fsop_handlereq)
+#define XFS_IOC_OPEN_BY_HANDLE_32 \
+	_IOWR('X', 107, struct compat_xfs_fsop_handlereq)
+#define XFS_IOC_READLINK_BY_HANDLE_32 \
+	_IOWR('X', 108, struct compat_xfs_fsop_handlereq)
+
+STATIC unsigned long xfs_ioctl32_fshandle(unsigned long arg)
+{
+	compat_xfs_fsop_handlereq_t __user *p32 = (void __user *)arg;
+	xfs_fsop_handlereq_t __user *p = compat_alloc_user_space(sizeof(*p));
+	u32 addr;
+
+	if (copy_in_user(&p->fd, &p32->fd, sizeof(__u32)) ||
+	    get_user(addr, &p32->path) ||
+	    put_user(compat_ptr(addr), &p->path) ||
+	    copy_in_user(&p->oflags, &p32->oflags, sizeof(__u32)) ||
+	    get_user(addr, &p32->ihandle) ||
+	    put_user(compat_ptr(addr), &p->ihandle) ||
+	    copy_in_user(&p->ihandlen, &p32->ihandlen, sizeof(__u32)) ||
+	    get_user(addr, &p32->ohandle) ||
+	    put_user(compat_ptr(addr), &p->ohandle) ||
+	    get_user(addr, &p32->ohandlen) ||
+	    put_user(compat_ptr(addr), &p->ohandlen))
 		return -EFAULT;
 
 	return (unsigned long)p;
 }
-#endif
+
 
 STATIC long
 xfs_compat_ioctl(
···
 
 	switch (cmd) {
 	case XFS_IOC_DIOINFO:
-	case XFS_IOC_FSGEOMETRY_V1:
 	case XFS_IOC_FSGEOMETRY:
 	case XFS_IOC_GETVERSION:
 	case XFS_IOC_GETXFLAGS:
···
 	case XFS_IOC_GETBMAPA:
 	case XFS_IOC_GETBMAPX:
 /* not handled
-	case XFS_IOC_FD_TO_HANDLE:
-	case XFS_IOC_PATH_TO_HANDLE:
-	case XFS_IOC_PATH_TO_FSHANDLE:
-	case XFS_IOC_OPEN_BY_HANDLE:
 	case XFS_IOC_FSSETDM_BY_HANDLE:
-	case XFS_IOC_READLINK_BY_HANDLE:
 	case XFS_IOC_ATTRLIST_BY_HANDLE:
 	case XFS_IOC_ATTRMULTI_BY_HANDLE:
 */
···
 		arg = xfs_ioctl32_flock(arg);
 		cmd = _NATIVE_IOC(cmd, struct xfs_flock64);
 		break;
+	case XFS_IOC_FSGEOMETRY_V1_32:
+		arg = xfs_ioctl32_geom_v1(arg);
+		cmd = _NATIVE_IOC(cmd, struct xfs_fsop_geom_v1);
+		break;
 
 #else	/* These are handled fine if no alignment issues */
 	case XFS_IOC_ALLOCSP:
···
 	case XFS_IOC_FREESP64:
 	case XFS_IOC_RESVSP64:
 	case XFS_IOC_UNRESVSP64:
+	case XFS_IOC_FSGEOMETRY_V1:
 		break;
 
 	/* xfs_bstat_t still has wrong u32 vs u64 alignment */
 	case XFS_IOC_SWAPEXT:
 		break;
 
-	case XFS_IOC_FSBULKSTAT_SINGLE:
-	case XFS_IOC_FSBULKSTAT:
-	case XFS_IOC_FSINUMBERS:
-		arg = xfs_ioctl32_bulkstat(arg);
-		break;
 #endif
+	case XFS_IOC_FSBULKSTAT_32:
+	case XFS_IOC_FSBULKSTAT_SINGLE_32:
+	case XFS_IOC_FSINUMBERS_32:
+		cmd = _NATIVE_IOC(cmd, struct xfs_fsop_bulkreq);
+		return xfs_ioc_bulkstat_compat(XFS_BHVTOI(VNHEAD(vp))->i_mount,
+				cmd, (void*)arg);
+	case XFS_IOC_FD_TO_HANDLE_32:
+	case XFS_IOC_PATH_TO_HANDLE_32:
+	case XFS_IOC_PATH_TO_FSHANDLE_32:
+	case XFS_IOC_OPEN_BY_HANDLE_32:
+	case XFS_IOC_READLINK_BY_HANDLE_32:
+		arg = xfs_ioctl32_fshandle(arg);
+		cmd = _NATIVE_IOC(cmd, struct xfs_fsop_handlereq);
+		break;
 	default:
 		return -ENOIOCTLCMD;
 	}
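
The handle and geometry thunks above all follow one pattern: rebuild a native-layout struct in userspace scratch space via compat_alloc_user_space(), widening each compat_uptr_t into a real user pointer, then hand the native ioctl handler the new address. A stripped-down sketch of that shape with a hypothetical one-member struct (foo/foo32 are not part of this merge):

    struct foo32 {
            compat_uptr_t   buf;    /* 32-bit user pointer */
    };

    struct foo {
            void __user     *buf;   /* native user pointer */
    };

    STATIC unsigned long foo_thunk(unsigned long arg)
    {
            struct foo32 __user *p32 = (void __user *)arg;
            struct foo __user *p = compat_alloc_user_space(sizeof(*p));
            u32 addr;

            /* widen the 32-bit pointer field into the native layout */
            if (get_user(addr, &p32->buf) ||
                put_user(compat_ptr(addr), &p->buf))
                    return -EFAULT;
            return (unsigned long)p;
    }
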
+1
fs/xfs/linux-2.6/xfs_linux.h
···
 #define xfs_inherit_nosymlinks	xfs_params.inherit_nosym.val
 #define xfs_rotorstep		xfs_params.rotorstep.val
 #define xfs_inherit_nodefrag	xfs_params.inherit_nodfrg.val
+#define xfs_fstrm_centisecs	xfs_params.fstrm_timer.val
 
 #define current_cpu()		(raw_smp_processor_id())
 #define current_pid()		(current->pid)
+3 -2
fs/xfs/linux-2.6/xfs_super.c
···
 
 	if (!(vfsp->vfs_flag & VFS_RDONLY))
 		error = bhv_vfs_sync(vfsp, SYNC_FSDATA | SYNC_BDFLUSH | \
-					SYNC_ATTR | SYNC_REFCACHE, NULL);
+					SYNC_ATTR | SYNC_REFCACHE | SYNC_SUPER,
+					NULL);
 	vfsp->vfs_sync_seq++;
 	wake_up(&vfsp->vfs_wait_single_sync_task);
 }
···
 		 * occur here so don't bother flushing the buftarg (i.e
 		 * SYNC_QUIESCE) because it'll just get dirty again.
 		 */
-		flags = SYNC_FSDATA | SYNC_DELWRI | SYNC_WAIT | SYNC_IOWAIT;
+		flags = SYNC_DATA_QUIESCE;
 	} else
 		flags = SYNC_FSDATA | (wait ? SYNC_WAIT : 0);
 
+11
fs/xfs/linux-2.6/xfs_sysctl.c
···
 		.extra1		= &xfs_params.inherit_nodfrg.min,
 		.extra2		= &xfs_params.inherit_nodfrg.max
 	},
+	{
+		.ctl_name	= XFS_FILESTREAM_TIMER,
+		.procname	= "filestream_centisecs",
+		.data		= &xfs_params.fstrm_timer.val,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec_minmax,
+		.strategy	= &sysctl_intvec,
+		.extra1		= &xfs_params.fstrm_timer.min,
+		.extra2		= &xfs_params.fstrm_timer.max,
+	},
 	/* please keep this the last entry */
 #ifdef CONFIG_PROC_FS
 	{
+2
fs/xfs/linux-2.6/xfs_sysctl.h
···
 	xfs_sysctl_val_t inherit_nosym;	/* Inherit the "nosymlinks" flag. */
 	xfs_sysctl_val_t rotorstep;	/* inode32 AG rotoring control knob */
 	xfs_sysctl_val_t inherit_nodfrg;/* Inherit the "nodefrag" inode flag. */
+	xfs_sysctl_val_t fstrm_timer;	/* Filestream dir-AG assoc'n timeout. */
 } xfs_param_t;
 
 /*
···
 	XFS_INHERIT_NOSYM = 19,
 	XFS_ROTORSTEP = 20,
 	XFS_INHERIT_NODFRG = 21,
+	XFS_FILESTREAM_TIMER = 22,
 };
 
 extern xfs_param_t xfs_params;
+15
fs/xfs/linux-2.6/xfs_vfs.h
···
 #define SYNC_REFCACHE		0x0040  /* prune some of the nfs ref cache */
 #define SYNC_REMOUNT		0x0080  /* remount readonly, no dummy LRs */
 #define SYNC_IOWAIT		0x0100  /* wait for all I/O to complete */
+#define SYNC_SUPER		0x0200  /* flush superblock to disk */
+
+/*
+ * When remounting a filesystem read-only or freezing the filesystem,
+ * we have two phases to execute. The first phase is syncing the data
+ * before we quiesce the filesystem, and the second is flushing all the
+ * inodes out after we've waited for all the transactions created by
+ * the first phase to complete. The second phase uses SYNC_INODE_QUIESCE
+ * to ensure that the inodes are written to their location on disk
+ * rather than just existing in transactions in the log. This means
+ * after a quiesce there is no log replay required to write the inodes
+ * to disk (this is the main difference between a sync and a quiesce).
+ */
+#define SYNC_DATA_QUIESCE	(SYNC_DELWRI|SYNC_FSDATA|SYNC_WAIT|SYNC_IOWAIT)
+#define SYNC_INODE_QUIESCE	(SYNC_REMOUNT|SYNC_ATTR|SYNC_WAIT)
 
 #define SHUTDOWN_META_IO_ERROR	0x0001	/* write attempt to metadata failed */
 #define SHUTDOWN_LOG_IO_ERROR	0x0002	/* write attempt to the log failed */
-5
fs/xfs/linux-2.6/xfs_vnode.h
···
 	VCHANGE_FLAGS_IOEXCL_COUNT	= 4
 } bhv_vchange_t;
 
-typedef enum { L_FALSE, L_TRUE } lastclose_t;
-
 typedef int	(*vop_open_t)(bhv_desc_t *, struct cred *);
-typedef int	(*vop_close_t)(bhv_desc_t *, int, lastclose_t, struct cred *);
 typedef ssize_t (*vop_read_t)(bhv_desc_t *, struct kiocb *,
 				const struct iovec *, unsigned int,
 				loff_t *, int, struct cred *);
···
 typedef struct bhv_vnodeops {
 	bhv_position_t	vn_position;	/* position within behavior chain */
 	vop_open_t		vop_open;
-	vop_close_t		vop_close;
 	vop_read_t		vop_read;
 	vop_write_t		vop_write;
 	vop_splice_read_t	vop_splice_read;
···
 #define VNHEAD(vp)	((vp)->v_bh.bh_first)
 #define VOP(op, vp)	(*((bhv_vnodeops_t *)VNHEAD(vp)->bd_ops)->op)
 #define bhv_vop_open(vp, cr)		VOP(vop_open, vp)(VNHEAD(vp),cr)
-#define bhv_vop_close(vp, f,last,cr)	VOP(vop_close, vp)(VNHEAD(vp),f,last,cr)
 #define bhv_vop_read(vp,file,iov,segs,offset,ioflags,cr)	\
 		VOP(vop_read, vp)(VNHEAD(vp),file,iov,segs,offset,ioflags,cr)
 #define bhv_vop_write(vp,file,iov,segs,offset,ioflags,cr)	\
+4 -5
fs/xfs/quota/xfs_qm.c
···
 
 kmem_zone_t	*qm_dqzone;
 kmem_zone_t	*qm_dqtrxzone;
-static kmem_shaker_t	xfs_qm_shaker;
+static struct shrinker	*xfs_qm_shaker;
 
 static cred_t	xfs_zerocr;
-static xfs_inode_t	xfs_zeroino;
 
 STATIC void	xfs_qm_list_init(xfs_dqlist_t *, char *, int);
 STATIC void	xfs_qm_list_destroy(xfs_dqlist_t *);
···
 	} else
 		xqm->qm_dqzone = qm_dqzone;
 
-	xfs_qm_shaker = kmem_shake_register(xfs_qm_shake);
+	xfs_qm_shaker = set_shrinker(DEFAULT_SEEKS, xfs_qm_shake);
 
 	/*
 	 * The t_dqinfo portion of transactions.
···
 
 	ASSERT(xqm != NULL);
 	ASSERT(xqm->qm_nrefs == 0);
-	kmem_shake_deregister(xfs_qm_shaker);
+	remove_shrinker(xfs_qm_shaker);
 	hsize = xqm->qm_dqhashmask + 1;
 	for (i = 0; i < hsize; i++) {
 		xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
···
 		return error;
 	}
 
-	if ((error = xfs_dir_ialloc(&tp, &xfs_zeroino, S_IFREG, 1, 0,
+	if ((error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0,
 				&xfs_zerocr, 0, 1, ip, &committed))) {
 		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
 				 XFS_TRANS_ABORT);
+1
fs/xfs/xfs.h
···
 #define XFS_RW_TRACE 1
 #define XFS_BUF_TRACE 1
 #define XFS_VNODE_TRACE 1
+#define XFS_FILESTREAMS_TRACE 1
 #endif
 
 #include <linux-2.6/xfs_linux.h>
+7 -2
fs/xfs/xfs_ag.h
···
 	__be32		agf_flcount;	/* count of blocks in freelist */
 	__be32		agf_freeblks;	/* total free blocks */
 	__be32		agf_longest;	/* longest free space */
+	__be32		agf_btreeblks;	/* # of blocks held in AGF btrees */
 } xfs_agf_t;
 
 #define	XFS_AGF_MAGICNUM	0x00000001
···
 #define	XFS_AGF_FLCOUNT		0x00000100
 #define	XFS_AGF_FREEBLKS	0x00000200
 #define	XFS_AGF_LONGEST		0x00000400
-#define	XFS_AGF_NUM_BITS	11
+#define	XFS_AGF_BTREEBLKS	0x00000800
+#define	XFS_AGF_NUM_BITS	12
 #define	XFS_AGF_ALL_BITS	((1 << XFS_AGF_NUM_BITS) - 1)
 
 /* disk block (xfs_daddr_t) in the AG */
···
 	__uint32_t	pagf_flcount;	/* count of blocks in freelist */
 	xfs_extlen_t	pagf_freeblks;	/* total free blocks */
 	xfs_extlen_t	pagf_longest;	/* longest free space */
+	__uint32_t	pagf_btreeblks;	/* # of blocks held in AGF btrees */
 	xfs_agino_t	pagi_freecount;	/* number of free inodes */
+	xfs_agino_t	pagi_count;	/* number of allocated inodes */
+	int		pagb_count;	/* pagb slots in use */
 #ifdef __KERNEL__
 	lock_t		pagb_lock;	/* lock for pagb_list */
 #endif
-	int		pagb_count;	/* pagb slots in use */
 	xfs_perag_busy_t *pagb_list;	/* unstable blocks */
+	atomic_t	pagf_fstrms;	/* # of filestreams active in this AG */
 } xfs_perag_t;
 
 #define	XFS_AG_MAXLEVELS(mp)	((mp)->m_ag_maxlevels)
+51 -50
fs/xfs/xfs_alloc.c
···
 ktrace_t *xfs_alloc_trace_buf;
 
 #define	TRACE_ALLOC(s,a)	\
-	xfs_alloc_trace_alloc(fname, s, a, __LINE__)
+	xfs_alloc_trace_alloc(__FUNCTION__, s, a, __LINE__)
 #define	TRACE_FREE(s,a,b,x,f)	\
-	xfs_alloc_trace_free(fname, s, mp, a, b, x, f, __LINE__)
+	xfs_alloc_trace_free(__FUNCTION__, s, mp, a, b, x, f, __LINE__)
 #define	TRACE_MODAGF(s,a,f)	\
-	xfs_alloc_trace_modagf(fname, s, mp, a, f, __LINE__)
-#define	TRACE_BUSY(fname,s,ag,agb,l,sl,tp)	\
-	xfs_alloc_trace_busy(fname, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSY, __LINE__)
-#define	TRACE_UNBUSY(fname,s,ag,sl,tp)	\
-	xfs_alloc_trace_busy(fname, s, mp, ag, -1, -1, sl, tp, XFS_ALLOC_KTRACE_UNBUSY, __LINE__)
-#define	TRACE_BUSYSEARCH(fname,s,ag,agb,l,sl,tp)	\
-	xfs_alloc_trace_busy(fname, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSYSEARCH, __LINE__)
+	xfs_alloc_trace_modagf(__FUNCTION__, s, mp, a, f, __LINE__)
+#define	TRACE_BUSY(__FUNCTION__,s,ag,agb,l,sl,tp)	\
+	xfs_alloc_trace_busy(__FUNCTION__, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSY, __LINE__)
+#define	TRACE_UNBUSY(__FUNCTION__,s,ag,sl,tp)	\
+	xfs_alloc_trace_busy(__FUNCTION__, s, mp, ag, -1, -1, sl, tp, XFS_ALLOC_KTRACE_UNBUSY, __LINE__)
+#define	TRACE_BUSYSEARCH(__FUNCTION__,s,ag,agb,l,sl,tp)	\
+	xfs_alloc_trace_busy(__FUNCTION__, s, mp, ag, agb, l, sl, tp, XFS_ALLOC_KTRACE_BUSYSEARCH, __LINE__)
 #else
 #define	TRACE_ALLOC(s,a)
 #define	TRACE_FREE(s,a,b,x,f)
···
  */
 STATIC void
 xfs_alloc_trace_alloc(
-	char		*name,		/* function tag string */
+	const char	*name,		/* function tag string */
 	char		*str,		/* additional string */
 	xfs_alloc_arg_t	*args,		/* allocation argument structure */
 	int		line)		/* source line number */
···
  */
 STATIC void
 xfs_alloc_trace_free(
-	char		*name,		/* function tag string */
+	const char	*name,		/* function tag string */
 	char		*str,		/* additional string */
 	xfs_mount_t	*mp,		/* file system mount point */
 	xfs_agnumber_t	agno,		/* allocation group number */
···
  */
 STATIC void
 xfs_alloc_trace_modagf(
-	char		*name,		/* function tag string */
+	const char	*name,		/* function tag string */
 	char		*str,		/* additional string */
 	xfs_mount_t	*mp,		/* file system mount point */
 	xfs_agf_t	*agf,		/* new agf value */
···
 
 STATIC void
 xfs_alloc_trace_busy(
-	char		*name,		/* function tag string */
+	const char	*name,		/* function tag string */
 	char		*str,		/* additional string */
 	xfs_mount_t	*mp,		/* file system mount point */
 	xfs_agnumber_t	agno,		/* allocation group number */
···
 	xfs_alloc_arg_t	*args)	/* argument structure for allocation */
 {
 	int		error=0;
-#ifdef XFS_ALLOC_TRACE
-	static char	fname[] = "xfs_alloc_ag_vextent";
-#endif
 
 	ASSERT(args->minlen > 0);
 	ASSERT(args->maxlen > 0);
···
 	xfs_agblock_t	fbno;	/* start block of found extent */
 	xfs_agblock_t	fend;	/* end block of found extent */
 	xfs_extlen_t	flen;	/* length of found extent */
-#ifdef XFS_ALLOC_TRACE
-	static char	fname[] = "xfs_alloc_ag_vextent_exact";
-#endif
 	int		i;	/* success/failure of operation */
 	xfs_agblock_t	maxend;	/* end of maximal extent */
 	xfs_agblock_t	minend;	/* end of minimal extent */
···
 	xfs_btree_cur_t	*bno_cur_gt;	/* cursor for bno btree, right side */
 	xfs_btree_cur_t	*bno_cur_lt;	/* cursor for bno btree, left side */
 	xfs_btree_cur_t	*cnt_cur;	/* cursor for count btree */
-#ifdef XFS_ALLOC_TRACE
-	static char	fname[] = "xfs_alloc_ag_vextent_near";
-#endif
 	xfs_agblock_t	gtbno;		/* start bno of right side entry */
 	xfs_agblock_t	gtbnoa;		/* aligned ... */
 	xfs_extlen_t	gtdiff;		/* difference to right side entry */
···
 	int		error;		/* error result */
 	xfs_agblock_t	fbno;		/* start of found freespace */
 	xfs_extlen_t	flen;		/* length of found freespace */
-#ifdef XFS_ALLOC_TRACE
-	static char	fname[] = "xfs_alloc_ag_vextent_size";
-#endif
 	int		i;		/* temp status variable */
 	xfs_agblock_t	rbno;		/* returned block number */
 	xfs_extlen_t	rlen;		/* length of returned extent */
···
 	int		error;
 	xfs_agblock_t	fbno;
 	xfs_extlen_t	flen;
-#ifdef XFS_ALLOC_TRACE
-	static char	fname[] = "xfs_alloc_ag_vextent_small";
-#endif
 	int		i;
 
 	if ((error = xfs_alloc_decrement(ccur, 0, &i)))
···
 	else if (args->minlen == 1 && args->alignment == 1 && !args->isfl &&
 		 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
 		  > args->minleft)) {
-		if ((error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno)))
+		error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
+		if (error)
 			goto error0;
 		if (fbno != NULLAGBLOCK) {
 			if (args->userdata) {
···
 	xfs_btree_cur_t	*bno_cur;	/* cursor for by-block btree */
 	xfs_btree_cur_t	*cnt_cur;	/* cursor for by-size btree */
 	int		error;		/* error return value */
-#ifdef XFS_ALLOC_TRACE
-	static char	fname[] = "xfs_free_ag_extent";
-#endif
 	xfs_agblock_t	gtbno;		/* start of right neighbor block */
 	xfs_extlen_t	gtlen;		/* length of right neighbor block */
 	int		haveleft;	/* have a left neighbor block */
···
 	while (be32_to_cpu(agf->agf_flcount) > need) {
 		xfs_buf_t	*bp;
 
-		if ((error = xfs_alloc_get_freelist(tp, agbp, &bno)))
+		error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
+		if (error)
 			return error;
 		if ((error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1, 1)))
 			return error;
···
 		 * Put each allocated block on the list.
 		 */
 		for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
-			if ((error = xfs_alloc_put_freelist(tp, agbp, agflbp,
-					bno)))
+			error = xfs_alloc_put_freelist(tp, agbp,
+							agflbp, bno, 0);
+			if (error)
 				return error;
 		}
 	}
···
 xfs_alloc_get_freelist(
 	xfs_trans_t	*tp,	/* transaction pointer */
 	xfs_buf_t	*agbp,	/* buffer containing the agf structure */
-	xfs_agblock_t	*bnop)	/* block address retrieved from freelist */
+	xfs_agblock_t	*bnop,	/* block address retrieved from freelist */
+	int		btreeblk) /* destination is a AGF btree */
 {
 	xfs_agf_t	*agf;	/* a.g. freespace structure */
 	xfs_agfl_t	*agfl;	/* a.g. freelist structure */
 	xfs_buf_t	*agflbp;/* buffer for a.g. freelist structure */
 	xfs_agblock_t	bno;	/* block number returned */
 	int		error;
-#ifdef XFS_ALLOC_TRACE
-	static char	fname[] = "xfs_alloc_get_freelist";
-#endif
+	int		logflags;
 	xfs_mount_t	*mp;	/* mount structure */
 	xfs_perag_t	*pag;	/* per allocation group data */
···
 	be32_add(&agf->agf_flcount, -1);
 	xfs_trans_agflist_delta(tp, -1);
 	pag->pagf_flcount--;
-	TRACE_MODAGF(NULL, agf, XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT);
-	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT);
+
+	logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
+	if (btreeblk) {
+		be32_add(&agf->agf_btreeblks, 1);
+		pag->pagf_btreeblks++;
+		logflags |= XFS_AGF_BTREEBLKS;
+	}
+
+	TRACE_MODAGF(NULL, agf, logflags);
+	xfs_alloc_log_agf(tp, agbp, logflags);
 	*bnop = bno;
 
 	/*
···
 		offsetof(xfs_agf_t, agf_flcount),
 		offsetof(xfs_agf_t, agf_freeblks),
 		offsetof(xfs_agf_t, agf_longest),
+		offsetof(xfs_agf_t, agf_btreeblks),
 		sizeof(xfs_agf_t)
 	};
 
···
 	xfs_trans_t		*tp,	/* transaction pointer */
 	xfs_buf_t		*agbp,	/* buffer for a.g. freelist header */
 	xfs_buf_t		*agflbp,/* buffer for a.g. free block array */
-	xfs_agblock_t		bno)	/* block being freed */
+	xfs_agblock_t		bno,	/* block being freed */
+	int			btreeblk) /* block came from a AGF btree */
 {
 	xfs_agf_t		*agf;	/* a.g. freespace structure */
 	xfs_agfl_t		*agfl;	/* a.g. free block array */
 	__be32			*blockp;/* pointer to array entry */
 	int			error;
-#ifdef XFS_ALLOC_TRACE
-	static char		fname[] = "xfs_alloc_put_freelist";
-#endif
+	int			logflags;
 	xfs_mount_t		*mp;	/* mount structure */
 	xfs_perag_t		*pag;	/* per allocation group data */
···
 	be32_add(&agf->agf_flcount, 1);
 	xfs_trans_agflist_delta(tp, 1);
 	pag->pagf_flcount++;
+
+	logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
+	if (btreeblk) {
+		be32_add(&agf->agf_btreeblks, -1);
+		pag->pagf_btreeblks--;
+		logflags |= XFS_AGF_BTREEBLKS;
+	}
+
+	TRACE_MODAGF(NULL, agf, logflags);
+	xfs_alloc_log_agf(tp, agbp, logflags);
+
 	ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
 	blockp = &agfl->agfl_bno[be32_to_cpu(agf->agf_fllast)];
 	*blockp = cpu_to_be32(bno);
-	TRACE_MODAGF(NULL, agf, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
-	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLLAST | XFS_AGF_FLCOUNT);
+	TRACE_MODAGF(NULL, agf, logflags);
+	xfs_alloc_log_agf(tp, agbp, logflags);
 	xfs_trans_log_buf(tp, agflbp,
 		(int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl),
 		(int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl +
···
 	pag = &mp->m_perag[agno];
 	if (!pag->pagf_init) {
 		pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
+		pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
 		pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
 		pag->pagf_longest = be32_to_cpu(agf->agf_longest);
 		pag->pagf_levels[XFS_BTNUM_BNOi] =
···
 	xfs_agblock_t	agsize;	/* allocation group size */
 	int		error;
 	int		flags;	/* XFS_ALLOC_FLAG_... locking flags */
-#ifdef XFS_ALLOC_TRACE
-	static char	fname[] = "xfs_alloc_vextent";
-#endif
 	xfs_extlen_t	minleft;/* minimum left value, temp copy */
 	xfs_mount_t	*mp;	/* mount structure pointer */
 	xfs_agnumber_t	sagno;	/* starting allocation group number */
+4 -2
fs/xfs/xfs_alloc.h
···
 xfs_alloc_get_freelist(
 	struct xfs_trans *tp,	/* transaction pointer */
 	struct xfs_buf	*agbp,	/* buffer containing the agf structure */
-	xfs_agblock_t	*bnop);	/* block address retrieved from freelist */
+	xfs_agblock_t	*bnop,	/* block address retrieved from freelist */
+	int		btreeblk); /* destination is a AGF btree */
 
 /*
  * Log the given fields from the agf structure.
···
 	struct xfs_trans *tp,	/* transaction pointer */
 	struct xfs_buf	*agbp,	/* buffer for a.g. freelist header */
 	struct xfs_buf	*agflbp,/* buffer for a.g. free block array */
-	xfs_agblock_t	bno);	/* block being freed */
+	xfs_agblock_t	bno,	/* block being freed */
+	int		btreeblk); /* owner was a AGF btree */
 
 /*
  * Read in the allocation group header (free/alloc section).
+12 -8
fs/xfs/xfs_alloc_btree.c
···
 	/*
 	 * Put this buffer/block on the ag's freelist.
 	 */
-	if ((error = xfs_alloc_put_freelist(cur->bc_tp,
-			cur->bc_private.a.agbp, NULL, bno)))
+	error = xfs_alloc_put_freelist(cur->bc_tp,
+			cur->bc_private.a.agbp, NULL, bno, 1);
+	if (error)
 		return error;
 	/*
 	 * Since blocks move to the free list without the
···
 	/*
 	 * Free the deleting block by putting it on the freelist.
 	 */
-	if ((error = xfs_alloc_put_freelist(cur->bc_tp, cur->bc_private.a.agbp,
-			NULL, rbno)))
+	error = xfs_alloc_put_freelist(cur->bc_tp,
+			cur->bc_private.a.agbp, NULL, rbno, 1);
+	if (error)
 		return error;
 	/*
 	 * Since blocks move to the free list without the coordination
···
 	/*
 	 * Get a buffer from the freelist blocks, for the new root.
 	 */
-	if ((error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
-			&nbno)))
+	error = xfs_alloc_get_freelist(cur->bc_tp,
+			cur->bc_private.a.agbp, &nbno, 1);
+	if (error)
 		return error;
 	/*
 	 * None available, we fail.
···
 	 * Allocate the new block from the freelist.
	 * If we can't do it, we're toast. Give up.
 	 */
-	if ((error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
-			&rbno)))
+	error = xfs_alloc_get_freelist(cur->bc_tp,
+			cur->bc_private.a.agbp, &rbno, 1);
+	if (error)
 		return error;
 	if (rbno == NULLAGBLOCK) {
 		*stat = 0;
+9 -82
fs/xfs/xfs_bit.c
···
 #endif
 
 /*
- * Count of bits set in byte, 0..8.
- */
-static const char xfs_countbit[256] = {
-	0, 1, 1, 2, 1, 2, 2, 3,			/* 00 .. 07 */
-	1, 2, 2, 3, 2, 3, 3, 4,			/* 08 .. 0f */
-	1, 2, 2, 3, 2, 3, 3, 4,			/* 10 .. 17 */
-	2, 3, 3, 4, 3, 4, 4, 5,			/* 18 .. 1f */
-	1, 2, 2, 3, 2, 3, 3, 4,			/* 20 .. 27 */
-	2, 3, 3, 4, 3, 4, 4, 5,			/* 28 .. 2f */
-	2, 3, 3, 4, 3, 4, 4, 5,			/* 30 .. 37 */
-	3, 4, 4, 5, 4, 5, 5, 6,			/* 38 .. 3f */
-	1, 2, 2, 3, 2, 3, 3, 4,			/* 40 .. 47 */
-	2, 3, 3, 4, 3, 4, 4, 5,			/* 48 .. 4f */
-	2, 3, 3, 4, 3, 4, 4, 5,			/* 50 .. 57 */
-	3, 4, 4, 5, 4, 5, 5, 6,			/* 58 .. 5f */
-	2, 3, 3, 4, 3, 4, 4, 5,			/* 60 .. 67 */
-	3, 4, 4, 5, 4, 5, 5, 6,			/* 68 .. 6f */
-	3, 4, 4, 5, 4, 5, 5, 6,			/* 70 .. 77 */
-	4, 5, 5, 6, 5, 6, 6, 7,			/* 78 .. 7f */
-	1, 2, 2, 3, 2, 3, 3, 4,			/* 80 .. 87 */
-	2, 3, 3, 4, 3, 4, 4, 5,			/* 88 .. 8f */
-	2, 3, 3, 4, 3, 4, 4, 5,			/* 90 .. 97 */
-	3, 4, 4, 5, 4, 5, 5, 6,			/* 98 .. 9f */
-	2, 3, 3, 4, 3, 4, 4, 5,			/* a0 .. a7 */
-	3, 4, 4, 5, 4, 5, 5, 6,			/* a8 .. af */
-	3, 4, 4, 5, 4, 5, 5, 6,			/* b0 .. b7 */
-	4, 5, 5, 6, 5, 6, 6, 7,			/* b8 .. bf */
-	2, 3, 3, 4, 3, 4, 4, 5,			/* c0 .. c7 */
-	3, 4, 4, 5, 4, 5, 5, 6,			/* c8 .. cf */
-	3, 4, 4, 5, 4, 5, 5, 6,			/* d0 .. d7 */
-	4, 5, 5, 6, 5, 6, 6, 7,			/* d8 .. df */
-	3, 4, 4, 5, 4, 5, 5, 6,			/* e0 .. e7 */
-	4, 5, 5, 6, 5, 6, 6, 7,			/* e8 .. ef */
-	4, 5, 5, 6, 5, 6, 6, 7,			/* f0 .. f7 */
-	5, 6, 6, 7, 6, 7, 7, 8,			/* f8 .. ff */
-};
-
-/*
  * xfs_highbit32: get high bit set out of 32-bit argument, -1 if none set.
  */
 inline int
···
 
 
 /*
- * Count the number of bits set in the bitmap starting with bit
- * start_bit.  Size is the size of the bitmap in words.
- *
- * Do the counting by mapping a byte value to the number of set
- * bits for that value using the xfs_countbit array, i.e.
- * xfs_countbit[0] == 0, xfs_countbit[1] == 1, xfs_countbit[2] == 1,
- * xfs_countbit[3] == 2, etc.
+ * Return whether bitmap is empty.
+ * Size is number of words in the bitmap, which is padded to word boundary
+ * Returns 1 for empty, 0 for non-empty.
  */
 int
-xfs_count_bits(uint *map, uint size, uint start_bit)
+xfs_bitmap_empty(uint *map, uint size)
 {
-	register int	bits;
-	register unsigned char	*bytep;
-	register unsigned char	*end_map;
-	int		byte_bit;
+	uint i;
+	uint ret = 0;
 
-	bits = 0;
-	end_map = (char*)(map + size);
-	bytep = (char*)(map + (start_bit & ~0x7));
-	byte_bit = start_bit & 0x7;
-
-	/*
-	 * If the caller fell off the end of the map, return 0.
-	 */
-	if (bytep >= end_map) {
-		return (0);
+	for (i = 0; i < size; i++) {
+		ret |= map[i];
 	}
 
-	/*
-	 * If start_bit is not byte aligned, then process the
-	 * first byte separately.
-	 */
-	if (byte_bit != 0) {
-		/*
-		 * Shift off the bits we don't want to look at,
-		 * before indexing into xfs_countbit.
-		 */
-		bits += xfs_countbit[(*bytep >> byte_bit)];
-		bytep++;
-	}
-
-	/*
-	 * Count the bits in each byte until the end of the bitmap.
-	 */
-	while (bytep < end_map) {
-		bits += xfs_countbit[*bytep];
-		bytep++;
-	}
-
-	return (bits);
+	return (ret == 0);
 }
 
 /*
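
With the full popcount evidently no longer needed anywhere, the 256-entry table could go; the replacement simply ORs the words together, which is all an emptiness test requires. Where old code would have tested xfs_count_bits(map, size, 0) == 0, a caller now writes:

    if (xfs_bitmap_empty(map, size)) {
            /* no bits set anywhere in the map */
    }
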
+2 -2
fs/xfs/xfs_bit.h
···
 /* Get high bit set out of 64-bit argument, -1 if none set */
 extern int xfs_highbit64(__uint64_t);
 
-/* Count set bits in map starting with start_bit */
-extern int xfs_count_bits(uint *map, uint size, uint start_bit);
+/* Return whether bitmap is empty (1 == empty) */
+extern int xfs_bitmap_empty(uint *map, uint size);
 
 /* Count continuous one bits in map starting with start_bit */
 extern int xfs_contig_bits(uint *map, uint size, uint start_bit);
+188 -181
fs/xfs/xfs_bmap.c
···
 #include "xfs_quota.h"
 #include "xfs_trans_space.h"
 #include "xfs_buf_item.h"
+#include "xfs_filestream.h"
 
 
 #ifdef DEBUG
···
 STATIC void
 xfs_bmap_trace_addentry(
 	int		opcode,		/* operation */
-	char		*fname,		/* function name */
+	const char	*fname,		/* function name */
 	char		*desc,		/* operation description */
 	xfs_inode_t	*ip,		/* incore inode pointer */
 	xfs_extnum_t	idx,		/* index of entry(ies) */
···
  */
 STATIC void
 xfs_bmap_trace_delete(
-	char		*fname,		/* function name */
+	const char	*fname,		/* function name */
 	char		*desc,		/* operation description */
 	xfs_inode_t	*ip,		/* incore inode pointer */
 	xfs_extnum_t	idx,		/* index of entry(entries) deleted */
···
  */
 STATIC void
 xfs_bmap_trace_insert(
-	char		*fname,		/* function name */
+	const char	*fname,		/* function name */
 	char		*desc,		/* operation description */
 	xfs_inode_t	*ip,		/* incore inode pointer */
 	xfs_extnum_t	idx,		/* index of entry(entries) inserted */
···
  */
 STATIC void
 xfs_bmap_trace_post_update(
-	char		*fname,		/* function name */
+	const char	*fname,		/* function name */
 	char		*desc,		/* operation description */
 	xfs_inode_t	*ip,		/* incore inode pointer */
 	xfs_extnum_t	idx,		/* index of entry updated */
···
  */
 STATIC void
 xfs_bmap_trace_pre_update(
-	char		*fname,		/* function name */
+	const char	*fname,		/* function name */
 	char		*desc,		/* operation description */
 	xfs_inode_t	*ip,		/* incore inode pointer */
 	xfs_extnum_t	idx,		/* index of entry to be updated */
 	int		whichfork);	/* data or attr fork */
 
+#define	XFS_BMAP_TRACE_DELETE(d,ip,i,c,w)	\
+	xfs_bmap_trace_delete(__FUNCTION__,d,ip,i,c,w)
+#define	XFS_BMAP_TRACE_INSERT(d,ip,i,c,r1,r2,w)	\
+	xfs_bmap_trace_insert(__FUNCTION__,d,ip,i,c,r1,r2,w)
+#define	XFS_BMAP_TRACE_POST_UPDATE(d,ip,i,w)	\
+	xfs_bmap_trace_post_update(__FUNCTION__,d,ip,i,w)
+#define	XFS_BMAP_TRACE_PRE_UPDATE(d,ip,i,w)	\
+	xfs_bmap_trace_pre_update(__FUNCTION__,d,ip,i,w)
 #else
-#define	xfs_bmap_trace_delete(f,d,ip,i,c,w)
-#define	xfs_bmap_trace_insert(f,d,ip,i,c,r1,r2,w)
-#define	xfs_bmap_trace_post_update(f,d,ip,i,w)
-#define	xfs_bmap_trace_pre_update(f,d,ip,i,w)
+#define	XFS_BMAP_TRACE_DELETE(d,ip,i,c,w)
+#define	XFS_BMAP_TRACE_INSERT(d,ip,i,c,r1,r2,w)
+#define	XFS_BMAP_TRACE_POST_UPDATE(d,ip,i,w)
+#define	XFS_BMAP_TRACE_PRE_UPDATE(d,ip,i,w)
 #endif	/* XFS_BMAP_TRACE */
 
 /*
···
 	xfs_filblks_t		da_new; /* new count del alloc blocks used */
 	xfs_filblks_t		da_old; /* old count del alloc blocks used */
 	int			error;	/* error return value */
-#ifdef XFS_BMAP_TRACE
-	static char		fname[] = "xfs_bmap_add_extent";
-#endif
 	xfs_ifork_t		*ifp;	/* inode fork ptr */
 	int			logflags; /* returned value */
 	xfs_extnum_t		nextents; /* number of extents in file now */
···
 	 * already extents in the list.
 	 */
 	if (nextents == 0) {
-		xfs_bmap_trace_insert(fname, "insert empty", ip, 0, 1, new,
-			NULL, whichfork);
+		XFS_BMAP_TRACE_INSERT("insert empty", ip, 0, 1, new, NULL,
+			whichfork);
 		xfs_iext_insert(ifp, 0, 1, new);
 		ASSERT(cur == NULL);
 		ifp->if_lastex = 0;
···
 	int			diff;	/* temp value */
 	xfs_bmbt_rec_t		*ep;	/* extent entry for idx */
 	int			error;	/* error return value */
-#ifdef XFS_BMAP_TRACE
-	static char		fname[] = "xfs_bmap_add_extent_delay_real";
-#endif
 	int			i;	/* temp state */
 	xfs_ifork_t		*ifp;	/* inode fork pointer */
 	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
···
 		 * Filling in all of a previously delayed allocation extent.
 		 * The left and right neighbors are both contiguous with new.
 		 */
-		xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1,
+		XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC|RC", ip, idx - 1,
 			XFS_DATA_FORK);
 		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
 			LEFT.br_blockcount + PREV.br_blockcount +
 			RIGHT.br_blockcount);
-		xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1,
+		XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC|RC", ip, idx - 1,
 			XFS_DATA_FORK);
-		xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_DELETE("LF|RF|LC|RC", ip, idx, 2, XFS_DATA_FORK);
 		xfs_iext_remove(ifp, idx, 2);
 		ip->i_df.if_lastex = idx - 1;
 		ip->i_d.di_nextents--;
···
 		 * Filling in all of a previously delayed allocation extent.
 		 * The left neighbor is contiguous, the right is not.
 		 */
-		xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1,
+		XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC", ip, idx - 1,
 			XFS_DATA_FORK);
 		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
 			LEFT.br_blockcount + PREV.br_blockcount);
-		xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1,
+		XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC", ip, idx - 1,
 			XFS_DATA_FORK);
 		ip->i_df.if_lastex = idx - 1;
-		xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_DELETE("LF|RF|LC", ip, idx, 1, XFS_DATA_FORK);
 		xfs_iext_remove(ifp, idx, 1);
 		if (cur == NULL)
 			rval = XFS_ILOG_DEXT;
···
 		 * Filling in all of a previously delayed allocation extent.
 		 * The right neighbor is contiguous, the left is not.
 		 */
-		xfs_bmap_trace_pre_update(fname, "LF|RF|RC", ip, idx,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|RC", ip, idx, XFS_DATA_FORK);
 		xfs_bmbt_set_startblock(ep, new->br_startblock);
 		xfs_bmbt_set_blockcount(ep,
 			PREV.br_blockcount + RIGHT.br_blockcount);
-		xfs_bmap_trace_post_update(fname, "LF|RF|RC", ip, idx,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_POST_UPDATE("LF|RF|RC", ip, idx, XFS_DATA_FORK);
 		ip->i_df.if_lastex = idx;
-		xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_DELETE("LF|RF|RC", ip, idx + 1, 1, XFS_DATA_FORK);
 		xfs_iext_remove(ifp, idx + 1, 1);
 		if (cur == NULL)
 			rval = XFS_ILOG_DEXT;
···
 		 * Neither the left nor right neighbors are contiguous with
 		 * the new one.
 		 */
-		xfs_bmap_trace_pre_update(fname, "LF|RF", ip, idx,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_PRE_UPDATE("LF|RF", ip, idx, XFS_DATA_FORK);
 		xfs_bmbt_set_startblock(ep, new->br_startblock);
-		xfs_bmap_trace_post_update(fname, "LF|RF", ip, idx,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_POST_UPDATE("LF|RF", ip, idx, XFS_DATA_FORK);
 		ip->i_df.if_lastex = idx;
 		ip->i_d.di_nextents++;
 		if (cur == NULL)
···
 		 * Filling in the first part of a previous delayed allocation.
 		 * The left neighbor is contiguous.
 		 */
-		xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx - 1, XFS_DATA_FORK);
 		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
 			LEFT.br_blockcount + new->br_blockcount);
 		xfs_bmbt_set_startoff(ep,
 			PREV.br_startoff + new->br_blockcount);
-		xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx - 1,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx - 1, XFS_DATA_FORK);
 		temp = PREV.br_blockcount - new->br_blockcount;
-		xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx, XFS_DATA_FORK);
 		xfs_bmbt_set_blockcount(ep, temp);
 		ip->i_df.if_lastex = idx - 1;
 		if (cur == NULL)
···
 		temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
 			STARTBLOCKVAL(PREV.br_startblock));
 		xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
-		xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx, XFS_DATA_FORK);
 		*dnew = temp;
 		/* DELTA: The boundary between two in-core extents moved. */
 		temp = LEFT.br_startoff;
···
 		 * Filling in the first part of a previous delayed allocation.
 		 * The left neighbor is not contiguous.
 		 */
-		xfs_bmap_trace_pre_update(fname, "LF", ip, idx, XFS_DATA_FORK);
+		XFS_BMAP_TRACE_PRE_UPDATE("LF", ip, idx, XFS_DATA_FORK);
 		xfs_bmbt_set_startoff(ep, new_endoff);
 		temp = PREV.br_blockcount - new->br_blockcount;
 		xfs_bmbt_set_blockcount(ep, temp);
-		xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL,
+		XFS_BMAP_TRACE_INSERT("LF", ip, idx, 1, new, NULL,
 			XFS_DATA_FORK);
 		xfs_iext_insert(ifp, idx, 1, new);
 		ip->i_df.if_lastex = idx;
···
 			(cur ? cur->bc_private.b.allocated : 0));
 		ep = xfs_iext_get_ext(ifp, idx + 1);
 		xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
-		xfs_bmap_trace_post_update(fname, "LF", ip, idx + 1,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_POST_UPDATE("LF", ip, idx + 1, XFS_DATA_FORK);
 		*dnew = temp;
 		/* DELTA: One in-core extent is split in two. */
 		temp = PREV.br_startoff;
···
 		 * The right neighbor is contiguous with the new allocation.
 		 */
 		temp = PREV.br_blockcount - new->br_blockcount;
-		xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx,
-			XFS_DATA_FORK);
-		xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx + 1,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx, XFS_DATA_FORK);
+		XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx + 1, XFS_DATA_FORK);
 		xfs_bmbt_set_blockcount(ep, temp);
 		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1),
 			new->br_startoff, new->br_startblock,
 			new->br_blockcount + RIGHT.br_blockcount,
 			RIGHT.br_state);
-		xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx + 1, XFS_DATA_FORK);
 		ip->i_df.if_lastex = idx + 1;
 		if (cur == NULL)
 			rval = XFS_ILOG_DEXT;
···
 		temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
 			STARTBLOCKVAL(PREV.br_startblock));
 		xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
-		xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx, XFS_DATA_FORK);
 		*dnew = temp;
 		/* DELTA: The boundary between two in-core extents moved. */
 		temp = PREV.br_startoff;
···
 		 * The right neighbor is not contiguous.
 		 */
 		temp = PREV.br_blockcount - new->br_blockcount;
-		xfs_bmap_trace_pre_update(fname, "RF", ip, idx, XFS_DATA_FORK);
+		XFS_BMAP_TRACE_PRE_UPDATE("RF", ip, idx, XFS_DATA_FORK);
 		xfs_bmbt_set_blockcount(ep, temp);
-		xfs_bmap_trace_insert(fname, "RF", ip, idx + 1, 1,
-			new, NULL, XFS_DATA_FORK);
+		XFS_BMAP_TRACE_INSERT("RF", ip, idx + 1, 1, new, NULL,
+			XFS_DATA_FORK);
 		xfs_iext_insert(ifp, idx + 1, 1, new);
 		ip->i_df.if_lastex = idx + 1;
 		ip->i_d.di_nextents++;
···
 			(cur ? cur->bc_private.b.allocated : 0));
 		ep = xfs_iext_get_ext(ifp, idx);
 		xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
-		xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK);
+		XFS_BMAP_TRACE_POST_UPDATE("RF", ip, idx, XFS_DATA_FORK);
 		*dnew = temp;
 		/* DELTA: One in-core extent is split in two. */
 		temp = PREV.br_startoff;
···
 		 * This case is avoided almost all the time.
 		 */
 		temp = new->br_startoff - PREV.br_startoff;
-		xfs_bmap_trace_pre_update(fname, "0", ip, idx, XFS_DATA_FORK);
+		XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, XFS_DATA_FORK);
 		xfs_bmbt_set_blockcount(ep, temp);
 		r[0] = *new;
 		r[1].br_state = PREV.br_state;
 		r[1].br_startoff = new_endoff;
 		temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
 		r[1].br_blockcount = temp2;
-		xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1],
+		XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 2, &r[0], &r[1],
 			XFS_DATA_FORK);
 		xfs_iext_insert(ifp, idx + 1, 2, &r[0]);
 		ip->i_df.if_lastex = idx + 1;
···
 		}
 		ep = xfs_iext_get_ext(ifp, idx);
 		xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
-		xfs_bmap_trace_post_update(fname, "0", ip, idx, XFS_DATA_FORK);
-		xfs_bmap_trace_pre_update(fname, "0", ip, idx + 2,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, XFS_DATA_FORK);
+		XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx + 2, XFS_DATA_FORK);
 		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx + 2),
 			NULLSTARTBLOCK((int)temp2));
-		xfs_bmap_trace_post_update(fname, "0", ip, idx + 2,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx + 2, XFS_DATA_FORK);
 		*dnew = temp + temp2;
 		/* DELTA: One in-core extent is split in three. */
 		temp = PREV.br_startoff;
···
 	xfs_btree_cur_t		*cur;	/* btree cursor */
 	xfs_bmbt_rec_t		*ep;	/* extent entry for idx */
 	int			error;	/* error return value */
-#ifdef XFS_BMAP_TRACE
-	static char		fname[] = "xfs_bmap_add_extent_unwritten_real";
-#endif
 	int			i;	/* temp state */
 	xfs_ifork_t		*ifp;	/* inode fork pointer */
 	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
···
 		 * Setting all of a previous oldext extent to newext.
 		 * The left and right neighbors are both contiguous with new.
 		 */
-		xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1,
+		XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC|RC", ip, idx - 1,
 			XFS_DATA_FORK);
 		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
 			LEFT.br_blockcount + PREV.br_blockcount +
 			RIGHT.br_blockcount);
-		xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1,
+		XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC|RC", ip, idx - 1,
 			XFS_DATA_FORK);
-		xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2,
-			XFS_DATA_FORK);
+		XFS_BMAP_TRACE_DELETE("LF|RF|LC|RC", ip, idx, 2, XFS_DATA_FORK);
 		xfs_iext_remove(ifp, idx, 2);
 		ip->i_df.if_lastex = idx - 1;
 		ip->i_d.di_nextents -= 2;
···
 		 * Setting all of a previous oldext extent to newext.
 		 * The left neighbor is contiguous, the right is not.
1424 1443 */ 1425 - xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1, 1444 + XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|LC", ip, idx - 1, 1426 1445 XFS_DATA_FORK); 1427 1446 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 1428 1447 LEFT.br_blockcount + PREV.br_blockcount); 1429 - xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1, 1448 + XFS_BMAP_TRACE_POST_UPDATE("LF|RF|LC", ip, idx - 1, 1430 1449 XFS_DATA_FORK); 1431 1450 ip->i_df.if_lastex = idx - 1; 1432 - xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1, 1433 - XFS_DATA_FORK); 1451 + XFS_BMAP_TRACE_DELETE("LF|RF|LC", ip, idx, 1, XFS_DATA_FORK); 1434 1452 xfs_iext_remove(ifp, idx, 1); 1435 1453 ip->i_d.di_nextents--; 1436 1454 if (cur == NULL) ··· 1464 1484 * Setting all of a previous oldext extent to newext. 1465 1485 * The right neighbor is contiguous, the left is not. 1466 1486 */ 1467 - xfs_bmap_trace_pre_update(fname, "LF|RF|RC", ip, idx, 1487 + XFS_BMAP_TRACE_PRE_UPDATE("LF|RF|RC", ip, idx, 1468 1488 XFS_DATA_FORK); 1469 1489 xfs_bmbt_set_blockcount(ep, 1470 1490 PREV.br_blockcount + RIGHT.br_blockcount); 1471 1491 xfs_bmbt_set_state(ep, newext); 1472 - xfs_bmap_trace_post_update(fname, "LF|RF|RC", ip, idx, 1492 + XFS_BMAP_TRACE_POST_UPDATE("LF|RF|RC", ip, idx, 1473 1493 XFS_DATA_FORK); 1474 1494 ip->i_df.if_lastex = idx; 1475 - xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1, 1476 - XFS_DATA_FORK); 1495 + XFS_BMAP_TRACE_DELETE("LF|RF|RC", ip, idx + 1, 1, XFS_DATA_FORK); 1477 1496 xfs_iext_remove(ifp, idx + 1, 1); 1478 1497 ip->i_d.di_nextents--; 1479 1498 if (cur == NULL) ··· 1508 1529 * Neither the left nor right neighbors are contiguous with 1509 1530 * the new one. 1510 1531 */ 1511 - xfs_bmap_trace_pre_update(fname, "LF|RF", ip, idx, 1532 + XFS_BMAP_TRACE_PRE_UPDATE("LF|RF", ip, idx, 1512 1533 XFS_DATA_FORK); 1513 1534 xfs_bmbt_set_state(ep, newext); 1514 - xfs_bmap_trace_post_update(fname, "LF|RF", ip, idx, 1535 + XFS_BMAP_TRACE_POST_UPDATE("LF|RF", ip, idx, 1515 1536 XFS_DATA_FORK); 1516 1537 ip->i_df.if_lastex = idx; 1517 1538 if (cur == NULL) ··· 1538 1559 * Setting the first part of a previous oldext extent to newext. 1539 1560 * The left neighbor is contiguous. 1540 1561 */ 1541 - xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1, 1562 + XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx - 1, 1542 1563 XFS_DATA_FORK); 1543 1564 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 1544 1565 LEFT.br_blockcount + new->br_blockcount); 1545 1566 xfs_bmbt_set_startoff(ep, 1546 1567 PREV.br_startoff + new->br_blockcount); 1547 - xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx - 1, 1568 + XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx - 1, 1548 1569 XFS_DATA_FORK); 1549 - xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx, 1570 + XFS_BMAP_TRACE_PRE_UPDATE("LF|LC", ip, idx, 1550 1571 XFS_DATA_FORK); 1551 1572 xfs_bmbt_set_startblock(ep, 1552 1573 new->br_startblock + new->br_blockcount); 1553 1574 xfs_bmbt_set_blockcount(ep, 1554 1575 PREV.br_blockcount - new->br_blockcount); 1555 - xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx, 1576 + XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx, 1556 1577 XFS_DATA_FORK); 1557 1578 ip->i_df.if_lastex = idx - 1; 1558 1579 if (cur == NULL) ··· 1589 1610 * Setting the first part of a previous oldext extent to newext. 1590 1611 * The left neighbor is not contiguous. 
1591 1612 */ 1592 - xfs_bmap_trace_pre_update(fname, "LF", ip, idx, XFS_DATA_FORK); 1613 + XFS_BMAP_TRACE_PRE_UPDATE("LF", ip, idx, XFS_DATA_FORK); 1593 1614 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext); 1594 1615 xfs_bmbt_set_startoff(ep, new_endoff); 1595 1616 xfs_bmbt_set_blockcount(ep, 1596 1617 PREV.br_blockcount - new->br_blockcount); 1597 1618 xfs_bmbt_set_startblock(ep, 1598 1619 new->br_startblock + new->br_blockcount); 1599 - xfs_bmap_trace_post_update(fname, "LF", ip, idx, XFS_DATA_FORK); 1600 - xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL, 1620 + XFS_BMAP_TRACE_POST_UPDATE("LF", ip, idx, XFS_DATA_FORK); 1621 + XFS_BMAP_TRACE_INSERT("LF", ip, idx, 1, new, NULL, 1601 1622 XFS_DATA_FORK); 1602 1623 xfs_iext_insert(ifp, idx, 1, new); 1603 1624 ip->i_df.if_lastex = idx; ··· 1632 1653 * Setting the last part of a previous oldext extent to newext. 1633 1654 * The right neighbor is contiguous with the new allocation. 1634 1655 */ 1635 - xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx, 1656 + XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx, 1636 1657 XFS_DATA_FORK); 1637 - xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx + 1, 1658 + XFS_BMAP_TRACE_PRE_UPDATE("RF|RC", ip, idx + 1, 1638 1659 XFS_DATA_FORK); 1639 1660 xfs_bmbt_set_blockcount(ep, 1640 1661 PREV.br_blockcount - new->br_blockcount); 1641 - xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx, 1662 + XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx, 1642 1663 XFS_DATA_FORK); 1643 1664 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1), 1644 1665 new->br_startoff, new->br_startblock, 1645 1666 new->br_blockcount + RIGHT.br_blockcount, newext); 1646 - xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1, 1667 + XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx + 1, 1647 1668 XFS_DATA_FORK); 1648 1669 ip->i_df.if_lastex = idx + 1; 1649 1670 if (cur == NULL) ··· 1679 1700 * Setting the last part of a previous oldext extent to newext. 1680 1701 * The right neighbor is not contiguous. 1681 1702 */ 1682 - xfs_bmap_trace_pre_update(fname, "RF", ip, idx, XFS_DATA_FORK); 1703 + XFS_BMAP_TRACE_PRE_UPDATE("RF", ip, idx, XFS_DATA_FORK); 1683 1704 xfs_bmbt_set_blockcount(ep, 1684 1705 PREV.br_blockcount - new->br_blockcount); 1685 - xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK); 1686 - xfs_bmap_trace_insert(fname, "RF", ip, idx + 1, 1, 1687 - new, NULL, XFS_DATA_FORK); 1706 + XFS_BMAP_TRACE_POST_UPDATE("RF", ip, idx, XFS_DATA_FORK); 1707 + XFS_BMAP_TRACE_INSERT("RF", ip, idx + 1, 1, new, NULL, 1708 + XFS_DATA_FORK); 1688 1709 xfs_iext_insert(ifp, idx + 1, 1, new); 1689 1710 ip->i_df.if_lastex = idx + 1; 1690 1711 ip->i_d.di_nextents++; ··· 1723 1744 * newext. Contiguity is impossible here. 1724 1745 * One extent becomes three extents. 
1725 1746 */ 1726 - xfs_bmap_trace_pre_update(fname, "0", ip, idx, XFS_DATA_FORK); 1747 + XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, XFS_DATA_FORK); 1727 1748 xfs_bmbt_set_blockcount(ep, 1728 1749 new->br_startoff - PREV.br_startoff); 1729 - xfs_bmap_trace_post_update(fname, "0", ip, idx, XFS_DATA_FORK); 1750 + XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, XFS_DATA_FORK); 1730 1751 r[0] = *new; 1731 1752 r[1].br_startoff = new_endoff; 1732 1753 r[1].br_blockcount = 1733 1754 PREV.br_startoff + PREV.br_blockcount - new_endoff; 1734 1755 r[1].br_startblock = new->br_startblock + new->br_blockcount; 1735 1756 r[1].br_state = oldext; 1736 - xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1], 1757 + XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 2, &r[0], &r[1], 1737 1758 XFS_DATA_FORK); 1738 1759 xfs_iext_insert(ifp, idx + 1, 2, &r[0]); 1739 1760 ip->i_df.if_lastex = idx + 1; ··· 1824 1845 int rsvd) /* OK to allocate reserved blocks */ 1825 1846 { 1826 1847 xfs_bmbt_rec_t *ep; /* extent record for idx */ 1827 - #ifdef XFS_BMAP_TRACE 1828 - static char fname[] = "xfs_bmap_add_extent_hole_delay"; 1829 - #endif 1830 1848 xfs_ifork_t *ifp; /* inode fork pointer */ 1831 1849 xfs_bmbt_irec_t left; /* left neighbor extent entry */ 1832 1850 xfs_filblks_t newlen=0; /* new indirect size */ ··· 1895 1919 */ 1896 1920 temp = left.br_blockcount + new->br_blockcount + 1897 1921 right.br_blockcount; 1898 - xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1, 1922 + XFS_BMAP_TRACE_PRE_UPDATE("LC|RC", ip, idx - 1, 1899 1923 XFS_DATA_FORK); 1900 1924 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp); 1901 1925 oldlen = STARTBLOCKVAL(left.br_startblock) + ··· 1904 1928 newlen = xfs_bmap_worst_indlen(ip, temp); 1905 1929 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1), 1906 1930 NULLSTARTBLOCK((int)newlen)); 1907 - xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1, 1931 + XFS_BMAP_TRACE_POST_UPDATE("LC|RC", ip, idx - 1, 1908 1932 XFS_DATA_FORK); 1909 - xfs_bmap_trace_delete(fname, "LC|RC", ip, idx, 1, 1910 - XFS_DATA_FORK); 1933 + XFS_BMAP_TRACE_DELETE("LC|RC", ip, idx, 1, XFS_DATA_FORK); 1911 1934 xfs_iext_remove(ifp, idx, 1); 1912 1935 ip->i_df.if_lastex = idx - 1; 1913 1936 /* DELTA: Two in-core extents were replaced by one. */ ··· 1921 1946 * Merge the new allocation with the left neighbor. 1922 1947 */ 1923 1948 temp = left.br_blockcount + new->br_blockcount; 1924 - xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1, 1949 + XFS_BMAP_TRACE_PRE_UPDATE("LC", ip, idx - 1, 1925 1950 XFS_DATA_FORK); 1926 1951 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp); 1927 1952 oldlen = STARTBLOCKVAL(left.br_startblock) + ··· 1929 1954 newlen = xfs_bmap_worst_indlen(ip, temp); 1930 1955 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1), 1931 1956 NULLSTARTBLOCK((int)newlen)); 1932 - xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1, 1957 + XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1, 1933 1958 XFS_DATA_FORK); 1934 1959 ip->i_df.if_lastex = idx - 1; 1935 1960 /* DELTA: One in-core extent grew into a hole. */ ··· 1943 1968 * on the right. 1944 1969 * Merge the new allocation with the right neighbor. 
1945 1970 */ 1946 - xfs_bmap_trace_pre_update(fname, "RC", ip, idx, XFS_DATA_FORK); 1971 + XFS_BMAP_TRACE_PRE_UPDATE("RC", ip, idx, XFS_DATA_FORK); 1947 1972 temp = new->br_blockcount + right.br_blockcount; 1948 1973 oldlen = STARTBLOCKVAL(new->br_startblock) + 1949 1974 STARTBLOCKVAL(right.br_startblock); 1950 1975 newlen = xfs_bmap_worst_indlen(ip, temp); 1951 1976 xfs_bmbt_set_allf(ep, new->br_startoff, 1952 1977 NULLSTARTBLOCK((int)newlen), temp, right.br_state); 1953 - xfs_bmap_trace_post_update(fname, "RC", ip, idx, XFS_DATA_FORK); 1978 + XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, XFS_DATA_FORK); 1954 1979 ip->i_df.if_lastex = idx; 1955 1980 /* DELTA: One in-core extent grew into a hole. */ 1956 1981 temp2 = temp; ··· 1964 1989 * Insert a new entry. 1965 1990 */ 1966 1991 oldlen = newlen = 0; 1967 - xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL, 1992 + XFS_BMAP_TRACE_INSERT("0", ip, idx, 1, new, NULL, 1968 1993 XFS_DATA_FORK); 1969 1994 xfs_iext_insert(ifp, idx, 1, new); 1970 1995 ip->i_df.if_lastex = idx; ··· 2014 2039 { 2015 2040 xfs_bmbt_rec_t *ep; /* pointer to extent entry ins. point */ 2016 2041 int error; /* error return value */ 2017 - #ifdef XFS_BMAP_TRACE 2018 - static char fname[] = "xfs_bmap_add_extent_hole_real"; 2019 - #endif 2020 2042 int i; /* temp state */ 2021 2043 xfs_ifork_t *ifp; /* inode fork pointer */ 2022 2044 xfs_bmbt_irec_t left; /* left neighbor extent entry */ ··· 2090 2118 * left and on the right. 2091 2119 * Merge all three into a single extent record. 2092 2120 */ 2093 - xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1, 2121 + XFS_BMAP_TRACE_PRE_UPDATE("LC|RC", ip, idx - 1, 2094 2122 whichfork); 2095 2123 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 2096 2124 left.br_blockcount + new->br_blockcount + 2097 2125 right.br_blockcount); 2098 - xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1, 2126 + XFS_BMAP_TRACE_POST_UPDATE("LC|RC", ip, idx - 1, 2099 2127 whichfork); 2100 - xfs_bmap_trace_delete(fname, "LC|RC", ip, 2101 - idx, 1, whichfork); 2128 + XFS_BMAP_TRACE_DELETE("LC|RC", ip, idx, 1, whichfork); 2102 2129 xfs_iext_remove(ifp, idx, 1); 2103 2130 ifp->if_lastex = idx - 1; 2104 2131 XFS_IFORK_NEXT_SET(ip, whichfork, ··· 2139 2168 * on the left. 2140 2169 * Merge the new allocation with the left neighbor. 2141 2170 */ 2142 - xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1, whichfork); 2171 + XFS_BMAP_TRACE_PRE_UPDATE("LC", ip, idx - 1, whichfork); 2143 2172 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), 2144 2173 left.br_blockcount + new->br_blockcount); 2145 - xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1, whichfork); 2174 + XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1, whichfork); 2146 2175 ifp->if_lastex = idx - 1; 2147 2176 if (cur == NULL) { 2148 2177 rval = XFS_ILOG_FEXT(whichfork); ··· 2173 2202 * on the right. 2174 2203 * Merge the new allocation with the right neighbor. 2175 2204 */ 2176 - xfs_bmap_trace_pre_update(fname, "RC", ip, idx, whichfork); 2205 + XFS_BMAP_TRACE_PRE_UPDATE("RC", ip, idx, whichfork); 2177 2206 xfs_bmbt_set_allf(ep, new->br_startoff, new->br_startblock, 2178 2207 new->br_blockcount + right.br_blockcount, 2179 2208 right.br_state); 2180 - xfs_bmap_trace_post_update(fname, "RC", ip, idx, whichfork); 2209 + XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, whichfork); 2181 2210 ifp->if_lastex = idx; 2182 2211 if (cur == NULL) { 2183 2212 rval = XFS_ILOG_FEXT(whichfork); ··· 2208 2237 * real allocation. 2209 2238 * Insert a new entry. 
2210 2239 */ 2211 - xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL, 2212 - whichfork); 2240 + XFS_BMAP_TRACE_INSERT("0", ip, idx, 1, new, NULL, whichfork); 2213 2241 xfs_iext_insert(ifp, idx, 1, new); 2214 2242 ifp->if_lastex = idx; 2215 2243 XFS_IFORK_NEXT_SET(ip, whichfork, ··· 2575 2605 xfs_extlen_t prod = 0; /* product factor for allocators */ 2576 2606 xfs_extlen_t ralen = 0; /* realtime allocation length */ 2577 2607 xfs_extlen_t align; /* minimum allocation alignment */ 2578 - xfs_rtblock_t rtx; /* realtime extent number */ 2579 2608 xfs_rtblock_t rtb; 2580 2609 2581 2610 mp = ap->ip->i_mount; 2582 - align = ap->ip->i_d.di_extsize ? 2583 - ap->ip->i_d.di_extsize : mp->m_sb.sb_rextsize; 2611 + align = xfs_get_extsz_hint(ap->ip); 2584 2612 prod = align / mp->m_sb.sb_rextsize; 2585 2613 error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp, 2586 2614 align, 1, ap->eof, 0, ··· 2612 2644 * pick an extent that will space things out in the rt area. 2613 2645 */ 2614 2646 if (ap->eof && ap->off == 0) { 2647 + xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */ 2648 + 2615 2649 error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx); 2616 2650 if (error) 2617 2651 return error; ··· 2685 2715 int error; 2686 2716 2687 2717 mp = ap->ip->i_mount; 2688 - align = (ap->userdata && ap->ip->i_d.di_extsize && 2689 - (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)) ? 2690 - ap->ip->i_d.di_extsize : 0; 2718 + align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0; 2691 2719 if (unlikely(align)) { 2692 2720 error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp, 2693 2721 align, 0, ap->eof, 0, ap->conv, ··· 2695 2727 } 2696 2728 nullfb = ap->firstblock == NULLFSBLOCK; 2697 2729 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock); 2698 - if (nullfb) 2699 - ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino); 2700 - else 2730 + if (nullfb) { 2731 + if (ap->userdata && xfs_inode_is_filestream(ap->ip)) { 2732 + ag = xfs_filestream_lookup_ag(ap->ip); 2733 + ag = (ag != NULLAGNUMBER) ? ag : 0; 2734 + ap->rval = XFS_AGB_TO_FSB(mp, ag, 0); 2735 + } else { 2736 + ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino); 2737 + } 2738 + } else 2701 2739 ap->rval = ap->firstblock; 2702 2740 2703 2741 xfs_bmap_adjacent(ap); ··· 2727 2753 args.firstblock = ap->firstblock; 2728 2754 blen = 0; 2729 2755 if (nullfb) { 2730 - args.type = XFS_ALLOCTYPE_START_BNO; 2756 + if (ap->userdata && xfs_inode_is_filestream(ap->ip)) 2757 + args.type = XFS_ALLOCTYPE_NEAR_BNO; 2758 + else 2759 + args.type = XFS_ALLOCTYPE_START_BNO; 2731 2760 args.total = ap->total; 2761 + 2732 2762 /* 2733 - * Find the longest available space. 2734 - * We're going to try for the whole allocation at once. 2763 + * Search for an allocation group with a single extent 2764 + * large enough for the request. 2765 + * 2766 + * If one isn't found, then adjust the minimum allocation 2767 + * size to the largest space found. 2735 2768 */ 2736 2769 startag = ag = XFS_FSB_TO_AGNO(mp, args.fsbno); 2770 + if (startag == NULLAGNUMBER) 2771 + startag = ag = 0; 2737 2772 notinit = 0; 2738 2773 down_read(&mp->m_peraglock); 2739 2774 while (blen < ap->alen) { ··· 2768 2785 blen = longest; 2769 2786 } else 2770 2787 notinit = 1; 2788 + 2789 + if (xfs_inode_is_filestream(ap->ip)) { 2790 + if (blen >= ap->alen) 2791 + break; 2792 + 2793 + if (ap->userdata) { 2794 + /* 2795 + * If startag is an invalid AG, we've 2796 + * come here once before and 2797 + * xfs_filestream_new_ag picked the 2798 + * best currently available. 
2799 + * 2800 + * Don't continue looping, since we 2801 + * could loop forever. 2802 + */ 2803 + if (startag == NULLAGNUMBER) 2804 + break; 2805 + 2806 + error = xfs_filestream_new_ag(ap, &ag); 2807 + if (error) { 2808 + up_read(&mp->m_peraglock); 2809 + return error; 2810 + } 2811 + 2812 + /* loop again to set 'blen'*/ 2813 + startag = NULLAGNUMBER; 2814 + continue; 2815 + } 2816 + } 2771 2817 if (++ag == mp->m_sb.sb_agcount) 2772 2818 ag = 0; 2773 2819 if (ag == startag) ··· 2821 2809 */ 2822 2810 else 2823 2811 args.minlen = ap->alen; 2812 + 2813 + /* 2814 + * set the failure fallback case to look in the selected 2815 + * AG as the stream may have moved. 2816 + */ 2817 + if (xfs_inode_is_filestream(ap->ip)) 2818 + ap->rval = args.fsbno = XFS_AGB_TO_FSB(mp, ag, 0); 2824 2819 } else if (ap->low) { 2825 - args.type = XFS_ALLOCTYPE_START_BNO; 2820 + if (xfs_inode_is_filestream(ap->ip)) 2821 + args.type = XFS_ALLOCTYPE_FIRST_AG; 2822 + else 2823 + args.type = XFS_ALLOCTYPE_START_BNO; 2826 2824 args.total = args.minlen = ap->minlen; 2827 2825 } else { 2828 2826 args.type = XFS_ALLOCTYPE_NEAR_BNO; 2829 2827 args.total = ap->total; 2830 2828 args.minlen = ap->minlen; 2831 2829 } 2832 - if (unlikely(ap->userdata && ap->ip->i_d.di_extsize && 2833 - (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE))) { 2834 - args.prod = ap->ip->i_d.di_extsize; 2830 + /* apply extent size hints if obtained earlier */ 2831 + if (unlikely(align)) { 2832 + args.prod = align; 2835 2833 if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod))) 2836 2834 args.mod = (xfs_extlen_t)(args.prod - args.mod); 2837 2835 } else if (mp->m_sb.sb_blocksize >= NBPP) { ··· 3073 3051 xfs_bmbt_rec_t *ep; /* current extent entry pointer */ 3074 3052 int error; /* error return value */ 3075 3053 int flags; /* inode logging flags */ 3076 - #ifdef XFS_BMAP_TRACE 3077 - static char fname[] = "xfs_bmap_del_extent"; 3078 - #endif 3079 3054 xfs_bmbt_irec_t got; /* current extent entry */ 3080 3055 xfs_fileoff_t got_endoff; /* first offset past got */ 3081 3056 int i; /* temp state */ ··· 3166 3147 /* 3167 3148 * Matches the whole extent. Delete the entry. 3168 3149 */ 3169 - xfs_bmap_trace_delete(fname, "3", ip, idx, 1, whichfork); 3150 + XFS_BMAP_TRACE_DELETE("3", ip, idx, 1, whichfork); 3170 3151 xfs_iext_remove(ifp, idx, 1); 3171 3152 ifp->if_lastex = idx; 3172 3153 if (delay) ··· 3187 3168 /* 3188 3169 * Deleting the first part of the extent. 3189 3170 */ 3190 - xfs_bmap_trace_pre_update(fname, "2", ip, idx, whichfork); 3171 + XFS_BMAP_TRACE_PRE_UPDATE("2", ip, idx, whichfork); 3191 3172 xfs_bmbt_set_startoff(ep, del_endoff); 3192 3173 temp = got.br_blockcount - del->br_blockcount; 3193 3174 xfs_bmbt_set_blockcount(ep, temp); ··· 3196 3177 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 3197 3178 da_old); 3198 3179 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); 3199 - xfs_bmap_trace_post_update(fname, "2", ip, idx, 3180 + XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx, 3200 3181 whichfork); 3201 3182 da_new = temp; 3202 3183 break; 3203 3184 } 3204 3185 xfs_bmbt_set_startblock(ep, del_endblock); 3205 - xfs_bmap_trace_post_update(fname, "2", ip, idx, whichfork); 3186 + XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx, whichfork); 3206 3187 if (!cur) { 3207 3188 flags |= XFS_ILOG_FEXT(whichfork); 3208 3189 break; ··· 3218 3199 * Deleting the last part of the extent. 
3219 3200 */ 3220 3201 temp = got.br_blockcount - del->br_blockcount; 3221 - xfs_bmap_trace_pre_update(fname, "1", ip, idx, whichfork); 3202 + XFS_BMAP_TRACE_PRE_UPDATE("1", ip, idx, whichfork); 3222 3203 xfs_bmbt_set_blockcount(ep, temp); 3223 3204 ifp->if_lastex = idx; 3224 3205 if (delay) { 3225 3206 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), 3226 3207 da_old); 3227 3208 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp)); 3228 - xfs_bmap_trace_post_update(fname, "1", ip, idx, 3209 + XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx, 3229 3210 whichfork); 3230 3211 da_new = temp; 3231 3212 break; 3232 3213 } 3233 - xfs_bmap_trace_post_update(fname, "1", ip, idx, whichfork); 3214 + XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx, whichfork); 3234 3215 if (!cur) { 3235 3216 flags |= XFS_ILOG_FEXT(whichfork); 3236 3217 break; ··· 3247 3228 * Deleting the middle of the extent. 3248 3229 */ 3249 3230 temp = del->br_startoff - got.br_startoff; 3250 - xfs_bmap_trace_pre_update(fname, "0", ip, idx, whichfork); 3231 + XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx, whichfork); 3251 3232 xfs_bmbt_set_blockcount(ep, temp); 3252 3233 new.br_startoff = del_endoff; 3253 3234 temp2 = got_endoff - del_endoff; ··· 3334 3315 } 3335 3316 } 3336 3317 } 3337 - xfs_bmap_trace_post_update(fname, "0", ip, idx, whichfork); 3338 - xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 1, &new, NULL, 3318 + XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, whichfork); 3319 + XFS_BMAP_TRACE_INSERT("0", ip, idx + 1, 1, &new, NULL, 3339 3320 whichfork); 3340 3321 xfs_iext_insert(ifp, idx + 1, 1, &new); 3341 3322 ifp->if_lastex = idx + 1; ··· 3575 3556 { 3576 3557 int error; /* error return value */ 3577 3558 int flags; /* logging flags returned */ 3578 - #ifdef XFS_BMAP_TRACE 3579 - static char fname[] = "xfs_bmap_local_to_extents"; 3580 - #endif 3581 3559 xfs_ifork_t *ifp; /* inode fork pointer */ 3582 3560 3583 3561 /* ··· 3629 3613 xfs_iext_add(ifp, 0, 1); 3630 3614 ep = xfs_iext_get_ext(ifp, 0); 3631 3615 xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM); 3632 - xfs_bmap_trace_post_update(fname, "new", ip, 0, whichfork); 3616 + XFS_BMAP_TRACE_POST_UPDATE("new", ip, 0, whichfork); 3633 3617 XFS_IFORK_NEXT_SET(ip, whichfork, 1); 3634 3618 ip->i_d.di_nblocks = 1; 3635 3619 XFS_TRANS_MOD_DQUOT_BYINO(args.mp, tp, ip, ··· 3752 3736 STATIC void 3753 3737 xfs_bmap_trace_addentry( 3754 3738 int opcode, /* operation */ 3755 - char *fname, /* function name */ 3739 + const char *fname, /* function name */ 3756 3740 char *desc, /* operation description */ 3757 3741 xfs_inode_t *ip, /* incore inode pointer */ 3758 3742 xfs_extnum_t idx, /* index of entry(ies) */ ··· 3811 3795 */ 3812 3796 STATIC void 3813 3797 xfs_bmap_trace_delete( 3814 - char *fname, /* function name */ 3798 + const char *fname, /* function name */ 3815 3799 char *desc, /* operation description */ 3816 3800 xfs_inode_t *ip, /* incore inode pointer */ 3817 3801 xfs_extnum_t idx, /* index of entry(entries) deleted */ ··· 3833 3817 */ 3834 3818 STATIC void 3835 3819 xfs_bmap_trace_insert( 3836 - char *fname, /* function name */ 3820 + const char *fname, /* function name */ 3837 3821 char *desc, /* operation description */ 3838 3822 xfs_inode_t *ip, /* incore inode pointer */ 3839 3823 xfs_extnum_t idx, /* index of entry(entries) inserted */ ··· 3862 3846 */ 3863 3847 STATIC void 3864 3848 xfs_bmap_trace_post_update( 3865 - char *fname, /* function name */ 3849 + const char *fname, /* function name */ 3866 3850 char *desc, /* operation description */ 3867 3851 xfs_inode_t *ip, /* 
incore inode pointer */ 3868 3852 xfs_extnum_t idx, /* index of entry updated */ ··· 3880 3864 */ 3881 3865 STATIC void 3882 3866 xfs_bmap_trace_pre_update( 3883 - char *fname, /* function name */ 3867 + const char *fname, /* function name */ 3884 3868 char *desc, /* operation description */ 3885 3869 xfs_inode_t *ip, /* incore inode pointer */ 3886 3870 xfs_extnum_t idx, /* index of entry to be updated */ ··· 4497 4481 xfs_buf_t *bp; /* buffer for "block" */ 4498 4482 int error; /* error return value */ 4499 4483 xfs_exntfmt_t exntf; /* XFS_EXTFMT_NOSTATE, if checking */ 4500 - #ifdef XFS_BMAP_TRACE 4501 - static char fname[] = "xfs_bmap_read_extents"; 4502 - #endif 4503 4484 xfs_extnum_t i, j; /* index into the extents list */ 4504 4485 xfs_ifork_t *ifp; /* fork structure */ 4505 4486 int level; /* btree level, for checking */ ··· 4613 4600 } 4614 4601 ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))); 4615 4602 ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork)); 4616 - xfs_bmap_trace_exlist(fname, ip, i, whichfork); 4603 + XFS_BMAP_TRACE_EXLIST(ip, i, whichfork); 4617 4604 return 0; 4618 4605 error0: 4619 4606 xfs_trans_brelse(tp, bp); ··· 4626 4613 */ 4627 4614 void 4628 4615 xfs_bmap_trace_exlist( 4629 - char *fname, /* function name */ 4616 + const char *fname, /* function name */ 4630 4617 xfs_inode_t *ip, /* incore inode pointer */ 4631 4618 xfs_extnum_t cnt, /* count of entries in the list */ 4632 4619 int whichfork) /* data or attr fork */ ··· 4641 4628 for (idx = 0; idx < cnt; idx++) { 4642 4629 ep = xfs_iext_get_ext(ifp, idx); 4643 4630 xfs_bmbt_get_all(ep, &s); 4644 - xfs_bmap_trace_insert(fname, "exlist", ip, idx, 1, &s, NULL, 4631 + XFS_BMAP_TRACE_INSERT("exlist", ip, idx, 1, &s, NULL, 4645 4632 whichfork); 4646 4633 } 4647 4634 } ··· 4881 4868 xfs_extlen_t extsz; 4882 4869 4883 4870 /* Figure out the extent size, adjust alen */ 4884 - if (rt) { 4885 - if (!(extsz = ip->i_d.di_extsize)) 4886 - extsz = mp->m_sb.sb_rextsize; 4887 - } else { 4888 - extsz = ip->i_d.di_extsize; 4889 - } 4871 + extsz = xfs_get_extsz_hint(ip); 4890 4872 if (extsz) { 4891 4873 error = xfs_bmap_extsize_align(mp, 4892 4874 &got, &prev, extsz, ··· 5227 5219 * Else go on to the next record. 5228 5220 */ 5229 5221 ep = xfs_iext_get_ext(ifp, ++lastx); 5230 - if (lastx >= nextents) { 5222 + prev = got; 5223 + if (lastx >= nextents) 5231 5224 eof = 1; 5232 - prev = got; 5233 - } else 5225 + else 5234 5226 xfs_bmbt_get_all(ep, &got); 5235 5227 } 5236 5228 ifp->if_lastex = lastx; ··· 5821 5813 ip->i_d.di_format != XFS_DINODE_FMT_LOCAL) 5822 5814 return XFS_ERROR(EINVAL); 5823 5815 if (whichfork == XFS_DATA_FORK) { 5824 - if ((ip->i_d.di_extsize && (ip->i_d.di_flags & 5825 - (XFS_DIFLAG_REALTIME|XFS_DIFLAG_EXTSIZE))) || 5816 + if (xfs_get_extsz_hint(ip) || 5826 5817 ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){ 5827 5818 prealloced = 1; 5828 5819 fixlen = XFS_MAXIOFFSET(mp);
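Note: the three open-coded extent size hint extractions in this file (realtime falling back to sb_rextsize, data allocations honouring XFS_DIFLAG_EXTSIZE) are collapsed into the new xfs_get_extsz_hint() helper. Its definition is not part of the hunks shown here; a sketch consistent with the replaced call sites (exact placement and style are assumptions) would be:

	xfs_extlen_t
	xfs_get_extsz_hint(
		xfs_inode_t	*ip)
	{
		/*
		 * Realtime inodes: an explicit hint if XFS_DIFLAG_EXTSIZE
		 * is set, otherwise fall back to the rt extent size.
		 */
		if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME)
			return (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) ?
				ip->i_d.di_extsize :
				ip->i_mount->m_sb.sb_rextsize;
		/* Data device: only an explicitly set hint counts. */
		return (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) ?
			ip->i_d.di_extsize : 0;
	}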
+4 -2
fs/xfs/xfs_bmap.h
··· 144 144 */ 145 145 void 146 146 xfs_bmap_trace_exlist( 147 - char *fname, /* function name */ 147 + const char *fname, /* function name */ 148 148 struct xfs_inode *ip, /* incore inode pointer */ 149 149 xfs_extnum_t cnt, /* count of entries in list */ 150 150 int whichfork); /* data or attr fork */ 151 + #define XFS_BMAP_TRACE_EXLIST(ip,c,w) \ 152 + xfs_bmap_trace_exlist(__FUNCTION__,ip,c,w) 151 153 #else 152 - #define xfs_bmap_trace_exlist(f,ip,c,w) 154 + #define XFS_BMAP_TRACE_EXLIST(ip,c,w) 153 155 #endif 154 156 155 157 /*
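Note: the XFS_BMAP_TRACE_EXLIST wrapper above captures the caller's name via __FUNCTION__ and expands to nothing when XFS_BMAP_TRACE is not defined, so call sites need neither #ifdef guards nor per-function fname[] arrays. A standalone sketch of the pattern (names are illustrative, not from the patch):

	#ifdef TRACING
	void	trace_exlist(const char *fname, int cnt);
	#define	TRACE_EXLIST(c)	trace_exlist(__FUNCTION__, (c))
	#else
	#define	TRACE_EXLIST(c)	/* compiled out: no code, no strings */
	#endif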
+17 -71
fs/xfs/xfs_bmap_btree.c
··· 76 76 */ 77 77 STATIC void 78 78 xfs_bmbt_trace_enter( 79 - char *func, 79 + const char *func, 80 80 xfs_btree_cur_t *cur, 81 81 char *s, 82 82 int type, ··· 117 117 */ 118 118 STATIC void 119 119 xfs_bmbt_trace_argbi( 120 - char *func, 120 + const char *func, 121 121 xfs_btree_cur_t *cur, 122 122 xfs_buf_t *b, 123 123 int i, ··· 134 134 */ 135 135 STATIC void 136 136 xfs_bmbt_trace_argbii( 137 - char *func, 137 + const char *func, 138 138 xfs_btree_cur_t *cur, 139 139 xfs_buf_t *b, 140 140 int i0, ··· 153 153 */ 154 154 STATIC void 155 155 xfs_bmbt_trace_argfffi( 156 - char *func, 156 + const char *func, 157 157 xfs_btree_cur_t *cur, 158 158 xfs_dfiloff_t o, 159 159 xfs_dfsbno_t b, ··· 172 172 */ 173 173 STATIC void 174 174 xfs_bmbt_trace_argi( 175 - char *func, 175 + const char *func, 176 176 xfs_btree_cur_t *cur, 177 177 int i, 178 178 int line) ··· 188 188 */ 189 189 STATIC void 190 190 xfs_bmbt_trace_argifk( 191 - char *func, 191 + const char *func, 192 192 xfs_btree_cur_t *cur, 193 193 int i, 194 194 xfs_fsblock_t f, ··· 206 206 */ 207 207 STATIC void 208 208 xfs_bmbt_trace_argifr( 209 - char *func, 209 + const char *func, 210 210 xfs_btree_cur_t *cur, 211 211 int i, 212 212 xfs_fsblock_t f, ··· 235 235 */ 236 236 STATIC void 237 237 xfs_bmbt_trace_argik( 238 - char *func, 238 + const char *func, 239 239 xfs_btree_cur_t *cur, 240 240 int i, 241 241 xfs_bmbt_key_t *k, ··· 255 255 */ 256 256 STATIC void 257 257 xfs_bmbt_trace_cursor( 258 - char *func, 258 + const char *func, 259 259 xfs_btree_cur_t *cur, 260 260 char *s, 261 261 int line) ··· 274 274 } 275 275 276 276 #define XFS_BMBT_TRACE_ARGBI(c,b,i) \ 277 - xfs_bmbt_trace_argbi(fname, c, b, i, __LINE__) 277 + xfs_bmbt_trace_argbi(__FUNCTION__, c, b, i, __LINE__) 278 278 #define XFS_BMBT_TRACE_ARGBII(c,b,i,j) \ 279 - xfs_bmbt_trace_argbii(fname, c, b, i, j, __LINE__) 279 + xfs_bmbt_trace_argbii(__FUNCTION__, c, b, i, j, __LINE__) 280 280 #define XFS_BMBT_TRACE_ARGFFFI(c,o,b,i,j) \ 281 - xfs_bmbt_trace_argfffi(fname, c, o, b, i, j, __LINE__) 281 + xfs_bmbt_trace_argfffi(__FUNCTION__, c, o, b, i, j, __LINE__) 282 282 #define XFS_BMBT_TRACE_ARGI(c,i) \ 283 - xfs_bmbt_trace_argi(fname, c, i, __LINE__) 283 + xfs_bmbt_trace_argi(__FUNCTION__, c, i, __LINE__) 284 284 #define XFS_BMBT_TRACE_ARGIFK(c,i,f,s) \ 285 - xfs_bmbt_trace_argifk(fname, c, i, f, s, __LINE__) 285 + xfs_bmbt_trace_argifk(__FUNCTION__, c, i, f, s, __LINE__) 286 286 #define XFS_BMBT_TRACE_ARGIFR(c,i,f,r) \ 287 - xfs_bmbt_trace_argifr(fname, c, i, f, r, __LINE__) 287 + xfs_bmbt_trace_argifr(__FUNCTION__, c, i, f, r, __LINE__) 288 288 #define XFS_BMBT_TRACE_ARGIK(c,i,k) \ 289 - xfs_bmbt_trace_argik(fname, c, i, k, __LINE__) 289 + xfs_bmbt_trace_argik(__FUNCTION__, c, i, k, __LINE__) 290 290 #define XFS_BMBT_TRACE_CURSOR(c,s) \ 291 - xfs_bmbt_trace_cursor(fname, c, s, __LINE__) 291 + xfs_bmbt_trace_cursor(__FUNCTION__, c, s, __LINE__) 292 292 #else 293 293 #define XFS_BMBT_TRACE_ARGBI(c,b,i) 294 294 #define XFS_BMBT_TRACE_ARGBII(c,b,i,j) ··· 318 318 xfs_fsblock_t bno; /* fs-relative block number */ 319 319 xfs_buf_t *bp; /* buffer for block */ 320 320 int error; /* error return value */ 321 - #ifdef XFS_BMBT_TRACE 322 - static char fname[] = "xfs_bmbt_delrec"; 323 - #endif 324 321 int i; /* loop counter */ 325 322 int j; /* temp state */ 326 323 xfs_bmbt_key_t key; /* bmap btree key */ ··· 691 694 xfs_bmbt_block_t *block; /* bmap btree block */ 692 695 xfs_buf_t *bp; /* buffer for block */ 693 696 int error; /* error return value */ 694 - #ifdef XFS_BMBT_TRACE 695 - 
static char fname[] = "xfs_bmbt_insrec"; 696 - #endif 697 697 int i; /* loop index */ 698 698 xfs_bmbt_key_t key; /* bmap btree key */ 699 699 xfs_bmbt_key_t *kp=NULL; /* pointer to bmap btree key */ ··· 875 881 #ifdef DEBUG 876 882 int error; 877 883 #endif 878 - #ifdef XFS_BMBT_TRACE 879 - static char fname[] = "xfs_bmbt_killroot"; 880 - #endif 881 884 int i; 882 885 xfs_bmbt_key_t *kp; 883 886 xfs_inode_t *ip; ··· 964 973 int kfirst, 965 974 int klast) 966 975 { 967 - #ifdef XFS_BMBT_TRACE 968 - static char fname[] = "xfs_bmbt_log_keys"; 969 - #endif 970 976 xfs_trans_t *tp; 971 977 972 978 XFS_BMBT_TRACE_CURSOR(cur, ENTRY); ··· 1000 1012 int pfirst, 1001 1013 int plast) 1002 1014 { 1003 - #ifdef XFS_BMBT_TRACE 1004 - static char fname[] = "xfs_bmbt_log_ptrs"; 1005 - #endif 1006 1015 xfs_trans_t *tp; 1007 1016 1008 1017 XFS_BMBT_TRACE_CURSOR(cur, ENTRY); ··· 1040 1055 xfs_daddr_t d; 1041 1056 xfs_sfiloff_t diff; 1042 1057 int error; /* error return value */ 1043 - #ifdef XFS_BMBT_TRACE 1044 - static char fname[] = "xfs_bmbt_lookup"; 1045 - #endif 1046 1058 xfs_fsblock_t fsbno=0; 1047 1059 int high; 1048 1060 int i; ··· 1177 1195 int *stat) /* success/failure */ 1178 1196 { 1179 1197 int error; /* error return value */ 1180 - #ifdef XFS_BMBT_TRACE 1181 - static char fname[] = "xfs_bmbt_lshift"; 1182 - #endif 1183 1198 #ifdef DEBUG 1184 1199 int i; /* loop counter */ 1185 1200 #endif ··· 1310 1331 int *stat) /* success/failure */ 1311 1332 { 1312 1333 int error; /* error return value */ 1313 - #ifdef XFS_BMBT_TRACE 1314 - static char fname[] = "xfs_bmbt_rshift"; 1315 - #endif 1316 1334 int i; /* loop counter */ 1317 1335 xfs_bmbt_key_t key; /* bmap btree key */ 1318 1336 xfs_buf_t *lbp; /* left buffer pointer */ ··· 1468 1492 { 1469 1493 xfs_alloc_arg_t args; /* block allocation args */ 1470 1494 int error; /* error return value */ 1471 - #ifdef XFS_BMBT_TRACE 1472 - static char fname[] = "xfs_bmbt_split"; 1473 - #endif 1474 1495 int i; /* loop counter */ 1475 1496 xfs_fsblock_t lbno; /* left sibling block number */ 1476 1497 xfs_buf_t *lbp; /* left buffer pointer */ ··· 1614 1641 #ifdef DEBUG 1615 1642 int error; 1616 1643 #endif 1617 - #ifdef XFS_BMBT_TRACE 1618 - static char fname[] = "xfs_bmbt_updkey"; 1619 - #endif 1620 1644 xfs_bmbt_key_t *kp; 1621 1645 int ptr; 1622 1646 ··· 1682 1712 xfs_bmbt_block_t *block; 1683 1713 xfs_buf_t *bp; 1684 1714 int error; /* error return value */ 1685 - #ifdef XFS_BMBT_TRACE 1686 - static char fname[] = "xfs_bmbt_decrement"; 1687 - #endif 1688 1715 xfs_fsblock_t fsbno; 1689 1716 int lev; 1690 1717 xfs_mount_t *mp; ··· 1752 1785 int *stat) /* success/failure */ 1753 1786 { 1754 1787 int error; /* error return value */ 1755 - #ifdef XFS_BMBT_TRACE 1756 - static char fname[] = "xfs_bmbt_delete"; 1757 - #endif 1758 1788 int i; 1759 1789 int level; 1760 1790 ··· 1964 2000 xfs_bmbt_block_t *block; 1965 2001 xfs_buf_t *bp; 1966 2002 int error; /* error return value */ 1967 - #ifdef XFS_BMBT_TRACE 1968 - static char fname[] = "xfs_bmbt_increment"; 1969 - #endif 1970 2003 xfs_fsblock_t fsbno; 1971 2004 int lev; 1972 2005 xfs_mount_t *mp; ··· 2041 2080 int *stat) /* success/failure */ 2042 2081 { 2043 2082 int error; /* error return value */ 2044 - #ifdef XFS_BMBT_TRACE 2045 - static char fname[] = "xfs_bmbt_insert"; 2046 - #endif 2047 2083 int i; 2048 2084 int level; 2049 2085 xfs_fsblock_t nbno; ··· 2100 2142 int fields) 2101 2143 { 2102 2144 int first; 2103 - #ifdef XFS_BMBT_TRACE 2104 - static char fname[] = "xfs_bmbt_log_block"; 2105 - #endif 2106 2145 
int last; 2107 2146 xfs_trans_t *tp; 2108 2147 static const short offsets[] = { ··· 2136 2181 { 2137 2182 xfs_bmbt_block_t *block; 2138 2183 int first; 2139 - #ifdef XFS_BMBT_TRACE 2140 - static char fname[] = "xfs_bmbt_log_recs"; 2141 - #endif 2142 2184 int last; 2143 2185 xfs_bmbt_rec_t *rp; 2144 2186 xfs_trans_t *tp; ··· 2197 2245 xfs_bmbt_key_t *ckp; /* child key pointer */ 2198 2246 xfs_bmbt_ptr_t *cpp; /* child ptr pointer */ 2199 2247 int error; /* error return code */ 2200 - #ifdef XFS_BMBT_TRACE 2201 - static char fname[] = "xfs_bmbt_newroot"; 2202 - #endif 2203 2248 #ifdef DEBUG 2204 2249 int i; /* loop counter */ 2205 2250 #endif ··· 2579 2630 xfs_bmbt_block_t *block; 2580 2631 xfs_buf_t *bp; 2581 2632 int error; 2582 - #ifdef XFS_BMBT_TRACE 2583 - static char fname[] = "xfs_bmbt_update"; 2584 - #endif 2585 2633 xfs_bmbt_key_t key; 2586 2634 int ptr; 2587 2635 xfs_bmbt_rec_t *rp;
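Note: every hunk in this file follows the same shape: the trace helpers gain a `const char *func` parameter, the XFS_BMBT_TRACE_* wrappers pass __FUNCTION__ and __LINE__, and the per-function `static char fname[]` arrays are deleted. A minimal user-space demonstration of why this works (illustrative only; compiles with gcc):

	#include <stdio.h>

	static void
	trace_enter(const char *func, int line, const char *msg)
	{
		printf("%s:%d: %s\n", func, line, msg);
	}

	/* each expansion captures the enclosing function's name and line */
	#define TRACE(msg)	trace_enter(__FUNCTION__, __LINE__, (msg))

	int
	main(void)
	{
		TRACE("entry");		/* prints "main:<line>: entry" */
		return 0;
	}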
+8 -24
fs/xfs/xfs_btree.h
··· 444 444 /* 445 445 * Min and max functions for extlen, agblock, fileoff, and filblks types. 446 446 */ 447 - #define XFS_EXTLEN_MIN(a,b) \ 448 - ((xfs_extlen_t)(a) < (xfs_extlen_t)(b) ? \ 449 - (xfs_extlen_t)(a) : (xfs_extlen_t)(b)) 450 - #define XFS_EXTLEN_MAX(a,b) \ 451 - ((xfs_extlen_t)(a) > (xfs_extlen_t)(b) ? \ 452 - (xfs_extlen_t)(a) : (xfs_extlen_t)(b)) 453 - #define XFS_AGBLOCK_MIN(a,b) \ 454 - ((xfs_agblock_t)(a) < (xfs_agblock_t)(b) ? \ 455 - (xfs_agblock_t)(a) : (xfs_agblock_t)(b)) 456 - #define XFS_AGBLOCK_MAX(a,b) \ 457 - ((xfs_agblock_t)(a) > (xfs_agblock_t)(b) ? \ 458 - (xfs_agblock_t)(a) : (xfs_agblock_t)(b)) 459 - #define XFS_FILEOFF_MIN(a,b) \ 460 - ((xfs_fileoff_t)(a) < (xfs_fileoff_t)(b) ? \ 461 - (xfs_fileoff_t)(a) : (xfs_fileoff_t)(b)) 462 - #define XFS_FILEOFF_MAX(a,b) \ 463 - ((xfs_fileoff_t)(a) > (xfs_fileoff_t)(b) ? \ 464 - (xfs_fileoff_t)(a) : (xfs_fileoff_t)(b)) 465 - #define XFS_FILBLKS_MIN(a,b) \ 466 - ((xfs_filblks_t)(a) < (xfs_filblks_t)(b) ? \ 467 - (xfs_filblks_t)(a) : (xfs_filblks_t)(b)) 468 - #define XFS_FILBLKS_MAX(a,b) \ 469 - ((xfs_filblks_t)(a) > (xfs_filblks_t)(b) ? \ 470 - (xfs_filblks_t)(a) : (xfs_filblks_t)(b)) 447 + #define XFS_EXTLEN_MIN(a,b) min_t(xfs_extlen_t, (a), (b)) 448 + #define XFS_EXTLEN_MAX(a,b) max_t(xfs_extlen_t, (a), (b)) 449 + #define XFS_AGBLOCK_MIN(a,b) min_t(xfs_agblock_t, (a), (b)) 450 + #define XFS_AGBLOCK_MAX(a,b) max_t(xfs_agblock_t, (a), (b)) 451 + #define XFS_FILEOFF_MIN(a,b) min_t(xfs_fileoff_t, (a), (b)) 452 + #define XFS_FILEOFF_MAX(a,b) max_t(xfs_fileoff_t, (a), (b)) 453 + #define XFS_FILBLKS_MIN(a,b) min_t(xfs_filblks_t, (a), (b)) 454 + #define XFS_FILBLKS_MAX(a,b) max_t(xfs_filblks_t, (a), (b)) 471 455 472 456 #define XFS_FSB_SANITY_CHECK(mp,fsb) \ 473 457 (XFS_FSB_TO_AGNO(mp, fsb) < mp->m_sb.sb_agcount && \
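Note: besides being shorter, the min_t()/max_t() forms fix a subtle wart in the deleted macros, which evaluated each argument twice. In this era's <linux/kernel.h> the typed helpers are defined roughly as:

	#define min_t(type, x, y) \
		({ type __x = (x); type __y = (y); __x < __y ? __x : __y; })
	#define max_t(type, x, y) \
		({ type __x = (x); type __y = (y); __x > __y ? __x : __y; })

so each argument is evaluated exactly once and the comparison happens in the named type.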
+2 -2
fs/xfs/xfs_buf_item.c
··· 580 580 * If the buf item isn't tracking any data, free it. 581 581 * Otherwise, if XFS_BLI_HOLD is set clear it. 582 582 */ 583 - if (xfs_count_bits(bip->bli_format.blf_data_map, 584 - bip->bli_format.blf_map_size, 0) == 0) { 583 + if (xfs_bitmap_empty(bip->bli_format.blf_data_map, 584 + bip->bli_format.blf_map_size)) { 585 585 xfs_buf_item_relse(bp); 586 586 } else if (hold) { 587 587 bip->bli_flags &= ~XFS_BLI_HOLD;
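Note: xfs_bitmap_empty() replaces the xfs_count_bits(...) == 0 idiom (see "Kill off xfs_count_bits" in the series); its definition is not in this hunk. A plausible sketch, assuming the word-array bitmap layout used by blf_data_map:

	int
	xfs_bitmap_empty(uint *map, uint size)
	{
		uint	i;
		uint	ret = 0;

		/* OR all words together; any set bit makes ret non-zero */
		for (i = 0; i < size; i++)
			ret |= map[i];

		return (ret == 0);
	}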
+2
fs/xfs/xfs_clnt.h
··· 99 99 */ 100 100 #define XFSMNT2_COMPAT_IOSIZE 0x00000001 /* don't report large preferred 101 101 * I/O size in stat(2) */ 102 + #define XFSMNT2_FILESTREAMS 0x00000002 /* enable the filestreams 103 + * allocator */ 102 104 103 105 #endif /* __XFS_CLNT_H__ */
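Note: XFSMNT2_FILESTREAMS is the mount-layer switch for the new allocator. How it gets set is not shown in this diff; presumably the option parser maps a "filestreams" mount option onto it, along these entirely illustrative lines:

	/* hypothetical fragment of the mount option parser */
	if (!strcmp(this_char, "filestreams"))
		args->flags2 |= XFSMNT2_FILESTREAMS;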
+3 -1
fs/xfs/xfs_dinode.h
··· 257 257 #define XFS_DIFLAG_EXTSIZE_BIT 11 /* inode extent size allocator hint */ 258 258 #define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */ 259 259 #define XFS_DIFLAG_NODEFRAG_BIT 13 /* do not reorganize/defragment */ 260 + #define XFS_DIFLAG_FILESTREAM_BIT 14 /* use filestream allocator */ 260 261 #define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT) 261 262 #define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT) 262 263 #define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT) ··· 272 271 #define XFS_DIFLAG_EXTSIZE (1 << XFS_DIFLAG_EXTSIZE_BIT) 273 272 #define XFS_DIFLAG_EXTSZINHERIT (1 << XFS_DIFLAG_EXTSZINHERIT_BIT) 274 273 #define XFS_DIFLAG_NODEFRAG (1 << XFS_DIFLAG_NODEFRAG_BIT) 274 + #define XFS_DIFLAG_FILESTREAM (1 << XFS_DIFLAG_FILESTREAM_BIT) 275 275 276 276 #define XFS_DIFLAG_ANY \ 277 277 (XFS_DIFLAG_REALTIME | XFS_DIFLAG_PREALLOC | XFS_DIFLAG_NEWRTBM | \ 278 278 XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \ 279 279 XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \ 280 280 XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS | XFS_DIFLAG_EXTSIZE | \ 281 - XFS_DIFLAG_EXTSZINHERIT | XFS_DIFLAG_NODEFRAG) 281 + XFS_DIFLAG_EXTSZINHERIT | XFS_DIFLAG_NODEFRAG | XFS_DIFLAG_FILESTREAM) 282 282 283 283 #endif /* __XFS_DINODE_H__ */
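Note: XFS_DIFLAG_FILESTREAM is the on-disk, per-inode counterpart of the mount flag above, and is what the xfs_inode_is_filestream() calls in the xfs_bmap.c hunks key off. A sketch of that predicate (the mount and in-core flag names are assumptions, not shown in this diff):

	static inline int
	xfs_inode_is_filestream(struct xfs_inode *ip)
	{
		return (ip->i_mount->m_flags & XFS_MOUNT_FILESTREAMS) ||
		       xfs_iflags_test(ip, XFS_IFILESTREAM) ||
		       (ip->i_d.di_flags & XFS_DIFLAG_FILESTREAM);
	}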
+6 -6
fs/xfs/xfs_dir2.c
··· 55 55 XFS_MAX_BLOCKSIZE); 56 56 mp->m_dirblksize = 1 << (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog); 57 57 mp->m_dirblkfsbs = 1 << mp->m_sb.sb_dirblklog; 58 - mp->m_dirdatablk = XFS_DIR2_DB_TO_DA(mp, XFS_DIR2_DATA_FIRSTDB(mp)); 59 - mp->m_dirleafblk = XFS_DIR2_DB_TO_DA(mp, XFS_DIR2_LEAF_FIRSTDB(mp)); 60 - mp->m_dirfreeblk = XFS_DIR2_DB_TO_DA(mp, XFS_DIR2_FREE_FIRSTDB(mp)); 58 + mp->m_dirdatablk = xfs_dir2_db_to_da(mp, XFS_DIR2_DATA_FIRSTDB(mp)); 59 + mp->m_dirleafblk = xfs_dir2_db_to_da(mp, XFS_DIR2_LEAF_FIRSTDB(mp)); 60 + mp->m_dirfreeblk = xfs_dir2_db_to_da(mp, XFS_DIR2_FREE_FIRSTDB(mp)); 61 61 mp->m_attr_node_ents = 62 62 (mp->m_sb.sb_blocksize - (uint)sizeof(xfs_da_node_hdr_t)) / 63 63 (uint)sizeof(xfs_da_node_entry_t); ··· 554 554 */ 555 555 if (mapp != &map) 556 556 kmem_free(mapp, sizeof(*mapp) * count); 557 - *dbp = XFS_DIR2_DA_TO_DB(mp, (xfs_dablk_t)bno); 557 + *dbp = xfs_dir2_da_to_db(mp, (xfs_dablk_t)bno); 558 558 /* 559 559 * Update file's size if this is the data space and it grew. 560 560 */ ··· 706 706 dp = args->dp; 707 707 mp = dp->i_mount; 708 708 tp = args->trans; 709 - da = XFS_DIR2_DB_TO_DA(mp, db); 709 + da = xfs_dir2_db_to_da(mp, db); 710 710 /* 711 711 * Unmap the fsblock(s). 712 712 */ ··· 742 742 /* 743 743 * If the block isn't the last one in the directory, we're done. 744 744 */ 745 - if (dp->i_d.di_size > XFS_DIR2_DB_OFF_TO_BYTE(mp, db + 1, 0)) 745 + if (dp->i_d.di_size > xfs_dir2_db_off_to_byte(mp, db + 1, 0)) 746 746 return 0; 747 747 bno = da; 748 748 if ((error = xfs_bmap_last_before(tp, dp, &bno, XFS_DATA_FORK))) {
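Note: the lowercase calls above come from the dir2 de-shouting commit: the former XFS_DIR2_* macros are now inline functions, which gives argument type checking for free. A representative conversion (sketch; the actual definition lives in the dir2 headers, and matches the m_dirblkfsbs = 1 << sb_dirblklog setup in this hunk):

	static inline xfs_dablk_t
	xfs_dir2_db_to_da(struct xfs_mount *mp, xfs_dir2_db_t db)
	{
		/* directory block number -> da (dir/attr) block number */
		return (xfs_dablk_t)(db << mp->m_sb.sb_dirblklog);
	}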
+49 -49
fs/xfs/xfs_dir2_block.c
··· 115 115 xfs_da_brelse(tp, bp); 116 116 return XFS_ERROR(EFSCORRUPTED); 117 117 } 118 - len = XFS_DIR2_DATA_ENTSIZE(args->namelen); 118 + len = xfs_dir2_data_entsize(args->namelen); 119 119 /* 120 120 * Set up pointers to parts of the block. 121 121 */ 122 122 bf = block->hdr.bestfree; 123 - btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 124 - blp = XFS_DIR2_BLOCK_LEAF_P(btp); 123 + btp = xfs_dir2_block_tail_p(mp, block); 124 + blp = xfs_dir2_block_leaf_p(btp); 125 125 /* 126 126 * No stale entries? Need space for entry and new leaf. 127 127 */ ··· 396 396 * Fill in the leaf entry. 397 397 */ 398 398 blp[mid].hashval = cpu_to_be32(args->hashval); 399 - blp[mid].address = cpu_to_be32(XFS_DIR2_BYTE_TO_DATAPTR(mp, 399 + blp[mid].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp, 400 400 (char *)dep - (char *)block)); 401 401 xfs_dir2_block_log_leaf(tp, bp, lfloglow, lfloghigh); 402 402 /* ··· 411 411 dep->inumber = cpu_to_be64(args->inumber); 412 412 dep->namelen = args->namelen; 413 413 memcpy(dep->name, args->name, args->namelen); 414 - tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); 414 + tagp = xfs_dir2_data_entry_tag_p(dep); 415 415 *tagp = cpu_to_be16((char *)dep - (char *)block); 416 416 /* 417 417 * Clean up the bestfree array and log the header, tail, and entry. ··· 455 455 /* 456 456 * If the block number in the offset is out of range, we're done. 457 457 */ 458 - if (XFS_DIR2_DATAPTR_TO_DB(mp, uio->uio_offset) > mp->m_dirdatablk) { 458 + if (xfs_dir2_dataptr_to_db(mp, uio->uio_offset) > mp->m_dirdatablk) { 459 459 *eofp = 1; 460 460 return 0; 461 461 } ··· 471 471 * Extract the byte offset we start at from the seek pointer. 472 472 * We'll skip entries before this. 473 473 */ 474 - wantoff = XFS_DIR2_DATAPTR_TO_OFF(mp, uio->uio_offset); 474 + wantoff = xfs_dir2_dataptr_to_off(mp, uio->uio_offset); 475 475 block = bp->data; 476 476 xfs_dir2_data_check(dp, bp); 477 477 /* 478 478 * Set up values for the loop. 479 479 */ 480 - btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 480 + btp = xfs_dir2_block_tail_p(mp, block); 481 481 ptr = (char *)block->u; 482 - endptr = (char *)XFS_DIR2_BLOCK_LEAF_P(btp); 482 + endptr = (char *)xfs_dir2_block_leaf_p(btp); 483 483 p.dbp = dbp; 484 484 p.put = put; 485 485 p.uio = uio; ··· 502 502 /* 503 503 * Bump pointer for the next iteration. 504 504 */ 505 - ptr += XFS_DIR2_DATA_ENTSIZE(dep->namelen); 505 + ptr += xfs_dir2_data_entsize(dep->namelen); 506 506 /* 507 507 * The entry is before the desired starting point, skip it. 
508 508 */ ··· 513 513 */ 514 514 p.namelen = dep->namelen; 515 515 516 - p.cook = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, 516 + p.cook = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, 517 517 ptr - (char *)block); 518 518 p.ino = be64_to_cpu(dep->inumber); 519 519 #if XFS_BIG_INUMS ··· 531 531 */ 532 532 if (!p.done) { 533 533 uio->uio_offset = 534 - XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, 534 + xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, 535 535 (char *)dep - (char *)block); 536 536 xfs_da_brelse(tp, bp); 537 537 return error; ··· 545 545 *eofp = 1; 546 546 547 547 uio->uio_offset = 548 - XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk + 1, 0); 548 + xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0); 549 549 550 550 xfs_da_brelse(tp, bp); 551 551 ··· 569 569 570 570 mp = tp->t_mountp; 571 571 block = bp->data; 572 - btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 573 - blp = XFS_DIR2_BLOCK_LEAF_P(btp); 572 + btp = xfs_dir2_block_tail_p(mp, block); 573 + blp = xfs_dir2_block_leaf_p(btp); 574 574 xfs_da_log_buf(tp, bp, (uint)((char *)&blp[first] - (char *)block), 575 575 (uint)((char *)&blp[last + 1] - (char *)block - 1)); 576 576 } ··· 589 589 590 590 mp = tp->t_mountp; 591 591 block = bp->data; 592 - btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 592 + btp = xfs_dir2_block_tail_p(mp, block); 593 593 xfs_da_log_buf(tp, bp, (uint)((char *)btp - (char *)block), 594 594 (uint)((char *)(btp + 1) - (char *)block - 1)); 595 595 } ··· 623 623 mp = dp->i_mount; 624 624 block = bp->data; 625 625 xfs_dir2_data_check(dp, bp); 626 - btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 627 - blp = XFS_DIR2_BLOCK_LEAF_P(btp); 626 + btp = xfs_dir2_block_tail_p(mp, block); 627 + blp = xfs_dir2_block_leaf_p(btp); 628 628 /* 629 629 * Get the offset from the leaf entry, to point to the data. 630 630 */ 631 631 dep = (xfs_dir2_data_entry_t *) 632 - ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(blp[ent].address))); 632 + ((char *)block + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address))); 633 633 /* 634 634 * Fill in inode number, release the block. 635 635 */ ··· 675 675 ASSERT(bp != NULL); 676 676 block = bp->data; 677 677 xfs_dir2_data_check(dp, bp); 678 - btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 679 - blp = XFS_DIR2_BLOCK_LEAF_P(btp); 678 + btp = xfs_dir2_block_tail_p(mp, block); 679 + blp = xfs_dir2_block_leaf_p(btp); 680 680 /* 681 681 * Loop doing a binary search for our hash value. 682 682 * Find our entry, ENOENT if it's not there. ··· 713 713 * Get pointer to the entry from the leaf. 714 714 */ 715 715 dep = (xfs_dir2_data_entry_t *) 716 - ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, addr)); 716 + ((char *)block + xfs_dir2_dataptr_to_off(mp, addr)); 717 717 /* 718 718 * Compare, if it's right give back buffer & entry number. 719 719 */ ··· 768 768 tp = args->trans; 769 769 mp = dp->i_mount; 770 770 block = bp->data; 771 - btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 772 - blp = XFS_DIR2_BLOCK_LEAF_P(btp); 771 + btp = xfs_dir2_block_tail_p(mp, block); 772 + blp = xfs_dir2_block_leaf_p(btp); 773 773 /* 774 774 * Point to the data entry using the leaf entry. 775 775 */ 776 776 dep = (xfs_dir2_data_entry_t *) 777 - ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(blp[ent].address))); 777 + ((char *)block + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address))); 778 778 /* 779 779 * Mark the data entry's space free. 
780 780 */ 781 781 needlog = needscan = 0; 782 782 xfs_dir2_data_make_free(tp, bp, 783 783 (xfs_dir2_data_aoff_t)((char *)dep - (char *)block), 784 - XFS_DIR2_DATA_ENTSIZE(dep->namelen), &needlog, &needscan); 784 + xfs_dir2_data_entsize(dep->namelen), &needlog, &needscan); 785 785 /* 786 786 * Fix up the block tail. 787 787 */ ··· 843 843 dp = args->dp; 844 844 mp = dp->i_mount; 845 845 block = bp->data; 846 - btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 847 - blp = XFS_DIR2_BLOCK_LEAF_P(btp); 846 + btp = xfs_dir2_block_tail_p(mp, block); 847 + blp = xfs_dir2_block_leaf_p(btp); 848 848 /* 849 849 * Point to the data entry we need to change. 850 850 */ 851 851 dep = (xfs_dir2_data_entry_t *) 852 - ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(blp[ent].address))); 852 + ((char *)block + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address))); 853 853 ASSERT(be64_to_cpu(dep->inumber) != args->inumber); 854 854 /* 855 855 * Change the inode number to the new value. ··· 912 912 mp = dp->i_mount; 913 913 leaf = lbp->data; 914 914 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); 915 - ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 915 + ltp = xfs_dir2_leaf_tail_p(mp, leaf); 916 916 /* 917 917 * If there are data blocks other than the first one, take this 918 918 * opportunity to remove trailing empty data blocks that may have ··· 920 920 * These will show up in the leaf bests table. 921 921 */ 922 922 while (dp->i_d.di_size > mp->m_dirblksize) { 923 - bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); 923 + bestsp = xfs_dir2_leaf_bests_p(ltp); 924 924 if (be16_to_cpu(bestsp[be32_to_cpu(ltp->bestcount) - 1]) == 925 925 mp->m_dirblksize - (uint)sizeof(block->hdr)) { 926 926 if ((error = ··· 974 974 /* 975 975 * Initialize the block tail. 976 976 */ 977 - btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 977 + btp = xfs_dir2_block_tail_p(mp, block); 978 978 btp->count = cpu_to_be32(be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale)); 979 979 btp->stale = 0; 980 980 xfs_dir2_block_log_tail(tp, dbp); 981 981 /* 982 982 * Initialize the block leaf area. We compact out stale entries. 983 983 */ 984 - lep = XFS_DIR2_BLOCK_LEAF_P(btp); 984 + lep = xfs_dir2_block_leaf_p(btp); 985 985 for (from = to = 0; from < be16_to_cpu(leaf->hdr.count); from++) { 986 986 if (be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR) 987 987 continue; ··· 1067 1067 ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); 1068 1068 ASSERT(dp->i_df.if_u1.if_data != NULL); 1069 1069 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; 1070 - ASSERT(dp->i_d.di_size >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count)); 1070 + ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count)); 1071 1071 /* 1072 1072 * Copy the directory into the stack buffer. 1073 1073 * Then pitch the incore inode data so we can make extents. ··· 1119 1119 /* 1120 1120 * Fill in the tail. 1121 1121 */ 1122 - btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 1122 + btp = xfs_dir2_block_tail_p(mp, block); 1123 1123 btp->count = cpu_to_be32(sfp->hdr.count + 2); /* ., .. */ 1124 1124 btp->stale = 0; 1125 - blp = XFS_DIR2_BLOCK_LEAF_P(btp); 1125 + blp = xfs_dir2_block_leaf_p(btp); 1126 1126 endoffset = (uint)((char *)blp - (char *)block); 1127 1127 /* 1128 1128 * Remove the freespace, we'll manage it. 
··· 1138 1138 dep->inumber = cpu_to_be64(dp->i_ino); 1139 1139 dep->namelen = 1; 1140 1140 dep->name[0] = '.'; 1141 - tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); 1141 + tagp = xfs_dir2_data_entry_tag_p(dep); 1142 1142 *tagp = cpu_to_be16((char *)dep - (char *)block); 1143 1143 xfs_dir2_data_log_entry(tp, bp, dep); 1144 1144 blp[0].hashval = cpu_to_be32(xfs_dir_hash_dot); 1145 - blp[0].address = cpu_to_be32(XFS_DIR2_BYTE_TO_DATAPTR(mp, 1145 + blp[0].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp, 1146 1146 (char *)dep - (char *)block)); 1147 1147 /* 1148 1148 * Create entry for .. 1149 1149 */ 1150 1150 dep = (xfs_dir2_data_entry_t *) 1151 1151 ((char *)block + XFS_DIR2_DATA_DOTDOT_OFFSET); 1152 - dep->inumber = cpu_to_be64(XFS_DIR2_SF_GET_INUMBER(sfp, &sfp->hdr.parent)); 1152 + dep->inumber = cpu_to_be64(xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent)); 1153 1153 dep->namelen = 2; 1154 1154 dep->name[0] = dep->name[1] = '.'; 1155 - tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); 1155 + tagp = xfs_dir2_data_entry_tag_p(dep); 1156 1156 *tagp = cpu_to_be16((char *)dep - (char *)block); 1157 1157 xfs_dir2_data_log_entry(tp, bp, dep); 1158 1158 blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot); 1159 - blp[1].address = cpu_to_be32(XFS_DIR2_BYTE_TO_DATAPTR(mp, 1159 + blp[1].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp, 1160 1160 (char *)dep - (char *)block)); 1161 1161 offset = XFS_DIR2_DATA_FIRST_OFFSET; 1162 1162 /* ··· 1165 1165 if ((i = 0) == sfp->hdr.count) 1166 1166 sfep = NULL; 1167 1167 else 1168 - sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); 1168 + sfep = xfs_dir2_sf_firstentry(sfp); 1169 1169 /* 1170 1170 * Need to preserve the existing offset values in the sf directory. 1171 1171 * Insert holes (unused entries) where necessary. ··· 1177 1177 if (sfep == NULL) 1178 1178 newoffset = endoffset; 1179 1179 else 1180 - newoffset = XFS_DIR2_SF_GET_OFFSET(sfep); 1180 + newoffset = xfs_dir2_sf_get_offset(sfep); 1181 1181 /* 1182 1182 * There should be a hole here, make one. 1183 1183 */ ··· 1186 1186 ((char *)block + offset); 1187 1187 dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); 1188 1188 dup->length = cpu_to_be16(newoffset - offset); 1189 - *XFS_DIR2_DATA_UNUSED_TAG_P(dup) = cpu_to_be16( 1189 + *xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16( 1190 1190 ((char *)dup - (char *)block)); 1191 1191 xfs_dir2_data_log_unused(tp, bp, dup); 1192 1192 (void)xfs_dir2_data_freeinsert((xfs_dir2_data_t *)block, ··· 1198 1198 * Copy a real entry. 
1199 1199 */ 1200 1200 dep = (xfs_dir2_data_entry_t *)((char *)block + newoffset); 1201 - dep->inumber = cpu_to_be64(XFS_DIR2_SF_GET_INUMBER(sfp, 1202 - XFS_DIR2_SF_INUMBERP(sfep))); 1201 + dep->inumber = cpu_to_be64(xfs_dir2_sf_get_inumber(sfp, 1202 + xfs_dir2_sf_inumberp(sfep))); 1203 1203 dep->namelen = sfep->namelen; 1204 1204 memcpy(dep->name, sfep->name, dep->namelen); 1205 - tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); 1205 + tagp = xfs_dir2_data_entry_tag_p(dep); 1206 1206 *tagp = cpu_to_be16((char *)dep - (char *)block); 1207 1207 xfs_dir2_data_log_entry(tp, bp, dep); 1208 1208 blp[2 + i].hashval = cpu_to_be32(xfs_da_hashname( 1209 1209 (char *)sfep->name, sfep->namelen)); 1210 - blp[2 + i].address = cpu_to_be32(XFS_DIR2_BYTE_TO_DATAPTR(mp, 1210 + blp[2 + i].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp, 1211 1211 (char *)dep - (char *)block)); 1212 1212 offset = (int)((char *)(tagp + 1) - (char *)block); 1213 1213 if (++i == sfp->hdr.count) 1214 1214 sfep = NULL; 1215 1215 else 1216 - sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep); 1216 + sfep = xfs_dir2_sf_nextentry(sfp, sfep); 1217 1217 } 1218 1218 /* Done with the temporary buffer */ 1219 1219 kmem_free(buf, buf_len);
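The pattern in all of these dir2 hunks is mechanical: call sites stop going through the all-caps macro wrappers and call the typed static inline helpers directly, so the headers can drop the macros outright. A minimal before/after sketch of the shape of the change — the inline's signature appears in the header hunk below, but its body is elided there, so the body shown here is paraphrased from this era's xfs_dir2_block.h and may differ in detail:

	/* before: a shouting macro that only forwarded to the inline */
	#define XFS_DIR2_BLOCK_TAIL_P(mp,block)	xfs_dir2_block_tail_p(mp,block)

	/* after: callers use the static inline directly */
	static inline xfs_dir2_block_tail_t *
	xfs_dir2_block_tail_p(struct xfs_mount *mp, xfs_dir2_block_t *block)
	{
		/* the tail lives in the last bytes of the directory block */
		return (((xfs_dir2_block_tail_t *)
			((char *)(block) + (mp)->m_dirblksize)) - 1);
	}

Dropping the wrapper buys argument type checking and single evaluation of each argument, with no object-code change, since the inline was already doing the work.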
-2
fs/xfs/xfs_dir2_block.h
··· 60 60 /* 61 61 * Pointer to the leaf header embedded in a data block (1-block format) 62 62 */ 63 - #define XFS_DIR2_BLOCK_TAIL_P(mp,block) xfs_dir2_block_tail_p(mp,block) 64 63 static inline xfs_dir2_block_tail_t * 65 64 xfs_dir2_block_tail_p(struct xfs_mount *mp, xfs_dir2_block_t *block) 66 65 { ··· 70 71 /* 71 72 * Pointer to the leaf entries embedded in a data block (1-block format) 72 73 */ 73 - #define XFS_DIR2_BLOCK_LEAF_P(btp) xfs_dir2_block_leaf_p(btp) 74 74 static inline struct xfs_dir2_leaf_entry * 75 75 xfs_dir2_block_leaf_p(xfs_dir2_block_tail_t *btp) 76 76 {
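With the wrappers gone, block-format code reads as plain function calls. A sketch of the traversal idiom these call sites share, assembled from the hunks above (identifiers as in the kernel; the loop body is elided):

	xfs_dir2_block_tail_t	*btp;
	xfs_dir2_leaf_entry_t	*blp;
	int			i;

	btp = xfs_dir2_block_tail_p(mp, block);
	blp = xfs_dir2_block_leaf_p(btp);
	for (i = 0; i < be32_to_cpu(btp->count); i++) {
		if (be32_to_cpu(blp[i].address) == XFS_DIR2_NULL_DATAPTR)
			continue;	/* stale leaf entry */
		/* ... resolve blp[i].address to a data entry ... */
	}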
+27 -27
fs/xfs/xfs_dir2_data.c
··· 72 72 bf = d->hdr.bestfree; 73 73 p = (char *)d->u; 74 74 if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) { 75 - btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d); 76 - lep = XFS_DIR2_BLOCK_LEAF_P(btp); 75 + btp = xfs_dir2_block_tail_p(mp, (xfs_dir2_block_t *)d); 76 + lep = xfs_dir2_block_leaf_p(btp); 77 77 endp = (char *)lep; 78 78 } else 79 79 endp = (char *)d + mp->m_dirblksize; ··· 107 107 */ 108 108 if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { 109 109 ASSERT(lastfree == 0); 110 - ASSERT(be16_to_cpu(*XFS_DIR2_DATA_UNUSED_TAG_P(dup)) == 110 + ASSERT(be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)) == 111 111 (char *)dup - (char *)d); 112 112 dfp = xfs_dir2_data_freefind(d, dup); 113 113 if (dfp) { ··· 131 131 dep = (xfs_dir2_data_entry_t *)p; 132 132 ASSERT(dep->namelen != 0); 133 133 ASSERT(xfs_dir_ino_validate(mp, be64_to_cpu(dep->inumber)) == 0); 134 - ASSERT(be16_to_cpu(*XFS_DIR2_DATA_ENTRY_TAG_P(dep)) == 134 + ASSERT(be16_to_cpu(*xfs_dir2_data_entry_tag_p(dep)) == 135 135 (char *)dep - (char *)d); 136 136 count++; 137 137 lastfree = 0; 138 138 if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) { 139 - addr = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, 139 + addr = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, 140 140 (xfs_dir2_data_aoff_t) 141 141 ((char *)dep - (char *)d)); 142 142 hash = xfs_da_hashname((char *)dep->name, dep->namelen); ··· 147 147 } 148 148 ASSERT(i < be32_to_cpu(btp->count)); 149 149 } 150 - p += XFS_DIR2_DATA_ENTSIZE(dep->namelen); 150 + p += xfs_dir2_data_entsize(dep->namelen); 151 151 } 152 152 /* 153 153 * Need to have seen all the entries and all the bestfree slots. ··· 346 346 */ 347 347 p = (char *)d->u; 348 348 if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) { 349 - btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d); 350 - endp = (char *)XFS_DIR2_BLOCK_LEAF_P(btp); 349 + btp = xfs_dir2_block_tail_p(mp, (xfs_dir2_block_t *)d); 350 + endp = (char *)xfs_dir2_block_leaf_p(btp); 351 351 } else 352 352 endp = (char *)d + mp->m_dirblksize; 353 353 /* ··· 360 360 */ 361 361 if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) { 362 362 ASSERT((char *)dup - (char *)d == 363 - be16_to_cpu(*XFS_DIR2_DATA_UNUSED_TAG_P(dup))); 363 + be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup))); 364 364 xfs_dir2_data_freeinsert(d, dup, loghead); 365 365 p += be16_to_cpu(dup->length); 366 366 } ··· 370 370 else { 371 371 dep = (xfs_dir2_data_entry_t *)p; 372 372 ASSERT((char *)dep - (char *)d == 373 - be16_to_cpu(*XFS_DIR2_DATA_ENTRY_TAG_P(dep))); 374 - p += XFS_DIR2_DATA_ENTSIZE(dep->namelen); 373 + be16_to_cpu(*xfs_dir2_data_entry_tag_p(dep))); 374 + p += xfs_dir2_data_entsize(dep->namelen); 375 375 } 376 376 } 377 377 } ··· 402 402 /* 403 403 * Get the buffer set up for the block. 404 404 */ 405 - error = xfs_da_get_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, blkno), -1, &bp, 405 + error = xfs_da_get_buf(tp, dp, xfs_dir2_db_to_da(mp, blkno), -1, &bp, 406 406 XFS_DATA_FORK); 407 407 if (error) { 408 408 return error; ··· 427 427 t=mp->m_dirblksize - (uint)sizeof(d->hdr); 428 428 d->hdr.bestfree[0].length = cpu_to_be16(t); 429 429 dup->length = cpu_to_be16(t); 430 - *XFS_DIR2_DATA_UNUSED_TAG_P(dup) = cpu_to_be16((char *)dup - (char *)d); 430 + *xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16((char *)dup - (char *)d); 431 431 /* 432 432 * Log it and return it. 
433 433 */ ··· 452 452 ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC || 453 453 be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); 454 454 xfs_da_log_buf(tp, bp, (uint)((char *)dep - (char *)d), 455 - (uint)((char *)(XFS_DIR2_DATA_ENTRY_TAG_P(dep) + 1) - 455 + (uint)((char *)(xfs_dir2_data_entry_tag_p(dep) + 1) - 456 456 (char *)d - 1)); 457 457 } 458 458 ··· 497 497 * Log the end (tag) of the unused entry. 498 498 */ 499 499 xfs_da_log_buf(tp, bp, 500 - (uint)((char *)XFS_DIR2_DATA_UNUSED_TAG_P(dup) - (char *)d), 501 - (uint)((char *)XFS_DIR2_DATA_UNUSED_TAG_P(dup) - (char *)d + 500 + (uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)d), 501 + (uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)d + 502 502 sizeof(xfs_dir2_data_off_t) - 1)); 503 503 } 504 504 ··· 535 535 xfs_dir2_block_tail_t *btp; /* block tail */ 536 536 537 537 ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC); 538 - btp = XFS_DIR2_BLOCK_TAIL_P(mp, (xfs_dir2_block_t *)d); 539 - endptr = (char *)XFS_DIR2_BLOCK_LEAF_P(btp); 538 + btp = xfs_dir2_block_tail_p(mp, (xfs_dir2_block_t *)d); 539 + endptr = (char *)xfs_dir2_block_leaf_p(btp); 540 540 } 541 541 /* 542 542 * If this isn't the start of the block, then back up to ··· 587 587 * Fix up the new big freespace. 588 588 */ 589 589 be16_add(&prevdup->length, len + be16_to_cpu(postdup->length)); 590 - *XFS_DIR2_DATA_UNUSED_TAG_P(prevdup) = 590 + *xfs_dir2_data_unused_tag_p(prevdup) = 591 591 cpu_to_be16((char *)prevdup - (char *)d); 592 592 xfs_dir2_data_log_unused(tp, bp, prevdup); 593 593 if (!needscan) { ··· 621 621 else if (prevdup) { 622 622 dfp = xfs_dir2_data_freefind(d, prevdup); 623 623 be16_add(&prevdup->length, len); 624 - *XFS_DIR2_DATA_UNUSED_TAG_P(prevdup) = 624 + *xfs_dir2_data_unused_tag_p(prevdup) = 625 625 cpu_to_be16((char *)prevdup - (char *)d); 626 626 xfs_dir2_data_log_unused(tp, bp, prevdup); 627 627 /* ··· 649 649 newdup = (xfs_dir2_data_unused_t *)((char *)d + offset); 650 650 newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); 651 651 newdup->length = cpu_to_be16(len + be16_to_cpu(postdup->length)); 652 - *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) = 652 + *xfs_dir2_data_unused_tag_p(newdup) = 653 653 cpu_to_be16((char *)newdup - (char *)d); 654 654 xfs_dir2_data_log_unused(tp, bp, newdup); 655 655 /* ··· 676 676 newdup = (xfs_dir2_data_unused_t *)((char *)d + offset); 677 677 newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); 678 678 newdup->length = cpu_to_be16(len); 679 - *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) = 679 + *xfs_dir2_data_unused_tag_p(newdup) = 680 680 cpu_to_be16((char *)newdup - (char *)d); 681 681 xfs_dir2_data_log_unused(tp, bp, newdup); 682 682 (void)xfs_dir2_data_freeinsert(d, newdup, needlogp); ··· 712 712 ASSERT(be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG); 713 713 ASSERT(offset >= (char *)dup - (char *)d); 714 714 ASSERT(offset + len <= (char *)dup + be16_to_cpu(dup->length) - (char *)d); 715 - ASSERT((char *)dup - (char *)d == be16_to_cpu(*XFS_DIR2_DATA_UNUSED_TAG_P(dup))); 715 + ASSERT((char *)dup - (char *)d == be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup))); 716 716 /* 717 717 * Look up the entry in the bestfree table. 
718 718 */ ··· 745 745 newdup = (xfs_dir2_data_unused_t *)((char *)d + offset + len); 746 746 newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); 747 747 newdup->length = cpu_to_be16(oldlen - len); 748 - *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) = 748 + *xfs_dir2_data_unused_tag_p(newdup) = 749 749 cpu_to_be16((char *)newdup - (char *)d); 750 750 xfs_dir2_data_log_unused(tp, bp, newdup); 751 751 /* ··· 772 772 else if (matchback) { 773 773 newdup = dup; 774 774 newdup->length = cpu_to_be16(((char *)d + offset) - (char *)newdup); 775 - *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) = 775 + *xfs_dir2_data_unused_tag_p(newdup) = 776 776 cpu_to_be16((char *)newdup - (char *)d); 777 777 xfs_dir2_data_log_unused(tp, bp, newdup); 778 778 /* ··· 799 799 else { 800 800 newdup = dup; 801 801 newdup->length = cpu_to_be16(((char *)d + offset) - (char *)newdup); 802 - *XFS_DIR2_DATA_UNUSED_TAG_P(newdup) = 802 + *xfs_dir2_data_unused_tag_p(newdup) = 803 803 cpu_to_be16((char *)newdup - (char *)d); 804 804 xfs_dir2_data_log_unused(tp, bp, newdup); 805 805 newdup2 = (xfs_dir2_data_unused_t *)((char *)d + offset + len); 806 806 newdup2->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG); 807 807 newdup2->length = cpu_to_be16(oldlen - len - be16_to_cpu(newdup->length)); 808 - *XFS_DIR2_DATA_UNUSED_TAG_P(newdup2) = 808 + *xfs_dir2_data_unused_tag_p(newdup2) = 809 809 cpu_to_be16((char *)newdup2 - (char *)d); 810 810 xfs_dir2_data_log_unused(tp, bp, newdup2); 811 811 /*
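Much of xfs_dir2_data.c maintains one invariant that the asserts above keep re-checking: every live entry and every unused region ends in a big-endian tag word holding its own offset from the start of the block, so the block can also be walked backwards. The idiom, as it appears at the call sites above:

	/* carve out an unused region and stamp its tag */
	dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
	dup->length = cpu_to_be16(len);
	*xfs_dir2_data_unused_tag_p(dup) =
		cpu_to_be16((char *)dup - (char *)d);
	xfs_dir2_data_log_unused(tp, bp, dup);

	/* the same self-offset rule holds for live entries */
	ASSERT(be16_to_cpu(*xfs_dir2_data_entry_tag_p(dep)) ==
	       (char *)dep - (char *)d);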
+4 -8
fs/xfs/xfs_dir2_data.h
··· 44 44 #define XFS_DIR2_DATA_SPACE 0 45 45 #define XFS_DIR2_DATA_OFFSET (XFS_DIR2_DATA_SPACE * XFS_DIR2_SPACE_SIZE) 46 46 #define XFS_DIR2_DATA_FIRSTDB(mp) \ 47 - XFS_DIR2_BYTE_TO_DB(mp, XFS_DIR2_DATA_OFFSET) 47 + xfs_dir2_byte_to_db(mp, XFS_DIR2_DATA_OFFSET) 48 48 49 49 /* 50 50 * Offsets of . and .. in data space (always block 0) ··· 52 52 #define XFS_DIR2_DATA_DOT_OFFSET \ 53 53 ((xfs_dir2_data_aoff_t)sizeof(xfs_dir2_data_hdr_t)) 54 54 #define XFS_DIR2_DATA_DOTDOT_OFFSET \ 55 - (XFS_DIR2_DATA_DOT_OFFSET + XFS_DIR2_DATA_ENTSIZE(1)) 55 + (XFS_DIR2_DATA_DOT_OFFSET + xfs_dir2_data_entsize(1)) 56 56 #define XFS_DIR2_DATA_FIRST_OFFSET \ 57 - (XFS_DIR2_DATA_DOTDOT_OFFSET + XFS_DIR2_DATA_ENTSIZE(2)) 57 + (XFS_DIR2_DATA_DOTDOT_OFFSET + xfs_dir2_data_entsize(2)) 58 58 59 59 /* 60 60 * Structures. ··· 123 123 /* 124 124 * Size of a data entry. 125 125 */ 126 - #define XFS_DIR2_DATA_ENTSIZE(n) xfs_dir2_data_entsize(n) 127 126 static inline int xfs_dir2_data_entsize(int n) 128 127 { 129 128 return (int)roundup(offsetof(xfs_dir2_data_entry_t, name[0]) + (n) + \ ··· 132 133 /* 133 134 * Pointer to an entry's tag word. 134 135 */ 135 - #define XFS_DIR2_DATA_ENTRY_TAG_P(dep) xfs_dir2_data_entry_tag_p(dep) 136 136 static inline __be16 * 137 137 xfs_dir2_data_entry_tag_p(xfs_dir2_data_entry_t *dep) 138 138 { 139 139 return (__be16 *)((char *)dep + 140 - XFS_DIR2_DATA_ENTSIZE(dep->namelen) - sizeof(__be16)); 140 + xfs_dir2_data_entsize(dep->namelen) - sizeof(__be16)); 141 141 } 142 142 143 143 /* 144 144 * Pointer to a freespace's tag word. 145 145 */ 146 - #define XFS_DIR2_DATA_UNUSED_TAG_P(dup) \ 147 - xfs_dir2_data_unused_tag_p(dup) 148 146 static inline __be16 * 149 147 xfs_dir2_data_unused_tag_p(xfs_dir2_data_unused_t *dup) 150 148 {
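xfs_dir2_data_entsize() rounds the on-disk record (8-byte inumber, 1-byte namelen, the name bytes, and the 2-byte tag) up to XFS_DIR2_DATA_ALIGN. Assuming the 8-byte alignment and the 16-byte data header (magic plus three bestfree slots) of this era, the fixed offsets above work out as a short worked example:

	entsize(1) = roundup(8 + 1 + 1 + 2, 8) = 16	/* "." */
	entsize(2) = roundup(8 + 1 + 2 + 2, 8) = 16	/* ".." */
	XFS_DIR2_DATA_DOT_OFFSET    = sizeof(hdr)  = 16
	XFS_DIR2_DATA_DOTDOT_OFFSET = 16 + 16      = 32
	XFS_DIR2_DATA_FIRST_OFFSET  = 32 + 16      = 48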
+53 -53
fs/xfs/xfs_dir2_leaf.c
··· 92 92 if ((error = xfs_da_grow_inode(args, &blkno))) { 93 93 return error; 94 94 } 95 - ldb = XFS_DIR2_DA_TO_DB(mp, blkno); 95 + ldb = xfs_dir2_da_to_db(mp, blkno); 96 96 ASSERT(ldb == XFS_DIR2_LEAF_FIRSTDB(mp)); 97 97 /* 98 98 * Initialize the leaf block, get a buffer for it. ··· 104 104 leaf = lbp->data; 105 105 block = dbp->data; 106 106 xfs_dir2_data_check(dp, dbp); 107 - btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 108 - blp = XFS_DIR2_BLOCK_LEAF_P(btp); 107 + btp = xfs_dir2_block_tail_p(mp, block); 108 + blp = xfs_dir2_block_leaf_p(btp); 109 109 /* 110 110 * Set the counts in the leaf header. 111 111 */ ··· 137 137 /* 138 138 * Set up leaf tail and bests table. 139 139 */ 140 - ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 140 + ltp = xfs_dir2_leaf_tail_p(mp, leaf); 141 141 ltp->bestcount = cpu_to_be32(1); 142 - bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); 142 + bestsp = xfs_dir2_leaf_bests_p(ltp); 143 143 bestsp[0] = block->hdr.bestfree[0].length; 144 144 /* 145 145 * Log the data header and leaf bests table. ··· 209 209 */ 210 210 index = xfs_dir2_leaf_search_hash(args, lbp); 211 211 leaf = lbp->data; 212 - ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 213 - bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); 214 - length = XFS_DIR2_DATA_ENTSIZE(args->namelen); 212 + ltp = xfs_dir2_leaf_tail_p(mp, leaf); 213 + bestsp = xfs_dir2_leaf_bests_p(ltp); 214 + length = xfs_dir2_data_entsize(args->namelen); 215 215 /* 216 216 * See if there are any entries with the same hash value 217 217 * and space in their block for the new entry. ··· 223 223 index++, lep++) { 224 224 if (be32_to_cpu(lep->address) == XFS_DIR2_NULL_DATAPTR) 225 225 continue; 226 - i = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address)); 226 + i = xfs_dir2_dataptr_to_db(mp, be32_to_cpu(lep->address)); 227 227 ASSERT(i < be32_to_cpu(ltp->bestcount)); 228 228 ASSERT(be16_to_cpu(bestsp[i]) != NULLDATAOFF); 229 229 if (be16_to_cpu(bestsp[i]) >= length) { ··· 378 378 */ 379 379 else { 380 380 if ((error = 381 - xfs_da_read_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, use_block), 381 + xfs_da_read_buf(tp, dp, xfs_dir2_db_to_da(mp, use_block), 382 382 -1, &dbp, XFS_DATA_FORK))) { 383 383 xfs_da_brelse(tp, lbp); 384 384 return error; ··· 407 407 dep->inumber = cpu_to_be64(args->inumber); 408 408 dep->namelen = args->namelen; 409 409 memcpy(dep->name, args->name, dep->namelen); 410 - tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); 410 + tagp = xfs_dir2_data_entry_tag_p(dep); 411 411 *tagp = cpu_to_be16((char *)dep - (char *)data); 412 412 /* 413 413 * Need to scan fix up the bestfree table. ··· 529 529 * Fill in the new leaf entry. 530 530 */ 531 531 lep->hashval = cpu_to_be32(args->hashval); 532 - lep->address = cpu_to_be32(XFS_DIR2_DB_OFF_TO_DATAPTR(mp, use_block, 532 + lep->address = cpu_to_be32(xfs_dir2_db_off_to_dataptr(mp, use_block, 533 533 be16_to_cpu(*tagp))); 534 534 /* 535 535 * Log the leaf fields and give up the buffers. ··· 567 567 * Should factor in the size of the bests table as well. 568 568 * We can deduce a value for that from di_size. 569 569 */ 570 - ASSERT(be16_to_cpu(leaf->hdr.count) <= XFS_DIR2_MAX_LEAF_ENTS(mp)); 571 - ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 570 + ASSERT(be16_to_cpu(leaf->hdr.count) <= xfs_dir2_max_leaf_ents(mp)); 571 + ltp = xfs_dir2_leaf_tail_p(mp, leaf); 572 572 /* 573 573 * Leaves and bests don't overlap. 574 574 */ 575 575 ASSERT((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] <= 576 - (char *)XFS_DIR2_LEAF_BESTS_P(ltp)); 576 + (char *)xfs_dir2_leaf_bests_p(ltp)); 577 577 /* 578 578 * Check hash value order, count stale entries. 
579 579 */ ··· 815 815 * Inside the loop we keep the main offset value as a byte offset 816 816 * in the directory file. 817 817 */ 818 - curoff = XFS_DIR2_DATAPTR_TO_BYTE(mp, uio->uio_offset); 818 + curoff = xfs_dir2_dataptr_to_byte(mp, uio->uio_offset); 819 819 /* 820 820 * Force this conversion through db so we truncate the offset 821 821 * down to get the start of the data block. 822 822 */ 823 - map_off = XFS_DIR2_DB_TO_DA(mp, XFS_DIR2_BYTE_TO_DB(mp, curoff)); 823 + map_off = xfs_dir2_db_to_da(mp, xfs_dir2_byte_to_db(mp, curoff)); 824 824 /* 825 825 * Loop over directory entries until we reach the end offset. 826 826 * Get more blocks and readahead as necessary. ··· 870 870 */ 871 871 if (1 + ra_want > map_blocks && 872 872 map_off < 873 - XFS_DIR2_BYTE_TO_DA(mp, XFS_DIR2_LEAF_OFFSET)) { 873 + xfs_dir2_byte_to_da(mp, XFS_DIR2_LEAF_OFFSET)) { 874 874 /* 875 875 * Get more bmaps, fill in after the ones 876 876 * we already have in the table. ··· 878 878 nmap = map_size - map_valid; 879 879 error = xfs_bmapi(tp, dp, 880 880 map_off, 881 - XFS_DIR2_BYTE_TO_DA(mp, 881 + xfs_dir2_byte_to_da(mp, 882 882 XFS_DIR2_LEAF_OFFSET) - map_off, 883 883 XFS_BMAPI_METADATA, NULL, 0, 884 884 &map[map_valid], &nmap, NULL, NULL); ··· 903 903 map[map_valid + nmap - 1].br_blockcount; 904 904 else 905 905 map_off = 906 - XFS_DIR2_BYTE_TO_DA(mp, 906 + xfs_dir2_byte_to_da(mp, 907 907 XFS_DIR2_LEAF_OFFSET); 908 908 /* 909 909 * Look for holes in the mapping, and ··· 931 931 * No valid mappings, so no more data blocks. 932 932 */ 933 933 if (!map_valid) { 934 - curoff = XFS_DIR2_DA_TO_BYTE(mp, map_off); 934 + curoff = xfs_dir2_da_to_byte(mp, map_off); 935 935 break; 936 936 } 937 937 /* 938 938 * Read the directory block starting at the first 939 939 * mapping. 940 940 */ 941 - curdb = XFS_DIR2_DA_TO_DB(mp, map->br_startoff); 941 + curdb = xfs_dir2_da_to_db(mp, map->br_startoff); 942 942 error = xfs_da_read_buf(tp, dp, map->br_startoff, 943 943 map->br_blockcount >= mp->m_dirblkfsbs ? 944 944 XFS_FSB_TO_DADDR(mp, map->br_startblock) : ··· 1014 1014 /* 1015 1015 * Having done a read, we need to set a new offset. 1016 1016 */ 1017 - newoff = XFS_DIR2_DB_OFF_TO_BYTE(mp, curdb, 0); 1017 + newoff = xfs_dir2_db_off_to_byte(mp, curdb, 0); 1018 1018 /* 1019 1019 * Start of the current block. 1020 1020 */ ··· 1024 1024 * Make sure we're in the right block. 1025 1025 */ 1026 1026 else if (curoff > newoff) 1027 - ASSERT(XFS_DIR2_BYTE_TO_DB(mp, curoff) == 1027 + ASSERT(xfs_dir2_byte_to_db(mp, curoff) == 1028 1028 curdb); 1029 1029 data = bp->data; 1030 1030 xfs_dir2_data_check(dp, bp); ··· 1032 1032 * Find our position in the block. 1033 1033 */ 1034 1034 ptr = (char *)&data->u; 1035 - byteoff = XFS_DIR2_BYTE_TO_OFF(mp, curoff); 1035 + byteoff = xfs_dir2_byte_to_off(mp, curoff); 1036 1036 /* 1037 1037 * Skip past the header. 1038 1038 */ ··· 1054 1054 } 1055 1055 dep = (xfs_dir2_data_entry_t *)ptr; 1056 1056 length = 1057 - XFS_DIR2_DATA_ENTSIZE(dep->namelen); 1057 + xfs_dir2_data_entsize(dep->namelen); 1058 1058 ptr += length; 1059 1059 } 1060 1060 /* 1061 1061 * Now set our real offset. 
1062 1062 */ 1063 1063 curoff = 1064 - XFS_DIR2_DB_OFF_TO_BYTE(mp, 1065 - XFS_DIR2_BYTE_TO_DB(mp, curoff), 1064 + xfs_dir2_db_off_to_byte(mp, 1065 + xfs_dir2_byte_to_db(mp, curoff), 1066 1066 (char *)ptr - (char *)data); 1067 1067 if (ptr >= (char *)data + mp->m_dirblksize) { 1068 1068 continue; ··· 1091 1091 1092 1092 p->namelen = dep->namelen; 1093 1093 1094 - length = XFS_DIR2_DATA_ENTSIZE(p->namelen); 1094 + length = xfs_dir2_data_entsize(p->namelen); 1095 1095 1096 - p->cook = XFS_DIR2_BYTE_TO_DATAPTR(mp, curoff + length); 1096 + p->cook = xfs_dir2_byte_to_dataptr(mp, curoff + length); 1097 1097 1098 1098 p->ino = be64_to_cpu(dep->inumber); 1099 1099 #if XFS_BIG_INUMS ··· 1121 1121 * All done. Set output offset value to current offset. 1122 1122 */ 1123 1123 *eofp = eof; 1124 - if (curoff > XFS_DIR2_DATAPTR_TO_BYTE(mp, XFS_DIR2_MAX_DATAPTR)) 1124 + if (curoff > xfs_dir2_dataptr_to_byte(mp, XFS_DIR2_MAX_DATAPTR)) 1125 1125 uio->uio_offset = XFS_DIR2_MAX_DATAPTR; 1126 1126 else 1127 - uio->uio_offset = XFS_DIR2_BYTE_TO_DATAPTR(mp, curoff); 1127 + uio->uio_offset = xfs_dir2_byte_to_dataptr(mp, curoff); 1128 1128 kmem_free(map, map_size * sizeof(*map)); 1129 1129 kmem_free(p, sizeof(*p)); 1130 1130 if (bp) ··· 1159 1159 /* 1160 1160 * Get the buffer for the block. 1161 1161 */ 1162 - error = xfs_da_get_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, bno), -1, &bp, 1162 + error = xfs_da_get_buf(tp, dp, xfs_dir2_db_to_da(mp, bno), -1, &bp, 1163 1163 XFS_DATA_FORK); 1164 1164 if (error) { 1165 1165 return error; ··· 1181 1181 * the block. 1182 1182 */ 1183 1183 if (magic == XFS_DIR2_LEAF1_MAGIC) { 1184 - ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 1184 + ltp = xfs_dir2_leaf_tail_p(mp, leaf); 1185 1185 ltp->bestcount = 0; 1186 1186 xfs_dir2_leaf_log_tail(tp, bp); 1187 1187 } ··· 1206 1206 1207 1207 leaf = bp->data; 1208 1208 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); 1209 - ltp = XFS_DIR2_LEAF_TAIL_P(tp->t_mountp, leaf); 1210 - firstb = XFS_DIR2_LEAF_BESTS_P(ltp) + first; 1211 - lastb = XFS_DIR2_LEAF_BESTS_P(ltp) + last; 1209 + ltp = xfs_dir2_leaf_tail_p(tp->t_mountp, leaf); 1210 + firstb = xfs_dir2_leaf_bests_p(ltp) + first; 1211 + lastb = xfs_dir2_leaf_bests_p(ltp) + last; 1212 1212 xfs_da_log_buf(tp, bp, (uint)((char *)firstb - (char *)leaf), 1213 1213 (uint)((char *)lastb - (char *)leaf + sizeof(*lastb) - 1)); 1214 1214 } ··· 1268 1268 mp = tp->t_mountp; 1269 1269 leaf = bp->data; 1270 1270 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC); 1271 - ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 1271 + ltp = xfs_dir2_leaf_tail_p(mp, leaf); 1272 1272 xfs_da_log_buf(tp, bp, (uint)((char *)ltp - (char *)leaf), 1273 1273 (uint)(mp->m_dirblksize - 1)); 1274 1274 } ··· 1312 1312 */ 1313 1313 dep = (xfs_dir2_data_entry_t *) 1314 1314 ((char *)dbp->data + 1315 - XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, be32_to_cpu(lep->address))); 1315 + xfs_dir2_dataptr_to_off(dp->i_mount, be32_to_cpu(lep->address))); 1316 1316 /* 1317 1317 * Return the found inode number. 1318 1318 */ ··· 1381 1381 /* 1382 1382 * Get the new data block number. 1383 1383 */ 1384 - newdb = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address)); 1384 + newdb = xfs_dir2_dataptr_to_db(mp, be32_to_cpu(lep->address)); 1385 1385 /* 1386 1386 * If it's not the same as the old data block number, 1387 1387 * need to pitch the old one and read the new one. 
··· 1391 1391 xfs_da_brelse(tp, dbp); 1392 1392 if ((error = 1393 1393 xfs_da_read_buf(tp, dp, 1394 - XFS_DIR2_DB_TO_DA(mp, newdb), -1, &dbp, 1394 + xfs_dir2_db_to_da(mp, newdb), -1, &dbp, 1395 1395 XFS_DATA_FORK))) { 1396 1396 xfs_da_brelse(tp, lbp); 1397 1397 return error; ··· 1404 1404 */ 1405 1405 dep = (xfs_dir2_data_entry_t *) 1406 1406 ((char *)dbp->data + 1407 - XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(lep->address))); 1407 + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address))); 1408 1408 /* 1409 1409 * If it matches then return it. 1410 1410 */ ··· 1469 1469 * Point to the leaf entry, use that to point to the data entry. 1470 1470 */ 1471 1471 lep = &leaf->ents[index]; 1472 - db = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address)); 1472 + db = xfs_dir2_dataptr_to_db(mp, be32_to_cpu(lep->address)); 1473 1473 dep = (xfs_dir2_data_entry_t *) 1474 - ((char *)data + XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(lep->address))); 1474 + ((char *)data + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address))); 1475 1475 needscan = needlog = 0; 1476 1476 oldbest = be16_to_cpu(data->hdr.bestfree[0].length); 1477 - ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 1478 - bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); 1477 + ltp = xfs_dir2_leaf_tail_p(mp, leaf); 1478 + bestsp = xfs_dir2_leaf_bests_p(ltp); 1479 1479 ASSERT(be16_to_cpu(bestsp[db]) == oldbest); 1480 1480 /* 1481 1481 * Mark the former data entry unused. 1482 1482 */ 1483 1483 xfs_dir2_data_make_free(tp, dbp, 1484 1484 (xfs_dir2_data_aoff_t)((char *)dep - (char *)data), 1485 - XFS_DIR2_DATA_ENTSIZE(dep->namelen), &needlog, &needscan); 1485 + xfs_dir2_data_entsize(dep->namelen), &needlog, &needscan); 1486 1486 /* 1487 1487 * We just mark the leaf entry stale by putting a null in it. 1488 1488 */ ··· 1602 1602 */ 1603 1603 dep = (xfs_dir2_data_entry_t *) 1604 1604 ((char *)dbp->data + 1605 - XFS_DIR2_DATAPTR_TO_OFF(dp->i_mount, be32_to_cpu(lep->address))); 1605 + xfs_dir2_dataptr_to_off(dp->i_mount, be32_to_cpu(lep->address))); 1606 1606 ASSERT(args->inumber != be64_to_cpu(dep->inumber)); 1607 1607 /* 1608 1608 * Put the new inode number in, log it. ··· 1698 1698 /* 1699 1699 * Read the offending data block. We need its buffer. 1700 1700 */ 1701 - if ((error = xfs_da_read_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, db), -1, &dbp, 1701 + if ((error = xfs_da_read_buf(tp, dp, xfs_dir2_db_to_da(mp, db), -1, &dbp, 1702 1702 XFS_DATA_FORK))) { 1703 1703 return error; 1704 1704 } ··· 1712 1712 */ 1713 1713 1714 1714 leaf = lbp->data; 1715 - ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 1715 + ltp = xfs_dir2_leaf_tail_p(mp, leaf); 1716 1716 ASSERT(be16_to_cpu(data->hdr.bestfree[0].length) == 1717 1717 mp->m_dirblksize - (uint)sizeof(data->hdr)); 1718 1718 ASSERT(db == be32_to_cpu(ltp->bestcount) - 1); ··· 1727 1727 /* 1728 1728 * Eliminate the last bests entry from the table. 1729 1729 */ 1730 - bestsp = XFS_DIR2_LEAF_BESTS_P(ltp); 1730 + bestsp = xfs_dir2_leaf_bests_p(ltp); 1731 1731 be32_add(&ltp->bestcount, -1); 1732 1732 memmove(&bestsp[1], &bestsp[0], be32_to_cpu(ltp->bestcount) * sizeof(*bestsp)); 1733 1733 xfs_dir2_leaf_log_tail(tp, lbp); ··· 1838 1838 /* 1839 1839 * Set up the leaf tail from the freespace block. 1840 1840 */ 1841 - ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 1841 + ltp = xfs_dir2_leaf_tail_p(mp, leaf); 1842 1842 ltp->bestcount = free->hdr.nvalid; 1843 1843 /* 1844 1844 * Set up the leaf bests table. 
1845 1845 */ 1846 - memcpy(XFS_DIR2_LEAF_BESTS_P(ltp), free->bests, 1846 + memcpy(xfs_dir2_leaf_bests_p(ltp), free->bests, 1847 1847 be32_to_cpu(ltp->bestcount) * sizeof(leaf->bests[0])); 1848 1848 xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1); 1849 1849 xfs_dir2_leaf_log_tail(tp, lbp);
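The getdents path above leans on the byte/dataptr conversions being exact inverses: the uio offset is a 32-bit dataptr cookie, expanded to a byte offset for the scan and compressed back when the walk stops. Sketched from the code above:

	/* resume: cookie -> byte offset, truncated to the block start */
	curoff = xfs_dir2_dataptr_to_byte(mp, uio->uio_offset);
	map_off = xfs_dir2_db_to_da(mp, xfs_dir2_byte_to_db(mp, curoff));
	/* ... walk entries, advancing curoff past each one ... */
	/* suspend: byte offset -> cookie for the next call */
	uio->uio_offset = xfs_dir2_byte_to_dataptr(mp, curoff);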
+6 -23
fs/xfs/xfs_dir2_leaf.h
··· 32 32 #define XFS_DIR2_LEAF_SPACE 1 33 33 #define XFS_DIR2_LEAF_OFFSET (XFS_DIR2_LEAF_SPACE * XFS_DIR2_SPACE_SIZE) 34 34 #define XFS_DIR2_LEAF_FIRSTDB(mp) \ 35 - XFS_DIR2_BYTE_TO_DB(mp, XFS_DIR2_LEAF_OFFSET) 35 + xfs_dir2_byte_to_db(mp, XFS_DIR2_LEAF_OFFSET) 36 36 37 37 /* 38 38 * Offset in data space of a data entry. ··· 82 82 * DB blocks here are logical directory block numbers, not filesystem blocks. 83 83 */ 84 84 85 - #define XFS_DIR2_MAX_LEAF_ENTS(mp) xfs_dir2_max_leaf_ents(mp) 86 85 static inline int xfs_dir2_max_leaf_ents(struct xfs_mount *mp) 87 86 { 88 87 return (int)(((mp)->m_dirblksize - (uint)sizeof(xfs_dir2_leaf_hdr_t)) / ··· 91 92 /* 92 93 * Get address of the bestcount field in the single-leaf block. 93 94 */ 94 - #define XFS_DIR2_LEAF_TAIL_P(mp,lp) xfs_dir2_leaf_tail_p(mp, lp) 95 95 static inline xfs_dir2_leaf_tail_t * 96 96 xfs_dir2_leaf_tail_p(struct xfs_mount *mp, xfs_dir2_leaf_t *lp) 97 97 { ··· 102 104 /* 103 105 * Get address of the bests array in the single-leaf block. 104 106 */ 105 - #define XFS_DIR2_LEAF_BESTS_P(ltp) xfs_dir2_leaf_bests_p(ltp) 106 107 static inline __be16 * 107 108 xfs_dir2_leaf_bests_p(xfs_dir2_leaf_tail_t *ltp) 108 109 { ··· 111 114 /* 112 115 * Convert dataptr to byte in file space 113 116 */ 114 - #define XFS_DIR2_DATAPTR_TO_BYTE(mp,dp) xfs_dir2_dataptr_to_byte(mp, dp) 115 117 static inline xfs_dir2_off_t 116 118 xfs_dir2_dataptr_to_byte(struct xfs_mount *mp, xfs_dir2_dataptr_t dp) 117 119 { ··· 120 124 /* 121 125 * Convert byte in file space to dataptr. It had better be aligned. 122 126 */ 123 - #define XFS_DIR2_BYTE_TO_DATAPTR(mp,by) xfs_dir2_byte_to_dataptr(mp,by) 124 127 static inline xfs_dir2_dataptr_t 125 128 xfs_dir2_byte_to_dataptr(struct xfs_mount *mp, xfs_dir2_off_t by) 126 129 { ··· 129 134 /* 130 135 * Convert byte in space to (DB) block 131 136 */ 132 - #define XFS_DIR2_BYTE_TO_DB(mp,by) xfs_dir2_byte_to_db(mp, by) 133 137 static inline xfs_dir2_db_t 134 138 xfs_dir2_byte_to_db(struct xfs_mount *mp, xfs_dir2_off_t by) 135 139 { ··· 139 145 /* 140 146 * Convert dataptr to a block number 141 147 */ 142 - #define XFS_DIR2_DATAPTR_TO_DB(mp,dp) xfs_dir2_dataptr_to_db(mp, dp) 143 148 static inline xfs_dir2_db_t 144 149 xfs_dir2_dataptr_to_db(struct xfs_mount *mp, xfs_dir2_dataptr_t dp) 145 150 { 146 - return XFS_DIR2_BYTE_TO_DB(mp, XFS_DIR2_DATAPTR_TO_BYTE(mp, dp)); 151 + return xfs_dir2_byte_to_db(mp, xfs_dir2_dataptr_to_byte(mp, dp)); 147 152 } 148 153 149 154 /* 150 155 * Convert byte in space to offset in a block 151 156 */ 152 - #define XFS_DIR2_BYTE_TO_OFF(mp,by) xfs_dir2_byte_to_off(mp, by) 153 157 static inline xfs_dir2_data_aoff_t 154 158 xfs_dir2_byte_to_off(struct xfs_mount *mp, xfs_dir2_off_t by) 155 159 { ··· 158 166 /* 159 167 * Convert dataptr to a byte offset in a block 160 168 */ 161 - #define XFS_DIR2_DATAPTR_TO_OFF(mp,dp) xfs_dir2_dataptr_to_off(mp, dp) 162 169 static inline xfs_dir2_data_aoff_t 163 170 xfs_dir2_dataptr_to_off(struct xfs_mount *mp, xfs_dir2_dataptr_t dp) 164 171 { 165 - return XFS_DIR2_BYTE_TO_OFF(mp, XFS_DIR2_DATAPTR_TO_BYTE(mp, dp)); 172 + return xfs_dir2_byte_to_off(mp, xfs_dir2_dataptr_to_byte(mp, dp)); 166 173 } 167 174 168 175 /* 169 176 * Convert block and offset to byte in space 170 177 */ 171 - #define XFS_DIR2_DB_OFF_TO_BYTE(mp,db,o) \ 172 - xfs_dir2_db_off_to_byte(mp, db, o) 173 178 static inline xfs_dir2_off_t 174 179 xfs_dir2_db_off_to_byte(struct xfs_mount *mp, xfs_dir2_db_t db, 175 180 xfs_dir2_data_aoff_t o) ··· 178 189 /* 179 190 * Convert block (DB) to block (dablk) 180 191 */ 
181 - #define XFS_DIR2_DB_TO_DA(mp,db) xfs_dir2_db_to_da(mp, db) 182 192 static inline xfs_dablk_t 183 193 xfs_dir2_db_to_da(struct xfs_mount *mp, xfs_dir2_db_t db) 184 194 { ··· 187 199 /* 188 200 * Convert byte in space to (DA) block 189 201 */ 190 - #define XFS_DIR2_BYTE_TO_DA(mp,by) xfs_dir2_byte_to_da(mp, by) 191 202 static inline xfs_dablk_t 192 203 xfs_dir2_byte_to_da(struct xfs_mount *mp, xfs_dir2_off_t by) 193 204 { 194 - return XFS_DIR2_DB_TO_DA(mp, XFS_DIR2_BYTE_TO_DB(mp, by)); 205 + return xfs_dir2_db_to_da(mp, xfs_dir2_byte_to_db(mp, by)); 195 206 } 196 207 197 208 /* 198 209 * Convert block and offset to dataptr 199 210 */ 200 - #define XFS_DIR2_DB_OFF_TO_DATAPTR(mp,db,o) \ 201 - xfs_dir2_db_off_to_dataptr(mp, db, o) 202 211 static inline xfs_dir2_dataptr_t 203 212 xfs_dir2_db_off_to_dataptr(struct xfs_mount *mp, xfs_dir2_db_t db, 204 213 xfs_dir2_data_aoff_t o) 205 214 { 206 - return XFS_DIR2_BYTE_TO_DATAPTR(mp, XFS_DIR2_DB_OFF_TO_BYTE(mp, db, o)); 215 + return xfs_dir2_byte_to_dataptr(mp, xfs_dir2_db_off_to_byte(mp, db, o)); 207 216 } 208 217 209 218 /* 210 219 * Convert block (dablk) to block (DB) 211 220 */ 212 - #define XFS_DIR2_DA_TO_DB(mp,da) xfs_dir2_da_to_db(mp, da) 213 221 static inline xfs_dir2_db_t 214 222 xfs_dir2_da_to_db(struct xfs_mount *mp, xfs_dablk_t da) 215 223 { ··· 215 231 /* 216 232 * Convert block (dablk) to byte offset in space 217 233 */ 218 - #define XFS_DIR2_DA_TO_BYTE(mp,da) xfs_dir2_da_to_byte(mp, da) 219 234 static inline xfs_dir2_off_t 220 235 xfs_dir2_da_to_byte(struct xfs_mount *mp, xfs_dablk_t da) 221 236 { 222 - return XFS_DIR2_DB_OFF_TO_BYTE(mp, XFS_DIR2_DA_TO_DB(mp, da), 0); 237 + return xfs_dir2_db_off_to_byte(mp, xfs_dir2_da_to_db(mp, da), 0); 223 238 } 224 239 225 240 /*
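All of these helpers are pure arithmetic on mount-point shift counts, and the compound ones are written as compositions of the primitives, as the header now shows (dataptr_to_db is byte_to_db of dataptr_to_byte, and so on), so as static inlines they collapse to a couple of shifts and masks. A hedged sketch of what the byte-to-db primitive presumably expands to — the function name here is hypothetical and the field names are assumed from this era's struct xfs_mount:

	/* hypothetical expansion: directory blocks are 1 << (fs block
	 * log + directory block log) bytes */
	static inline xfs_dir2_db_t
	byte_to_db_sketch(struct xfs_mount *mp, xfs_dir2_off_t by)
	{
		return (xfs_dir2_db_t)
			(by >> (mp->m_sb.sb_blocklog + mp->m_dirblklog));
	}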
+33 -33
fs/xfs/xfs_dir2_node.c
··· 136 136 /* 137 137 * Get the buffer for the new freespace block. 138 138 */ 139 - if ((error = xfs_da_get_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, fdb), -1, &fbp, 139 + if ((error = xfs_da_get_buf(tp, dp, xfs_dir2_db_to_da(mp, fdb), -1, &fbp, 140 140 XFS_DATA_FORK))) { 141 141 return error; 142 142 } 143 143 ASSERT(fbp != NULL); 144 144 free = fbp->data; 145 145 leaf = lbp->data; 146 - ltp = XFS_DIR2_LEAF_TAIL_P(mp, leaf); 146 + ltp = xfs_dir2_leaf_tail_p(mp, leaf); 147 147 /* 148 148 * Initialize the freespace block header. 149 149 */ ··· 155 155 * Copy freespace entries from the leaf block to the new block. 156 156 * Count active entries. 157 157 */ 158 - for (i = n = 0, from = XFS_DIR2_LEAF_BESTS_P(ltp), to = free->bests; 158 + for (i = n = 0, from = xfs_dir2_leaf_bests_p(ltp), to = free->bests; 159 159 i < be32_to_cpu(ltp->bestcount); i++, from++, to++) { 160 160 if ((off = be16_to_cpu(*from)) != NULLDATAOFF) 161 161 n++; ··· 215 215 * a compact. 216 216 */ 217 217 218 - if (be16_to_cpu(leaf->hdr.count) == XFS_DIR2_MAX_LEAF_ENTS(mp)) { 218 + if (be16_to_cpu(leaf->hdr.count) == xfs_dir2_max_leaf_ents(mp)) { 219 219 if (!leaf->hdr.stale) 220 220 return XFS_ERROR(ENOSPC); 221 221 compact = be16_to_cpu(leaf->hdr.stale) > 1; ··· 327 327 * Insert the new entry, log everything. 328 328 */ 329 329 lep->hashval = cpu_to_be32(args->hashval); 330 - lep->address = cpu_to_be32(XFS_DIR2_DB_OFF_TO_DATAPTR(mp, 330 + lep->address = cpu_to_be32(xfs_dir2_db_off_to_dataptr(mp, 331 331 args->blkno, args->index)); 332 332 xfs_dir2_leaf_log_header(tp, bp); 333 333 xfs_dir2_leaf_log_ents(tp, bp, lfloglow, lfloghigh); ··· 352 352 leaf = bp->data; 353 353 mp = dp->i_mount; 354 354 ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC); 355 - ASSERT(be16_to_cpu(leaf->hdr.count) <= XFS_DIR2_MAX_LEAF_ENTS(mp)); 355 + ASSERT(be16_to_cpu(leaf->hdr.count) <= xfs_dir2_max_leaf_ents(mp)); 356 356 for (i = stale = 0; i < be16_to_cpu(leaf->hdr.count); i++) { 357 357 if (i + 1 < be16_to_cpu(leaf->hdr.count)) { 358 358 ASSERT(be32_to_cpu(leaf->ents[i].hashval) <= ··· 440 440 if (args->addname) { 441 441 curfdb = curbp ? state->extrablk.blkno : -1; 442 442 curdb = -1; 443 - length = XFS_DIR2_DATA_ENTSIZE(args->namelen); 443 + length = xfs_dir2_data_entsize(args->namelen); 444 444 if ((free = (curbp ? curbp->data : NULL))) 445 445 ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC); 446 446 } ··· 465 465 /* 466 466 * Pull the data block number from the entry. 467 467 */ 468 - newdb = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address)); 468 + newdb = xfs_dir2_dataptr_to_db(mp, be32_to_cpu(lep->address)); 469 469 /* 470 470 * For addname, we're looking for a place to put the new entry. 471 471 * We want to use a data block with an entry of equal ··· 482 482 * Convert the data block to the free block 483 483 * holding its freespace information. 484 484 */ 485 - newfdb = XFS_DIR2_DB_TO_FDB(mp, newdb); 485 + newfdb = xfs_dir2_db_to_fdb(mp, newdb); 486 486 /* 487 487 * If it's not the one we have in hand, 488 488 * read it in. ··· 497 497 * Read the free block. 498 498 */ 499 499 if ((error = xfs_da_read_buf(tp, dp, 500 - XFS_DIR2_DB_TO_DA(mp, 500 + xfs_dir2_db_to_da(mp, 501 501 newfdb), 502 502 -1, &curbp, 503 503 XFS_DATA_FORK))) { ··· 517 517 /* 518 518 * Get the index for our entry. 519 519 */ 520 - fi = XFS_DIR2_DB_TO_FDINDEX(mp, curdb); 520 + fi = xfs_dir2_db_to_fdindex(mp, curdb); 521 521 /* 522 522 * If it has room, return it. 
523 523 */ ··· 561 561 */ 562 562 if ((error = 563 563 xfs_da_read_buf(tp, dp, 564 - XFS_DIR2_DB_TO_DA(mp, newdb), -1, 564 + xfs_dir2_db_to_da(mp, newdb), -1, 565 565 &curbp, XFS_DATA_FORK))) { 566 566 return error; 567 567 } ··· 573 573 */ 574 574 dep = (xfs_dir2_data_entry_t *) 575 575 ((char *)curbp->data + 576 - XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(lep->address))); 576 + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address))); 577 577 /* 578 578 * Compare the entry, return it if it matches. 579 579 */ ··· 876 876 /* 877 877 * Extract the data block and offset from the entry. 878 878 */ 879 - db = XFS_DIR2_DATAPTR_TO_DB(mp, be32_to_cpu(lep->address)); 879 + db = xfs_dir2_dataptr_to_db(mp, be32_to_cpu(lep->address)); 880 880 ASSERT(dblk->blkno == db); 881 - off = XFS_DIR2_DATAPTR_TO_OFF(mp, be32_to_cpu(lep->address)); 881 + off = xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address)); 882 882 ASSERT(dblk->index == off); 883 883 /* 884 884 * Kill the leaf entry by marking it stale. ··· 898 898 longest = be16_to_cpu(data->hdr.bestfree[0].length); 899 899 needlog = needscan = 0; 900 900 xfs_dir2_data_make_free(tp, dbp, off, 901 - XFS_DIR2_DATA_ENTSIZE(dep->namelen), &needlog, &needscan); 901 + xfs_dir2_data_entsize(dep->namelen), &needlog, &needscan); 902 902 /* 903 903 * Rescan the data block freespaces for bestfree. 904 904 * Log the data block header if needed. ··· 924 924 * Convert the data block number to a free block, 925 925 * read in the free block. 926 926 */ 927 - fdb = XFS_DIR2_DB_TO_FDB(mp, db); 928 - if ((error = xfs_da_read_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, fdb), 927 + fdb = xfs_dir2_db_to_fdb(mp, db); 928 + if ((error = xfs_da_read_buf(tp, dp, xfs_dir2_db_to_da(mp, fdb), 929 929 -1, &fbp, XFS_DATA_FORK))) { 930 930 return error; 931 931 } ··· 937 937 /* 938 938 * Calculate which entry we need to fix. 939 939 */ 940 - findex = XFS_DIR2_DB_TO_FDINDEX(mp, db); 940 + findex = xfs_dir2_db_to_fdindex(mp, db); 941 941 longest = be16_to_cpu(data->hdr.bestfree[0].length); 942 942 /* 943 943 * If the data block is now empty we can get rid of it ··· 1073 1073 /* 1074 1074 * Initialize the new leaf block. 1075 1075 */ 1076 - error = xfs_dir2_leaf_init(args, XFS_DIR2_DA_TO_DB(mp, blkno), 1076 + error = xfs_dir2_leaf_init(args, xfs_dir2_da_to_db(mp, blkno), 1077 1077 &newblk->bp, XFS_DIR2_LEAFN_MAGIC); 1078 1078 if (error) { 1079 1079 return error; ··· 1385 1385 dp = args->dp; 1386 1386 mp = dp->i_mount; 1387 1387 tp = args->trans; 1388 - length = XFS_DIR2_DATA_ENTSIZE(args->namelen); 1388 + length = xfs_dir2_data_entsize(args->namelen); 1389 1389 /* 1390 1390 * If we came in with a freespace block that means that lookup 1391 1391 * found an entry with our hash value. This is the freespace ··· 1438 1438 1439 1439 if ((error = xfs_bmap_last_offset(tp, dp, &fo, XFS_DATA_FORK))) 1440 1440 return error; 1441 - lastfbno = XFS_DIR2_DA_TO_DB(mp, (xfs_dablk_t)fo); 1441 + lastfbno = xfs_dir2_da_to_db(mp, (xfs_dablk_t)fo); 1442 1442 fbno = ifbno; 1443 1443 } 1444 1444 /* ··· 1474 1474 * to avoid it. 1475 1475 */ 1476 1476 if ((error = xfs_da_read_buf(tp, dp, 1477 - XFS_DIR2_DB_TO_DA(mp, fbno), -2, &fbp, 1477 + xfs_dir2_db_to_da(mp, fbno), -2, &fbp, 1478 1478 XFS_DATA_FORK))) { 1479 1479 return error; 1480 1480 } ··· 1550 1550 * Get the freespace block corresponding to the data block 1551 1551 * that was just allocated. 
1552 1552 */ 1553 - fbno = XFS_DIR2_DB_TO_FDB(mp, dbno); 1553 + fbno = xfs_dir2_db_to_fdb(mp, dbno); 1554 1554 if (unlikely(error = xfs_da_read_buf(tp, dp, 1555 - XFS_DIR2_DB_TO_DA(mp, fbno), -2, &fbp, 1555 + xfs_dir2_db_to_da(mp, fbno), -2, &fbp, 1556 1556 XFS_DATA_FORK))) { 1557 1557 xfs_da_buf_done(dbp); 1558 1558 return error; ··· 1567 1567 return error; 1568 1568 } 1569 1569 1570 - if (unlikely(XFS_DIR2_DB_TO_FDB(mp, dbno) != fbno)) { 1570 + if (unlikely(xfs_dir2_db_to_fdb(mp, dbno) != fbno)) { 1571 1571 cmn_err(CE_ALERT, 1572 1572 "xfs_dir2_node_addname_int: dir ino " 1573 1573 "%llu needed freesp block %lld for\n" 1574 1574 " data block %lld, got %lld\n" 1575 1575 " ifbno %llu lastfbno %d\n", 1576 1576 (unsigned long long)dp->i_ino, 1577 - (long long)XFS_DIR2_DB_TO_FDB(mp, dbno), 1577 + (long long)xfs_dir2_db_to_fdb(mp, dbno), 1578 1578 (long long)dbno, (long long)fbno, 1579 1579 (unsigned long long)ifbno, lastfbno); 1580 1580 if (fblk) { ··· 1598 1598 * Get a buffer for the new block. 1599 1599 */ 1600 1600 if ((error = xfs_da_get_buf(tp, dp, 1601 - XFS_DIR2_DB_TO_DA(mp, fbno), 1601 + xfs_dir2_db_to_da(mp, fbno), 1602 1602 -1, &fbp, XFS_DATA_FORK))) { 1603 1603 return error; 1604 1604 } ··· 1623 1623 /* 1624 1624 * Set the freespace block index from the data block number. 1625 1625 */ 1626 - findex = XFS_DIR2_DB_TO_FDINDEX(mp, dbno); 1626 + findex = xfs_dir2_db_to_fdindex(mp, dbno); 1627 1627 /* 1628 1628 * If it's after the end of the current entries in the 1629 1629 * freespace block, extend that table. ··· 1669 1669 * Read the data block in. 1670 1670 */ 1671 1671 if (unlikely( 1672 - error = xfs_da_read_buf(tp, dp, XFS_DIR2_DB_TO_DA(mp, dbno), 1672 + error = xfs_da_read_buf(tp, dp, xfs_dir2_db_to_da(mp, dbno), 1673 1673 -1, &dbp, XFS_DATA_FORK))) { 1674 1674 if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL) 1675 1675 xfs_da_buf_done(fbp); ··· 1698 1698 dep->inumber = cpu_to_be64(args->inumber); 1699 1699 dep->namelen = args->namelen; 1700 1700 memcpy(dep->name, args->name, dep->namelen); 1701 - tagp = XFS_DIR2_DATA_ENTRY_TAG_P(dep); 1701 + tagp = xfs_dir2_data_entry_tag_p(dep); 1702 1702 *tagp = cpu_to_be16((char *)dep - (char *)data); 1703 1703 xfs_dir2_data_log_entry(tp, dbp, dep); 1704 1704 /* ··· 1904 1904 ASSERT(be32_to_cpu(data->hdr.magic) == XFS_DIR2_DATA_MAGIC); 1905 1905 dep = (xfs_dir2_data_entry_t *) 1906 1906 ((char *)data + 1907 - XFS_DIR2_DATAPTR_TO_OFF(state->mp, be32_to_cpu(lep->address))); 1907 + xfs_dir2_dataptr_to_off(state->mp, be32_to_cpu(lep->address))); 1908 1908 ASSERT(inum != be64_to_cpu(dep->inumber)); 1909 1909 /* 1910 1910 * Fill in the new inode number and log the entry. ··· 1980 1980 * Blow the block away. 1981 1981 */ 1982 1982 if ((error = 1983 - xfs_dir2_shrink_inode(args, XFS_DIR2_DA_TO_DB(mp, (xfs_dablk_t)fo), 1983 + xfs_dir2_shrink_inode(args, xfs_dir2_da_to_db(mp, (xfs_dablk_t)fo), 1984 1984 bp))) { 1985 1985 /* 1986 1986 * Can't fail with ENOSPC since that only happens with no
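Node-format insertion and removal both funnel through the same two-step mapping: a data block number is converted to the free block that indexes it and to the slot inside that free block, then that bests slot is refreshed from the data block's bestfree[0]. The idiom, lifted from the call sites above (the final assignment follows the kernel's usage in this file):

	fdb = xfs_dir2_db_to_fdb(mp, db);		/* which free block */
	findex = xfs_dir2_db_to_fdindex(mp, db);	/* slot within it */
	longest = be16_to_cpu(data->hdr.bestfree[0].length);
	free->bests[findex] = cpu_to_be16(longest);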
+1 -3
fs/xfs/xfs_dir2_node.h
··· 36 36 #define XFS_DIR2_FREE_SPACE 2 37 37 #define XFS_DIR2_FREE_OFFSET (XFS_DIR2_FREE_SPACE * XFS_DIR2_SPACE_SIZE) 38 38 #define XFS_DIR2_FREE_FIRSTDB(mp) \ 39 - XFS_DIR2_BYTE_TO_DB(mp, XFS_DIR2_FREE_OFFSET) 39 + xfs_dir2_byte_to_db(mp, XFS_DIR2_FREE_OFFSET) 40 40 41 41 #define XFS_DIR2_FREE_MAGIC 0x58443246 /* XD2F */ 42 42 ··· 60 60 /* 61 61 * Convert data space db to the corresponding free db. 62 62 */ 63 - #define XFS_DIR2_DB_TO_FDB(mp,db) xfs_dir2_db_to_fdb(mp, db) 64 63 static inline xfs_dir2_db_t 65 64 xfs_dir2_db_to_fdb(struct xfs_mount *mp, xfs_dir2_db_t db) 66 65 { ··· 69 70 /* 70 71 * Convert data space db to the corresponding index in a free db. 71 72 */ 72 - #define XFS_DIR2_DB_TO_FDINDEX(mp,db) xfs_dir2_db_to_fdindex(mp, db) 73 73 static inline int 74 74 xfs_dir2_db_to_fdindex(struct xfs_mount *mp, xfs_dir2_db_t db) 75 75 {
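The two conversions kept here are simple quotient/remainder arithmetic over how many bests slots fit in one free block. The real bodies are elided above; a hedged sketch of the likely expansion, assuming XFS_DIR2_MAX_FREE_ENTRIES() from this era's header:

	/* assumption: one free block indexes MAX_FREE_ENTRIES data blocks */
	fdb = XFS_DIR2_FREE_FIRSTDB(mp) + db / XFS_DIR2_MAX_FREE_ENTRIES(mp);
	findex = db % XFS_DIR2_MAX_FREE_ENTRIES(mp);

so consecutive data blocks share a free block until its bests array fills, then spill into the next one.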
+102 -102
fs/xfs/xfs_dir2_sf.c
··· 89 89 mp = dp->i_mount; 90 90 91 91 count = i8count = namelen = 0; 92 - btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 93 - blp = XFS_DIR2_BLOCK_LEAF_P(btp); 92 + btp = xfs_dir2_block_tail_p(mp, block); 93 + blp = xfs_dir2_block_leaf_p(btp); 94 94 95 95 /* 96 96 * Iterate over the block's data entries by using the leaf pointers. ··· 102 102 * Calculate the pointer to the entry at hand. 103 103 */ 104 104 dep = (xfs_dir2_data_entry_t *) 105 - ((char *)block + XFS_DIR2_DATAPTR_TO_OFF(mp, addr)); 105 + ((char *)block + xfs_dir2_dataptr_to_off(mp, addr)); 106 106 /* 107 107 * Detect . and .., so we can special-case them. 108 108 * . is not included in sf directories. ··· 124 124 /* 125 125 * Calculate the new size, see if we should give up yet. 126 126 */ 127 - size = XFS_DIR2_SF_HDR_SIZE(i8count) + /* header */ 127 + size = xfs_dir2_sf_hdr_size(i8count) + /* header */ 128 128 count + /* namelen */ 129 129 count * (uint)sizeof(xfs_dir2_sf_off_t) + /* offset */ 130 130 namelen + /* name */ ··· 139 139 */ 140 140 sfhp->count = count; 141 141 sfhp->i8count = i8count; 142 - XFS_DIR2_SF_PUT_INUMBER((xfs_dir2_sf_t *)sfhp, &parent, &sfhp->parent); 142 + xfs_dir2_sf_put_inumber((xfs_dir2_sf_t *)sfhp, &parent, &sfhp->parent); 143 143 return size; 144 144 } 145 145 ··· 199 199 * Copy the header into the newly allocate local space. 200 200 */ 201 201 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; 202 - memcpy(sfp, sfhp, XFS_DIR2_SF_HDR_SIZE(sfhp->i8count)); 202 + memcpy(sfp, sfhp, xfs_dir2_sf_hdr_size(sfhp->i8count)); 203 203 dp->i_d.di_size = size; 204 204 /* 205 205 * Set up to loop over the block's entries. 206 206 */ 207 - btp = XFS_DIR2_BLOCK_TAIL_P(mp, block); 207 + btp = xfs_dir2_block_tail_p(mp, block); 208 208 ptr = (char *)block->u; 209 - endptr = (char *)XFS_DIR2_BLOCK_LEAF_P(btp); 210 - sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); 209 + endptr = (char *)xfs_dir2_block_leaf_p(btp); 210 + sfep = xfs_dir2_sf_firstentry(sfp); 211 211 /* 212 212 * Loop over the active and unused entries. 213 213 * Stop when we reach the leaf/tail portion of the block. ··· 233 233 else if (dep->namelen == 2 && 234 234 dep->name[0] == '.' && dep->name[1] == '.') 235 235 ASSERT(be64_to_cpu(dep->inumber) == 236 - XFS_DIR2_SF_GET_INUMBER(sfp, &sfp->hdr.parent)); 236 + xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent)); 237 237 /* 238 238 * Normal entry, copy it into shortform. 239 239 */ 240 240 else { 241 241 sfep->namelen = dep->namelen; 242 - XFS_DIR2_SF_PUT_OFFSET(sfep, 242 + xfs_dir2_sf_put_offset(sfep, 243 243 (xfs_dir2_data_aoff_t) 244 244 ((char *)dep - (char *)block)); 245 245 memcpy(sfep->name, dep->name, dep->namelen); 246 246 temp = be64_to_cpu(dep->inumber); 247 - XFS_DIR2_SF_PUT_INUMBER(sfp, &temp, 248 - XFS_DIR2_SF_INUMBERP(sfep)); 249 - sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep); 247 + xfs_dir2_sf_put_inumber(sfp, &temp, 248 + xfs_dir2_sf_inumberp(sfep)); 249 + sfep = xfs_dir2_sf_nextentry(sfp, sfep); 250 250 } 251 - ptr += XFS_DIR2_DATA_ENTSIZE(dep->namelen); 251 + ptr += xfs_dir2_data_entsize(dep->namelen); 252 252 } 253 253 ASSERT((char *)sfep - (char *)sfp == size); 254 254 xfs_dir2_sf_check(args); ··· 294 294 ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); 295 295 ASSERT(dp->i_df.if_u1.if_data != NULL); 296 296 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; 297 - ASSERT(dp->i_d.di_size >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count)); 297 + ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count)); 298 298 /* 299 299 * Compute entry (and change in) size. 
300 300 */ 301 - add_entsize = XFS_DIR2_SF_ENTSIZE_BYNAME(sfp, args->namelen); 301 + add_entsize = xfs_dir2_sf_entsize_byname(sfp, args->namelen); 302 302 incr_isize = add_entsize; 303 303 objchange = 0; 304 304 #if XFS_BIG_INUMS ··· 392 392 /* 393 393 * Grow the in-inode space. 394 394 */ 395 - xfs_idata_realloc(dp, XFS_DIR2_SF_ENTSIZE_BYNAME(sfp, args->namelen), 395 + xfs_idata_realloc(dp, xfs_dir2_sf_entsize_byname(sfp, args->namelen), 396 396 XFS_DATA_FORK); 397 397 /* 398 398 * Need to set up again due to realloc of the inode data. ··· 403 403 * Fill in the new entry. 404 404 */ 405 405 sfep->namelen = args->namelen; 406 - XFS_DIR2_SF_PUT_OFFSET(sfep, offset); 406 + xfs_dir2_sf_put_offset(sfep, offset); 407 407 memcpy(sfep->name, args->name, sfep->namelen); 408 - XFS_DIR2_SF_PUT_INUMBER(sfp, &args->inumber, 409 - XFS_DIR2_SF_INUMBERP(sfep)); 408 + xfs_dir2_sf_put_inumber(sfp, &args->inumber, 409 + xfs_dir2_sf_inumberp(sfep)); 410 410 /* 411 411 * Update the header and inode. 412 412 */ ··· 463 463 * If it's going to end up at the end then oldsfep will point there. 464 464 */ 465 465 for (offset = XFS_DIR2_DATA_FIRST_OFFSET, 466 - oldsfep = XFS_DIR2_SF_FIRSTENTRY(oldsfp), 467 - add_datasize = XFS_DIR2_DATA_ENTSIZE(args->namelen), 466 + oldsfep = xfs_dir2_sf_firstentry(oldsfp), 467 + add_datasize = xfs_dir2_data_entsize(args->namelen), 468 468 eof = (char *)oldsfep == &buf[old_isize]; 469 469 !eof; 470 - offset = new_offset + XFS_DIR2_DATA_ENTSIZE(oldsfep->namelen), 471 - oldsfep = XFS_DIR2_SF_NEXTENTRY(oldsfp, oldsfep), 470 + offset = new_offset + xfs_dir2_data_entsize(oldsfep->namelen), 471 + oldsfep = xfs_dir2_sf_nextentry(oldsfp, oldsfep), 472 472 eof = (char *)oldsfep == &buf[old_isize]) { 473 - new_offset = XFS_DIR2_SF_GET_OFFSET(oldsfep); 473 + new_offset = xfs_dir2_sf_get_offset(oldsfep); 474 474 if (offset + add_datasize <= new_offset) 475 475 break; 476 476 } ··· 495 495 * Fill in the new entry, and update the header counts. 496 496 */ 497 497 sfep->namelen = args->namelen; 498 - XFS_DIR2_SF_PUT_OFFSET(sfep, offset); 498 + xfs_dir2_sf_put_offset(sfep, offset); 499 499 memcpy(sfep->name, args->name, sfep->namelen); 500 - XFS_DIR2_SF_PUT_INUMBER(sfp, &args->inumber, 501 - XFS_DIR2_SF_INUMBERP(sfep)); 500 + xfs_dir2_sf_put_inumber(sfp, &args->inumber, 501 + xfs_dir2_sf_inumberp(sfep)); 502 502 sfp->hdr.count++; 503 503 #if XFS_BIG_INUMS 504 504 if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && !objchange) ··· 508 508 * If there's more left to copy, do that. 509 509 */ 510 510 if (!eof) { 511 - sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep); 511 + sfep = xfs_dir2_sf_nextentry(sfp, sfep); 512 512 memcpy(sfep, oldsfep, old_isize - nbytes); 513 513 } 514 514 kmem_free(buf, old_isize); ··· 544 544 mp = dp->i_mount; 545 545 546 546 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; 547 - size = XFS_DIR2_DATA_ENTSIZE(args->namelen); 547 + size = xfs_dir2_data_entsize(args->namelen); 548 548 offset = XFS_DIR2_DATA_FIRST_OFFSET; 549 - sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); 549 + sfep = xfs_dir2_sf_firstentry(sfp); 550 550 holefit = 0; 551 551 /* 552 552 * Loop over sf entries. 
··· 555 555 */ 556 556 for (i = 0; i < sfp->hdr.count; i++) { 557 557 if (!holefit) 558 - holefit = offset + size <= XFS_DIR2_SF_GET_OFFSET(sfep); 559 - offset = XFS_DIR2_SF_GET_OFFSET(sfep) + 560 - XFS_DIR2_DATA_ENTSIZE(sfep->namelen); 561 - sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep); 558 + holefit = offset + size <= xfs_dir2_sf_get_offset(sfep); 559 + offset = xfs_dir2_sf_get_offset(sfep) + 560 + xfs_dir2_data_entsize(sfep->namelen); 561 + sfep = xfs_dir2_sf_nextentry(sfp, sfep); 562 562 } 563 563 /* 564 564 * Calculate data bytes used excluding the new entry, if this ··· 617 617 618 618 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; 619 619 offset = XFS_DIR2_DATA_FIRST_OFFSET; 620 - ino = XFS_DIR2_SF_GET_INUMBER(sfp, &sfp->hdr.parent); 620 + ino = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent); 621 621 i8count = ino > XFS_DIR2_MAX_SHORT_INUM; 622 622 623 - for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); 623 + for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); 624 624 i < sfp->hdr.count; 625 - i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) { 626 - ASSERT(XFS_DIR2_SF_GET_OFFSET(sfep) >= offset); 627 - ino = XFS_DIR2_SF_GET_INUMBER(sfp, XFS_DIR2_SF_INUMBERP(sfep)); 625 + i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) { 626 + ASSERT(xfs_dir2_sf_get_offset(sfep) >= offset); 627 + ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep)); 628 628 i8count += ino > XFS_DIR2_MAX_SHORT_INUM; 629 629 offset = 630 - XFS_DIR2_SF_GET_OFFSET(sfep) + 631 - XFS_DIR2_DATA_ENTSIZE(sfep->namelen); 630 + xfs_dir2_sf_get_offset(sfep) + 631 + xfs_dir2_data_entsize(sfep->namelen); 632 632 } 633 633 ASSERT(i8count == sfp->hdr.i8count); 634 634 ASSERT(XFS_BIG_INUMS || i8count == 0); ··· 671 671 ASSERT(dp->i_df.if_flags & XFS_IFINLINE); 672 672 ASSERT(dp->i_df.if_bytes == 0); 673 673 i8count = pino > XFS_DIR2_MAX_SHORT_INUM; 674 - size = XFS_DIR2_SF_HDR_SIZE(i8count); 674 + size = xfs_dir2_sf_hdr_size(i8count); 675 675 /* 676 676 * Make a buffer for the data. 677 677 */ ··· 684 684 /* 685 685 * Now can put in the inode number, since i8count is set. 686 686 */ 687 - XFS_DIR2_SF_PUT_INUMBER(sfp, &pino, &sfp->hdr.parent); 687 + xfs_dir2_sf_put_inumber(sfp, &pino, &sfp->hdr.parent); 688 688 sfp->hdr.count = 0; 689 689 dp->i_d.di_size = size; 690 690 xfs_dir2_sf_check(args); ··· 727 727 728 728 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; 729 729 730 - ASSERT(dp->i_d.di_size >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count)); 730 + ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count)); 731 731 732 732 /* 733 733 * If the block number in the offset is out of range, we're done. 734 734 */ 735 - if (XFS_DIR2_DATAPTR_TO_DB(mp, dir_offset) > mp->m_dirdatablk) { 735 + if (xfs_dir2_dataptr_to_db(mp, dir_offset) > mp->m_dirdatablk) { 736 736 *eofp = 1; 737 737 return 0; 738 738 } ··· 747 747 * Put . entry unless we're starting past it. 748 748 */ 749 749 if (dir_offset <= 750 - XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, 750 + xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, 751 751 XFS_DIR2_DATA_DOT_OFFSET)) { 752 - p.cook = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, 0, 752 + p.cook = xfs_dir2_db_off_to_dataptr(mp, 0, 753 753 XFS_DIR2_DATA_DOTDOT_OFFSET); 754 754 p.ino = dp->i_ino; 755 755 #if XFS_BIG_INUMS ··· 762 762 763 763 if (!p.done) { 764 764 uio->uio_offset = 765 - XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, 765 + xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, 766 766 XFS_DIR2_DATA_DOT_OFFSET); 767 767 return error; 768 768 } ··· 772 772 * Put .. entry unless we're starting past it. 
773 773 */ 774 774 if (dir_offset <= 775 - XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, 775 + xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, 776 776 XFS_DIR2_DATA_DOTDOT_OFFSET)) { 777 - p.cook = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, 777 + p.cook = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, 778 778 XFS_DIR2_DATA_FIRST_OFFSET); 779 - p.ino = XFS_DIR2_SF_GET_INUMBER(sfp, &sfp->hdr.parent); 779 + p.ino = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent); 780 780 #if XFS_BIG_INUMS 781 781 p.ino += mp->m_inoadd; 782 782 #endif ··· 787 787 788 788 if (!p.done) { 789 789 uio->uio_offset = 790 - XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, 790 + xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, 791 791 XFS_DIR2_DATA_DOTDOT_OFFSET); 792 792 return error; 793 793 } ··· 796 796 /* 797 797 * Loop while there are more entries and put'ing works. 798 798 */ 799 - for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); 799 + for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); 800 800 i < sfp->hdr.count; 801 - i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) { 801 + i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) { 802 802 803 - off = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, 804 - XFS_DIR2_SF_GET_OFFSET(sfep)); 803 + off = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, 804 + xfs_dir2_sf_get_offset(sfep)); 805 805 806 806 if (dir_offset > off) 807 807 continue; 808 808 809 809 p.namelen = sfep->namelen; 810 810 811 - p.cook = XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk, 812 - XFS_DIR2_SF_GET_OFFSET(sfep) + 813 - XFS_DIR2_DATA_ENTSIZE(p.namelen)); 811 + p.cook = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk, 812 + xfs_dir2_sf_get_offset(sfep) + 813 + xfs_dir2_data_entsize(p.namelen)); 814 814 815 - p.ino = XFS_DIR2_SF_GET_INUMBER(sfp, XFS_DIR2_SF_INUMBERP(sfep)); 815 + p.ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep)); 816 816 #if XFS_BIG_INUMS 817 817 p.ino += mp->m_inoadd; 818 818 #endif ··· 832 832 *eofp = 1; 833 833 834 834 uio->uio_offset = 835 - XFS_DIR2_DB_OFF_TO_DATAPTR(mp, mp->m_dirdatablk + 1, 0); 835 + xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk + 1, 0); 836 836 837 837 return 0; 838 838 } ··· 865 865 ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); 866 866 ASSERT(dp->i_df.if_u1.if_data != NULL); 867 867 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; 868 - ASSERT(dp->i_d.di_size >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count)); 868 + ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count)); 869 869 /* 870 870 * Special case for . 871 871 */ ··· 878 878 */ 879 879 if (args->namelen == 2 && 880 880 args->name[0] == '.' && args->name[1] == '.') { 881 - args->inumber = XFS_DIR2_SF_GET_INUMBER(sfp, &sfp->hdr.parent); 881 + args->inumber = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent); 882 882 return XFS_ERROR(EEXIST); 883 883 } 884 884 /* 885 885 * Loop over all the entries trying to match ours. 
886 886 */ 887 - for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); 887 + for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); 888 888 i < sfp->hdr.count; 889 - i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) { 889 + i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) { 890 890 if (sfep->namelen == args->namelen && 891 891 sfep->name[0] == args->name[0] && 892 892 memcmp(args->name, sfep->name, args->namelen) == 0) { 893 893 args->inumber = 894 - XFS_DIR2_SF_GET_INUMBER(sfp, 895 - XFS_DIR2_SF_INUMBERP(sfep)); 894 + xfs_dir2_sf_get_inumber(sfp, 895 + xfs_dir2_sf_inumberp(sfep)); 896 896 return XFS_ERROR(EEXIST); 897 897 } 898 898 } ··· 934 934 ASSERT(dp->i_df.if_bytes == oldsize); 935 935 ASSERT(dp->i_df.if_u1.if_data != NULL); 936 936 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; 937 - ASSERT(oldsize >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count)); 937 + ASSERT(oldsize >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count)); 938 938 /* 939 939 * Loop over the old directory entries. 940 940 * Find the one we're deleting. 941 941 */ 942 - for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); 942 + for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); 943 943 i < sfp->hdr.count; 944 - i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) { 944 + i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) { 945 945 if (sfep->namelen == args->namelen && 946 946 sfep->name[0] == args->name[0] && 947 947 memcmp(sfep->name, args->name, args->namelen) == 0) { 948 - ASSERT(XFS_DIR2_SF_GET_INUMBER(sfp, 949 - XFS_DIR2_SF_INUMBERP(sfep)) == 948 + ASSERT(xfs_dir2_sf_get_inumber(sfp, 949 + xfs_dir2_sf_inumberp(sfep)) == 950 950 args->inumber); 951 951 break; 952 952 } ··· 961 961 * Calculate sizes. 962 962 */ 963 963 byteoff = (int)((char *)sfep - (char *)sfp); 964 - entsize = XFS_DIR2_SF_ENTSIZE_BYNAME(sfp, args->namelen); 964 + entsize = xfs_dir2_sf_entsize_byname(sfp, args->namelen); 965 965 newsize = oldsize - entsize; 966 966 /* 967 967 * Copy the part if any after the removed entry, sliding it down. ··· 1027 1027 ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); 1028 1028 ASSERT(dp->i_df.if_u1.if_data != NULL); 1029 1029 sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data; 1030 - ASSERT(dp->i_d.di_size >= XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count)); 1030 + ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count)); 1031 1031 #if XFS_BIG_INUMS 1032 1032 /* 1033 1033 * New inode number is large, and need to convert to 8-byte inodes. ··· 1067 1067 if (args->namelen == 2 && 1068 1068 args->name[0] == '.' && args->name[1] == '.') { 1069 1069 #if XFS_BIG_INUMS || defined(DEBUG) 1070 - ino = XFS_DIR2_SF_GET_INUMBER(sfp, &sfp->hdr.parent); 1070 + ino = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent); 1071 1071 ASSERT(args->inumber != ino); 1072 1072 #endif 1073 - XFS_DIR2_SF_PUT_INUMBER(sfp, &args->inumber, &sfp->hdr.parent); 1073 + xfs_dir2_sf_put_inumber(sfp, &args->inumber, &sfp->hdr.parent); 1074 1074 } 1075 1075 /* 1076 1076 * Normal entry, look for the name. 
1077 1077 */ 1078 1078 else { 1079 - for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp); 1079 + for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); 1080 1080 i < sfp->hdr.count; 1081 - i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep)) { 1081 + i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) { 1082 1082 if (sfep->namelen == args->namelen && 1083 1083 sfep->name[0] == args->name[0] && 1084 1084 memcmp(args->name, sfep->name, args->namelen) == 0) { 1085 1085 #if XFS_BIG_INUMS || defined(DEBUG) 1086 - ino = XFS_DIR2_SF_GET_INUMBER(sfp, 1087 - XFS_DIR2_SF_INUMBERP(sfep)); 1086 + ino = xfs_dir2_sf_get_inumber(sfp, 1087 + xfs_dir2_sf_inumberp(sfep)); 1088 1088 ASSERT(args->inumber != ino); 1089 1089 #endif 1090 - XFS_DIR2_SF_PUT_INUMBER(sfp, &args->inumber, 1091 - XFS_DIR2_SF_INUMBERP(sfep)); 1090 + xfs_dir2_sf_put_inumber(sfp, &args->inumber, 1091 + xfs_dir2_sf_inumberp(sfep)); 1092 1092 break; 1093 1093 } 1094 1094 } ··· 1189 1189 */ 1190 1190 sfp->hdr.count = oldsfp->hdr.count; 1191 1191 sfp->hdr.i8count = 0; 1192 - ino = XFS_DIR2_SF_GET_INUMBER(oldsfp, &oldsfp->hdr.parent); 1193 - XFS_DIR2_SF_PUT_INUMBER(sfp, &ino, &sfp->hdr.parent); 1192 + ino = xfs_dir2_sf_get_inumber(oldsfp, &oldsfp->hdr.parent); 1193 + xfs_dir2_sf_put_inumber(sfp, &ino, &sfp->hdr.parent); 1194 1194 /* 1195 1195 * Copy the entries field by field. 1196 1196 */ 1197 - for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp), 1198 - oldsfep = XFS_DIR2_SF_FIRSTENTRY(oldsfp); 1197 + for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp), 1198 + oldsfep = xfs_dir2_sf_firstentry(oldsfp); 1199 1199 i < sfp->hdr.count; 1200 - i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep), 1201 - oldsfep = XFS_DIR2_SF_NEXTENTRY(oldsfp, oldsfep)) { 1200 + i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep), 1201 + oldsfep = xfs_dir2_sf_nextentry(oldsfp, oldsfep)) { 1202 1202 sfep->namelen = oldsfep->namelen; 1203 1203 sfep->offset = oldsfep->offset; 1204 1204 memcpy(sfep->name, oldsfep->name, sfep->namelen); 1205 - ino = XFS_DIR2_SF_GET_INUMBER(oldsfp, 1206 - XFS_DIR2_SF_INUMBERP(oldsfep)); 1207 - XFS_DIR2_SF_PUT_INUMBER(sfp, &ino, XFS_DIR2_SF_INUMBERP(sfep)); 1205 + ino = xfs_dir2_sf_get_inumber(oldsfp, 1206 + xfs_dir2_sf_inumberp(oldsfep)); 1207 + xfs_dir2_sf_put_inumber(sfp, &ino, xfs_dir2_sf_inumberp(sfep)); 1208 1208 } 1209 1209 /* 1210 1210 * Clean up the inode. ··· 1266 1266 */ 1267 1267 sfp->hdr.count = oldsfp->hdr.count; 1268 1268 sfp->hdr.i8count = 1; 1269 - ino = XFS_DIR2_SF_GET_INUMBER(oldsfp, &oldsfp->hdr.parent); 1270 - XFS_DIR2_SF_PUT_INUMBER(sfp, &ino, &sfp->hdr.parent); 1269 + ino = xfs_dir2_sf_get_inumber(oldsfp, &oldsfp->hdr.parent); 1270 + xfs_dir2_sf_put_inumber(sfp, &ino, &sfp->hdr.parent); 1271 1271 /* 1272 1272 * Copy the entries field by field. 
1273 1273 */ 1274 - for (i = 0, sfep = XFS_DIR2_SF_FIRSTENTRY(sfp), 1275 - oldsfep = XFS_DIR2_SF_FIRSTENTRY(oldsfp); 1274 + for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp), 1275 + oldsfep = xfs_dir2_sf_firstentry(oldsfp); 1276 1276 i < sfp->hdr.count; 1277 - i++, sfep = XFS_DIR2_SF_NEXTENTRY(sfp, sfep), 1278 - oldsfep = XFS_DIR2_SF_NEXTENTRY(oldsfp, oldsfep)) { 1277 + i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep), 1278 + oldsfep = xfs_dir2_sf_nextentry(oldsfp, oldsfep)) { 1279 1279 sfep->namelen = oldsfep->namelen; 1280 1280 sfep->offset = oldsfep->offset; 1281 1281 memcpy(sfep->name, oldsfep->name, sfep->namelen); 1282 - ino = XFS_DIR2_SF_GET_INUMBER(oldsfp, 1283 - XFS_DIR2_SF_INUMBERP(oldsfep)); 1284 - XFS_DIR2_SF_PUT_INUMBER(sfp, &ino, XFS_DIR2_SF_INUMBERP(sfep)); 1282 + ino = xfs_dir2_sf_get_inumber(oldsfp, 1283 + xfs_dir2_sf_inumberp(oldsfep)); 1284 + xfs_dir2_sf_put_inumber(sfp, &ino, xfs_dir2_sf_inumberp(sfep)); 1285 1285 } 1286 1286 /* 1287 1287 * Clean up the inode.
+2 -18
fs/xfs/xfs_dir2_sf.h
··· 90 90 xfs_dir2_sf_entry_t list[1]; /* shortform entries */ 91 91 } xfs_dir2_sf_t; 92 92 93 - #define XFS_DIR2_SF_HDR_SIZE(i8count) xfs_dir2_sf_hdr_size(i8count) 94 93 static inline int xfs_dir2_sf_hdr_size(int i8count) 95 94 { 96 95 return ((uint)sizeof(xfs_dir2_sf_hdr_t) - \ ··· 97 98 ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t))); 98 99 } 99 100 100 - #define XFS_DIR2_SF_INUMBERP(sfep) xfs_dir2_sf_inumberp(sfep) 101 101 static inline xfs_dir2_inou_t *xfs_dir2_sf_inumberp(xfs_dir2_sf_entry_t *sfep) 102 102 { 103 103 return (xfs_dir2_inou_t *)&(sfep)->name[(sfep)->namelen]; 104 104 } 105 105 106 - #define XFS_DIR2_SF_GET_INUMBER(sfp, from) \ 107 - xfs_dir2_sf_get_inumber(sfp, from) 108 106 static inline xfs_intino_t 109 107 xfs_dir2_sf_get_inumber(xfs_dir2_sf_t *sfp, xfs_dir2_inou_t *from) 110 108 { ··· 110 114 (xfs_intino_t)XFS_GET_DIR_INO8((from)->i8)); 111 115 } 112 116 113 - #define XFS_DIR2_SF_PUT_INUMBER(sfp,from,to) \ 114 - xfs_dir2_sf_put_inumber(sfp,from,to) 115 117 static inline void xfs_dir2_sf_put_inumber(xfs_dir2_sf_t *sfp, xfs_ino_t *from, 116 118 xfs_dir2_inou_t *to) 117 119 { ··· 119 125 XFS_PUT_DIR_INO8(*(from), (to)->i8); 120 126 } 121 127 122 - #define XFS_DIR2_SF_GET_OFFSET(sfep) \ 123 - xfs_dir2_sf_get_offset(sfep) 124 128 static inline xfs_dir2_data_aoff_t 125 129 xfs_dir2_sf_get_offset(xfs_dir2_sf_entry_t *sfep) 126 130 { 127 131 return INT_GET_UNALIGNED_16_BE(&(sfep)->offset.i); 128 132 } 129 133 130 - #define XFS_DIR2_SF_PUT_OFFSET(sfep,off) \ 131 - xfs_dir2_sf_put_offset(sfep,off) 132 134 static inline void 133 135 xfs_dir2_sf_put_offset(xfs_dir2_sf_entry_t *sfep, xfs_dir2_data_aoff_t off) 134 136 { 135 137 INT_SET_UNALIGNED_16_BE(&(sfep)->offset.i, off); 136 138 } 137 139 138 - #define XFS_DIR2_SF_ENTSIZE_BYNAME(sfp,len) \ 139 - xfs_dir2_sf_entsize_byname(sfp,len) 140 140 static inline int xfs_dir2_sf_entsize_byname(xfs_dir2_sf_t *sfp, int len) 141 141 { 142 142 return ((uint)sizeof(xfs_dir2_sf_entry_t) - 1 + (len) - \ ··· 138 150 ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t))); 139 151 } 140 152 141 - #define XFS_DIR2_SF_ENTSIZE_BYENTRY(sfp,sfep) \ 142 - xfs_dir2_sf_entsize_byentry(sfp,sfep) 143 153 static inline int 144 154 xfs_dir2_sf_entsize_byentry(xfs_dir2_sf_t *sfp, xfs_dir2_sf_entry_t *sfep) 145 155 { ··· 146 160 ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t))); 147 161 } 148 162 149 - #define XFS_DIR2_SF_FIRSTENTRY(sfp) xfs_dir2_sf_firstentry(sfp) 150 163 static inline xfs_dir2_sf_entry_t *xfs_dir2_sf_firstentry(xfs_dir2_sf_t *sfp) 151 164 { 152 165 return ((xfs_dir2_sf_entry_t *) \ 153 - ((char *)(sfp) + XFS_DIR2_SF_HDR_SIZE(sfp->hdr.i8count))); 166 + ((char *)(sfp) + xfs_dir2_sf_hdr_size(sfp->hdr.i8count))); 154 167 } 155 168 156 - #define XFS_DIR2_SF_NEXTENTRY(sfp,sfep) xfs_dir2_sf_nextentry(sfp,sfep) 157 169 static inline xfs_dir2_sf_entry_t * 158 170 xfs_dir2_sf_nextentry(xfs_dir2_sf_t *sfp, xfs_dir2_sf_entry_t *sfep) 159 171 { 160 172 return ((xfs_dir2_sf_entry_t *) \ 161 - ((char *)(sfep) + XFS_DIR2_SF_ENTSIZE_BYENTRY(sfp,sfep))); 173 + ((char *)(sfep) + xfs_dir2_sf_entsize_byentry(sfp,sfep))); 162 174 } 163 175 164 176 /*
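Every size helper above hinges on the same subtraction: when hdr.i8count is zero, each inode number is stored as a 4-byte xfs_dir2_ino4_t rather than an 8-byte xfs_dir2_ino8_t. A hedged restatement of the entry-size arithmetic, with field widths taken from the structure definitions in this header:

    /* One shortform entry: 1-byte namelen, 2-byte offset, the name
     * itself, then the inode number whose width depends on i8count. */
    static inline unsigned int sf_entsize_byname(int i8count, int namelen)
    {
            return 1 + 2 + namelen + (i8count ? 8 : 4);
    }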
+771
fs/xfs/xfs_filestream.c
··· 1 + /* 2 + * Copyright (c) 2006-2007 Silicon Graphics, Inc. 3 + * All Rights Reserved. 4 + * 5 + * This program is free software; you can redistribute it and/or 6 + * modify it under the terms of the GNU General Public License as 7 + * published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope that it would be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program; if not, write the Free Software Foundation, 16 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 + */ 18 + #include "xfs.h" 19 + #include "xfs_bmap_btree.h" 20 + #include "xfs_inum.h" 21 + #include "xfs_dir2.h" 22 + #include "xfs_dir2_sf.h" 23 + #include "xfs_attr_sf.h" 24 + #include "xfs_dinode.h" 25 + #include "xfs_inode.h" 26 + #include "xfs_ag.h" 27 + #include "xfs_dmapi.h" 28 + #include "xfs_log.h" 29 + #include "xfs_trans.h" 30 + #include "xfs_sb.h" 31 + #include "xfs_mount.h" 32 + #include "xfs_bmap.h" 33 + #include "xfs_alloc.h" 34 + #include "xfs_utils.h" 35 + #include "xfs_mru_cache.h" 36 + #include "xfs_filestream.h" 37 + 38 + #ifdef XFS_FILESTREAMS_TRACE 39 + 40 + ktrace_t *xfs_filestreams_trace_buf; 41 + 42 + STATIC void 43 + xfs_filestreams_trace( 44 + xfs_mount_t *mp, /* mount point */ 45 + int type, /* type of trace */ 46 + const char *func, /* source function */ 47 + int line, /* source line number */ 48 + __psunsigned_t arg0, 49 + __psunsigned_t arg1, 50 + __psunsigned_t arg2, 51 + __psunsigned_t arg3, 52 + __psunsigned_t arg4, 53 + __psunsigned_t arg5) 54 + { 55 + ktrace_enter(xfs_filestreams_trace_buf, 56 + (void *)(__psint_t)(type | (line << 16)), 57 + (void *)func, 58 + (void *)(__psunsigned_t)current_pid(), 59 + (void *)mp, 60 + (void *)(__psunsigned_t)arg0, 61 + (void *)(__psunsigned_t)arg1, 62 + (void *)(__psunsigned_t)arg2, 63 + (void *)(__psunsigned_t)arg3, 64 + (void *)(__psunsigned_t)arg4, 65 + (void *)(__psunsigned_t)arg5, 66 + NULL, NULL, NULL, NULL, NULL, NULL); 67 + } 68 + 69 + #define TRACE0(mp,t) TRACE6(mp,t,0,0,0,0,0,0) 70 + #define TRACE1(mp,t,a0) TRACE6(mp,t,a0,0,0,0,0,0) 71 + #define TRACE2(mp,t,a0,a1) TRACE6(mp,t,a0,a1,0,0,0,0) 72 + #define TRACE3(mp,t,a0,a1,a2) TRACE6(mp,t,a0,a1,a2,0,0,0) 73 + #define TRACE4(mp,t,a0,a1,a2,a3) TRACE6(mp,t,a0,a1,a2,a3,0,0) 74 + #define TRACE5(mp,t,a0,a1,a2,a3,a4) TRACE6(mp,t,a0,a1,a2,a3,a4,0) 75 + #define TRACE6(mp,t,a0,a1,a2,a3,a4,a5) \ 76 + xfs_filestreams_trace(mp, t, __FUNCTION__, __LINE__, \ 77 + (__psunsigned_t)a0, (__psunsigned_t)a1, \ 78 + (__psunsigned_t)a2, (__psunsigned_t)a3, \ 79 + (__psunsigned_t)a4, (__psunsigned_t)a5) 80 + 81 + #define TRACE_AG_SCAN(mp, ag, ag2) \ 82 + TRACE2(mp, XFS_FSTRM_KTRACE_AGSCAN, ag, ag2); 83 + #define TRACE_AG_PICK1(mp, max_ag, maxfree) \ 84 + TRACE2(mp, XFS_FSTRM_KTRACE_AGPICK1, max_ag, maxfree); 85 + #define TRACE_AG_PICK2(mp, ag, ag2, cnt, free, scan, flag) \ 86 + TRACE6(mp, XFS_FSTRM_KTRACE_AGPICK2, ag, ag2, \ 87 + cnt, free, scan, flag) 88 + #define TRACE_UPDATE(mp, ip, ag, cnt, ag2, cnt2) \ 89 + TRACE5(mp, XFS_FSTRM_KTRACE_UPDATE, ip, ag, cnt, ag2, cnt2) 90 + #define TRACE_FREE(mp, ip, pip, ag, cnt) \ 91 + TRACE4(mp, XFS_FSTRM_KTRACE_FREE, ip, pip, ag, cnt) 92 + #define TRACE_LOOKUP(mp, ip, pip, ag, cnt) \ 93 + TRACE4(mp, XFS_FSTRM_KTRACE_ITEM_LOOKUP, ip, pip, ag, cnt) 94 + #define TRACE_ASSOCIATE(mp, ip, 
pip, ag, cnt) \ 95 + TRACE4(mp, XFS_FSTRM_KTRACE_ASSOCIATE, ip, pip, ag, cnt) 96 + #define TRACE_MOVEAG(mp, ip, pip, oag, ocnt, nag, ncnt) \ 97 + TRACE6(mp, XFS_FSTRM_KTRACE_MOVEAG, ip, pip, oag, ocnt, nag, ncnt) 98 + #define TRACE_ORPHAN(mp, ip, ag) \ 99 + TRACE2(mp, XFS_FSTRM_KTRACE_ORPHAN, ip, ag); 100 + 101 + 102 + #else 103 + #define TRACE_AG_SCAN(mp, ag, ag2) 104 + #define TRACE_AG_PICK1(mp, max_ag, maxfree) 105 + #define TRACE_AG_PICK2(mp, ag, ag2, cnt, free, scan, flag) 106 + #define TRACE_UPDATE(mp, ip, ag, cnt, ag2, cnt2) 107 + #define TRACE_FREE(mp, ip, pip, ag, cnt) 108 + #define TRACE_LOOKUP(mp, ip, pip, ag, cnt) 109 + #define TRACE_ASSOCIATE(mp, ip, pip, ag, cnt) 110 + #define TRACE_MOVEAG(mp, ip, pip, oag, ocnt, nag, ncnt) 111 + #define TRACE_ORPHAN(mp, ip, ag) 112 + #endif 113 + 114 + static kmem_zone_t *item_zone; 115 + 116 + /* 117 + * Structure for associating a file or a directory with an allocation group. 118 + * The parent directory pointer is only needed for files, but since there will 119 + * generally be vastly more files than directories in the cache, using the same 120 + * data structure simplifies the code with very little memory overhead. 121 + */ 122 + typedef struct fstrm_item 123 + { 124 + xfs_agnumber_t ag; /* AG currently in use for the file/directory. */ 125 + xfs_inode_t *ip; /* inode self-pointer. */ 126 + xfs_inode_t *pip; /* Parent directory inode pointer. */ 127 + } fstrm_item_t; 128 + 129 + 130 + /* 131 + * Scan the AGs starting at startag looking for an AG that isn't in use and has 132 + * at least minlen blocks free. 133 + */ 134 + static int 135 + _xfs_filestream_pick_ag( 136 + xfs_mount_t *mp, 137 + xfs_agnumber_t startag, 138 + xfs_agnumber_t *agp, 139 + int flags, 140 + xfs_extlen_t minlen) 141 + { 142 + int err, trylock, nscan; 143 + xfs_extlen_t delta, longest, need, free, minfree, maxfree = 0; 144 + xfs_agnumber_t ag, max_ag = NULLAGNUMBER; 145 + struct xfs_perag *pag; 146 + 147 + /* 2% of an AG's blocks must be free for it to be chosen. */ 148 + minfree = mp->m_sb.sb_agblocks / 50; 149 + 150 + ag = startag; 151 + *agp = NULLAGNUMBER; 152 + 153 + /* For the first pass, don't sleep trying to init the per-AG. */ 154 + trylock = XFS_ALLOC_FLAG_TRYLOCK; 155 + 156 + for (nscan = 0; 1; nscan++) { 157 + 158 + TRACE_AG_SCAN(mp, ag, xfs_filestream_peek_ag(mp, ag)); 159 + 160 + pag = mp->m_perag + ag; 161 + 162 + if (!pag->pagf_init) { 163 + err = xfs_alloc_pagf_init(mp, NULL, ag, trylock); 164 + if (err && !trylock) 165 + return err; 166 + } 167 + 168 + /* Might fail sometimes during the 1st pass with trylock set. */ 169 + if (!pag->pagf_init) 170 + goto next_ag; 171 + 172 + /* Keep track of the AG with the most free blocks. */ 173 + if (pag->pagf_freeblks > maxfree) { 174 + maxfree = pag->pagf_freeblks; 175 + max_ag = ag; 176 + } 177 + 178 + /* 179 + * The AG reference count does two things: it enforces mutual 180 + * exclusion when examining the suitability of an AG in this 181 + * loop, and it guards against two filestreams being established 182 + * in the same AG as each other. 183 + */ 184 + if (xfs_filestream_get_ag(mp, ag) > 1) { 185 + xfs_filestream_put_ag(mp, ag); 186 + goto next_ag; 187 + } 188 + 189 + need = XFS_MIN_FREELIST_PAG(pag, mp); 190 + delta = need > pag->pagf_flcount ? need - pag->pagf_flcount : 0; 191 + longest = (pag->pagf_longest > delta) ? 
192 + (pag->pagf_longest - delta) : 193 + (pag->pagf_flcount > 0 || pag->pagf_longest > 0); 194 + 195 + if (((minlen && longest >= minlen) || 196 + (!minlen && pag->pagf_freeblks >= minfree)) && 197 + (!pag->pagf_metadata || !(flags & XFS_PICK_USERDATA) || 198 + (flags & XFS_PICK_LOWSPACE))) { 199 + 200 + /* Break out, retaining the reference on the AG. */ 201 + free = pag->pagf_freeblks; 202 + *agp = ag; 203 + break; 204 + } 205 + 206 + /* Drop the reference on this AG, it's not usable. */ 207 + xfs_filestream_put_ag(mp, ag); 208 + next_ag: 209 + /* Move to the next AG, wrapping to AG 0 if necessary. */ 210 + if (++ag >= mp->m_sb.sb_agcount) 211 + ag = 0; 212 + 213 + /* If a full pass of the AGs hasn't been done yet, continue. */ 214 + if (ag != startag) 215 + continue; 216 + 217 + /* Allow sleeping in xfs_alloc_pagf_init() on the 2nd pass. */ 218 + if (trylock != 0) { 219 + trylock = 0; 220 + continue; 221 + } 222 + 223 + /* Finally, if lowspace wasn't set, set it for the 3rd pass. */ 224 + if (!(flags & XFS_PICK_LOWSPACE)) { 225 + flags |= XFS_PICK_LOWSPACE; 226 + continue; 227 + } 228 + 229 + /* 230 + * Take the AG with the most free space, regardless of whether 231 + * it's already in use by another filestream. 232 + */ 233 + if (max_ag != NULLAGNUMBER) { 234 + xfs_filestream_get_ag(mp, max_ag); 235 + TRACE_AG_PICK1(mp, max_ag, maxfree); 236 + free = maxfree; 237 + *agp = max_ag; 238 + break; 239 + } 240 + 241 + /* take AG 0 if none matched */ 242 + TRACE_AG_PICK1(mp, max_ag, maxfree); 243 + *agp = 0; 244 + return 0; 245 + } 246 + 247 + TRACE_AG_PICK2(mp, startag, *agp, xfs_filestream_peek_ag(mp, *agp), 248 + free, nscan, flags); 249 + 250 + return 0; 251 + } 252 + 253 + /* 254 + * Set the allocation group number for a file or a directory, updating inode 255 + * references and per-AG references as appropriate. Must be called with the 256 + * m_peraglock held in read mode. 257 + */ 258 + static int 259 + _xfs_filestream_update_ag( 260 + xfs_inode_t *ip, 261 + xfs_inode_t *pip, 262 + xfs_agnumber_t ag) 263 + { 264 + int err = 0; 265 + xfs_mount_t *mp; 266 + xfs_mru_cache_t *cache; 267 + fstrm_item_t *item; 268 + xfs_agnumber_t old_ag; 269 + xfs_inode_t *old_pip; 270 + 271 + /* 272 + * Either ip is a regular file and pip is a directory, or ip is a 273 + * directory and pip is NULL. 274 + */ 275 + ASSERT(ip && (((ip->i_d.di_mode & S_IFREG) && pip && 276 + (pip->i_d.di_mode & S_IFDIR)) || 277 + ((ip->i_d.di_mode & S_IFDIR) && !pip))); 278 + 279 + mp = ip->i_mount; 280 + cache = mp->m_filestream; 281 + 282 + item = xfs_mru_cache_lookup(cache, ip->i_ino); 283 + if (item) { 284 + ASSERT(item->ip == ip); 285 + old_ag = item->ag; 286 + item->ag = ag; 287 + old_pip = item->pip; 288 + item->pip = pip; 289 + xfs_mru_cache_done(cache); 290 + 291 + /* 292 + * If the AG has changed, drop the old ref and take a new one, 293 + * effectively transferring the reference from old to new AG. 294 + */ 295 + if (ag != old_ag) { 296 + xfs_filestream_put_ag(mp, old_ag); 297 + xfs_filestream_get_ag(mp, ag); 298 + } 299 + 300 + /* 301 + * If ip is a file and its pip has changed, drop the old ref and 302 + * take a new one. 
303 + */ 304 + if (pip && pip != old_pip) { 305 + IRELE(old_pip); 306 + IHOLD(pip); 307 + } 308 + 309 + TRACE_UPDATE(mp, ip, old_ag, xfs_filestream_peek_ag(mp, old_ag), 310 + ag, xfs_filestream_peek_ag(mp, ag)); 311 + return 0; 312 + } 313 + 314 + item = kmem_zone_zalloc(item_zone, KM_MAYFAIL); 315 + if (!item) 316 + return ENOMEM; 317 + 318 + item->ag = ag; 319 + item->ip = ip; 320 + item->pip = pip; 321 + 322 + err = xfs_mru_cache_insert(cache, ip->i_ino, item); 323 + if (err) { 324 + kmem_zone_free(item_zone, item); 325 + return err; 326 + } 327 + 328 + /* Take a reference on the AG. */ 329 + xfs_filestream_get_ag(mp, ag); 330 + 331 + /* 332 + * Take a reference on the inode itself regardless of whether it's a 333 + * regular file or a directory. 334 + */ 335 + IHOLD(ip); 336 + 337 + /* 338 + * In the case of a regular file, take a reference on the parent inode 339 + * as well to ensure it remains in-core. 340 + */ 341 + if (pip) 342 + IHOLD(pip); 343 + 344 + TRACE_UPDATE(mp, ip, ag, xfs_filestream_peek_ag(mp, ag), 345 + ag, xfs_filestream_peek_ag(mp, ag)); 346 + 347 + return 0; 348 + } 349 + 350 + /* xfs_fstrm_free_func(): callback for freeing cached stream items. */ 351 + void 352 + xfs_fstrm_free_func( 353 + xfs_ino_t ino, 354 + fstrm_item_t *item) 355 + { 356 + xfs_inode_t *ip = item->ip; 357 + int ref; 358 + 359 + ASSERT(ip->i_ino == ino); 360 + 361 + xfs_iflags_clear(ip, XFS_IFILESTREAM); 362 + 363 + /* Drop the reference taken on the AG when the item was added. */ 364 + ref = xfs_filestream_put_ag(ip->i_mount, item->ag); 365 + 366 + ASSERT(ref >= 0); 367 + TRACE_FREE(ip->i_mount, ip, item->pip, item->ag, 368 + xfs_filestream_peek_ag(ip->i_mount, item->ag)); 369 + 370 + /* 371 + * _xfs_filestream_update_ag() always takes a reference on the inode 372 + * itself, whether it's a file or a directory. Release it here. 373 + * This can result in the inode being freed and so we must 374 + * not hold any inode locks when freeing filesstreams objects 375 + * otherwise we can deadlock here. 376 + */ 377 + IRELE(ip); 378 + 379 + /* 380 + * In the case of a regular file, _xfs_filestream_update_ag() also 381 + * takes a ref on the parent inode to keep it in-core. Release that 382 + * too. 383 + */ 384 + if (item->pip) 385 + IRELE(item->pip); 386 + 387 + /* Finally, free the memory allocated for the item. */ 388 + kmem_zone_free(item_zone, item); 389 + } 390 + 391 + /* 392 + * xfs_filestream_init() is called at xfs initialisation time to set up the 393 + * memory zone that will be used for filestream data structure allocation. 394 + */ 395 + int 396 + xfs_filestream_init(void) 397 + { 398 + item_zone = kmem_zone_init(sizeof(fstrm_item_t), "fstrm_item"); 399 + #ifdef XFS_FILESTREAMS_TRACE 400 + xfs_filestreams_trace_buf = ktrace_alloc(XFS_FSTRM_KTRACE_SIZE, KM_SLEEP); 401 + #endif 402 + return item_zone ? 0 : -ENOMEM; 403 + } 404 + 405 + /* 406 + * xfs_filestream_uninit() is called at xfs termination time to destroy the 407 + * memory zone that was used for filestream data structure allocation. 408 + */ 409 + void 410 + xfs_filestream_uninit(void) 411 + { 412 + #ifdef XFS_FILESTREAMS_TRACE 413 + ktrace_free(xfs_filestreams_trace_buf); 414 + #endif 415 + kmem_zone_destroy(item_zone); 416 + } 417 + 418 + /* 419 + * xfs_filestream_mount() is called when a file system is mounted with the 420 + * filestream option. It is responsible for allocating the data structures 421 + * needed to track the new file system's file streams. 
422 + */ 423 + int 424 + xfs_filestream_mount( 425 + xfs_mount_t *mp) 426 + { 427 + int err; 428 + unsigned int lifetime, grp_count; 429 + 430 + /* 431 + * The filestream timer tunable is currently fixed within the range of 432 + * one second to four minutes, with five seconds being the default. The 433 + * group count is somewhat arbitrary, but it'd be nice to adhere to the 434 + * timer tunable to within about 10 percent. This requires at least 10 435 + * groups. 436 + */ 437 + lifetime = xfs_fstrm_centisecs * 10; 438 + grp_count = 10; 439 + 440 + err = xfs_mru_cache_create(&mp->m_filestream, lifetime, grp_count, 441 + (xfs_mru_cache_free_func_t)xfs_fstrm_free_func); 442 + 443 + return err; 444 + } 445 + 446 + /* 447 + * xfs_filestream_unmount() is called when a file system that was mounted with 448 + * the filestream option is unmounted. It drains the data structures created 449 + * to track the file system's file streams and frees all the memory that was 450 + * allocated. 451 + */ 452 + void 453 + xfs_filestream_unmount( 454 + xfs_mount_t *mp) 455 + { 456 + xfs_mru_cache_destroy(mp->m_filestream); 457 + } 458 + 459 + /* 460 + * If the mount point's m_perag array is going to be reallocated, all 461 + * outstanding cache entries must be flushed to avoid accessing reference count 462 + * addresses that have been freed. The call to xfs_filestream_flush() must be 463 + * made inside the block that holds the m_peraglock in write mode to do the 464 + * reallocation. 465 + */ 466 + void 467 + xfs_filestream_flush( 468 + xfs_mount_t *mp) 469 + { 470 + /* point in time flush, so keep the reaper running */ 471 + xfs_mru_cache_flush(mp->m_filestream, 1); 472 + } 473 + 474 + /* 475 + * Return the AG of the filestream the file or directory belongs to, or 476 + * NULLAGNUMBER otherwise. 477 + */ 478 + xfs_agnumber_t 479 + xfs_filestream_lookup_ag( 480 + xfs_inode_t *ip) 481 + { 482 + xfs_mru_cache_t *cache; 483 + fstrm_item_t *item; 484 + xfs_agnumber_t ag; 485 + int ref; 486 + 487 + if (!(ip->i_d.di_mode & (S_IFREG | S_IFDIR))) { 488 + ASSERT(0); 489 + return NULLAGNUMBER; 490 + } 491 + 492 + cache = ip->i_mount->m_filestream; 493 + item = xfs_mru_cache_lookup(cache, ip->i_ino); 494 + if (!item) { 495 + TRACE_LOOKUP(ip->i_mount, ip, NULL, NULLAGNUMBER, 0); 496 + return NULLAGNUMBER; 497 + } 498 + 499 + ASSERT(ip == item->ip); 500 + ag = item->ag; 501 + ref = xfs_filestream_peek_ag(ip->i_mount, ag); 502 + xfs_mru_cache_done(cache); 503 + 504 + TRACE_LOOKUP(ip->i_mount, ip, item->pip, ag, ref); 505 + return ag; 506 + } 507 + 508 + /* 509 + * xfs_filestream_associate() should only be called to associate a regular file 510 + * with its parent directory. Calling it with a child directory isn't 511 + * appropriate because filestreams don't apply to entire directory hierarchies. 512 + * Creating a file in a child directory of an existing filestream directory 513 + * starts a new filestream with its own allocation group association. 514 + * 515 + * Returns < 0 on error, 0 if successful association occurred, > 0 if 516 + * we failed to get an association because of locking issues. 
517 + */ 518 + int 519 + xfs_filestream_associate( 520 + xfs_inode_t *pip, 521 + xfs_inode_t *ip) 522 + { 523 + xfs_mount_t *mp; 524 + xfs_mru_cache_t *cache; 525 + fstrm_item_t *item; 526 + xfs_agnumber_t ag, rotorstep, startag; 527 + int err = 0; 528 + 529 + ASSERT(pip->i_d.di_mode & S_IFDIR); 530 + ASSERT(ip->i_d.di_mode & S_IFREG); 531 + if (!(pip->i_d.di_mode & S_IFDIR) || !(ip->i_d.di_mode & S_IFREG)) 532 + return -EINVAL; 533 + 534 + mp = pip->i_mount; 535 + cache = mp->m_filestream; 536 + down_read(&mp->m_peraglock); 537 + 538 + /* 539 + * We have a problem, Houston. 540 + * 541 + * Taking the iolock here violates inode locking order - we already 542 + * hold the ilock. Hence if we block getting this lock we may never 543 + * wake. Unfortunately, that means if we can't get the lock, we're 544 + * screwed in terms of getting a stream association - we can't spin 545 + * waiting for the lock because someone else is waiting on the lock we 546 + * hold and we cannot drop that as we are in a transaction here. 547 + * 548 + * Lucky for us, this inversion is rarely a problem because it's a 549 + * directory inode that we are trying to lock here and that means the 550 + * only place that matters is xfs_sync_inodes() and SYNC_DELWRI is 551 + * used. i.e. freeze, remount-ro, quotasync or unmount. 552 + * 553 + * So, if we can't get the iolock without sleeping then just give up 554 + */ 555 + if (!xfs_ilock_nowait(pip, XFS_IOLOCK_EXCL)) { 556 + up_read(&mp->m_peraglock); 557 + return 1; 558 + } 559 + 560 + /* If the parent directory is already in the cache, use its AG. */ 561 + item = xfs_mru_cache_lookup(cache, pip->i_ino); 562 + if (item) { 563 + ASSERT(item->ip == pip); 564 + ag = item->ag; 565 + xfs_mru_cache_done(cache); 566 + 567 + TRACE_LOOKUP(mp, pip, pip, ag, xfs_filestream_peek_ag(mp, ag)); 568 + err = _xfs_filestream_update_ag(ip, pip, ag); 569 + 570 + goto exit; 571 + } 572 + 573 + /* 574 + * Set the starting AG using the rotor for inode32, otherwise 575 + * use the directory inode's AG. 576 + */ 577 + if (mp->m_flags & XFS_MOUNT_32BITINODES) { 578 + rotorstep = xfs_rotorstep; 579 + startag = (mp->m_agfrotor / rotorstep) % mp->m_sb.sb_agcount; 580 + mp->m_agfrotor = (mp->m_agfrotor + 1) % 581 + (mp->m_sb.sb_agcount * rotorstep); 582 + } else 583 + startag = XFS_INO_TO_AGNO(mp, pip->i_ino); 584 + 585 + /* Pick a new AG for the parent inode starting at startag. */ 586 + err = _xfs_filestream_pick_ag(mp, startag, &ag, 0, 0); 587 + if (err || ag == NULLAGNUMBER) 588 + goto exit_did_pick; 589 + 590 + /* Associate the parent inode with the AG. */ 591 + err = _xfs_filestream_update_ag(pip, NULL, ag); 592 + if (err) 593 + goto exit_did_pick; 594 + 595 + /* Associate the file inode with the AG. */ 596 + err = _xfs_filestream_update_ag(ip, pip, ag); 597 + if (err) 598 + goto exit_did_pick; 599 + 600 + TRACE_ASSOCIATE(mp, ip, pip, ag, xfs_filestream_peek_ag(mp, ag)); 601 + 602 + exit_did_pick: 603 + /* 604 + * If _xfs_filestream_pick_ag() returned a valid AG, remove the 605 + * reference it took on it, since the file and directory will have taken 606 + * their own now if they were successfully cached. 607 + */ 608 + if (ag != NULLAGNUMBER) 609 + xfs_filestream_put_ag(mp, ag); 610 + 611 + exit: 612 + xfs_iunlock(pip, XFS_IOLOCK_EXCL); 613 + up_read(&mp->m_peraglock); 614 + return -err; 615 + } 616 + 617 + /* 618 + * Pick a new allocation group for the current file and its file stream. This 619 + * function is called by xfs_bmap_filestreams() with the mount point's per-ag 620 + * lock held. 
621 + */ 622 + int 623 + xfs_filestream_new_ag( 624 + xfs_bmalloca_t *ap, 625 + xfs_agnumber_t *agp) 626 + { 627 + int flags, err; 628 + xfs_inode_t *ip, *pip = NULL; 629 + xfs_mount_t *mp; 630 + xfs_mru_cache_t *cache; 631 + xfs_extlen_t minlen; 632 + fstrm_item_t *dir, *file; 633 + xfs_agnumber_t ag = NULLAGNUMBER; 634 + 635 + ip = ap->ip; 636 + mp = ip->i_mount; 637 + cache = mp->m_filestream; 638 + minlen = ap->alen; 639 + *agp = NULLAGNUMBER; 640 + 641 + /* 642 + * Look for the file in the cache, removing it if it's found. Doing 643 + * this allows it to be held across the dir lookup that follows. 644 + */ 645 + file = xfs_mru_cache_remove(cache, ip->i_ino); 646 + if (file) { 647 + ASSERT(ip == file->ip); 648 + 649 + /* Save the file's parent inode and old AG number for later. */ 650 + pip = file->pip; 651 + ag = file->ag; 652 + 653 + /* Look for the file's directory in the cache. */ 654 + dir = xfs_mru_cache_lookup(cache, pip->i_ino); 655 + if (dir) { 656 + ASSERT(pip == dir->ip); 657 + 658 + /* 659 + * If the directory has already moved on to a new AG, 660 + * use that AG as the new AG for the file. Don't 661 + * forget to twiddle the AG refcounts to match the 662 + * movement. 663 + */ 664 + if (dir->ag != file->ag) { 665 + xfs_filestream_put_ag(mp, file->ag); 666 + xfs_filestream_get_ag(mp, dir->ag); 667 + *agp = file->ag = dir->ag; 668 + } 669 + 670 + xfs_mru_cache_done(cache); 671 + } 672 + 673 + /* 674 + * Put the file back in the cache. If this fails, the free 675 + * function needs to be called to tidy up in the same way as if 676 + * the item had simply expired from the cache. 677 + */ 678 + err = xfs_mru_cache_insert(cache, ip->i_ino, file); 679 + if (err) { 680 + xfs_fstrm_free_func(ip->i_ino, file); 681 + return err; 682 + } 683 + 684 + /* 685 + * If the file's AG was moved to the directory's new AG, there's 686 + * nothing more to be done. 687 + */ 688 + if (*agp != NULLAGNUMBER) { 689 + TRACE_MOVEAG(mp, ip, pip, 690 + ag, xfs_filestream_peek_ag(mp, ag), 691 + *agp, xfs_filestream_peek_ag(mp, *agp)); 692 + return 0; 693 + } 694 + } 695 + 696 + /* 697 + * If the file's parent directory is known, take its iolock in exclusive 698 + * mode to prevent two sibling files from racing each other to migrate 699 + * themselves and their parent to different AGs. 700 + */ 701 + if (pip) 702 + xfs_ilock(pip, XFS_IOLOCK_EXCL); 703 + 704 + /* 705 + * A new AG needs to be found for the file. If the file's parent 706 + * directory is also known, it will be moved to the new AG as well to 707 + * ensure that files created inside it in future use the new AG. 708 + */ 709 + ag = (ag == NULLAGNUMBER) ? 0 : (ag + 1) % mp->m_sb.sb_agcount; 710 + flags = (ap->userdata ? XFS_PICK_USERDATA : 0) | 711 + (ap->low ? XFS_PICK_LOWSPACE : 0); 712 + 713 + err = _xfs_filestream_pick_ag(mp, ag, agp, flags, minlen); 714 + if (err || *agp == NULLAGNUMBER) 715 + goto exit; 716 + 717 + /* 718 + * If the file wasn't found in the file cache, then its parent directory 719 + * inode isn't known. For this to have happened, the file must either 720 + * be pre-existing, or it was created long enough ago that its cache 721 + * entry has expired. This isn't the sort of usage that the filestreams 722 + * allocator is trying to optimise, so there's no point trying to track 723 + * its new AG somehow in the filestream data structures. 724 + */ 725 + if (!pip) { 726 + TRACE_ORPHAN(mp, ip, *agp); 727 + goto exit; 728 + } 729 + 730 + /* Associate the parent inode with the AG. 
*/ 731 + err = _xfs_filestream_update_ag(pip, NULL, *agp); 732 + if (err) 733 + goto exit; 734 + 735 + /* Associate the file inode with the AG. */ 736 + err = _xfs_filestream_update_ag(ip, pip, *agp); 737 + if (err) 738 + goto exit; 739 + 740 + TRACE_MOVEAG(mp, ip, pip, NULLAGNUMBER, 0, 741 + *agp, xfs_filestream_peek_ag(mp, *agp)); 742 + 743 + exit: 744 + /* 745 + * If _xfs_filestream_pick_ag() returned a valid AG, remove the 746 + * reference it took on it, since the file and directory will have taken 747 + * their own now if they were successfully cached. 748 + */ 749 + if (*agp != NULLAGNUMBER) 750 + xfs_filestream_put_ag(mp, *agp); 751 + else 752 + *agp = 0; 753 + 754 + if (pip) 755 + xfs_iunlock(pip, XFS_IOLOCK_EXCL); 756 + 757 + return err; 758 + } 759 + 760 + /* 761 + * Remove an association between an inode and a filestream object. 762 + * Typically this is done on last close of an unlinked file. 763 + */ 764 + void 765 + xfs_filestream_deassociate( 766 + xfs_inode_t *ip) 767 + { 768 + xfs_mru_cache_t *cache = ip->i_mount->m_filestream; 769 + 770 + xfs_mru_cache_delete(cache, ip->i_ino); 771 + }
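Taken as a whole, _xfs_filestream_pick_ag() is a three-pass search with a final fallback. A condensed restatement of its control flow; scan_one_pass() and take_fullest_ag() are hypothetical helpers standing in for the inlined loop body above:

    /* Pass 1: trylock only, never sleep initialising per-AG structures.
     * Pass 2: allow xfs_alloc_pagf_init() to sleep.
     * Pass 3: set XFS_PICK_LOWSPACE, accepting AGs under the 2% threshold.
     * Failing all three, take the AG with the most free blocks. */
    trylock = XFS_ALLOC_FLAG_TRYLOCK;
    for (;;) {
            ag = scan_one_pass(mp, startag, trylock, flags, minlen);
            if (ag != NULLAGNUMBER)
                    break;
            if (trylock)
                    trylock = 0;
            else if (!(flags & XFS_PICK_LOWSPACE))
                    flags |= XFS_PICK_LOWSPACE;
            else {
                    ag = take_fullest_ag(mp);
                    break;
            }
    }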
+136
fs/xfs/xfs_filestream.h
··· 1 + /* 2 + * Copyright (c) 2006-2007 Silicon Graphics, Inc. 3 + * All Rights Reserved. 4 + * 5 + * This program is free software; you can redistribute it and/or 6 + * modify it under the terms of the GNU General Public License as 7 + * published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope that it would be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program; if not, write the Free Software Foundation, 16 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 + */ 18 + #ifndef __XFS_FILESTREAM_H__ 19 + #define __XFS_FILESTREAM_H__ 20 + 21 + #ifdef __KERNEL__ 22 + 23 + struct xfs_mount; 24 + struct xfs_inode; 25 + struct xfs_perag; 26 + struct xfs_bmalloca; 27 + 28 + #ifdef XFS_FILESTREAMS_TRACE 29 + #define XFS_FSTRM_KTRACE_INFO 1 30 + #define XFS_FSTRM_KTRACE_AGSCAN 2 31 + #define XFS_FSTRM_KTRACE_AGPICK1 3 32 + #define XFS_FSTRM_KTRACE_AGPICK2 4 33 + #define XFS_FSTRM_KTRACE_UPDATE 5 34 + #define XFS_FSTRM_KTRACE_FREE 6 35 + #define XFS_FSTRM_KTRACE_ITEM_LOOKUP 7 36 + #define XFS_FSTRM_KTRACE_ASSOCIATE 8 37 + #define XFS_FSTRM_KTRACE_MOVEAG 9 38 + #define XFS_FSTRM_KTRACE_ORPHAN 10 39 + 40 + #define XFS_FSTRM_KTRACE_SIZE 16384 41 + extern ktrace_t *xfs_filestreams_trace_buf; 42 + 43 + #endif 44 + 45 + /* 46 + * Allocation group filestream associations are tracked with per-ag atomic 47 + * counters. These counters allow _xfs_filestream_pick_ag() to tell whether a 48 + * particular AG already has active filestreams associated with it. The mount 49 + * point's m_peraglock is used to protect these counters from per-ag array 50 + * re-allocation during a growfs operation. When xfs_growfs_data_private() is 51 + * about to reallocate the array, it calls xfs_filestream_flush() with the 52 + * m_peraglock held in write mode. 53 + * 54 + * Since xfs_mru_cache_flush() guarantees that all the free functions for all 55 + * the cache elements have finished executing before it returns, it's safe for 56 + * the free functions to use the atomic counters without m_peraglock protection. 57 + * This allows the implementation of xfs_fstrm_free_func() to be agnostic about 58 + * whether it was called with the m_peraglock held in read mode, write mode or 59 + * not held at all. The race condition this addresses is the following: 60 + * 61 + * - The work queue scheduler fires and pulls a filestream directory cache 62 + * element off the LRU end of the cache for deletion, then gets pre-empted. 63 + * - A growfs operation grabs the m_peraglock in write mode, flushes all the 64 + * remaining items from the cache and reallocates the mount point's per-ag 65 + * array, resetting all the counters to zero. 66 + * - The work queue thread resumes and calls the free function for the element 67 + * it started cleaning up earlier. In the process it decrements the 68 + * filestreams counter for an AG that now has no references. 69 + * 70 + * With a shrinkfs feature, the above scenario could panic the system. 71 + * 72 + * All other uses of the following macros should be protected by either the 73 + * m_peraglock held in read mode, or the cache's internal locking exposed by the 74 + * interval between a call to xfs_mru_cache_lookup() and a call to 75 + * xfs_mru_cache_done(). 
In addition, the m_peraglock must be held in read mode 76 + * when new elements are added to the cache. 77 + * 78 + * Combined, these locking rules ensure that no associations will ever exist in 79 + * the cache that reference per-ag array elements that have since been 80 + * reallocated. 81 + */ 82 + STATIC_INLINE int 83 + xfs_filestream_peek_ag( 84 + xfs_mount_t *mp, 85 + xfs_agnumber_t agno) 86 + { 87 + return atomic_read(&mp->m_perag[agno].pagf_fstrms); 88 + } 89 + 90 + STATIC_INLINE int 91 + xfs_filestream_get_ag( 92 + xfs_mount_t *mp, 93 + xfs_agnumber_t agno) 94 + { 95 + return atomic_inc_return(&mp->m_perag[agno].pagf_fstrms); 96 + } 97 + 98 + STATIC_INLINE int 99 + xfs_filestream_put_ag( 100 + xfs_mount_t *mp, 101 + xfs_agnumber_t agno) 102 + { 103 + return atomic_dec_return(&mp->m_perag[agno].pagf_fstrms); 104 + } 105 + 106 + /* allocation selection flags */ 107 + typedef enum xfs_fstrm_alloc { 108 + XFS_PICK_USERDATA = 1, 109 + XFS_PICK_LOWSPACE = 2, 110 + } xfs_fstrm_alloc_t; 111 + 112 + /* prototypes for filestream.c */ 113 + int xfs_filestream_init(void); 114 + void xfs_filestream_uninit(void); 115 + int xfs_filestream_mount(struct xfs_mount *mp); 116 + void xfs_filestream_unmount(struct xfs_mount *mp); 117 + void xfs_filestream_flush(struct xfs_mount *mp); 118 + xfs_agnumber_t xfs_filestream_lookup_ag(struct xfs_inode *ip); 119 + int xfs_filestream_associate(struct xfs_inode *dip, struct xfs_inode *ip); 120 + void xfs_filestream_deassociate(struct xfs_inode *ip); 121 + int xfs_filestream_new_ag(struct xfs_bmalloca *ap, xfs_agnumber_t *agp); 122 + 123 + 124 + /* filestreams for the inode? */ 125 + STATIC_INLINE int 126 + xfs_inode_is_filestream( 127 + struct xfs_inode *ip) 128 + { 129 + return (ip->i_mount->m_flags & XFS_MOUNT_FILESTREAMS) || 130 + xfs_iflags_test(ip, XFS_IFILESTREAM) || 131 + (ip->i_d.di_flags & XFS_DIFLAG_FILESTREAM); 132 + } 133 + 134 + #endif /* __KERNEL__ */ 135 + 136 + #endif /* __XFS_FILESTREAM_H__ */
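The get/put pair doubles as a try-lock: atomic_inc_return() reports the post-increment value, so a result greater than one means another stream already holds the AG. This is exactly how _xfs_filestream_pick_ag() uses it:

    if (xfs_filestream_get_ag(mp, ag) > 1) {
            /* Lost the race: undo our increment and move on. */
            xfs_filestream_put_ag(mp, ag);
            goto next_ag;
    }
    /* The count went 0 -> 1: this AG is ours for the stream. */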
+2
fs/xfs/xfs_fs.h
··· 66 66 #define XFS_XFLAG_EXTSIZE 0x00000800 /* extent size allocator hint */ 67 67 #define XFS_XFLAG_EXTSZINHERIT 0x00001000 /* inherit inode extent size */ 68 68 #define XFS_XFLAG_NODEFRAG 0x00002000 /* do not defragment */ 69 + #define XFS_XFLAG_FILESTREAM 0x00004000 /* use filestream allocator */ 69 70 #define XFS_XFLAG_HASATTR 0x80000000 /* no DIFLAG for this */ 70 71 71 72 /* ··· 239 238 #define XFS_FSOP_GEOM_FLAGS_LOGV2 0x0100 /* log format version 2 */ 240 239 #define XFS_FSOP_GEOM_FLAGS_SECTOR 0x0200 /* sector sizes >1BB */ 241 240 #define XFS_FSOP_GEOM_FLAGS_ATTR2 0x0400 /* inline attributes rework */ 241 + #define XFS_FSOP_GEOM_FLAGS_LAZYSB 0x4000 /* lazy superblock counters */ 242 242 243 243 244 244 /*
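Both additions are user-visible ABI bits. As a hedged user-space sketch (assuming fd is an open descriptor on an XFS filesystem and the usual xfs headers are included), the new geometry flag could be probed like this:

    xfs_fsop_geom_t geo;

    if (ioctl(fd, XFS_IOC_FSGEOMETRY, &geo) == 0 &&
        (geo.flags & XFS_FSOP_GEOM_FLAGS_LAZYSB))
            printf("lazy superblock counters enabled\n");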
+14 -3
fs/xfs/xfs_fsops.c
··· 44 44 #include "xfs_trans_space.h" 45 45 #include "xfs_rtalloc.h" 46 46 #include "xfs_rw.h" 47 + #include "xfs_filestream.h" 47 48 48 49 /* 49 50 * File system operations ··· 95 94 XFS_FSOP_GEOM_FLAGS_DIRV2 : 0) | 96 95 (XFS_SB_VERSION_HASSECTOR(&mp->m_sb) ? 97 96 XFS_FSOP_GEOM_FLAGS_SECTOR : 0) | 97 + (xfs_sb_version_haslazysbcount(&mp->m_sb) ? 98 + XFS_FSOP_GEOM_FLAGS_LAZYSB : 0) | 98 99 (XFS_SB_VERSION_HASATTR2(&mp->m_sb) ? 99 100 XFS_FSOP_GEOM_FLAGS_ATTR2 : 0); 100 101 geo->logsectsize = XFS_SB_VERSION_HASSECTOR(&mp->m_sb) ? ··· 143 140 pct = in->imaxpct; 144 141 if (nb < mp->m_sb.sb_dblocks || pct < 0 || pct > 100) 145 142 return XFS_ERROR(EINVAL); 143 + if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb))) 144 + return error; 146 145 dpct = pct - mp->m_sb.sb_imax_pct; 147 146 error = xfs_read_buf(mp, mp->m_ddev_targp, 148 147 XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1), ··· 166 161 new = nb - mp->m_sb.sb_dblocks; 167 162 oagcount = mp->m_sb.sb_agcount; 168 163 if (nagcount > oagcount) { 164 + xfs_filestream_flush(mp); 169 165 down_write(&mp->m_peraglock); 170 166 mp->m_perag = kmem_realloc(mp->m_perag, 171 167 sizeof(xfs_perag_t) * nagcount, ··· 179 173 up_write(&mp->m_peraglock); 180 174 } 181 175 tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS); 176 + tp->t_flags |= XFS_TRANS_RESERVE; 182 177 if ((error = xfs_trans_reserve(tp, XFS_GROWFS_SPACE_RES(mp), 183 178 XFS_GROWDATA_LOG_RES(mp), 0, 0, 0))) { 184 179 xfs_trans_cancel(tp, 0); ··· 335 328 be32_add(&agf->agf_length, new); 336 329 ASSERT(be32_to_cpu(agf->agf_length) == 337 330 be32_to_cpu(agi->agi_length)); 331 + xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH); 338 332 /* 339 333 * Free the new space. 340 334 */ ··· 502 494 unsigned long s; 503 495 504 496 /* If inval is null, report current values and return */ 505 - 506 497 if (inval == (__uint64_t *)NULL) { 498 + if (!outval) 499 + return EINVAL; 507 500 outval->resblks = mp->m_resblks; 508 501 outval->resblks_avail = mp->m_resblks_avail; 509 502 return 0; ··· 567 558 } 568 559 } 569 560 out: 570 - outval->resblks = mp->m_resblks; 571 - outval->resblks_avail = mp->m_resblks_avail; 561 + if (outval) { 562 + outval->resblks = mp->m_resblks; 563 + outval->resblks_avail = mp->m_resblks_avail; 564 + } 572 565 XFS_SB_UNLOCK(mp, s); 573 566 574 567 if (fdblks_delta) {
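The NULL checks added to xfs_reserve_blocks() formalise its two calling conventions. A sketch of both, as an in-kernel caller would use them; a query with a NULL outval now fails with EINVAL instead of dereferencing it:

    xfs_fsop_resblks_t out;
    __uint64_t request = 8192;

    error = xfs_reserve_blocks(mp, NULL, &out);     /* query current reservation */
    error = xfs_reserve_blocks(mp, &request, &out); /* set it and report result */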
+25 -3
fs/xfs/xfs_ialloc.c
··· 123 123 int blks_per_cluster; /* fs blocks per inode cluster */ 124 124 xfs_btree_cur_t *cur; /* inode btree cursor */ 125 125 xfs_daddr_t d; /* disk addr of buffer */ 126 + xfs_agnumber_t agno; 126 127 int error; 127 128 xfs_buf_t *fbuf; /* new free inodes' buffer */ 128 129 xfs_dinode_t *free; /* new free inode structure */ ··· 303 302 } 304 303 be32_add(&agi->agi_count, newlen); 305 304 be32_add(&agi->agi_freecount, newlen); 305 + agno = be32_to_cpu(agi->agi_seqno); 306 306 down_read(&args.mp->m_peraglock); 307 - args.mp->m_perag[be32_to_cpu(agi->agi_seqno)].pagi_freecount += newlen; 307 + args.mp->m_perag[agno].pagi_freecount += newlen; 308 308 up_read(&args.mp->m_peraglock); 309 309 agi->agi_newino = cpu_to_be32(newino); 310 310 /* 311 311 * Insert records describing the new inode chunk into the btree. 312 312 */ 313 - cur = xfs_btree_init_cursor(args.mp, tp, agbp, 314 - be32_to_cpu(agi->agi_seqno), 313 + cur = xfs_btree_init_cursor(args.mp, tp, agbp, agno, 315 314 XFS_BTNUM_INO, (xfs_inode_t *)0, 0); 316 315 for (thisino = newino; 317 316 thisino < newino + newlen; ··· 1388 1387 pag = &mp->m_perag[agno]; 1389 1388 if (!pag->pagi_init) { 1390 1389 pag->pagi_freecount = be32_to_cpu(agi->agi_freecount); 1390 + pag->pagi_count = be32_to_cpu(agi->agi_count); 1391 1391 pag->pagi_init = 1; 1392 1392 } else { 1393 1393 /* ··· 1410 1408 1411 1409 XFS_BUF_SET_VTYPE_REF(bp, B_FS_AGI, XFS_AGI_REF); 1412 1410 *bpp = bp; 1411 + return 0; 1412 + } 1413 + 1414 + /* 1415 + * Read in the agi to initialise the per-ag data in the mount structure 1416 + */ 1417 + int 1418 + xfs_ialloc_pagi_init( 1419 + xfs_mount_t *mp, /* file system mount structure */ 1420 + xfs_trans_t *tp, /* transaction pointer */ 1421 + xfs_agnumber_t agno) /* allocation group number */ 1422 + { 1423 + xfs_buf_t *bp = NULL; 1424 + int error; 1425 + 1426 + error = xfs_ialloc_read_agi(mp, tp, agno, &bp); 1427 + if (error) 1428 + return error; 1429 + if (bp) 1430 + xfs_trans_brelse(tp, bp); 1413 1431 return 0; 1414 1432 }
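The new xfs_ialloc_pagi_init() exists purely for its side effect: on return, pagi_count and pagi_freecount in mp->m_perag are valid and the AGI buffer has already been released. The intended call pattern, mirrored by the per-AG counter rebuild added to xfs_mount.c below, is a simple sweep:

    for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
            error = xfs_ialloc_pagi_init(mp, NULL, agno);
            if (error)
                    return error;
            /* mp->m_perag[agno].pagi_{count,freecount} now populated */
    }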
+10
fs/xfs/xfs_ialloc.h
··· 149 149 xfs_agnumber_t agno, /* allocation group number */ 150 150 struct xfs_buf **bpp); /* allocation group hdr buf */ 151 151 152 + /* 153 + * Read in the allocation group header to initialise the per-ag data 154 + * in the mount structure 155 + */ 156 + int 157 + xfs_ialloc_pagi_init( 158 + struct xfs_mount *mp, /* file system mount structure */ 159 + struct xfs_trans *tp, /* transaction pointer */ 160 + xfs_agnumber_t agno); /* allocation group number */ 161 + 152 162 #endif /* __KERNEL__ */ 153 163 154 164 #endif /* __XFS_IALLOC_H__ */
+27 -12
fs/xfs/xfs_inode.c
··· 48 48 #include "xfs_dir2_trace.h" 49 49 #include "xfs_quota.h" 50 50 #include "xfs_acl.h" 51 + #include "xfs_filestream.h" 51 52 53 + #include <linux/log2.h> 52 54 53 55 kmem_zone_t *xfs_ifork_zone; 54 56 kmem_zone_t *xfs_inode_zone; ··· 645 643 ep->l1 = INT_GET(get_unaligned((__uint64_t*)&dp->l1), 646 644 ARCH_CONVERT); 647 645 } 648 - xfs_bmap_trace_exlist("xfs_iformat_extents", ip, nex, 649 - whichfork); 646 + XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork); 650 647 if (whichfork != XFS_DATA_FORK || 651 648 XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE) 652 649 if (unlikely(xfs_check_nostate_extents( ··· 818 817 flags |= XFS_XFLAG_EXTSZINHERIT; 819 818 if (di_flags & XFS_DIFLAG_NODEFRAG) 820 819 flags |= XFS_XFLAG_NODEFRAG; 820 + if (di_flags & XFS_DIFLAG_FILESTREAM) 821 + flags |= XFS_XFLAG_FILESTREAM; 821 822 } 822 823 823 824 return flags; ··· 1077 1074 * also returns the [locked] bp pointing to the head of the freelist 1078 1075 * as ialloc_context. The caller should hold this buffer across 1079 1076 * the commit and pass it back into this routine on the second call. 1077 + * 1078 + * If we are allocating quota inodes, we do not have a parent inode 1079 + * to attach to or associate with (i.e. pip == NULL) because they 1080 + * are not linked into the directory structure - they are attached 1081 + * directly to the superblock - and so have no parent. 1080 1082 */ 1081 1083 int 1082 1084 xfs_ialloc( ··· 1107 1099 * Call the space management code to pick 1108 1100 * the on-disk inode to be allocated. 1109 1101 */ 1110 - error = xfs_dialloc(tp, pip->i_ino, mode, okalloc, 1102 + error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc, 1111 1103 ialloc_context, call_again, &ino); 1112 1104 if (error != 0) { 1113 1105 return error; ··· 1158 1150 /* 1159 1151 * Project ids won't be stored on disk if we are using a version 1 inode. 
1160 1152 */ 1161 - if ( (prid != 0) && (ip->i_d.di_version == XFS_DINODE_VERSION_1)) 1153 + if ((prid != 0) && (ip->i_d.di_version == XFS_DINODE_VERSION_1)) 1162 1154 xfs_bump_ino_vers2(tp, ip); 1163 1155 1164 - if (XFS_INHERIT_GID(pip, vp->v_vfsp)) { 1156 + if (pip && XFS_INHERIT_GID(pip, vp->v_vfsp)) { 1165 1157 ip->i_d.di_gid = pip->i_d.di_gid; 1166 1158 if ((pip->i_d.di_mode & S_ISGID) && (mode & S_IFMT) == S_IFDIR) { 1167 1159 ip->i_d.di_mode |= S_ISGID; ··· 1203 1195 flags |= XFS_ILOG_DEV; 1204 1196 break; 1205 1197 case S_IFREG: 1198 + if (pip && xfs_inode_is_filestream(pip)) { 1199 + error = xfs_filestream_associate(pip, ip); 1200 + if (error < 0) 1201 + return -error; 1202 + if (!error) 1203 + xfs_iflags_set(ip, XFS_IFILESTREAM); 1204 + } 1205 + /* fall through */ 1206 1206 case S_IFDIR: 1207 - if (unlikely(pip->i_d.di_flags & XFS_DIFLAG_ANY)) { 1207 + if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) { 1208 1208 uint di_flags = 0; 1209 1209 1210 1210 if ((mode & S_IFMT) == S_IFDIR) { ··· 1249 1233 if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) && 1250 1234 xfs_inherit_nodefrag) 1251 1235 di_flags |= XFS_DIFLAG_NODEFRAG; 1236 + if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM) 1237 + di_flags |= XFS_DIFLAG_FILESTREAM; 1252 1238 ip->i_d.di_flags |= di_flags; 1253 1239 } 1254 1240 /* FALLTHROUGH */ ··· 2893 2875 int copied; 2894 2876 xfs_bmbt_rec_t *dest_ep; 2895 2877 xfs_bmbt_rec_t *ep; 2896 - #ifdef XFS_BMAP_TRACE 2897 - static char fname[] = "xfs_iextents_copy"; 2898 - #endif 2899 2878 int i; 2900 2879 xfs_ifork_t *ifp; 2901 2880 int nrecs; ··· 2903 2888 ASSERT(ifp->if_bytes > 0); 2904 2889 2905 2890 nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); 2906 - xfs_bmap_trace_exlist(fname, ip, nrecs, whichfork); 2891 + XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork); 2907 2892 ASSERT(nrecs > 0); 2908 2893 2909 2894 /* ··· 4199 4184 ifp->if_bytes = new_size; 4200 4185 return; 4201 4186 } 4202 - if ((new_size & (new_size - 1)) != 0) { 4187 + if (!is_power_of_2(new_size)){ 4203 4188 rnew_size = xfs_iroundup(new_size); 4204 4189 } 4205 4190 if (rnew_size != ifp->if_real_bytes) { ··· 4222 4207 */ 4223 4208 else { 4224 4209 new_size += ifp->if_bytes; 4225 - if ((new_size & (new_size - 1)) != 0) { 4210 + if (!is_power_of_2(new_size)) { 4226 4211 rnew_size = xfs_iroundup(new_size); 4227 4212 } 4228 4213 xfs_iext_inline_to_direct(ifp, rnew_size);
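The helper and the open-coded test it replaces differ only at zero: (n & (n - 1)) == 0 also holds for n == 0, which the open-coded form therefore treats as a power of two, while is_power_of_2() rejects it. The definition in linux/log2.h is equivalent to:

    static inline bool my_is_power_of_2(unsigned long n)
    {
            return n != 0 && (n & (n - 1)) == 0;
    }
    /* my_is_power_of_2(64) -> true, my_is_power_of_2(96) -> false,
     * my_is_power_of_2(0)  -> false. */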
+10 -6
fs/xfs/xfs_inode.h
··· 379 379 #define XFS_ISTALE 0x0010 /* inode has been staled */
380 380 #define XFS_IRECLAIMABLE 0x0020 /* inode can be reclaimed */
381 381 #define XFS_INEW 0x0040
382 + #define XFS_IFILESTREAM 0x0080 /* inode is in a filestream directory */
382 383
383 384 /*
384 385 * Flags for inode locking.
··· 415 414 * gets a lockdep subclass of 1 and the second lock will have a lockdep
416 415 * subclass of 0.
417 416 *
418 - * XFS_I[O]LOCK_INUMORDER - for locking several inodes at the same time
417 + * XFS_LOCK_INUMORDER - for locking several inodes at the same time
419 418 * with xfs_lock_inodes(). This flag is used as the starting subclass
420 419 * and each subsequent lock acquired will increment the subclass by one.
421 420 * So the first lock acquired will have a lockdep subclass of 2, the
422 - * second lock will have a lockdep subclass of 3, and so on.
421 + * second lock will have a lockdep subclass of 3, and so on. It is
422 + * the responsibility of the class builder to shift this to the correct
423 + * portion of the lock_mode lockdep mask.
423 424 */
425 + #define XFS_LOCK_PARENT 1
426 + #define XFS_LOCK_INUMORDER 2
427 +
424 428 #define XFS_IOLOCK_SHIFT 16
425 - #define XFS_IOLOCK_PARENT (1 << XFS_IOLOCK_SHIFT)
426 - #define XFS_IOLOCK_INUMORDER (2 << XFS_IOLOCK_SHIFT)
429 + #define XFS_IOLOCK_PARENT (XFS_LOCK_PARENT << XFS_IOLOCK_SHIFT)
427 430
428 431 #define XFS_ILOCK_SHIFT 24
429 - #define XFS_ILOCK_PARENT (1 << XFS_ILOCK_SHIFT)
430 - #define XFS_ILOCK_INUMORDER (2 << XFS_ILOCK_SHIFT)
432 + #define XFS_ILOCK_PARENT (XFS_LOCK_PARENT << XFS_ILOCK_SHIFT)
431 433
432 434 #define XFS_IOLOCK_DEP_MASK 0x00ff0000
433 435 #define XFS_ILOCK_DEP_MASK 0xff000000
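Worked example of the reworked encoding: XFS_IOLOCK_PARENT is now XFS_LOCK_PARENT << XFS_IOLOCK_SHIFT, i.e. 1 << 16 = 0x00010000, which lands inside XFS_IOLOCK_DEP_MASK. For xfs_lock_inodes(), the i-th inode's iolock class would plausibly be built the same way (a sketch of the "class builder" responsibility the comment describes):

    lock_mode = XFS_IOLOCK_EXCL |
                ((XFS_LOCK_INUMORDER + i) << XFS_IOLOCK_SHIFT);
    /* i == 0 yields lockdep subclass 2, i == 1 yields 3, and so on,
     * all confined to the 0x00ff0000 XFS_IOLOCK_DEP_MASK field. */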
+12 -29
fs/xfs/xfs_iomap.c
··· 451 451 return XFS_ERROR(error); 452 452 453 453 rt = XFS_IS_REALTIME_INODE(ip); 454 - if (unlikely(rt)) { 455 - if (!(extsz = ip->i_d.di_extsize)) 456 - extsz = mp->m_sb.sb_rextsize; 457 - } else { 458 - extsz = ip->i_d.di_extsize; 459 - } 454 + extsz = xfs_get_extsz_hint(ip); 460 455 461 456 isize = ip->i_size; 462 457 if (io->io_new_size > isize) 463 458 isize = io->io_new_size; 464 459 465 - offset_fsb = XFS_B_TO_FSBT(mp, offset); 466 - last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); 460 + offset_fsb = XFS_B_TO_FSBT(mp, offset); 461 + last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); 467 462 if ((offset + count) > isize) { 468 463 error = xfs_iomap_eof_align_last_fsb(mp, io, isize, extsz, 469 464 &last_fsb); ··· 484 489 if (unlikely(rt)) { 485 490 resrtextents = qblocks = resaligned; 486 491 resrtextents /= mp->m_sb.sb_rextsize; 487 - resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0); 488 - quota_flag = XFS_QMOPT_RES_RTBLKS; 489 - } else { 490 - resrtextents = 0; 492 + resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0); 493 + quota_flag = XFS_QMOPT_RES_RTBLKS; 494 + } else { 495 + resrtextents = 0; 491 496 resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned); 492 - quota_flag = XFS_QMOPT_RES_REGBLKS; 493 - } 497 + quota_flag = XFS_QMOPT_RES_REGBLKS; 498 + } 494 499 495 500 /* 496 501 * Allocate and setup the transaction ··· 661 666 if (error) 662 667 return XFS_ERROR(error); 663 668 664 - if (XFS_IS_REALTIME_INODE(ip)) { 665 - if (!(extsz = ip->i_d.di_extsize)) 666 - extsz = mp->m_sb.sb_rextsize; 667 - } else { 668 - extsz = ip->i_d.di_extsize; 669 - } 670 - 669 + extsz = xfs_get_extsz_hint(ip); 671 670 offset_fsb = XFS_B_TO_FSBT(mp, offset); 672 671 673 672 retry: ··· 777 788 nimaps = 0; 778 789 while (nimaps == 0) { 779 790 tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE); 791 + tp->t_flags |= XFS_TRANS_RESERVE; 780 792 nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); 781 793 error = xfs_trans_reserve(tp, nres, 782 794 XFS_WRITE_LOG_RES(mp), 783 795 0, XFS_TRANS_PERM_LOG_RES, 784 796 XFS_WRITE_LOG_COUNT); 785 - if (error == ENOSPC) { 786 - error = xfs_trans_reserve(tp, 0, 787 - XFS_WRITE_LOG_RES(mp), 788 - 0, 789 - XFS_TRANS_PERM_LOG_RES, 790 - XFS_WRITE_LOG_COUNT); 791 - } 792 797 if (error) { 793 798 xfs_trans_cancel(tp, 0); 794 799 return XFS_ERROR(error); ··· 900 917 * from unwritten to real. Do allocations in a loop until 901 918 * we have covered the range passed in. 902 919 */ 903 - 904 920 tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE); 921 + tp->t_flags |= XFS_TRANS_RESERVE; 905 922 error = xfs_trans_reserve(tp, resblks, 906 923 XFS_WRITE_LOG_RES(mp), 0, 907 924 XFS_TRANS_PERM_LOG_RES,
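Both call sites now defer to xfs_get_extsz_hint(). Judging from the open-coded logic removed here, the helper presumably reduces to the following sketch (the real helper may also consult the extent-size inode flags):

    xfs_extlen_t
    extsz_hint_sketch(xfs_inode_t *ip)
    {
            xfs_extlen_t extsz = ip->i_d.di_extsize;

            /* Realtime inodes without an explicit hint fall back to
             * the realtime extent size. */
            if (XFS_IS_REALTIME_INODE(ip) && !extsz)
                    extsz = ip->i_mount->m_sb.sb_rextsize;
            return extsz;
    }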
+34 -8
fs/xfs/xfs_itable.c
··· 202 202 return 0; 203 203 } 204 204 205 + STATIC int 206 + xfs_bulkstat_one_fmt( 207 + void __user *ubuffer, 208 + const xfs_bstat_t *buffer) 209 + { 210 + if (copy_to_user(ubuffer, buffer, sizeof(*buffer))) 211 + return -EFAULT; 212 + return sizeof(*buffer); 213 + } 214 + 205 215 /* 206 216 * Return stat information for one inode. 207 217 * Return 0 if ok, else errno. ··· 231 221 xfs_bstat_t *buf; /* return buffer */ 232 222 int error = 0; /* error value */ 233 223 xfs_dinode_t *dip; /* dinode inode pointer */ 224 + bulkstat_one_fmt_pf formatter = private_data ? : xfs_bulkstat_one_fmt; 234 225 235 226 dip = (xfs_dinode_t *)dibuff; 236 227 *stat = BULKSTAT_RV_NOTHING; ··· 254 243 xfs_bulkstat_one_dinode(mp, ino, dip, buf); 255 244 } 256 245 257 - if (copy_to_user(buffer, buf, sizeof(*buf))) { 246 + error = formatter(buffer, buf); 247 + if (error < 0) { 258 248 error = EFAULT; 259 249 goto out_free; 260 250 } 261 251 262 252 *stat = BULKSTAT_RV_DIDONE; 263 253 if (ubused) 264 - *ubused = sizeof(*buf); 254 + *ubused = error; 265 255 266 256 out_free: 267 257 kmem_free(buf, sizeof(*buf)); ··· 760 748 return 0; 761 749 } 762 750 751 + int 752 + xfs_inumbers_fmt( 753 + void __user *ubuffer, /* buffer to write to */ 754 + const xfs_inogrp_t *buffer, /* buffer to read from */ 755 + long count, /* # of elements to read */ 756 + long *written) /* # of bytes written */ 757 + { 758 + if (copy_to_user(ubuffer, buffer, count * sizeof(*buffer))) 759 + return -EFAULT; 760 + *written = count * sizeof(*buffer); 761 + return 0; 762 + } 763 + 763 764 /* 764 765 * Return inode number table for the filesystem. 765 766 */ ··· 781 756 xfs_mount_t *mp, /* mount point for filesystem */ 782 757 xfs_ino_t *lastino, /* last inode returned */ 783 758 int *count, /* size of buffer/count returned */ 784 - xfs_inogrp_t __user *ubuffer)/* buffer with inode descriptions */ 759 + void __user *ubuffer,/* buffer with inode descriptions */ 760 + inumbers_fmt_pf formatter) 785 761 { 786 762 xfs_buf_t *agbp; 787 763 xfs_agino_t agino; ··· 861 835 bufidx++; 862 836 left--; 863 837 if (bufidx == bcount) { 864 - if (copy_to_user(ubuffer, buffer, 865 - bufidx * sizeof(*buffer))) { 838 + long written; 839 + if (formatter(ubuffer, buffer, bufidx, &written)) { 866 840 error = XFS_ERROR(EFAULT); 867 841 break; 868 842 } 869 - ubuffer += bufidx; 843 + ubuffer += written; 870 844 *count += bufidx; 871 845 bufidx = 0; 872 846 } ··· 888 862 } 889 863 if (!error) { 890 864 if (bufidx) { 891 - if (copy_to_user(ubuffer, buffer, 892 - bufidx * sizeof(*buffer))) 865 + long written; 866 + if (formatter(ubuffer, buffer, bufidx, &written)) 893 867 error = XFS_ERROR(EFAULT); 894 868 else 895 869 *count += bufidx;
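The point of the formatter indirection is that the compat ioctl path can pass its own callback through private_data and emit a 32-bit layout instead of copying xfs_bstat_t verbatim. A hedged sketch of such a callback; compat_xfs_bstat_t and its field list are illustrative stand-ins, not the real compat structure:

    STATIC int
    xfs_bulkstat_one_fmt_compat(
            void                    __user *ubuffer,
            const xfs_bstat_t       *buffer)
    {
            compat_xfs_bstat_t      __user *p = ubuffer;

            if (put_user(buffer->bs_ino, &p->bs_ino) ||
                put_user(buffer->bs_mode, &p->bs_mode) ||
                put_user(buffer->bs_size, &p->bs_size))
                    return -EFAULT;
            /* ... remaining fields elided ... */
            return sizeof(*p);
    }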
+19 -1
fs/xfs/xfs_itable.h
··· 69 69 char __user *buffer, 70 70 int *done); 71 71 72 + typedef int (*bulkstat_one_fmt_pf)( /* used size in bytes or negative error */ 73 + void __user *ubuffer, /* buffer to write to */ 74 + const xfs_bstat_t *buffer); /* buffer to read from */ 75 + 72 76 int 73 77 xfs_bulkstat_one( 74 78 xfs_mount_t *mp, ··· 90 86 xfs_mount_t *mp, 91 87 xfs_ino_t ino); 92 88 89 + typedef int (*inumbers_fmt_pf)( 90 + void __user *ubuffer, /* buffer to write to */ 91 + const xfs_inogrp_t *buffer, /* buffer to read from */ 92 + long count, /* # of elements to read */ 93 + long *written); /* # of bytes written */ 94 + 95 + int 96 + xfs_inumbers_fmt( 97 + void __user *ubuffer, /* buffer to write to */ 98 + const xfs_inogrp_t *buffer, /* buffer to read from */ 99 + long count, /* # of elements to read */ 100 + long *written); /* # of bytes written */ 101 + 93 102 int /* error status */ 94 103 xfs_inumbers( 95 104 xfs_mount_t *mp, /* mount point for filesystem */ 96 105 xfs_ino_t *last, /* last inode returned */ 97 106 int *count, /* size of buffer/count returned */ 98 - xfs_inogrp_t __user *buffer);/* buffer with inode info */ 107 + void __user *buffer, /* buffer with inode info */ 108 + inumbers_fmt_pf formatter); 99 109 100 110 #endif /* __XFS_ITABLE_H__ */
+21 -20
fs/xfs/xfs_log.c
··· 817 817 SPLDECL(s); 818 818 int needed = 0, gen; 819 819 xlog_t *log = mp->m_log; 820 - bhv_vfs_t *vfsp = XFS_MTOVFS(mp); 821 820 822 - if (vfs_test_for_freeze(vfsp) || XFS_FORCED_SHUTDOWN(mp) || 823 - (vfsp->vfs_flag & VFS_RDONLY)) 821 + if (!xfs_fs_writable(mp)) 824 822 return 0; 825 823 826 824 s = LOG_LOCK(log); ··· 965 967 } else if (iclog->ic_state & XLOG_STATE_IOERROR) { 966 968 aborted = XFS_LI_ABORTED; 967 969 } 970 + 971 + /* log I/O is always issued ASYNC */ 972 + ASSERT(XFS_BUF_ISASYNC(bp)); 968 973 xlog_state_done_syncing(iclog, aborted); 969 - if (!(XFS_BUF_ISASYNC(bp))) { 970 - /* 971 - * Corresponding psema() will be done in bwrite(). If we don't 972 - * vsema() here, panic. 973 - */ 974 - XFS_BUF_V_IODONESEMA(bp); 975 - } 974 + /* 975 + * do not reference the buffer (bp) here as we could race 976 + * with it being freed after writing the unmount record to the 977 + * log. 978 + */ 979 + 976 980 } /* xlog_iodone */ 977 981 978 982 /* ··· 1199 1199 *iclogp = (xlog_in_core_t *) 1200 1200 kmem_zalloc(sizeof(xlog_in_core_t), KM_SLEEP); 1201 1201 iclog = *iclogp; 1202 - iclog->hic_data = (xlog_in_core_2_t *) 1203 - kmem_zalloc(iclogsize, KM_SLEEP | KM_LARGE); 1204 - 1205 1202 iclog->ic_prev = prev_iclog; 1206 1203 prev_iclog = iclog; 1204 + 1205 + bp = xfs_buf_get_noaddr(log->l_iclog_size, mp->m_logdev_targp); 1206 + if (!XFS_BUF_CPSEMA(bp)) 1207 + ASSERT(0); 1208 + XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone); 1209 + XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb); 1210 + XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1); 1211 + iclog->ic_bp = bp; 1212 + iclog->hic_data = bp->b_addr; 1213 + 1207 1214 log->l_iclog_bak[i] = (xfs_caddr_t)&(iclog->ic_header); 1208 1215 1209 1216 head = &iclog->ic_header; ··· 1223 1216 INT_SET(head->h_fmt, ARCH_CONVERT, XLOG_FMT); 1224 1217 memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t)); 1225 1218 1226 - bp = xfs_buf_get_empty(log->l_iclog_size, mp->m_logdev_targp); 1227 - XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone); 1228 - XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb); 1229 - XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1); 1230 - iclog->ic_bp = bp; 1231 1219 1232 1220 iclog->ic_size = XFS_BUF_SIZE(bp) - log->l_iclog_hsize; 1233 1221 iclog->ic_state = XLOG_STATE_ACTIVE; ··· 1434 1432 } else { 1435 1433 iclog->ic_bwritecnt = 1; 1436 1434 } 1437 - XFS_BUF_SET_PTR(bp, (xfs_caddr_t) &(iclog->ic_header), count); 1435 + XFS_BUF_SET_COUNT(bp, count); 1438 1436 XFS_BUF_SET_FSPRIVATE(bp, iclog); /* save for later */ 1439 1437 XFS_BUF_ZEROFLAGS(bp); 1440 1438 XFS_BUF_BUSY(bp); ··· 1530 1528 } 1531 1529 #endif 1532 1530 next_iclog = iclog->ic_next; 1533 - kmem_free(iclog->hic_data, log->l_iclog_size); 1534 1531 kmem_free(iclog, sizeof(xlog_in_core_t)); 1535 1532 iclog = next_iclog; 1536 1533 }
+8
fs/xfs/xfs_log_recover.c
··· 927 927 ASSIGN_ANY_LSN_HOST(log->l_last_sync_lsn, log->l_curr_cycle, 928 928 after_umount_blk); 929 929 *tail_blk = after_umount_blk; 930 + 931 + /* 932 + * Note that the unmount was clean. If the unmount 933 + * was not clean, we need to know this to rebuild the 934 + * superblock counters from the perag headers if we 935 + * have a filesystem using non-persistent counters. 936 + */ 937 + log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN; 930 938 } 931 939 } 932 940
+218 -19
fs/xfs/xfs_mount.c
··· 202 202 kmem_free(mp, sizeof(xfs_mount_t)); 203 203 } 204 204 205 + /* 206 + * Check size of device based on the (data/realtime) block count. 207 + * Note: this check is used by the growfs code as well as mount. 208 + */ 209 + int 210 + xfs_sb_validate_fsb_count( 211 + xfs_sb_t *sbp, 212 + __uint64_t nblocks) 213 + { 214 + ASSERT(PAGE_SHIFT >= sbp->sb_blocklog); 215 + ASSERT(sbp->sb_blocklog >= BBSHIFT); 216 + 217 + #if XFS_BIG_BLKNOS /* Limited by ULONG_MAX of page cache index */ 218 + if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX) 219 + return E2BIG; 220 + #else /* Limited by UINT_MAX of sectors */ 221 + if (nblocks << (sbp->sb_blocklog - BBSHIFT) > UINT_MAX) 222 + return E2BIG; 223 + #endif 224 + return 0; 225 + } 205 226 206 227 /* 207 228 * Check the validity of the SB found. ··· 305 284 return XFS_ERROR(EFSCORRUPTED); 306 285 } 307 286 308 - ASSERT(PAGE_SHIFT >= sbp->sb_blocklog); 309 - ASSERT(sbp->sb_blocklog >= BBSHIFT); 310 - 311 - #if XFS_BIG_BLKNOS /* Limited by ULONG_MAX of page cache index */ 312 - if (unlikely( 313 - (sbp->sb_dblocks >> (PAGE_SHIFT - sbp->sb_blocklog)) > ULONG_MAX || 314 - (sbp->sb_rblocks >> (PAGE_SHIFT - sbp->sb_blocklog)) > ULONG_MAX)) { 315 - #else /* Limited by UINT_MAX of sectors */ 316 - if (unlikely( 317 - (sbp->sb_dblocks << (sbp->sb_blocklog - BBSHIFT)) > UINT_MAX || 318 - (sbp->sb_rblocks << (sbp->sb_blocklog - BBSHIFT)) > UINT_MAX)) { 319 - #endif 287 + if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) || 288 + xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) { 320 289 xfs_fs_mount_cmn_err(flags, 321 290 "file system too large to be mounted on this system."); 322 291 return XFS_ERROR(E2BIG); ··· 643 632 sbp->sb_inopblock); 644 633 mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog; 645 634 } 635 + 636 + /* 637 + * xfs_initialize_perag_data 638 + * 639 + * Read in each per-ag structure so we can count up the number of 640 + * allocated inodes, free inodes and used filesystem blocks as this 641 + * information is no longer persistent in the superblock. Once we have 642 + * this information, write it into the in-core superblock structure. 643 + */ 644 + STATIC int 645 + xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount) 646 + { 647 + xfs_agnumber_t index; 648 + xfs_perag_t *pag; 649 + xfs_sb_t *sbp = &mp->m_sb; 650 + uint64_t ifree = 0; 651 + uint64_t ialloc = 0; 652 + uint64_t bfree = 0; 653 + uint64_t bfreelst = 0; 654 + uint64_t btree = 0; 655 + int error; 656 + int s; 657 + 658 + for (index = 0; index < agcount; index++) { 659 + /* 660 + * read the agf, then the agi. This gets us 661 + * all the information we need and populates the 662 + * per-ag structures for us. 663 + */ 664 + error = xfs_alloc_pagf_init(mp, NULL, index, 0); 665 + if (error) 666 + return error; 667 + 668 + error = xfs_ialloc_pagi_init(mp, NULL, index); 669 + if (error) 670 + return error; 671 + pag = &mp->m_perag[index]; 672 + ifree += pag->pagi_freecount; 673 + ialloc += pag->pagi_count; 674 + bfree += pag->pagf_freeblks; 675 + bfreelst += pag->pagf_flcount; 676 + btree += pag->pagf_btreeblks; 677 + } 678 + /* 679 + * Overwrite incore superblock counters with just-read data 680 + */ 681 + s = XFS_SB_LOCK(mp); 682 + sbp->sb_ifree = ifree; 683 + sbp->sb_icount = ialloc; 684 + sbp->sb_fdblocks = bfree + bfreelst + btree; 685 + XFS_SB_UNLOCK(mp, s); 686 + 687 + /* Fixup the per-cpu counters as well.
*/ 688 + xfs_icsb_reinit_counters(mp); 689 + 690 + return 0; 691 + } 692 + 646 693 /* 647 694 * xfs_mountfs 648 695 * ··· 725 656 bhv_vnode_t *rvp = NULL; 726 657 int readio_log, writeio_log; 727 658 xfs_daddr_t d; 728 - __uint64_t ret64; 659 + __uint64_t resblks; 729 660 __int64_t update_flags; 730 661 uint quotamount, quotaflags; 731 662 int agno; ··· 842 773 */ 843 774 if ((mfsi_flags & XFS_MFSI_SECOND) == 0 && 844 775 (mp->m_flags & XFS_MOUNT_NOUUID) == 0) { 776 + __uint64_t ret64; 845 777 if (xfs_uuid_mount(mp)) { 846 778 error = XFS_ERROR(EINVAL); 847 779 goto error1; ··· 1046 976 } 1047 977 1048 978 /* 979 + * Now the log is mounted, we know if it was an unclean shutdown or 980 + * not. If it was, then once the first phase of recovery has completed, we 981 + * have consistent AG blocks on disk. We have not recovered EFIs yet, 982 + * but they are recovered transactionally in the second recovery phase 983 + * later. 984 + * 985 + * Hence we can safely re-initialise incore superblock counters from 986 + * the per-ag data. These may not be correct if the filesystem was not 987 + * cleanly unmounted, so we need to wait for recovery to finish before 988 + * doing this. 989 + * 990 + * If the filesystem was cleanly unmounted, then we can trust the 991 + * values in the superblock to be correct and we don't need to do 992 + * anything here. 993 + * 994 + * If we are currently making the filesystem, the initialisation will 995 + * fail as the perag data is in an undefined state. 996 + */ 997 + 998 + if (xfs_sb_version_haslazysbcount(&mp->m_sb) && 999 + !XFS_LAST_UNMOUNT_WAS_CLEAN(mp) && 1000 + !mp->m_sb.sb_inprogress) { 1001 + error = xfs_initialize_perag_data(mp, sbp->sb_agcount); 1002 + if (error) { 1003 + goto error2; 1004 + } 1005 + } 1006 + /* 1049 1007 * Get and sanity-check the root inode. 1050 1008 * Save the pointer to it in the mount structure. 1051 1009 */ ··· 1142 1044 if ((error = XFS_QM_MOUNT(mp, quotamount, quotaflags, mfsi_flags))) 1143 1045 goto error4; 1144 1046 1047 + /* 1048 + * Now we are mounted, reserve a small amount of unused space for 1049 + * privileged transactions. This is needed so that transaction 1050 + * space required for critical operations can dip into this pool 1051 + * when at ENOSPC. This is needed for operations like create with 1052 + * attr, unwritten extent conversion at ENOSPC, etc. Data allocations 1053 + * are not allowed to use this reserved space. 1054 + * 1055 + * We default to 5% or 1024 fsbs of space reserved, whichever is smaller. 1056 + * This may drive us straight to ENOSPC on mount, but that implies 1057 + * we were already there on the last unmount. 1058 + */ 1059 + resblks = mp->m_sb.sb_dblocks; 1060 + do_div(resblks, 20); 1061 + resblks = min_t(__uint64_t, resblks, 1024); 1062 + xfs_reserve_blocks(mp, &resblks, NULL); 1063 + 1145 1064 return 0; 1146 1065 1147 1066 error4: ··· 1198 1083 #if defined(DEBUG) || defined(INDUCE_IO_ERROR) 1199 1084 int64_t fsid; 1200 1085 #endif 1086 + __uint64_t resblks; 1201 1087 1088 + /* 1089 + * We can potentially deadlock here if we have an inode cluster 1090 + * that has been freed but still has its buffer pinned in memory because 1091 + * the transaction is still sitting in an iclog. The stale inodes 1092 + * on that buffer will have their flush locks held until the 1093 + * transaction hits the disk and the callbacks run. The inode 1094 + * flush takes the flush lock unconditionally and with nothing to 1095 + * push out the iclog we will never get that unlocked.
Hence we 1096 + * need to force the log first. 1097 + */ 1098 + xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC); 1202 1099 xfs_iflush_all(mp); 1203 1100 1204 1101 XFS_QM_DQPURGEALL(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING); ··· 1227 1100 xfs_binval(mp->m_rtdev_targp); 1228 1101 } 1229 1102 1103 + /* 1104 + * Unreserve any blocks we have so that when we unmount we don't account 1105 + * the reserved free space as used. This is really only necessary for 1106 + * lazy superblock counting because it trusts the incore superblock 1107 + * counters to be absolutely correct on clean unmount. 1108 + * 1109 + * We don't bother correcting this elsewhere for lazy superblock 1110 + * counting because on mount of an unclean filesystem we reconstruct the 1111 + * correct counter value and this is irrelevant. 1112 + * 1113 + * For non-lazy counter filesystems, this doesn't matter at all because 1114 + * we only ever apply deltas to the superblock and hence the incore 1115 + * value does not matter.... 1116 + */ 1117 + resblks = 0; 1118 + xfs_reserve_blocks(mp, &resblks, NULL); 1119 + 1120 + xfs_log_sbcount(mp, 1); 1230 1121 xfs_unmountfs_writesb(mp); 1231 - 1232 1122 xfs_unmountfs_wait(mp); /* wait for async bufs */ 1233 - 1234 1123 xfs_log_unmount(mp); /* Done! No more fs ops. */ 1235 1124 1236 1125 xfs_freesb(mp); ··· 1293 1150 } 1294 1151 1295 1152 int 1153 + xfs_fs_writable(xfs_mount_t *mp) 1154 + { 1155 + bhv_vfs_t *vfsp = XFS_MTOVFS(mp); 1156 + 1157 + return !(vfs_test_for_freeze(vfsp) || XFS_FORCED_SHUTDOWN(mp) || 1158 + (vfsp->vfs_flag & VFS_RDONLY)); 1159 + } 1160 + 1161 + /* 1162 + * xfs_log_sbcount 1163 + * 1164 + * Called either periodically to keep the on disk superblock values 1165 + * roughly up to date or from unmount to make sure the values are 1166 + * correct on a clean unmount. 1167 + * 1168 + * Note this code can be called during the process of freezing, so 1169 + * we may need to use the transaction allocator which does not 1170 + * block when the transaction subsystem is in its frozen state. 1171 + */ 1172 + int 1173 + xfs_log_sbcount( 1174 + xfs_mount_t *mp, 1175 + uint sync) 1176 + { 1177 + xfs_trans_t *tp; 1178 + int error; 1179 + 1180 + if (!xfs_fs_writable(mp)) 1181 + return 0; 1182 + 1183 + xfs_icsb_sync_counters(mp); 1184 + 1185 + /* 1186 + * we don't need to do this if we are updating the superblock 1187 + * counters on every modification. 1188 + */ 1189 + if (!xfs_sb_version_haslazysbcount(&mp->m_sb)) 1190 + return 0; 1191 + 1192 + tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT); 1193 + error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0, 1194 + XFS_DEFAULT_LOG_COUNT); 1195 + if (error) { 1196 + xfs_trans_cancel(tp, 0); 1197 + return error; 1198 + } 1199 + 1200 + xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS); 1201 + if (sync) 1202 + xfs_trans_set_sync(tp); 1203 + xfs_trans_commit(tp, 0); 1204 + 1205 + return 0; 1206 + } 1207 + 1208 + int 1296 1209 xfs_unmountfs_writesb(xfs_mount_t *mp) 1297 1210 { 1298 1211 xfs_buf_t *sbp; ··· 1359 1160 * skip superblock write if fs is read-only, or 1360 1161 * if we are doing a forced umount.
1361 1162 */ 1362 - sbp = xfs_getsb(mp, 0); 1363 1163 if (!(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY || 1364 1164 XFS_FORCED_SHUTDOWN(mp))) { 1365 1165 1366 - xfs_icsb_sync_counters(mp); 1166 + sbp = xfs_getsb(mp, 0); 1167 + sb = XFS_BUF_TO_SBP(sbp); 1367 1168 1368 1169 /* 1369 1170 * mark shared-readonly if desired 1370 1171 */ 1371 - sb = XFS_BUF_TO_SBP(sbp); 1372 1172 if (mp->m_mk_sharedro) { 1373 1173 if (!(sb->sb_flags & XFS_SBF_READONLY)) 1374 1174 sb->sb_flags |= XFS_SBF_READONLY; ··· 1376 1178 xfs_fs_cmn_err(CE_NOTE, mp, 1377 1179 "Unmounting, marking shared read-only"); 1378 1180 } 1181 + 1379 1182 XFS_BUF_UNDONE(sbp); 1380 1183 XFS_BUF_UNREAD(sbp); 1381 1184 XFS_BUF_UNDELAYWRITE(sbp); ··· 1391 1192 mp, sbp, XFS_BUF_ADDR(sbp)); 1392 1193 if (error && mp->m_mk_sharedro) 1393 1194 xfs_fs_cmn_err(CE_ALERT, mp, "Superblock write error detected while unmounting. Filesystem may not be marked shared readonly"); 1195 + xfs_buf_relse(sbp); 1394 1196 } 1395 - xfs_buf_relse(sbp); 1396 1197 return error; 1397 1198 } 1398 1199
+12 -3
fs/xfs/xfs_mount.h
··· 66 66 struct xfs_bmap_free; 67 67 struct xfs_extdelta; 68 68 struct xfs_swapext; 69 + struct xfs_mru_cache; 69 70 70 71 extern struct bhv_vfsops xfs_vfsops; 71 72 extern struct bhv_vnodeops xfs_vnodeops; ··· 425 424 struct notifier_block m_icsb_notifier; /* hotplug cpu notifier */ 426 425 struct mutex m_icsb_mutex; /* balancer sync lock */ 427 426 #endif 427 + struct xfs_mru_cache *m_filestream; /* per-mount filestream data */ 428 428 } xfs_mount_t; 429 429 430 430 /* 431 431 * Flags for m_flags. 432 432 */ 433 - #define XFS_MOUNT_WSYNC (1ULL << 0) /* for nfs - all metadata ops 433 + #define XFS_MOUNT_WSYNC (1ULL << 0) /* for nfs - all metadata ops 434 434 must be synchronous except 435 435 for space allocations */ 436 - #define XFS_MOUNT_INO64 (1ULL << 1) 436 + #define XFS_MOUNT_INO64 (1ULL << 1) 437 437 /* (1ULL << 2) -- currently unused */ 438 - /* (1ULL << 3) -- currently unused */ 438 + #define XFS_MOUNT_WAS_CLEAN (1ULL << 3) 439 439 #define XFS_MOUNT_FS_SHUTDOWN (1ULL << 4) /* atomic stop of all filesystem 440 440 operations, typically for 441 441 disk errors in metadata */ ··· 465 463 * I/O size in stat() */ 466 464 #define XFS_MOUNT_NO_PERCPU_SB (1ULL << 23) /* don't use per-cpu superblock 467 465 counters */ 466 + #define XFS_MOUNT_FILESTREAMS (1ULL << 24) /* enable the filestreams 467 + allocator */ 468 468 469 469 470 470 /* ··· 515 511 516 512 #define XFS_MAXIOFFSET(mp) ((mp)->m_maxioffset) 517 513 514 + #define XFS_LAST_UNMOUNT_WAS_CLEAN(mp) \ 515 + ((mp)->m_flags & XFS_MOUNT_WAS_CLEAN) 518 516 #define XFS_FORCED_SHUTDOWN(mp) ((mp)->m_flags & XFS_MOUNT_FS_SHUTDOWN) 519 517 #define xfs_force_shutdown(m,f) \ 520 518 bhv_vfs_force_shutdown((XFS_MTOVFS(m)), f, __FILE__, __LINE__) ··· 608 602 609 603 extern xfs_mount_t *xfs_mount_init(void); 610 604 extern void xfs_mod_sb(xfs_trans_t *, __int64_t); 605 + extern int xfs_log_sbcount(xfs_mount_t *, uint); 611 606 extern void xfs_mount_free(xfs_mount_t *mp, int remove_bhv); 612 607 extern int xfs_mountfs(struct bhv_vfs *, xfs_mount_t *mp, int); 613 608 extern void xfs_mountfs_check_barriers(xfs_mount_t *mp); ··· 625 618 extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int); 626 619 extern int xfs_readsb(xfs_mount_t *, int); 627 620 extern void xfs_freesb(xfs_mount_t *); 621 + extern int xfs_fs_writable(xfs_mount_t *); 628 622 extern void xfs_do_force_shutdown(bhv_desc_t *, int, char *, int); 629 623 extern int xfs_syncsub(xfs_mount_t *, int, int *); 630 624 extern int xfs_sync_inodes(xfs_mount_t *, int, int *); 631 625 extern xfs_agnumber_t xfs_initialize_perag(struct bhv_vfs *, xfs_mount_t *, 632 626 xfs_agnumber_t); 633 627 extern void xfs_xlatesb(void *, struct xfs_sb *, int, __int64_t); 628 + extern int xfs_sb_validate_fsb_count(struct xfs_sb *, __uint64_t); 634 629 635 630 extern struct xfs_dmops xfs_dmcore_stub; 636 631 extern struct xfs_qmops xfs_qmcore_stub;
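XFS_MOUNT_WAS_CLEAN takes the previously unused bit 3 and, via XFS_LAST_UNMOUNT_WAS_CLEAN(), gates the perag counter rebuild shown in xfs_mountfs() above. A reduced model of that decision (the struct fields here are invented stand-ins for the real superblock and feature tests; only the flag value matches the header):

#include <stdio.h>

#define XFS_MOUNT_WAS_CLEAN	(1ULL << 3)	/* bit value from the header */

struct mount_demo {
	unsigned long long m_flags;
	int has_lazysbcount;	/* stand-in for xfs_sb_version_haslazysbcount() */
	int sb_inprogress;	/* mkfs still in progress */
};

/* mirrors the condition xfs_mountfs() uses before rebuilding counters */
static int needs_perag_rebuild_demo(const struct mount_demo *mp)
{
	return mp->has_lazysbcount &&
	       !(mp->m_flags & XFS_MOUNT_WAS_CLEAN) &&
	       !mp->sb_inprogress;
}

int main(void)
{
	struct mount_demo unclean = { 0, 1, 0 };
	struct mount_demo clean   = { XFS_MOUNT_WAS_CLEAN, 1, 0 };

	printf("unclean lazy mount rebuilds? %d\n", needs_perag_rebuild_demo(&unclean));
	printf("clean lazy mount rebuilds?   %d\n", needs_perag_rebuild_demo(&clean));
	return 0;
}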
+608
fs/xfs/xfs_mru_cache.c
··· 1 + /* 2 + * Copyright (c) 2006-2007 Silicon Graphics, Inc. 3 + * All Rights Reserved. 4 + * 5 + * This program is free software; you can redistribute it and/or 6 + * modify it under the terms of the GNU General Public License as 7 + * published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope that it would be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program; if not, write the Free Software Foundation, 16 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 + */ 18 + #include "xfs.h" 19 + #include "xfs_mru_cache.h" 20 + 21 + /* 22 + * The MRU Cache data structure consists of a data store, an array of lists and 23 + * a lock to protect its internal state. At initialisation time, the client 24 + * supplies an element lifetime in milliseconds and a group count, as well as a 25 + * function pointer to call when deleting elements. A data structure for 26 + * queueing up work in the form of timed callbacks is also included. 27 + * 28 + * The group count controls how many lists are created, and thereby how finely 29 + * the elements are grouped in time. When reaping occurs, all the elements in 30 + * all the lists whose time has expired are deleted. 31 + * 32 + * To give an example of how this works in practice, consider a client that 33 + * initialises an MRU Cache with a lifetime of ten seconds and a group count of 34 + * five. Five internal lists will be created, each representing a two second 35 + * period in time. When the first element is added, time zero for the data 36 + * structure is initialised to the current time. 37 + * 38 + * All the elements added in the first two seconds are appended to the first 39 + * list. Elements added in the third second go into the second list, and so on. 40 + * If an element is accessed at any point, it is removed from its list and 41 + * inserted at the head of the current most-recently-used list. 42 + * 43 + * The reaper function will have nothing to do until at least twelve seconds 44 + * have elapsed since the first element was added. The reason for this is that 45 + * if it were called at t=11s, there could be elements in the first list that 46 + * have only been inactive for nine seconds, so it still does nothing. If it is 47 + * called anywhere between t=12 and t=14 seconds, it will delete all the 48 + * elements that remain in the first list. It's therefore possible for elements 49 + * to remain in the data store even after they've been inactive for up to 50 + * (t + t/g) seconds, where t is the inactive element lifetime and g is the 51 + * number of groups. 52 + * 53 + * The above example assumes that the reaper function gets called at least once 54 + * every (t/g) seconds. If it is called less frequently, unused elements will 55 + * accumulate in the reap list until the reaper function is eventually called. 56 + * The current implementation uses work queue callbacks to carefully time the 57 + * reaper function calls, so this should happen rarely, if at all. 58 + * 59 + * From a design perspective, the primary reason for the choice of a list array 60 + * representing discrete time intervals is that it's only practical to reap 61 + * expired elements in groups of some appreciable size. 
This automatically 62 + * introduces a granularity to element lifetimes, so there's no point storing an 63 + * individual timeout with each element that specifies a more precise reap time. 64 + * The bonus is a saving of sizeof(long) bytes of memory per element stored. 65 + * 66 + * The elements could have been stored in just one list, but an array of 67 + * counters or pointers would need to be maintained to allow them to be divided 68 + * up into discrete time groups. More critically, the process of touching or 69 + * removing an element would involve walking large portions of the entire list, 70 + * which would have a detrimental effect on performance. The additional memory 71 + * requirement for the array of list heads is minimal. 72 + * 73 + * When an element is touched or deleted, it needs to be removed from its 74 + * current list. Doubly linked lists are used to make the list maintenance 75 + * portion of these operations O(1). Since reaper timing can be imprecise, 76 + * inserts and lookups can occur when there are no free lists available. When 77 + * this happens, all the elements on the LRU list need to be migrated to the end 78 + * of the reap list. To keep the list maintenance portion of these operations 79 + * O(1) also, list tails need to be accessible without walking the entire list. 80 + * This is the reason why doubly linked list heads are used. 81 + */ 82 + 83 + /* 84 + * An MRU Cache is a dynamic data structure that stores its elements in a way 85 + * that allows efficient lookups, but also groups them into discrete time 86 + * intervals based on insertion time. This allows elements to be efficiently 87 + * and automatically reaped after a fixed period of inactivity. 88 + * 89 + * When a client data pointer is stored in the MRU Cache it needs to be added to 90 + * both the data store and to one of the lists. It must also be possible to 91 + * access each of these entries via the other, i.e. to: 92 + * 93 + * a) Walk a list, removing the corresponding data store entry for each item. 94 + * b) Look up a data store entry, then access its list entry directly. 95 + * 96 + * To achieve both of these goals, each entry must contain both a list entry and 97 + * a key, in addition to the user's data pointer. Note that it's not a good 98 + * idea to have the client embed one of these structures at the top of their own 99 + * data structure, because inserting the same item more than once would most 100 + * likely result in a loop in one of the lists. That's a sure-fire recipe for 101 + * an infinite loop in the code. 102 + */ 103 + typedef struct xfs_mru_cache_elem 104 + { 105 + struct list_head list_node; 106 + unsigned long key; 107 + void *value; 108 + } xfs_mru_cache_elem_t; 109 + 110 + static kmem_zone_t *xfs_mru_elem_zone; 111 + static struct workqueue_struct *xfs_mru_reap_wq; 112 + 113 + /* 114 + * When inserting, destroying or reaping, it's first necessary to update the 115 + * lists relative to a particular time. In the case of destroying, that time 116 + * will be well in the future to ensure that all items are moved to the reap 117 + * list. In all other cases though, the time will be the current time. 118 + * 119 + * This function enters a loop, moving the contents of the LRU list to the reap 120 + * list again and again until either a) the lists are all empty, or b) time zero 121 + * has been advanced sufficiently to be within the immediate element lifetime. 
122 + * 123 + * Case a) above is detected by counting how many groups are migrated and 124 + * stopping when they've all been moved. Case b) is detected by monitoring the 125 + * time_zero field, which is updated as each group is migrated. 126 + * 127 + * The return value is the earliest time that more migration could be needed, or 128 + * zero if there's no need to schedule more work because the lists are empty. 129 + */ 130 + STATIC unsigned long 131 + _xfs_mru_cache_migrate( 132 + xfs_mru_cache_t *mru, 133 + unsigned long now) 134 + { 135 + unsigned int grp; 136 + unsigned int migrated = 0; 137 + struct list_head *lru_list; 138 + 139 + /* Nothing to do if the data store is empty. */ 140 + if (!mru->time_zero) 141 + return 0; 142 + 143 + /* While time zero is older than the time spanned by all the lists. */ 144 + while (mru->time_zero <= now - mru->grp_count * mru->grp_time) { 145 + 146 + /* 147 + * If the LRU list isn't empty, migrate its elements to the tail 148 + * of the reap list. 149 + */ 150 + lru_list = mru->lists + mru->lru_grp; 151 + if (!list_empty(lru_list)) 152 + list_splice_init(lru_list, mru->reap_list.prev); 153 + 154 + /* 155 + * Advance the LRU group number, freeing the old LRU list to 156 + * become the new MRU list; advance time zero accordingly. 157 + */ 158 + mru->lru_grp = (mru->lru_grp + 1) % mru->grp_count; 159 + mru->time_zero += mru->grp_time; 160 + 161 + /* 162 + * If reaping is so far behind that all the elements on all the 163 + * lists have been migrated to the reap list, it's now empty. 164 + */ 165 + if (++migrated == mru->grp_count) { 166 + mru->lru_grp = 0; 167 + mru->time_zero = 0; 168 + return 0; 169 + } 170 + } 171 + 172 + /* Find the first non-empty list from the LRU end. */ 173 + for (grp = 0; grp < mru->grp_count; grp++) { 174 + 175 + /* Check the grp'th list from the LRU end. */ 176 + lru_list = mru->lists + ((mru->lru_grp + grp) % mru->grp_count); 177 + if (!list_empty(lru_list)) 178 + return mru->time_zero + 179 + (mru->grp_count + grp) * mru->grp_time; 180 + } 181 + 182 + /* All the lists must be empty. */ 183 + mru->lru_grp = 0; 184 + mru->time_zero = 0; 185 + return 0; 186 + } 187 + 188 + /* 189 + * When inserting or doing a lookup, an element needs to be inserted into the 190 + * MRU list. The lists must be migrated first to ensure that they're 191 + * up-to-date, otherwise the new element could be given a shorter lifetime in 192 + * the cache than it should. 193 + */ 194 + STATIC void 195 + _xfs_mru_cache_list_insert( 196 + xfs_mru_cache_t *mru, 197 + xfs_mru_cache_elem_t *elem) 198 + { 199 + unsigned int grp = 0; 200 + unsigned long now = jiffies; 201 + 202 + /* 203 + * If the data store is empty, initialise time zero, leave grp set to 204 + * zero and start the work queue timer if necessary. Otherwise, set grp 205 + * to the number of group times that have elapsed since time zero. 206 + */ 207 + if (!_xfs_mru_cache_migrate(mru, now)) { 208 + mru->time_zero = now; 209 + if (!mru->next_reap) 210 + mru->next_reap = mru->grp_count * mru->grp_time; 211 + } else { 212 + grp = (now - mru->time_zero) / mru->grp_time; 213 + grp = (mru->lru_grp + grp) % mru->grp_count; 214 + } 215 + 216 + /* Insert the element at the tail of the corresponding list. */ 217 + list_add_tail(&elem->list_node, mru->lists + grp); 218 + } 219 + 220 + /* 221 + * When destroying or reaping, all the elements that were migrated to the reap 222 + * list need to be deleted. 
For each element this involves removing it from the 223 + * data store, removing it from the reap list, calling the client's free 224 + * function and deleting the element from the element zone. 225 + */ 226 + STATIC void 227 + _xfs_mru_cache_clear_reap_list( 228 + xfs_mru_cache_t *mru) 229 + { 230 + xfs_mru_cache_elem_t *elem, *next; 231 + struct list_head tmp; 232 + 233 + INIT_LIST_HEAD(&tmp); 234 + list_for_each_entry_safe(elem, next, &mru->reap_list, list_node) { 235 + 236 + /* Remove the element from the data store. */ 237 + radix_tree_delete(&mru->store, elem->key); 238 + 239 + /* 240 + * remove to temp list so it can be freed without 241 + * needing to hold the lock 242 + */ 243 + list_move(&elem->list_node, &tmp); 244 + } 245 + mutex_spinunlock(&mru->lock, 0); 246 + 247 + list_for_each_entry_safe(elem, next, &tmp, list_node) { 248 + 249 + /* Remove the element from the reap list. */ 250 + list_del_init(&elem->list_node); 251 + 252 + /* Call the client's free function with the key and value pointer. */ 253 + mru->free_func(elem->key, elem->value); 254 + 255 + /* Free the element structure. */ 256 + kmem_zone_free(xfs_mru_elem_zone, elem); 257 + } 258 + 259 + mutex_spinlock(&mru->lock); 260 + } 261 + 262 + /* 263 + * We fire the reap timer every group expiry interval so 264 + * we always have a reaper ready to run. This makes shutdown 265 + * and flushing of the reaper easy to do. Hence we need to 266 + * keep track of when the next reap must occur so we can determine 267 + * at each interval whether there is anything we need to do. 268 + */ 269 + STATIC void 270 + _xfs_mru_cache_reap( 271 + struct work_struct *work) 272 + { 273 + xfs_mru_cache_t *mru = container_of(work, xfs_mru_cache_t, work.work); 274 + unsigned long now; 275 + 276 + ASSERT(mru && mru->lists); 277 + if (!mru || !mru->lists) 278 + return; 279 + 280 + mutex_spinlock(&mru->lock); 281 + now = jiffies; 282 + if (mru->reap_all || 283 + (mru->next_reap && time_after(now, mru->next_reap))) { 284 + if (mru->reap_all) 285 + now += mru->grp_count * mru->grp_time * 2; 286 + mru->next_reap = _xfs_mru_cache_migrate(mru, now); 287 + _xfs_mru_cache_clear_reap_list(mru); 288 + } 289 + 290 + /* 291 + * the process that triggered the reap_all is responsible 292 + * for restarting the periodic reap if it is required. 293 + */ 294 + if (!mru->reap_all) 295 + queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time); 296 + mru->reap_all = 0; 297 + mutex_spinunlock(&mru->lock, 0); 298 + } 299 + 300 + int 301 + xfs_mru_cache_init(void) 302 + { 303 + xfs_mru_elem_zone = kmem_zone_init(sizeof(xfs_mru_cache_elem_t), 304 + "xfs_mru_cache_elem"); 305 + if (!xfs_mru_elem_zone) 306 + return ENOMEM; 307 + 308 + xfs_mru_reap_wq = create_singlethread_workqueue("xfs_mru_cache"); 309 + if (!xfs_mru_reap_wq) { 310 + kmem_zone_destroy(xfs_mru_elem_zone); 311 + return ENOMEM; 312 + } 313 + 314 + return 0; 315 + } 316 + 317 + void 318 + xfs_mru_cache_uninit(void) 319 + { 320 + destroy_workqueue(xfs_mru_reap_wq); 321 + kmem_zone_destroy(xfs_mru_elem_zone); 322 + } 323 + 324 + /* 325 + * To initialise a struct xfs_mru_cache pointer, call xfs_mru_cache_create() 326 + * with the address of the pointer, a lifetime value in milliseconds, a group 327 + * count and a free function to use when deleting elements. This function 328 + * returns 0 if the initialisation was successful.
329 + */ 330 + int 331 + xfs_mru_cache_create( 332 + xfs_mru_cache_t **mrup, 333 + unsigned int lifetime_ms, 334 + unsigned int grp_count, 335 + xfs_mru_cache_free_func_t free_func) 336 + { 337 + xfs_mru_cache_t *mru = NULL; 338 + int err = 0, grp; 339 + unsigned int grp_time; 340 + 341 + if (mrup) 342 + *mrup = NULL; 343 + 344 + if (!mrup || !grp_count || !lifetime_ms || !free_func) 345 + return EINVAL; 346 + 347 + if (!(grp_time = msecs_to_jiffies(lifetime_ms) / grp_count)) 348 + return EINVAL; 349 + 350 + if (!(mru = kmem_zalloc(sizeof(*mru), KM_SLEEP))) 351 + return ENOMEM; 352 + 353 + /* An extra list is needed to avoid reaping up to a grp_time early. */ 354 + mru->grp_count = grp_count + 1; 355 + mru->lists = kmem_alloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP); 356 + 357 + if (!mru->lists) { 358 + err = ENOMEM; 359 + goto exit; 360 + } 361 + 362 + for (grp = 0; grp < mru->grp_count; grp++) 363 + INIT_LIST_HEAD(mru->lists + grp); 364 + 365 + /* 366 + * We use GFP_KERNEL radix tree preload and do inserts under a 367 + * spinlock so GFP_ATOMIC is appropriate for the radix tree itself. 368 + */ 369 + INIT_RADIX_TREE(&mru->store, GFP_ATOMIC); 370 + INIT_LIST_HEAD(&mru->reap_list); 371 + spinlock_init(&mru->lock, "xfs_mru_cache"); 372 + INIT_DELAYED_WORK(&mru->work, _xfs_mru_cache_reap); 373 + 374 + mru->grp_time = grp_time; 375 + mru->free_func = free_func; 376 + 377 + /* start up the reaper event */ 378 + mru->next_reap = 0; 379 + mru->reap_all = 0; 380 + queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time); 381 + 382 + *mrup = mru; 383 + 384 + exit: 385 + if (err && mru && mru->lists) 386 + kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists)); 387 + if (err && mru) 388 + kmem_free(mru, sizeof(*mru)); 389 + 390 + return err; 391 + } 392 + 393 + /* 394 + * Call xfs_mru_cache_flush() to flush out all cached entries, calling their 395 + * free functions as they're deleted. When this function returns, the caller is 396 + * guaranteed that all the free functions for all the elements have finished 397 + * executing. 398 + * 399 + * While we are flushing, we stop the periodic reaper event from triggering. 400 + * Normally, we want to restart this periodic event, but if we are shutting 401 + * down the cache we do not want it restarted. hence the restart parameter 402 + * where 0 = do not restart reaper and 1 = restart reaper. 403 + */ 404 + void 405 + xfs_mru_cache_flush( 406 + xfs_mru_cache_t *mru, 407 + int restart) 408 + { 409 + if (!mru || !mru->lists) 410 + return; 411 + 412 + cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work); 413 + 414 + mutex_spinlock(&mru->lock); 415 + mru->reap_all = 1; 416 + mutex_spinunlock(&mru->lock, 0); 417 + 418 + queue_work(xfs_mru_reap_wq, &mru->work.work); 419 + flush_workqueue(xfs_mru_reap_wq); 420 + 421 + mutex_spinlock(&mru->lock); 422 + WARN_ON_ONCE(mru->reap_all != 0); 423 + mru->reap_all = 0; 424 + if (restart) 425 + queue_delayed_work(xfs_mru_reap_wq, &mru->work, mru->grp_time); 426 + mutex_spinunlock(&mru->lock, 0); 427 + } 428 + 429 + void 430 + xfs_mru_cache_destroy( 431 + xfs_mru_cache_t *mru) 432 + { 433 + if (!mru || !mru->lists) 434 + return; 435 + 436 + /* we don't want the reaper to restart here */ 437 + xfs_mru_cache_flush(mru, 0); 438 + 439 + kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists)); 440 + kmem_free(mru, sizeof(*mru)); 441 + } 442 + 443 + /* 444 + * To insert an element, call xfs_mru_cache_insert() with the data store, the 445 + * element's key and the client data pointer. 
This function returns 0 on 446 + * success or ENOMEM if memory for the data element couldn't be allocated. 447 + */ 448 + int 449 + xfs_mru_cache_insert( 450 + xfs_mru_cache_t *mru, 451 + unsigned long key, 452 + void *value) 453 + { 454 + xfs_mru_cache_elem_t *elem; 455 + 456 + ASSERT(mru && mru->lists); 457 + if (!mru || !mru->lists) 458 + return EINVAL; 459 + 460 + elem = kmem_zone_zalloc(xfs_mru_elem_zone, KM_SLEEP); 461 + if (!elem) 462 + return ENOMEM; 463 + 464 + if (radix_tree_preload(GFP_KERNEL)) { 465 + kmem_zone_free(xfs_mru_elem_zone, elem); 466 + return ENOMEM; 467 + } 468 + 469 + INIT_LIST_HEAD(&elem->list_node); 470 + elem->key = key; 471 + elem->value = value; 472 + 473 + mutex_spinlock(&mru->lock); 474 + 475 + radix_tree_insert(&mru->store, key, elem); 476 + radix_tree_preload_end(); 477 + _xfs_mru_cache_list_insert(mru, elem); 478 + 479 + mutex_spinunlock(&mru->lock, 0); 480 + 481 + return 0; 482 + } 483 + 484 + /* 485 + * To remove an element without calling the free function, call 486 + * xfs_mru_cache_remove() with the data store and the element's key. On success 487 + * the client data pointer for the removed element is returned, otherwise this 488 + * function will return a NULL pointer. 489 + */ 490 + void * 491 + xfs_mru_cache_remove( 492 + xfs_mru_cache_t *mru, 493 + unsigned long key) 494 + { 495 + xfs_mru_cache_elem_t *elem; 496 + void *value = NULL; 497 + 498 + ASSERT(mru && mru->lists); 499 + if (!mru || !mru->lists) 500 + return NULL; 501 + 502 + mutex_spinlock(&mru->lock); 503 + elem = radix_tree_delete(&mru->store, key); 504 + if (elem) { 505 + value = elem->value; 506 + list_del(&elem->list_node); 507 + } 508 + 509 + mutex_spinunlock(&mru->lock, 0); 510 + 511 + if (elem) 512 + kmem_zone_free(xfs_mru_elem_zone, elem); 513 + 514 + return value; 515 + } 516 + 517 + /* 518 + * To remove an element and call the free function, call xfs_mru_cache_delete() 519 + * with the data store and the element's key. 520 + */ 521 + void 522 + xfs_mru_cache_delete( 523 + xfs_mru_cache_t *mru, 524 + unsigned long key) 525 + { 526 + void *value = xfs_mru_cache_remove(mru, key); 527 + 528 + if (value) 529 + mru->free_func(key, value); 530 + } 531 + 532 + /* 533 + * To look up an element using its key, call xfs_mru_cache_lookup() with the 534 + * data store and the element's key. If found, the element will be moved to the 535 + * head of the MRU list to indicate that it's been touched. 536 + * 537 + * The internal data structures are protected by a spinlock that is STILL HELD 538 + * when this function returns. Call xfs_mru_cache_done() to release it. Note 539 + * that it is not safe to call any function that might sleep in the interim. 540 + * 541 + * The implementation could have used reference counting to avoid this 542 + * restriction, but since most clients simply want to get, set or test a member 543 + * of the returned data structure, the extra per-element memory isn't warranted. 544 + * 545 + * If the element isn't found, this function returns NULL and the spinlock is 546 + * released. xfs_mru_cache_done() should NOT be called when this occurs.
547 + */ 548 + void * 549 + xfs_mru_cache_lookup( 550 + xfs_mru_cache_t *mru, 551 + unsigned long key) 552 + { 553 + xfs_mru_cache_elem_t *elem; 554 + 555 + ASSERT(mru && mru->lists); 556 + if (!mru || !mru->lists) 557 + return NULL; 558 + 559 + mutex_spinlock(&mru->lock); 560 + elem = radix_tree_lookup(&mru->store, key); 561 + if (elem) { 562 + list_del(&elem->list_node); 563 + _xfs_mru_cache_list_insert(mru, elem); 564 + } 565 + else 566 + mutex_spinunlock(&mru->lock, 0); 567 + 568 + return elem ? elem->value : NULL; 569 + } 570 + 571 + /* 572 + * To look up an element using its key, but leave its location in the internal 573 + * lists alone, call xfs_mru_cache_peek(). If the element isn't found, this 574 + * function returns NULL. 575 + * 576 + * See the comments above the declaration of the xfs_mru_cache_lookup() function 577 + * for important locking information pertaining to this call. 578 + */ 579 + void * 580 + xfs_mru_cache_peek( 581 + xfs_mru_cache_t *mru, 582 + unsigned long key) 583 + { 584 + xfs_mru_cache_elem_t *elem; 585 + 586 + ASSERT(mru && mru->lists); 587 + if (!mru || !mru->lists) 588 + return NULL; 589 + 590 + mutex_spinlock(&mru->lock); 591 + elem = radix_tree_lookup(&mru->store, key); 592 + if (!elem) 593 + mutex_spinunlock(&mru->lock, 0); 594 + 595 + return elem ? elem->value : NULL; 596 + } 597 + 598 + /* 599 + * To release the internal data structure spinlock after having performed an 600 + * xfs_mru_cache_lookup() or an xfs_mru_cache_peek(), call xfs_mru_cache_done() 601 + * with the data store pointer. 602 + */ 603 + void 604 + xfs_mru_cache_done( 605 + xfs_mru_cache_t *mru) 606 + { 607 + mutex_spinunlock(&mru->lock, 0); 608 + }
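The list-array design described at the top of xfs_mru_cache.c comes down to one piece of arithmetic: after migration has run, the target list for an insert is the number of whole group periods elapsed since time zero, offset from the current LRU list. A userspace model of that index computation from _xfs_mru_cache_list_insert() follows; it assumes migration has already run, so now - time_zero is below grp_count * grp_time.

#include <stdio.h>

static unsigned int insert_grp_demo(unsigned long now, unsigned long time_zero,
				    unsigned int grp_time, unsigned int grp_count,
				    unsigned int lru_grp)
{
	/* whole group periods since the first element was added */
	unsigned int grp = (now - time_zero) / grp_time;

	/* offset from the LRU list, wrapping around the array */
	return (lru_grp + grp) % grp_count;
}

int main(void)
{
	/* 10s lifetime over 5 groups (+1 internal) => grp_time = 2000 ticks */
	unsigned int grp_time = 2000, grp_count = 6, lru_grp = 0;
	unsigned long t0 = 10000;

	printf("t=+1s -> list %u\n",
	       insert_grp_demo(t0 + 1000, t0, grp_time, grp_count, lru_grp));
	printf("t=+5s -> list %u\n",
	       insert_grp_demo(t0 + 5000, t0, grp_time, grp_count, lru_grp));
	return 0;
}

Because the index only depends on elapsed group periods, touching an element is just a list_del() plus a re-insert through this same computation.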
+57
fs/xfs/xfs_mru_cache.h
··· 1 + /* 2 + * Copyright (c) 2006-2007 Silicon Graphics, Inc. 3 + * All Rights Reserved. 4 + * 5 + * This program is free software; you can redistribute it and/or 6 + * modify it under the terms of the GNU General Public License as 7 + * published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope that it would be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program; if not, write the Free Software Foundation, 16 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 + */ 18 + #ifndef __XFS_MRU_CACHE_H__ 19 + #define __XFS_MRU_CACHE_H__ 20 + 21 + 22 + /* Function pointer type for callback to free a client's data pointer. */ 23 + typedef void (*xfs_mru_cache_free_func_t)(unsigned long, void*); 24 + 25 + typedef struct xfs_mru_cache 26 + { 27 + struct radix_tree_root store; /* Core storage data structure. */ 28 + struct list_head *lists; /* Array of lists, one per grp. */ 29 + struct list_head reap_list; /* Elements overdue for reaping. */ 30 + spinlock_t lock; /* Lock to protect this struct. */ 31 + unsigned int grp_count; /* Number of discrete groups. */ 32 + unsigned int grp_time; /* Time period spanned by grps. */ 33 + unsigned int lru_grp; /* Group containing time zero. */ 34 + unsigned long time_zero; /* Time first element was added. */ 35 + unsigned long next_reap; /* Time that the reaper should 36 + next do something. */ 37 + unsigned int reap_all; /* if set, reap all lists */ 38 + xfs_mru_cache_free_func_t free_func; /* Function pointer for freeing. */ 39 + struct delayed_work work; /* Workqueue data for reaping. */ 40 + } xfs_mru_cache_t; 41 + 42 + int xfs_mru_cache_init(void); 43 + void xfs_mru_cache_uninit(void); 44 + int xfs_mru_cache_create(struct xfs_mru_cache **mrup, unsigned int lifetime_ms, 45 + unsigned int grp_count, 46 + xfs_mru_cache_free_func_t free_func); 47 + void xfs_mru_cache_flush(xfs_mru_cache_t *mru, int restart); 48 + void xfs_mru_cache_destroy(struct xfs_mru_cache *mru); 49 + int xfs_mru_cache_insert(struct xfs_mru_cache *mru, unsigned long key, 50 + void *value); 51 + void * xfs_mru_cache_remove(struct xfs_mru_cache *mru, unsigned long key); 52 + void xfs_mru_cache_delete(struct xfs_mru_cache *mru, unsigned long key); 53 + void *xfs_mru_cache_lookup(struct xfs_mru_cache *mru, unsigned long key); 54 + void *xfs_mru_cache_peek(struct xfs_mru_cache *mru, unsigned long key); 55 + void xfs_mru_cache_done(struct xfs_mru_cache *mru); 56 + 57 + #endif /* __XFS_MRU_CACHE_H__ */
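Putting the header's API together, a hypothetical client would look roughly like the sketch below; my_free(), the payload and the 10-second/5-group numbers are invented for illustration, and this is kernel-context code that won't build outside the tree. The one subtlety, called out in the lookup comment, is that a successful xfs_mru_cache_lookup() returns with the internal spinlock held.

#include "xfs_mru_cache.h"

static char payload[] = "demo";	/* invented stand-in for real per-file state */

static void my_free(unsigned long key, void *value)
{
	/* final teardown for `value'; called on reap, delete and destroy */
}

static int mru_demo(void)
{
	struct xfs_mru_cache *mru;
	void *v;
	int error;

	/* 10s lifetime split over 5 groups; my_free() reaps expired entries */
	error = xfs_mru_cache_create(&mru, 10000, 5, my_free);
	if (error)
		return error;

	error = xfs_mru_cache_insert(mru, 42, payload);
	if (error)
		goto out;

	/* on success the internal spinlock is still held: no sleeping here */
	v = xfs_mru_cache_lookup(mru, 42);
	if (v)
		xfs_mru_cache_done(mru);	/* release the lock */
out:
	xfs_mru_cache_destroy(mru);	/* flushes and frees whatever remains */
	return error;
}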
+3 -1
fs/xfs/xfs_rtalloc.c
··· 1882 1882 (nrblocks = in->newblocks) <= sbp->sb_rblocks || 1883 1883 (sbp->sb_rblocks && (in->extsize != sbp->sb_rextsize))) 1884 1884 return XFS_ERROR(EINVAL); 1885 + if ((error = xfs_sb_validate_fsb_count(sbp, nrblocks))) 1886 + return error; 1885 1887 /* 1886 1888 * Read in the last block of the device, make sure it exists. 1887 1889 */ 1888 1890 error = xfs_read_buf(mp, mp->m_rtdev_targp, 1889 - XFS_FSB_TO_BB(mp, in->newblocks - 1), 1891 + XFS_FSB_TO_BB(mp, nrblocks - 1), 1890 1892 XFS_FSB_TO_BB(mp, 1), 0, &bp); 1891 1893 if (error) 1892 1894 return error;
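Reusing xfs_sb_validate_fsb_count() here means the growfs path rejects an oversized realtime device with the same overflow test the mount path applies. A standalone model of the XFS_BIG_BLKNOS branch follows; PAGE_CACHE_SHIFT is assumed to be 12 (4k pages) and a 32-bit unsigned long is modelled explicitly so the output is deterministic.

#include <stdio.h>
#include <stdint.h>

#define PAGE_CACHE_SHIFT_DEMO	12	/* assumed 4k pages */
#define E2BIG_DEMO		7	/* numeric value of E2BIG on Linux */

static int validate_fsb_count_demo(unsigned int blocklog, uint64_t nblocks)
{
	/* model a 32-bit kernel, where the page cache index is 32 bits */
	if (nblocks >> (PAGE_CACHE_SHIFT_DEMO - blocklog) > 0xFFFFFFFFUL)
		return E2BIG_DEMO;
	return 0;
}

int main(void)
{
	/* with 4k blocks: 2^31 blocks (8TiB) fits, 2^40 blocks does not */
	printf("2^31 blocks: %s\n",
	       validate_fsb_count_demo(12, 1ULL << 31) ? "E2BIG" : "ok");
	printf("2^40 blocks: %s\n",
	       validate_fsb_count_demo(12, 1ULL << 40) ? "E2BIG" : "ok");
	return 0;
}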
+33 -3
fs/xfs/xfs_rw.h
··· 72 72 } 73 73 74 74 /* 75 + * Flags for xfs_free_eofblocks 76 + */ 77 + #define XFS_FREE_EOF_LOCK (1<<0) 78 + #define XFS_FREE_EOF_NOLOCK (1<<1) 79 + 80 + 81 + /* 82 + * helper function to extract extent size hint from inode 83 + */ 84 + STATIC_INLINE xfs_extlen_t 85 + xfs_get_extsz_hint( 86 + xfs_inode_t *ip) 87 + { 88 + xfs_extlen_t extsz; 89 + 90 + if (unlikely(ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) { 91 + extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) 92 + ? ip->i_d.di_extsize 93 + : ip->i_mount->m_sb.sb_rextsize; 94 + ASSERT(extsz); 95 + } else { 96 + extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) 97 + ? ip->i_d.di_extsize : 0; 98 + } 99 + return extsz; 100 + } 101 + 102 + /* 75 103 * Prototypes for functions in xfs_rw.c. 76 104 */ 77 105 extern int xfs_write_clear_setuid(struct xfs_inode *ip); ··· 119 91 extern int xfs_rwlock(bhv_desc_t *bdp, bhv_vrwlock_t write_lock); 120 92 extern void xfs_rwunlock(bhv_desc_t *bdp, bhv_vrwlock_t write_lock); 121 93 extern int xfs_setattr(bhv_desc_t *, bhv_vattr_t *vap, int flags, 122 - cred_t *credp); 94 + cred_t *credp); 123 95 extern int xfs_change_file_space(bhv_desc_t *bdp, int cmd, xfs_flock64_t *bf, 124 - xfs_off_t offset, cred_t *credp, int flags); 96 + xfs_off_t offset, cred_t *credp, int flags); 125 97 extern int xfs_set_dmattrs(bhv_desc_t *bdp, u_int evmask, u_int16_t state, 126 - cred_t *credp); 98 + cred_t *credp); 99 + extern int xfs_free_eofblocks(struct xfs_mount *mp, struct xfs_inode *ip, 100 + int flags); 127 101 128 102 #endif /* __XFS_RW_H__ */
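xfs_get_extsz_hint() centralises precedence rules that used to be open-coded at each call site: a realtime inode falls back to the volume's rextsize when it has no hint of its own, while a regular inode without XFS_DIFLAG_EXTSIZE simply has no hint. A userspace restatement, with the inode reduced to the three inputs the helper actually reads (the flag values are illustrative, not the real XFS ones):

#include <stdio.h>

#define DIFLAG_REALTIME_DEMO	0x1
#define DIFLAG_EXTSIZE_DEMO	0x2

struct inode_demo {
	unsigned int di_flags;
	unsigned int di_extsize;	/* per-inode hint */
	unsigned int sb_rextsize;	/* realtime volume extent size */
};

static unsigned int extsz_hint_demo(const struct inode_demo *ip)
{
	if (ip->di_flags & DIFLAG_REALTIME_DEMO)
		return (ip->di_flags & DIFLAG_EXTSIZE_DEMO) ? ip->di_extsize
							    : ip->sb_rextsize;
	return (ip->di_flags & DIFLAG_EXTSIZE_DEMO) ? ip->di_extsize : 0;
}

int main(void)
{
	struct inode_demo rt    = { DIFLAG_REALTIME_DEMO, 0, 16 };
	struct inode_demo hint  = { DIFLAG_EXTSIZE_DEMO, 8, 16 };
	struct inode_demo plain = { 0, 0, 16 };

	printf("rt no hint: %u, hinted: %u, plain: %u\n",
	       extsz_hint_demo(&rt), extsz_hint_demo(&hint),
	       extsz_hint_demo(&plain));	/* 16, 8, 0 */
	return 0;
}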
+13 -3
fs/xfs/xfs_sb.h
··· 74 74 */ 75 75 #define XFS_SB_VERSION2_REALFBITS 0x00ffffff /* Mask: features */ 76 76 #define XFS_SB_VERSION2_RESERVED1BIT 0x00000001 77 - #define XFS_SB_VERSION2_RESERVED2BIT 0x00000002 77 + #define XFS_SB_VERSION2_LAZYSBCOUNTBIT 0x00000002 /* Superblk counters */ 78 78 #define XFS_SB_VERSION2_RESERVED4BIT 0x00000004 79 79 #define XFS_SB_VERSION2_ATTR2BIT 0x00000008 /* Inline attr rework */ 80 80 81 81 #define XFS_SB_VERSION2_OKREALFBITS \ 82 - (XFS_SB_VERSION2_ATTR2BIT) 82 + (XFS_SB_VERSION2_LAZYSBCOUNTBIT | \ 83 + XFS_SB_VERSION2_ATTR2BIT) 83 84 #define XFS_SB_VERSION2_OKSASHFBITS \ 84 85 (0) 85 86 #define XFS_SB_VERSION2_OKREALBITS \ ··· 182 181 #define XFS_SB_SHARED_VN XFS_SB_MVAL(SHARED_VN) 183 182 #define XFS_SB_UNIT XFS_SB_MVAL(UNIT) 184 183 #define XFS_SB_WIDTH XFS_SB_MVAL(WIDTH) 184 + #define XFS_SB_ICOUNT XFS_SB_MVAL(ICOUNT) 185 + #define XFS_SB_IFREE XFS_SB_MVAL(IFREE) 186 + #define XFS_SB_FDBLOCKS XFS_SB_MVAL(FDBLOCKS) 185 187 #define XFS_SB_FEATURES2 XFS_SB_MVAL(FEATURES2) 186 188 #define XFS_SB_NUM_BITS ((int)XFS_SBS_FIELDCOUNT) 187 189 #define XFS_SB_ALL_BITS ((1LL << XFS_SB_NUM_BITS) - 1) ··· 192 188 (XFS_SB_UUID | XFS_SB_ROOTINO | XFS_SB_RBMINO | XFS_SB_RSUMINO | \ 193 189 XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | XFS_SB_GQUOTINO | \ 194 190 XFS_SB_QFLAGS | XFS_SB_SHARED_VN | XFS_SB_UNIT | XFS_SB_WIDTH | \ 195 - XFS_SB_FEATURES2) 191 + XFS_SB_ICOUNT | XFS_SB_IFREE | XFS_SB_FDBLOCKS | XFS_SB_FEATURES2) 196 192 197 193 198 194 /* ··· 417 413 * ((XFS_SB_VERSION_HASMOREBITS(sbp) && 418 414 * ((sbp)->sb_features2 & XFS_SB_VERSION2_FUNBIT) 419 415 */ 416 + 417 + static inline int xfs_sb_version_haslazysbcount(xfs_sb_t *sbp) 418 + { 419 + return (XFS_SB_VERSION_HASMOREBITS(sbp) && \ 420 + ((sbp)->sb_features2 & XFS_SB_VERSION2_LAZYSBCOUNTBIT)); 421 + } 420 422 421 423 #define XFS_SB_VERSION_HASATTR2(sbp) xfs_sb_version_hasattr2(sbp) 422 424 static inline int xfs_sb_version_hasattr2(xfs_sb_t *sbp)
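As with the other version-2 features, the lazy-count bit is only meaningful once HASMOREBITS says sb_features2 is valid, which is why the new predicate tests both. A reduced model (the MOREBITS stand-in is simplified to a single bit for this sketch; the LAZYSBCOUNT value matches the header):

#include <stdio.h>

#define SB_VERSION_MOREBITS_DEMO	0x1		/* stand-in for HASMOREBITS() */
#define SB_VERSION2_LAZYSBCOUNT		0x00000002	/* value from the header */

struct sb_demo {
	unsigned int versionnum;
	unsigned int features2;
};

static int haslazysbcount_demo(const struct sb_demo *sbp)
{
	/* features2 only counts once MOREBITS declares it valid */
	return (sbp->versionnum & SB_VERSION_MOREBITS_DEMO) &&
	       (sbp->features2 & SB_VERSION2_LAZYSBCOUNT);
}

int main(void)
{
	struct sb_demo old = { 0, SB_VERSION2_LAZYSBCOUNT };	/* no MOREBITS */
	struct sb_demo v2  = { SB_VERSION_MOREBITS_DEMO, SB_VERSION2_LAZYSBCOUNT };

	printf("old sb: %d, v2 sb: %d\n",
	       haslazysbcount_demo(&old), haslazysbcount_demo(&v2));
	return 0;
}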
+81 -44
fs/xfs/xfs_trans.c
··· 427 427 * 428 428 * Mark the transaction structure to indicate that the superblock 429 429 * needs to be updated before committing. 430 + * 431 + * Because we may not be keeping track of allocated/free inodes and 432 + * used filesystem blocks in the superblock, we do not mark the 433 + * superblock dirty in this transaction if we modify these fields. 434 + * We still need to update the transaction deltas so that they get 435 + * applied to the incore superblock, but we don't want them to 436 + * cause the superblock to get locked and logged if these are the 437 + * only fields in the superblock that the transaction modifies. 430 438 */ 431 439 void 432 440 xfs_trans_mod_sb( ··· 442 434 uint field, 443 435 int64_t delta) 444 436 { 437 + uint32_t flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY); 438 + xfs_mount_t *mp = tp->t_mountp; 445 439 446 440 switch (field) { 447 441 case XFS_TRANS_SB_ICOUNT: 448 442 tp->t_icount_delta += delta; 443 + if (xfs_sb_version_haslazysbcount(&mp->m_sb)) 444 + flags &= ~XFS_TRANS_SB_DIRTY; 449 445 break; 450 446 case XFS_TRANS_SB_IFREE: 451 447 tp->t_ifree_delta += delta; 448 + if (xfs_sb_version_haslazysbcount(&mp->m_sb)) 449 + flags &= ~XFS_TRANS_SB_DIRTY; 452 450 break; 453 451 case XFS_TRANS_SB_FDBLOCKS: 454 452 /* ··· 467 453 ASSERT(tp->t_blk_res_used <= tp->t_blk_res); 468 454 } 469 455 tp->t_fdblocks_delta += delta; 456 + if (xfs_sb_version_haslazysbcount(&mp->m_sb)) 457 + flags &= ~XFS_TRANS_SB_DIRTY; 470 458 break; 471 459 case XFS_TRANS_SB_RES_FDBLOCKS: 472 460 /* ··· 478 462 */ 479 463 ASSERT(delta < 0); 480 464 tp->t_res_fdblocks_delta += delta; 465 + if (xfs_sb_version_haslazysbcount(&mp->m_sb)) 466 + flags &= ~XFS_TRANS_SB_DIRTY; 481 467 break; 482 468 case XFS_TRANS_SB_FREXTENTS: 483 469 /* ··· 533 515 return; 534 516 } 535 517 536 - tp->t_flags |= (XFS_TRANS_SB_DIRTY | XFS_TRANS_DIRTY); 518 + tp->t_flags |= flags; 537 519 } 538 520 539 521 /* ··· 562 544 (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta + 563 545 tp->t_ag_btree_delta)); 564 546 565 - if (tp->t_icount_delta != 0) { 566 - INT_MOD(sbp->sb_icount, ARCH_CONVERT, tp->t_icount_delta); 567 - } 568 - if (tp->t_ifree_delta != 0) { 569 - INT_MOD(sbp->sb_ifree, ARCH_CONVERT, tp->t_ifree_delta); 570 - } 547 + /* 548 + * Only update the superblock counters if we are logging them 549 + */ 550 + if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) { 551 + if (tp->t_icount_delta != 0) { 552 + INT_MOD(sbp->sb_icount, ARCH_CONVERT, tp->t_icount_delta); 553 + } 554 + if (tp->t_ifree_delta != 0) { 555 + INT_MOD(sbp->sb_ifree, ARCH_CONVERT, tp->t_ifree_delta); 556 + } 571 557 572 - if (tp->t_fdblocks_delta != 0) { 573 - INT_MOD(sbp->sb_fdblocks, ARCH_CONVERT, tp->t_fdblocks_delta); 574 - } 575 - if (tp->t_res_fdblocks_delta != 0) { 576 - INT_MOD(sbp->sb_fdblocks, ARCH_CONVERT, tp->t_res_fdblocks_delta); 558 + if (tp->t_fdblocks_delta != 0) { 559 + INT_MOD(sbp->sb_fdblocks, ARCH_CONVERT, tp->t_fdblocks_delta); 560 + } 561 + if (tp->t_res_fdblocks_delta != 0) { 562 + INT_MOD(sbp->sb_fdblocks, ARCH_CONVERT, tp->t_res_fdblocks_delta); 563 + } 577 564 } 578 565 579 566 if (tp->t_frextents_delta != 0) { ··· 638 615 } 639 616 640 617 /* 641 - * xfs_trans_unreserve_and_mod_sb() is called to release unused 642 - * reservations and apply superblock counter changes to the in-core 643 - * superblock. 618 + * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations 619 + * and apply superblock counter changes to the in-core superblock. 
The 620 + * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT 621 + * applied to the in-core superblock. The idea is that that has already been 622 + * done. 644 623 * 645 624 * This is done efficiently with a single call to xfs_mod_incore_sb_batch(). 625 + * However, we have to ensure that we only modify each superblock field only 626 + * once because the application of the delta values may not be atomic. That can 627 + * lead to ENOSPC races occurring if we have two separate modifcations of the 628 + * free space counter to put back the entire reservation and then take away 629 + * what we used. 630 + * 631 + * If we are not logging superblock counters, then the inode allocated/free and 632 + * used block counts are not updated in the on disk superblock. In this case, 633 + * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we 634 + * still need to update the incore superblock with the changes. 646 635 */ 647 636 STATIC void 648 637 xfs_trans_unreserve_and_mod_sb( ··· 662 627 { 663 628 xfs_mod_sb_t msb[14]; /* If you add cases, add entries */ 664 629 xfs_mod_sb_t *msbp; 630 + xfs_mount_t *mp = tp->t_mountp; 665 631 /* REFERENCED */ 666 632 int error; 667 633 int rsvd; 634 + int64_t blkdelta = 0; 635 + int64_t rtxdelta = 0; 668 636 669 637 msbp = msb; 670 638 rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0; 671 639 672 - /* 673 - * Release any reserved blocks. Any that were allocated 674 - * will be taken back again by fdblocks_delta below. 675 - */ 676 - if (tp->t_blk_res > 0) { 640 + /* calculate free blocks delta */ 641 + if (tp->t_blk_res > 0) 642 + blkdelta = tp->t_blk_res; 643 + 644 + if ((tp->t_fdblocks_delta != 0) && 645 + (xfs_sb_version_haslazysbcount(&mp->m_sb) || 646 + (tp->t_flags & XFS_TRANS_SB_DIRTY))) 647 + blkdelta += tp->t_fdblocks_delta; 648 + 649 + if (blkdelta != 0) { 677 650 msbp->msb_field = XFS_SBS_FDBLOCKS; 678 - msbp->msb_delta = tp->t_blk_res; 651 + msbp->msb_delta = blkdelta; 679 652 msbp++; 680 653 } 681 654 682 - /* 683 - * Release any reserved real time extents . Any that were 684 - * allocated will be taken back again by frextents_delta below. 685 - */ 686 - if (tp->t_rtx_res > 0) { 655 + /* calculate free realtime extents delta */ 656 + if (tp->t_rtx_res > 0) 657 + rtxdelta = tp->t_rtx_res; 658 + 659 + if ((tp->t_frextents_delta != 0) && 660 + (tp->t_flags & XFS_TRANS_SB_DIRTY)) 661 + rtxdelta += tp->t_frextents_delta; 662 + 663 + if (rtxdelta != 0) { 687 664 msbp->msb_field = XFS_SBS_FREXTENTS; 688 - msbp->msb_delta = tp->t_rtx_res; 665 + msbp->msb_delta = rtxdelta; 689 666 msbp++; 690 667 } 691 668 692 - /* 693 - * Apply any superblock modifications to the in-core version. 694 - * The t_res_fdblocks_delta and t_res_frextents_delta fields are 695 - * explicitly NOT applied to the in-core superblock. 696 - * The idea is that that has already been done. 
697 - */ 698 - if (tp->t_flags & XFS_TRANS_SB_DIRTY) { 669 + /* apply remaining deltas */ 670 + 671 + if (xfs_sb_version_haslazysbcount(&mp->m_sb) || 672 + (tp->t_flags & XFS_TRANS_SB_DIRTY)) { 699 673 if (tp->t_icount_delta != 0) { 700 674 msbp->msb_field = XFS_SBS_ICOUNT; 701 675 msbp->msb_delta = tp->t_icount_delta; ··· 715 671 msbp->msb_delta = tp->t_ifree_delta; 716 672 msbp++; 717 673 } 718 - if (tp->t_fdblocks_delta != 0) { 719 - msbp->msb_field = XFS_SBS_FDBLOCKS; 720 - msbp->msb_delta = tp->t_fdblocks_delta; 721 - msbp++; 722 - } 723 - if (tp->t_frextents_delta != 0) { 724 - msbp->msb_field = XFS_SBS_FREXTENTS; 725 - msbp->msb_delta = tp->t_frextents_delta; 726 - msbp++; 727 - } 674 + } 675 + 676 + if (tp->t_flags & XFS_TRANS_SB_DIRTY) { 728 677 if (tp->t_dblocks_delta != 0) { 729 678 msbp->msb_field = XFS_SBS_DBLOCKS; 730 679 msbp->msb_delta = tp->t_dblocks_delta;
+2 -1
fs/xfs/xfs_trans.h
··· 94 94 #define XFS_TRANS_GROWFSRT_ZERO 38 95 95 #define XFS_TRANS_GROWFSRT_FREE 39 96 96 #define XFS_TRANS_SWAPEXT 40 97 - #define XFS_TRANS_TYPE_MAX 40 97 + #define XFS_TRANS_SB_COUNT 41 98 + #define XFS_TRANS_TYPE_MAX 41 98 99 /* new transaction types need to be reflected in xfs_logprint(8) */ 99 100 100 101
+94 -65
fs/xfs/xfs_vfsops.c
··· 51 51 #include "xfs_acl.h" 52 52 #include "xfs_attr.h" 53 53 #include "xfs_clnt.h" 54 + #include "xfs_mru_cache.h" 55 + #include "xfs_filestream.h" 54 56 #include "xfs_fsops.h" 55 57 56 58 STATIC int xfs_sync(bhv_desc_t *, int, cred_t *); ··· 83 81 xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf"); 84 82 xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork"); 85 83 xfs_acl_zone_init(xfs_acl_zone, "xfs_acl"); 84 + xfs_mru_cache_init(); 85 + xfs_filestream_init(); 86 86 87 87 /* 88 88 * The size of the zone allocated buf log item is the maximum ··· 168 164 xfs_cleanup_procfs(); 169 165 xfs_sysctl_unregister(); 170 166 xfs_refcache_destroy(); 167 + xfs_filestream_uninit(); 168 + xfs_mru_cache_uninit(); 171 169 xfs_acl_zone_destroy(xfs_acl_zone); 172 170 173 171 #ifdef XFS_DIR2_TRACE ··· 325 319 mp->m_flags |= XFS_MOUNT_BARRIER; 326 320 else 327 321 mp->m_flags &= ~XFS_MOUNT_BARRIER; 322 + 323 + if (ap->flags2 & XFSMNT2_FILESTREAMS) 324 + mp->m_flags |= XFS_MOUNT_FILESTREAMS; 328 325 329 326 return 0; 330 327 } ··· 527 518 if (mp->m_flags & XFS_MOUNT_BARRIER) 528 519 xfs_mountfs_check_barriers(mp); 529 520 521 + if ((error = xfs_filestream_mount(mp))) 522 + goto error2; 523 + 530 524 error = XFS_IOINIT(vfsp, args, flags); 531 525 if (error) 532 526 goto error2; ··· 586 574 * out of the reference cache, and delete the timer. 587 575 */ 588 576 xfs_refcache_purge_mp(mp); 577 + 578 + /* 579 + * Blow away any referenced inode in the filestreams cache. 580 + * This can and will cause log traffic as inodes go inactive 581 + * here. 582 + */ 583 + xfs_filestream_unmount(mp); 589 584 590 585 XFS_bflush(mp->m_ddev_targp); 591 586 error = xfs_unmount_flush(mp, 0); ··· 659 640 * we can write the unmount record. 660 641 */ 661 642 do { 662 - xfs_syncsub(mp, SYNC_REMOUNT|SYNC_ATTR|SYNC_WAIT, NULL); 643 + xfs_syncsub(mp, SYNC_INODE_QUIESCE, NULL); 663 644 pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1); 664 645 if (!pincount) { 665 646 delay(50); ··· 668 649 } while (count < 2); 669 650 670 651 return 0; 652 + } 653 + 654 + /* 655 + * Second stage of a quiesce. The data is already synced; now we have to take 656 + * care of the metadata. New transactions are already blocked, so we need to 657 + * wait for any remaining transactions to drain out before proceeding.
658 + */ 659 + STATIC void 660 + xfs_attr_quiesce( 661 + xfs_mount_t *mp) 662 + { 663 + /* wait for all modifications to complete */ 664 + while (atomic_read(&mp->m_active_trans) > 0) 665 + delay(100); 666 + 667 + /* flush inodes and push all remaining buffers out to disk */ 668 + xfs_quiesce_fs(mp); 669 + 670 + ASSERT_ALWAYS(atomic_read(&mp->m_active_trans) == 0); 671 + 672 + /* Push the superblock and write an unmount record */ 673 + xfs_log_sbcount(mp, 1); 674 + xfs_log_unmount_write(mp); 675 + xfs_unmountfs_writesb(mp); 671 676 } 672 677 673 678 STATIC int ··· 713 670 mp->m_flags &= ~XFS_MOUNT_BARRIER; 714 671 } 715 672 } else if (!(vfsp->vfs_flag & VFS_RDONLY)) { /* rw -> ro */ 716 - bhv_vfs_sync(vfsp, SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR, NULL); 717 - xfs_quiesce_fs(mp); 718 - xfs_log_unmount_write(mp); 719 - xfs_unmountfs_writesb(mp); 673 + xfs_filestream_flush(mp); 674 + bhv_vfs_sync(vfsp, SYNC_DATA_QUIESCE, NULL); 675 + xfs_attr_quiesce(mp); 720 676 vfsp->vfs_flag |= VFS_RDONLY; 721 677 } 722 678 return 0; ··· 928 886 cred_t *credp) 929 887 { 930 888 xfs_mount_t *mp = XFS_BHVTOM(bdp); 889 + 890 + if (flags & SYNC_IOWAIT) 891 + xfs_filestream_flush(mp); 931 892 932 893 return xfs_syncsub(mp, flags, NULL); 933 894 } ··· 1173 1128 * in the inode list. 1174 1129 */ 1175 1130 1176 - if ((flags & SYNC_CLOSE) && (vp != NULL)) { 1177 - /* 1178 - * This is the shutdown case. We just need to 1179 - * flush and invalidate all the pages associated 1180 - * with the inode. Drop the inode lock since 1181 - * we can't hold it across calls to the buffer 1182 - * cache. 1183 - * 1184 - * We don't set the VREMAPPING bit in the vnode 1185 - * here, because we don't hold the vnode lock 1186 - * exclusively. It doesn't really matter, though, 1187 - * because we only come here when we're shutting 1188 - * down anyway. 1189 - */ 1131 + /* 1132 + * If we have to flush data or wait for I/O completion 1133 + * we need to drop the ilock that we currently hold. 1134 + * If we need to drop the lock, insert a marker if we 1135 + * have not already done so. 1136 + */ 1137 + if ((flags & (SYNC_CLOSE|SYNC_IOWAIT)) || 1138 + ((flags & SYNC_DELWRI) && VN_DIRTY(vp))) { 1139 + if (mount_locked) { 1140 + IPOINTER_INSERT(ip, mp); 1141 + } 1190 1142 xfs_iunlock(ip, XFS_ILOCK_SHARED); 1191 1143 1192 - if (XFS_FORCED_SHUTDOWN(mp)) { 1193 - bhv_vop_toss_pages(vp, 0, -1, FI_REMAPF); 1194 - } else { 1195 - error = bhv_vop_flushinval_pages(vp, 0, -1, FI_REMAPF); 1196 - } 1197 - 1198 - xfs_ilock(ip, XFS_ILOCK_SHARED); 1199 - 1200 - } else if ((flags & SYNC_DELWRI) && (vp != NULL)) { 1201 - if (VN_DIRTY(vp)) { 1202 - /* We need to have dropped the lock here, 1203 - * so insert a marker if we have not already 1204 - * done so. 1205 - */ 1206 - if (mount_locked) { 1207 - IPOINTER_INSERT(ip, mp); 1208 - } 1209 - 1210 - /* 1211 - * Drop the inode lock since we can't hold it 1212 - * across calls to the buffer cache. 1213 - */ 1214 - xfs_iunlock(ip, XFS_ILOCK_SHARED); 1144 + if (flags & SYNC_CLOSE) { 1145 + /* Shutdown case. Flush and invalidate. 
*/ 1146 + if (XFS_FORCED_SHUTDOWN(mp)) 1147 + bhv_vop_toss_pages(vp, 0, -1, FI_REMAPF); 1148 + else 1149 + error = bhv_vop_flushinval_pages(vp, 0, 1150 + -1, FI_REMAPF); 1151 + } else if ((flags & SYNC_DELWRI) && VN_DIRTY(vp)) { 1215 1152 error = bhv_vop_flush_pages(vp, (xfs_off_t)0, 1216 1153 -1, fflag, FI_NONE); 1217 - xfs_ilock(ip, XFS_ILOCK_SHARED); 1218 1154 } 1219 1155 1156 + /* 1157 + * When freezing, we need to wait to ensure all I/O (including direct 1158 + * I/O) is complete to ensure no further data modification can take 1159 + * place after this point 1160 + */ 1161 + if (flags & SYNC_IOWAIT) 1162 + vn_iowait(vp); 1163 + 1164 + xfs_ilock(ip, XFS_ILOCK_SHARED); 1220 1165 } 1221 - /* 1222 - * When freezing, we need to wait ensure all I/O (including direct 1223 - * I/O) is complete to ensure no further data modification can take 1224 - * place after this point 1225 - */ 1226 - if (flags & SYNC_IOWAIT) 1227 - vn_iowait(vp); 1228 1166 1229 1167 if (flags & SYNC_BDFLUSH) { 1230 1168 if ((flags & SYNC_ATTR) && ··· 1542 1514 } 1543 1515 1544 1516 /* 1517 + * If asked, update the disk superblock with incore counter values if we 1518 + * are using non-persistent counters so that they don't get too far out 1519 + * of sync if we crash or get a forced shutdown. We don't want to force 1520 + * this to disk, just get a transaction into the iclogs.... 1521 + */ 1522 + if (flags & SYNC_SUPER) 1523 + xfs_log_sbcount(mp, 0); 1524 + 1525 + /* 1545 1526 * Now check to see if the log needs a "dummy" transaction. 1546 1527 */ 1547 1528 ··· 1682 1645 * in stat(). */ 1683 1646 #define MNTOPT_ATTR2 "attr2" /* do use attr2 attribute format */ 1684 1647 #define MNTOPT_NOATTR2 "noattr2" /* do not use attr2 attribute format */ 1648 + #define MNTOPT_FILESTREAM "filestreams" /* use filestreams allocator */ 1685 1649 1686 1650 STATIC unsigned long 1687 1651 suffix_strtoul(char *s, char **endp, unsigned int base) ··· 1869 1831 args->flags |= XFSMNT_ATTR2; 1870 1832 } else if (!strcmp(this_char, MNTOPT_NOATTR2)) { 1871 1833 args->flags &= ~XFSMNT_ATTR2; 1834 + } else if (!strcmp(this_char, MNTOPT_FILESTREAM)) { 1835 + args->flags2 |= XFSMNT2_FILESTREAMS; 1872 1836 } else if (!strcmp(this_char, "osyncisdsync")) { 1873 1837 /* no-op, this is now the default */ 1874 1838 cmn_err(CE_WARN, ··· 1999 1959 } 2000 1960 2001 1961 /* 2002 - * Second stage of a freeze. The data is already frozen, now we have to take 2003 - * care of the metadata. New transactions are already blocked, so we need to 2004 - * wait for any remaining transactions to drain out before proceding. 1962 + * Second stage of a freeze. The data is already frozen so we only 1963 + * need to take care of the metadata. Once that's done, write a dummy 1964 + * record to dirty the log in case of a crash while frozen. 2005 1965 */ 2006 1966 STATIC void 2007 1967 xfs_freeze( ··· 2009 1969 { 2010 1970 xfs_mount_t *mp = XFS_BHVTOM(bdp); 2011 1971 2012 - /* wait for all modifications to complete */ 2013 - while (atomic_read(&mp->m_active_trans) > 0) 2014 - delay(100); 2015 - 2016 - /* flush inodes and push all remaining buffers out to disk */ 2017 - xfs_quiesce_fs(mp); 2018 - 2019 - ASSERT_ALWAYS(atomic_read(&mp->m_active_trans) == 0); 2020 - 2021 - /* Push the superblock and write an unmount record */ 2022 - xfs_log_unmount_write(mp); 2023 - xfs_unmountfs_writesb(mp); 1972 + xfs_attr_quiesce(mp); 2024 1973 xfs_fs_log_dummy(mp); 2025 1974
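The new "filestreams" mount option follows the existing MNTOPT pattern: strcmp against the option token, then set a bit in args->flags2 that xfs_start_flags() later translates into XFS_MOUNT_FILESTREAMS. A minimal model of that handling (the flag value and the _demo names are stand-ins for the real XFSMNT2_FILESTREAMS plumbing):

#include <stdio.h>
#include <string.h>

#define XFSMNT2_FILESTREAMS_DEMO	0x1	/* stand-in flag value */

struct mount_args_demo {
	unsigned int flags2;
};

static void parse_opt_demo(struct mount_args_demo *args, const char *opt)
{
	/* same shape as the MNTOPT_FILESTREAM branch above */
	if (!strcmp(opt, "filestreams"))
		args->flags2 |= XFSMNT2_FILESTREAMS_DEMO;
}

int main(void)
{
	struct mount_args_demo args = { 0 };

	parse_opt_demo(&args, "filestreams");
	printf("flags2 = %#x\n", args.flags2);
	return 0;
}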
+64 -55
fs/xfs/xfs_vnodeops.c
···
51 51 #include "xfs_refcache.h"
52 52 #include "xfs_trans_space.h"
53 53 #include "xfs_log_priv.h"
54 + #include "xfs_filestream.h"
54 55
55 56 STATIC int
56 57 xfs_open(
···
75 74 		(void)xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK);
76 75 		xfs_iunlock(ip, mode);
77 76 	}
78 - 	return 0;
79 - }
80 -
81 - STATIC int
82 - xfs_close(
83 - 	bhv_desc_t	*bdp,
84 - 	int		flags,
85 - 	lastclose_t	lastclose,
86 - 	cred_t		*credp)
87 - {
88 - 	bhv_vnode_t	*vp = BHV_TO_VNODE(bdp);
89 - 	xfs_inode_t	*ip = XFS_BHVTOI(bdp);
90 -
91 - 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
92 - 		return XFS_ERROR(EIO);
93 -
94 - 	if (lastclose != L_TRUE || !VN_ISREG(vp))
95 - 		return 0;
96 -
97 - 	/*
98 - 	 * If we previously truncated this file and removed old data in
99 - 	 * the process, we want to initiate "early" writeout on the last
100 - 	 * close. This is an attempt to combat the notorious NULL files
101 - 	 * problem which is particularly noticable from a truncate down,
102 - 	 * buffered (re-)write (delalloc), followed by a crash. What we
103 - 	 * are effectively doing here is significantly reducing the time
104 - 	 * window where we'd otherwise be exposed to that problem.
105 - 	 */
106 - 	if (VUNTRUNCATE(vp) && VN_DIRTY(vp) && ip->i_delayed_blks > 0)
107 - 		return bhv_vop_flush_pages(vp, 0, -1, XFS_B_ASYNC, FI_NONE);
108 77 	return 0;
109 78 }
···
154 183 		 * realtime extent size or the realtime volume's
155 184 		 * extent size.
156 185 		 */
157 - 		vap->va_blocksize = ip->i_d.di_extsize ?
158 - 			(ip->i_d.di_extsize << mp->m_sb.sb_blocklog) :
159 - 			(mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog);
186 + 		vap->va_blocksize =
187 + 			xfs_get_extsz_hint(ip) << mp->m_sb.sb_blocklog;
160 188 	}
161 189 	break;
162 190 }
···
784 814 			di_flags |= XFS_DIFLAG_PROJINHERIT;
785 815 		if (vap->va_xflags & XFS_XFLAG_NODEFRAG)
786 816 			di_flags |= XFS_DIFLAG_NODEFRAG;
817 + 		if (vap->va_xflags & XFS_XFLAG_FILESTREAM)
818 + 			di_flags |= XFS_DIFLAG_FILESTREAM;
787 819 		if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
788 820 			if (vap->va_xflags & XFS_XFLAG_RTINHERIT)
789 821 				di_flags |= XFS_DIFLAG_RTINHERIT;
···
1173 1201 }
1174 1202
1175 1203 /*
1176 - * This is called by xfs_inactive to free any blocks beyond eof,
1177 - * when the link count isn't zero.
1204 + * This is called by xfs_inactive to free any blocks beyond eof
1205 + * when the link count isn't zero and by xfs_dm_punch_hole() when
1206 + * punching a hole to EOF.
1178 1207  */
1179 - STATIC int
1180 - xfs_inactive_free_eofblocks(
1208 + int
1209 + xfs_free_eofblocks(
1181 1210 	xfs_mount_t	*mp,
1182 - 	xfs_inode_t	*ip)
1211 + 	xfs_inode_t	*ip,
1212 + 	int		flags)
1183 1213 {
1184 1214 	xfs_trans_t	*tp;
1185 1215 	int		error;
···
1190 1216 	xfs_filblks_t	map_len;
1191 1217 	int		nimaps;
1192 1218 	xfs_bmbt_irec_t	imap;
1219 + 	int		use_iolock = (flags & XFS_FREE_EOF_LOCK);
1193 1220
1194 1221 	/*
1195 1222 	 * Figure out if there are any blocks beyond the end
···
1231 1256 		 * cache and we can't
1232 1257 		 * do that within a transaction.
1233 1258 		 */
1234 - 		xfs_ilock(ip, XFS_IOLOCK_EXCL);
1259 + 		if (use_iolock)
1260 + 			xfs_ilock(ip, XFS_IOLOCK_EXCL);
1235 1261 		error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE,
1236 1262 				    ip->i_size);
1237 1263 		if (error) {
1238 - 			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1264 + 			xfs_trans_cancel(tp, 0);
1265 + 			if (use_iolock)
1266 + 				xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1239 1267 			return error;
1240 1268 		}
···
1275 1297 			error = xfs_trans_commit(tp,
1276 1298 						XFS_TRANS_RELEASE_LOG_RES);
1277 1299 		}
1278 - 		xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
1300 + 		xfs_iunlock(ip, (use_iolock ? (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)
1301 + 				: XFS_ILOCK_EXCL));
1279 1302 	}
1280 1303 	return error;
1281 1304 }
···
1539 1560 	if (vp->v_vfsp->vfs_flag & VFS_RDONLY)
1540 1561 		return 0;
1541 1562
1563 + 	if (!XFS_FORCED_SHUTDOWN(mp)) {
1564 + 		/*
1565 + 		 * If we are using filestreams, and we have an unlinked
1566 + 		 * file that we are processing the last close on, then nothing
1567 + 		 * will be able to reopen and write to this file. Purge this
1568 + 		 * inode from the filestreams cache so that it doesn't delay
1569 + 		 * teardown of the inode.
1570 + 		 */
1571 + 		if ((ip->i_d.di_nlink == 0) && xfs_inode_is_filestream(ip))
1572 + 			xfs_filestream_deassociate(ip);
1573 +
1574 + 		/*
1575 + 		 * If we previously truncated this file and removed old data
1576 + 		 * in the process, we want to initiate "early" writeout on
1577 + 		 * the last close. This is an attempt to combat the notorious
1578 + 		 * NULL files problem which is particularly noticeable from a
1579 + 		 * truncate down, buffered (re-)write (delalloc), followed by
1580 + 		 * a crash. What we are effectively doing here is
1581 + 		 * significantly reducing the time window where we'd otherwise
1582 + 		 * be exposed to that problem.
1583 + 		 */
1584 + 		if (VUNTRUNCATE(vp) && VN_DIRTY(vp) && ip->i_delayed_blks > 0)
1585 + 			bhv_vop_flush_pages(vp, 0, -1, XFS_B_ASYNC, FI_NONE);
1586 + 	}
1587 +
1542 1588 #ifdef HAVE_REFCACHE
1543 1589 	/* If we are in the NFS reference cache then don't do this now */
1544 1590 	if (ip->i_refcache)
···
1577 1573 	     (ip->i_df.if_flags & XFS_IFEXTENTS)) &&
1578 1574 	    (!(ip->i_d.di_flags &
1579 1575 		(XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
1580 - 		if ((error = xfs_inactive_free_eofblocks(mp, ip)))
1576 + 		error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK);
1577 + 		if (error)
1581 1578 			return error;
1582 1579 		/* Update linux inode block count after free above */
1583 1580 		vn_to_inode(vp)->i_blocks = XFS_FSB_TO_BB(mp,
···
1659 1654 	     (!(ip->i_d.di_flags &
1660 1655 		  (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) ||
1661 1656 	      (ip->i_delayed_blks != 0)))) {
1662 - 		if ((error = xfs_inactive_free_eofblocks(mp, ip)))
1657 + 		error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK);
1658 + 		if (error)
1663 1659 			return VN_INACTIVE_CACHE;
1664 1660 		/* Update linux inode block count after free above */
1665 1661 		vn_to_inode(vp)->i_blocks = XFS_FSB_TO_BB(mp,
···
1686 1680
1687 1681 		error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, 0);
1688 1682 		if (error) {
1683 + 			xfs_trans_cancel(tp, 0);
1689 1684 			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1690 1685 			return VN_INACTIVE_CACHE;
1691 1686 		}
···
2224 2217 xfs_lock_inumorder(int lock_mode, int subclass)
2225 2218 {
2226 2219 	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
2227 - 		lock_mode |= (subclass + XFS_IOLOCK_INUMORDER) << XFS_IOLOCK_SHIFT;
2220 + 		lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_IOLOCK_SHIFT;
2228 2221 	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))
2229 - 		lock_mode |= (subclass + XFS_ILOCK_INUMORDER) << XFS_ILOCK_SHIFT;
2222 + 		lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_ILOCK_SHIFT;
2230 2223
2231 2224 	return lock_mode;
2232 2225 }
···
2552 2545 	 * xfs_refcache_purge_ip routine (although that would be OK).
2553 2546 	 */
2554 2547 	xfs_refcache_purge_ip(ip);
2548 +
2549 + 	/*
2550 + 	 * If we are using filestreams, kill the stream association.
2551 + 	 * If the file is still open it may get a new one but that
2552 + 	 * will get killed on last close in xfs_release() so we don't
2553 + 	 * have to worry about that.
2554 + 	 */
2555 + 	if (link_zero && xfs_inode_is_filestream(ip))
2556 + 		xfs_filestream_deassociate(ip);
2555 2557
2556 2558 	vn_trace_exit(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address);
2557 2559
···
4063 4047 	if (XFS_FORCED_SHUTDOWN(mp))
4064 4048 		return XFS_ERROR(EIO);
4065 4049
4066 - 	rt = XFS_IS_REALTIME_INODE(ip);
4067 - 	if (unlikely(rt)) {
4068 - 		if (!(extsz = ip->i_d.di_extsize))
4069 - 			extsz = mp->m_sb.sb_rextsize;
4070 - 	} else {
4071 - 		extsz = ip->i_d.di_extsize;
4072 - 	}
4073 -
4074 4050 	if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
4075 4051 		return error;
4076 4052
4077 4053 	if (len <= 0)
4078 4054 		return XFS_ERROR(EINVAL);
4079 4055
4056 + 	rt = XFS_IS_REALTIME_INODE(ip);
4057 + 	extsz = xfs_get_extsz_hint(ip);
4058 +
4080 4059 	count = len;
4081 - 	error = 0;
4082 4060 	imapp = &imaps[0];
4083 4061 	nimaps = 1;
4084 4062 	bmapi_flag = XFS_BMAPI_WRITE | (alloc_type ? XFS_BMAPI_PREALLOC : 0);
···
4688 4678 bhv_vnodeops_t xfs_vnodeops = {
4689 4679 	BHV_IDENTITY_INIT(VN_BHV_XFS,VNODE_POSITION_XFS),
4690 4680 	.vop_open		= xfs_open,
4691 - 	.vop_close		= xfs_close,
4692 4681 	.vop_read		= xfs_read,
4693 4682 #ifdef HAVE_SPLICE
4694 4683 	.vop_splice_read	= xfs_splice_read,
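The xfs_setattr() hunk above maps the new XFS_XFLAG_FILESTREAM xflag onto the on-disk XFS_DIFLAG_FILESTREAM inode flag. A minimal userspace sketch of setting that hint on a directory through the long-standing fsxattr ioctls — assuming the xfsprogs <xfs/xfs_fs.h> header is installed; the path handling is illustrative only:

	/*
	 * Sketch only: request filestream allocation for a directory via
	 * XFS_IOC_FSGETXATTR/XFS_IOC_FSSETXATTR.  Error handling is minimal.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <xfs/xfs_fs.h>		/* struct fsxattr, XFS_XFLAG_FILESTREAM */

	int main(int argc, char **argv)
	{
		struct fsxattr	fsx;
		int		fd;

		if (argc != 2) {
			fprintf(stderr, "usage: %s <directory>\n", argv[0]);
			return 1;
		}
		fd = open(argv[1], O_RDONLY);
		if (fd < 0 || ioctl(fd, XFS_IOC_FSGETXATTR, &fsx) < 0) {
			perror(argv[1]);
			return 1;
		}
		fsx.fsx_xflags |= XFS_XFLAG_FILESTREAM;
		if (ioctl(fd, XFS_IOC_FSSETXATTR, &fsx) < 0) {
			perror("XFS_IOC_FSSETXATTR");
			return 1;
		}
		return 0;
	}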
+1
lib/radix-tree.c
···
151 151 out:
152 152 	return ret;
153 153 }
154 + EXPORT_SYMBOL(radix_tree_preload);
154 155
155 156 static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
156 157 		int offset)
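The export above lets modular code such as XFS's new MRU cache (xfs_mru_cache.c) use the standard radix-tree preload idiom: preallocate nodes while sleeping is still allowed, then insert under the spinlock. A minimal sketch of that pattern — the tree, lock, and function names here are invented for illustration:

	/*
	 * Sketch only: the preload-then-insert pattern enabled by exporting
	 * radix_tree_preload() to modules.  my_tree/my_lock are placeholders.
	 */
	#include <linux/radix-tree.h>
	#include <linux/spinlock.h>

	static RADIX_TREE(my_tree, GFP_ATOMIC);
	static DEFINE_SPINLOCK(my_lock);

	int my_insert(unsigned long index, void *item)
	{
		int error;

		/* Preallocate per-CPU nodes while we may still sleep... */
		error = radix_tree_preload(GFP_KERNEL);
		if (error)
			return error;

		/* ...then do the insert atomically under the lock. */
		spin_lock(&my_lock);
		error = radix_tree_insert(&my_tree, index, item);
		spin_unlock(&my_lock);
		radix_tree_preload_end();

		return error;
	}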