Merge git://oss.sgi.com:8090/xfs/xfs-2.6

* git://oss.sgi.com:8090/xfs/xfs-2.6:
[XFS] Remove KERNEL_VERSION macros from xfs_dmapi.h
[XFS] Prevent a deadlock when xfslogd unpins inodes.
[XFS] Clean up i_flags and i_flags_lock handling.
[XFS] 956664: dm_read_invis() changes i_atime
[XFS] rename uio_read() to xfs_uio_read()
[XFS] Keep lockdep happy.
[XFS] 956618: Linux crashes on boot with XFS-DMAPI filesystem when

15 files changed, 192 insertions(+), 116 deletions(-)
+1 -16
fs/xfs/Makefile-linux-2.6
··· 21 21 XFS_LINUX := linux-2.6 22 22 23 23 ifeq ($(CONFIG_XFS_DEBUG),y) 24 - EXTRA_CFLAGS += -g -DSTATIC="" -DDEBUG 25 - EXTRA_CFLAGS += -DXFS_BUF_LOCK_TRACKING 26 - endif 27 - ifeq ($(CONFIG_XFS_TRACE),y) 28 - EXTRA_CFLAGS += -DXFS_ALLOC_TRACE 29 - EXTRA_CFLAGS += -DXFS_ATTR_TRACE 30 - EXTRA_CFLAGS += -DXFS_BLI_TRACE 31 - EXTRA_CFLAGS += -DXFS_BMAP_TRACE 32 - EXTRA_CFLAGS += -DXFS_BMBT_TRACE 33 - EXTRA_CFLAGS += -DXFS_DIR2_TRACE 34 - EXTRA_CFLAGS += -DXFS_DQUOT_TRACE 35 - EXTRA_CFLAGS += -DXFS_ILOCK_TRACE 36 - EXTRA_CFLAGS += -DXFS_LOG_TRACE 37 - EXTRA_CFLAGS += -DXFS_RW_TRACE 38 - EXTRA_CFLAGS += -DXFS_BUF_TRACE 39 - EXTRA_CFLAGS += -DXFS_VNODE_TRACE 24 + EXTRA_CFLAGS += -g 40 25 endif 41 26 42 27 obj-$(CONFIG_XFS_FS) += xfs.o
+2 -2
fs/xfs/linux-2.6/xfs_buf.c
··· 15 15 * along with this program; if not, write the Free Software Foundation, 16 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 17 */ 18 + #include "xfs.h" 18 19 #include <linux/stddef.h> 19 20 #include <linux/errno.h> 20 21 #include <linux/slab.h> ··· 32 31 #include <linux/kthread.h> 33 32 #include <linux/migrate.h> 34 33 #include <linux/backing-dev.h> 35 - #include "xfs_linux.h" 36 34 37 35 STATIC kmem_zone_t *xfs_buf_zone; 38 36 STATIC kmem_shaker_t xfs_buf_shake; ··· 1406 1406 btp->bt_hashshift = external ? 3 : 8; /* 8 or 256 buckets */ 1407 1407 btp->bt_hashmask = (1 << btp->bt_hashshift) - 1; 1408 1408 btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) * 1409 - sizeof(xfs_bufhash_t), KM_SLEEP); 1409 + sizeof(xfs_bufhash_t), KM_SLEEP | KM_LARGE); 1410 1410 for (i = 0; i < (1 << btp->bt_hashshift); i++) { 1411 1411 spin_lock_init(&btp->bt_hash[i].bh_lock); 1412 1412 INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
+28
fs/xfs/linux-2.6/xfs_dmapi_priv.h
··· 1 + /* 2 + * Copyright (c) 2000-2006 Silicon Graphics, Inc. 3 + * All Rights Reserved. 4 + * 5 + * This program is free software; you can redistribute it and/or 6 + * modify it under the terms of the GNU General Public License as 7 + * published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope that it would be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program; if not, write the Free Software Foundation, 16 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 + */ 18 + #ifndef __XFS_DMAPI_PRIV_H__ 19 + #define __XFS_DMAPI_PRIV_H__ 20 + 21 + /* 22 + * Based on IO_ISDIRECT, decide which i_ flag is set. 23 + */ 24 + #define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \ 25 + DM_FLAGS_IMUX : 0) 26 + #define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_IMUX) 27 + 28 + #endif /*__XFS_DMAPI_PRIV_H__*/
+4 -1
fs/xfs/linux-2.6/xfs_ioctl.c
··· 341 341 put_unused_fd(new_fd); 342 342 return -XFS_ERROR(-PTR_ERR(filp)); 343 343 } 344 - if (inode->i_mode & S_IFREG) 344 + if (inode->i_mode & S_IFREG) { 345 + /* invisible operation should not change atime */ 346 + filp->f_flags |= O_NOATIME; 345 347 filp->f_op = &xfs_invis_file_operations; 348 + } 346 349 347 350 fd_install(new_fd, filp); 348 351 return new_fd;
+1 -3
fs/xfs/linux-2.6/xfs_super.c
··· 227 227 xfs_revalidate_inode(XFS_BHVTOM(bdp), vp, ip); 228 228 xfs_set_inodeops(inode); 229 229 230 - spin_lock(&ip->i_flags_lock); 231 - ip->i_flags &= ~XFS_INEW; 232 - spin_unlock(&ip->i_flags_lock); 230 + xfs_iflags_clear(ip, XFS_INEW); 233 231 barrier(); 234 232 235 233 unlock_new_inode(inode);
+1 -3
fs/xfs/support/debug.c
··· 15 15 * along with this program; if not, write the Free Software Foundation, 16 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 17 */ 18 + #include <xfs.h> 18 19 #include "debug.h" 19 20 #include "spin.h" 20 - #include <asm/page.h> 21 - #include <linux/sched.h> 22 - #include <linux/kernel.h> 23 21 24 22 static char message[256]; /* keep it off the stack */ 25 23 static DEFINE_SPINLOCK(xfs_err_lock);
+1 -1
fs/xfs/support/move.c
··· 22 22 * as we go. 23 23 */ 24 24 int 25 - uio_read(caddr_t src, size_t len, struct uio *uio) 25 + xfs_uio_read(caddr_t src, size_t len, struct uio *uio) 26 26 { 27 27 size_t count; 28 28
+1 -1
fs/xfs/support/move.h
··· 65 65 typedef struct uio uio_t; 66 66 typedef struct iovec iovec_t; 67 67 68 - extern int uio_read (caddr_t, size_t, uio_t *); 68 + extern int xfs_uio_read (caddr_t, size_t, uio_t *); 69 69 70 70 #endif /* __XFS_SUPPORT_MOVE_H__ */
+23
fs/xfs/xfs.h
··· 17 17 */ 18 18 #ifndef __XFS_H__ 19 19 #define __XFS_H__ 20 + 21 + #ifdef CONFIG_XFS_DEBUG 22 + #define STATIC 23 + #define DEBUG 1 24 + #define XFS_BUF_LOCK_TRACKING 1 25 + /* #define QUOTADEBUG 1 */ 26 + #endif 27 + 28 + #ifdef CONFIG_XFS_TRACE 29 + #define XFS_ALLOC_TRACE 1 30 + #define XFS_ATTR_TRACE 1 31 + #define XFS_BLI_TRACE 1 32 + #define XFS_BMAP_TRACE 1 33 + #define XFS_BMBT_TRACE 1 34 + #define XFS_DIR2_TRACE 1 35 + #define XFS_DQUOT_TRACE 1 36 + #define XFS_ILOCK_TRACE 1 37 + #define XFS_LOG_TRACE 1 38 + #define XFS_RW_TRACE 1 39 + #define XFS_BUF_TRACE 1 40 + #define XFS_VNODE_TRACE 1 41 + #endif 42 + 20 43 #include <linux-2.6/xfs_linux.h> 21 44 #endif /* __XFS_H__ */
+1 -1
fs/xfs/xfs_dir2.c
··· 678 678 idbp->d_off = pa->cook; 679 679 idbp->d_name[namelen] = '\0'; 680 680 memcpy(idbp->d_name, pa->name, namelen); 681 - rval = uio_read((caddr_t)idbp, reclen, uio); 681 + rval = xfs_uio_read((caddr_t)idbp, reclen, uio); 682 682 pa->done = (rval == 0); 683 683 return rval; 684 684 }
+2 -20
fs/xfs/xfs_dmapi.h
··· 157 157 #define DM_FLAGS_IALLOCSEM_WR 0x020 /* thread holds i_alloc_sem wr */ 158 158 159 159 /* 160 - * Based on IO_ISDIRECT, decide which i_ flag is set. 160 + * Pull in platform specific event flags defines 161 161 */ 162 - #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) 163 - #define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \ 164 - DM_FLAGS_IMUX : 0) 165 - #define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_IMUX) 166 - #endif 167 - 168 - #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) && \ 169 - (LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,22)) 170 - #define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \ 171 - DM_FLAGS_IALLOCSEM_RD : DM_FLAGS_IMUX) 172 - #define DM_SEM_FLAG_WR (DM_FLAGS_IALLOCSEM_WR | DM_FLAGS_IMUX) 173 - #endif 174 - 175 - #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,21) 176 - #define DM_SEM_FLAG_RD(ioflags) (((ioflags) & IO_ISDIRECT) ? \ 177 - 0 : DM_FLAGS_IMUX) 178 - #define DM_SEM_FLAG_WR (DM_FLAGS_IMUX) 179 - #endif 180 - 162 + #include "xfs_dmapi_priv.h" 181 163 182 164 /* 183 165 * Macros to turn caller specified delay/block flags into
+37 -14
fs/xfs/xfs_iget.c
··· 215 215 * If INEW is set this inode is being set up 216 216 * we need to pause and try again. 217 217 */ 218 - if (ip->i_flags & XFS_INEW) { 218 + if (xfs_iflags_test(ip, XFS_INEW)) { 219 219 read_unlock(&ih->ih_lock); 220 220 delay(1); 221 221 XFS_STATS_INC(xs_ig_frecycle); ··· 230 230 * on its way out of the system, 231 231 * we need to pause and try again. 232 232 */ 233 - if (ip->i_flags & XFS_IRECLAIM) { 233 + if (xfs_iflags_test(ip, XFS_IRECLAIM)) { 234 234 read_unlock(&ih->ih_lock); 235 235 delay(1); 236 236 XFS_STATS_INC(xs_ig_frecycle); 237 237 238 + goto again; 239 + } 240 + ASSERT(xfs_iflags_test(ip, XFS_IRECLAIMABLE)); 241 + 242 + /* 243 + * If lookup is racing with unlink, then we 244 + * should return an error immediately so we 245 + * don't remove it from the reclaim list and 246 + * potentially leak the inode. 247 + */ 248 + if ((ip->i_d.di_mode == 0) && 249 + !(flags & XFS_IGET_CREATE)) { 250 + read_unlock(&ih->ih_lock); 251 + return ENOENT; 252 + } 253 + 254 + /* 255 + * There may be transactions sitting in the 256 + * incore log buffers or being flushed to disk 257 + * at this time. We can't clear the 258 + * XFS_IRECLAIMABLE flag until these 259 + * transactions have hit the disk, otherwise we 260 + * will void the guarantee the flag provides 261 + * xfs_iunpin() 262 + */ 263 + if (xfs_ipincount(ip)) { 264 + read_unlock(&ih->ih_lock); 265 + xfs_log_force(mp, 0, 266 + XFS_LOG_FORCE|XFS_LOG_SYNC); 267 + XFS_STATS_INC(xs_ig_frecycle); 238 268 goto again; 239 269 } 240 270 ··· 273 243 274 244 XFS_STATS_INC(xs_ig_found); 275 245 276 - spin_lock(&ip->i_flags_lock); 277 - ip->i_flags &= ~XFS_IRECLAIMABLE; 278 - spin_unlock(&ip->i_flags_lock); 246 + xfs_iflags_clear(ip, XFS_IRECLAIMABLE); 279 247 version = ih->ih_version; 280 248 read_unlock(&ih->ih_lock); 281 249 xfs_ihash_promote(ih, ip, version); ··· 327 299 if (lock_flags != 0) 328 300 xfs_ilock(ip, lock_flags); 329 301 330 - spin_lock(&ip->i_flags_lock); 331 - ip->i_flags &= ~XFS_ISTALE; 332 - spin_unlock(&ip->i_flags_lock); 333 - 302 + xfs_iflags_clear(ip, XFS_ISTALE); 334 303 vn_trace_exit(vp, "xfs_iget.found", 335 304 (inst_t *)__return_address); 336 305 goto return_ip; ··· 396 371 ih->ih_next = ip; 397 372 ip->i_udquot = ip->i_gdquot = NULL; 398 373 ih->ih_version++; 399 - spin_lock(&ip->i_flags_lock); 400 - ip->i_flags |= XFS_INEW; 401 - spin_unlock(&ip->i_flags_lock); 402 - 374 + xfs_iflags_set(ip, XFS_INEW); 403 375 write_unlock(&ih->ih_lock); 404 376 405 377 /* ··· 647 625 vn_trace_entry(vp, "xfs_iput_new", (inst_t *)__return_address); 648 626 649 627 if ((ip->i_d.di_mode == 0)) { 650 - ASSERT(!(ip->i_flags & XFS_IRECLAIMABLE)); 628 + ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE)); 651 629 vn_mark_bad(vp); 652 630 } 653 631 if (inode->i_state & I_NEW) ··· 705 683 /* 706 684 * Free all memory associated with the inode. 707 685 */ 686 + xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); 708 687 xfs_idestroy(ip); 709 688 }
+30 -40
fs/xfs/xfs_inode.c
··· 2193 2193 /* Inode not in memory or we found it already, 2194 2194 * nothing to do 2195 2195 */ 2196 - if (!ip || (ip->i_flags & XFS_ISTALE)) { 2196 + if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) { 2197 2197 read_unlock(&ih->ih_lock); 2198 2198 continue; 2199 2199 } ··· 2215 2215 2216 2216 if (ip == free_ip) { 2217 2217 if (xfs_iflock_nowait(ip)) { 2218 - spin_lock(&ip->i_flags_lock); 2219 - ip->i_flags |= XFS_ISTALE; 2220 - spin_unlock(&ip->i_flags_lock); 2221 - 2218 + xfs_iflags_set(ip, XFS_ISTALE); 2222 2219 if (xfs_inode_clean(ip)) { 2223 2220 xfs_ifunlock(ip); 2224 2221 } else { ··· 2228 2231 2229 2232 if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { 2230 2233 if (xfs_iflock_nowait(ip)) { 2231 - spin_lock(&ip->i_flags_lock); 2232 - ip->i_flags |= XFS_ISTALE; 2233 - spin_unlock(&ip->i_flags_lock); 2234 + xfs_iflags_set(ip, XFS_ISTALE); 2234 2235 2235 2236 if (xfs_inode_clean(ip)) { 2236 2237 xfs_ifunlock(ip); ··· 2258 2263 AIL_LOCK(mp,s); 2259 2264 iip->ili_flush_lsn = iip->ili_item.li_lsn; 2260 2265 AIL_UNLOCK(mp, s); 2261 - spin_lock(&iip->ili_inode->i_flags_lock); 2262 - iip->ili_inode->i_flags |= XFS_ISTALE; 2263 - spin_unlock(&iip->ili_inode->i_flags_lock); 2266 + xfs_iflags_set(ip, XFS_ISTALE); 2264 2267 pre_flushed++; 2265 2268 } 2266 2269 lip = lip->li_bio_list; ··· 2741 2748 { 2742 2749 ASSERT(atomic_read(&ip->i_pincount) > 0); 2743 2750 2744 - if (atomic_dec_and_test(&ip->i_pincount)) { 2745 - /* 2746 - * If the inode is currently being reclaimed, the 2747 - * linux inode _and_ the xfs vnode may have been 2748 - * freed so we cannot reference either of them safely. 2749 - * Hence we should not try to do anything to them 2750 - * if the xfs inode is currently in the reclaim 2751 - * path. 2752 - * 2753 - * However, we still need to issue the unpin wakeup 2754 - * call as the inode reclaim may be blocked waiting for 2755 - * the inode to become unpinned. 2756 - */ 2757 - struct inode *inode = NULL; 2751 + if (atomic_dec_and_lock(&ip->i_pincount, &ip->i_flags_lock)) { 2758 2752 2759 - spin_lock(&ip->i_flags_lock); 2760 - if (!(ip->i_flags & (XFS_IRECLAIM|XFS_IRECLAIMABLE))) { 2753 + /* 2754 + * If the inode is currently being reclaimed, the link between 2755 + * the bhv_vnode and the xfs_inode will be broken after the 2756 + * XFS_IRECLAIM* flag is set. Hence, if these flags are not 2757 + * set, then we can move forward and mark the linux inode dirty 2758 + * knowing that it is still valid as it won't freed until after 2759 + * the bhv_vnode<->xfs_inode link is broken in xfs_reclaim. The 2760 + * i_flags_lock is used to synchronise the setting of the 2761 + * XFS_IRECLAIM* flags and the breaking of the link, and so we 2762 + * can execute atomically w.r.t to reclaim by holding this lock 2763 + * here. 2764 + * 2765 + * However, we still need to issue the unpin wakeup call as the 2766 + * inode reclaim may be blocked waiting for the inode to become 2767 + * unpinned. 2768 + */ 2769 + 2770 + if (!__xfs_iflags_test(ip, XFS_IRECLAIM|XFS_IRECLAIMABLE)) { 2761 2771 bhv_vnode_t *vp = XFS_ITOV_NULL(ip); 2772 + struct inode *inode = NULL; 2773 + 2774 + BUG_ON(vp == NULL); 2775 + inode = vn_to_inode(vp); 2776 + BUG_ON(inode->i_state & I_CLEAR); 2762 2777 2763 2778 /* make sync come back and flush this inode */ 2764 - if (vp) { 2765 - inode = vn_to_inode(vp); 2766 - 2767 - if (!(inode->i_state & 2768 - (I_NEW|I_FREEING|I_CLEAR))) { 2769 - inode = igrab(inode); 2770 - if (inode) 2771 - mark_inode_dirty_sync(inode); 2772 - } else 2773 - inode = NULL; 2774 - } 2779 + if (!(inode->i_state & (I_NEW|I_FREEING))) 2780 + mark_inode_dirty_sync(inode); 2775 2781 } 2776 2782 spin_unlock(&ip->i_flags_lock); 2777 2783 wake_up(&ip->i_ipin_wait); 2778 - if (inode) 2779 - iput(inode); 2780 2784 } 2781 2785 } 2782 2786
+41
fs/xfs/xfs_inode.h
··· 305 305 #endif 306 306 } xfs_inode_t; 307 307 308 + 309 + /* 310 + * i_flags helper functions 311 + */ 312 + static inline void 313 + __xfs_iflags_set(xfs_inode_t *ip, unsigned short flags) 314 + { 315 + ip->i_flags |= flags; 316 + } 317 + 318 + static inline void 319 + xfs_iflags_set(xfs_inode_t *ip, unsigned short flags) 320 + { 321 + spin_lock(&ip->i_flags_lock); 322 + __xfs_iflags_set(ip, flags); 323 + spin_unlock(&ip->i_flags_lock); 324 + } 325 + 326 + static inline void 327 + xfs_iflags_clear(xfs_inode_t *ip, unsigned short flags) 328 + { 329 + spin_lock(&ip->i_flags_lock); 330 + ip->i_flags &= ~flags; 331 + spin_unlock(&ip->i_flags_lock); 332 + } 333 + 334 + static inline int 335 + __xfs_iflags_test(xfs_inode_t *ip, unsigned short flags) 336 + { 337 + return (ip->i_flags & flags); 338 + } 339 + 340 + static inline int 341 + xfs_iflags_test(xfs_inode_t *ip, unsigned short flags) 342 + { 343 + int ret; 344 + spin_lock(&ip->i_flags_lock); 345 + ret = __xfs_iflags_test(ip, flags); 346 + spin_unlock(&ip->i_flags_lock); 347 + return ret; 348 + } 308 349 #endif /* __KERNEL__ */ 309 350 310 351
+19 -14
fs/xfs/xfs_vnodeops.c
··· 1013 1013 pathlen = (int)ip->i_d.di_size; 1014 1014 1015 1015 if (ip->i_df.if_flags & XFS_IFINLINE) { 1016 - error = uio_read(ip->i_df.if_u1.if_data, pathlen, uiop); 1016 + error = xfs_uio_read(ip->i_df.if_u1.if_data, pathlen, uiop); 1017 1017 } 1018 1018 else { 1019 1019 /* ··· 1044 1044 byte_cnt = pathlen; 1045 1045 pathlen -= byte_cnt; 1046 1046 1047 - error = uio_read(XFS_BUF_PTR(bp), byte_cnt, uiop); 1047 + error = xfs_uio_read(XFS_BUF_PTR(bp), byte_cnt, uiop); 1048 1048 xfs_buf_relse (bp); 1049 1049 1050 1050 ··· 3827 3827 */ 3828 3828 xfs_synchronize_atime(ip); 3829 3829 3830 - /* If we have nothing to flush with this inode then complete the 3831 - * teardown now, otherwise break the link between the xfs inode 3832 - * and the linux inode and clean up the xfs inode later. This 3833 - * avoids flushing the inode to disk during the delete operation 3834 - * itself. 3830 + /* 3831 + * If we have nothing to flush with this inode then complete the 3832 + * teardown now, otherwise break the link between the xfs inode and the 3833 + * linux inode and clean up the xfs inode later. This avoids flushing 3834 + * the inode to disk during the delete operation itself. 3835 + * 3836 + * When breaking the link, we need to set the XFS_IRECLAIMABLE flag 3837 + * first to ensure that xfs_iunpin() will never see an xfs inode 3838 + * that has a linux inode being reclaimed. Synchronisation is provided 3839 + * by the i_flags_lock. 3835 3840 */ 3836 3841 if (!ip->i_update_core && (ip->i_itemp == NULL)) { 3837 3842 xfs_ilock(ip, XFS_ILOCK_EXCL); ··· 3845 3840 } else { 3846 3841 xfs_mount_t *mp = ip->i_mount; 3847 3842 3848 - /* Protect sync from us */ 3843 + /* Protect sync and unpin from us */ 3849 3844 XFS_MOUNT_ILOCK(mp); 3850 - vn_bhv_remove(VN_BHV_HEAD(vp), XFS_ITOBHV(ip)); 3851 - list_add_tail(&ip->i_reclaim, &mp->m_del_inodes); 3852 3845 spin_lock(&ip->i_flags_lock); 3853 - ip->i_flags |= XFS_IRECLAIMABLE; 3846 + __xfs_iflags_set(ip, XFS_IRECLAIMABLE); 3847 + vn_bhv_remove(VN_BHV_HEAD(vp), XFS_ITOBHV(ip)); 3854 3848 spin_unlock(&ip->i_flags_lock); 3849 + list_add_tail(&ip->i_reclaim, &mp->m_del_inodes); 3855 3850 XFS_MOUNT_IUNLOCK(mp); 3856 3851 } 3857 3852 return 0; ··· 3877 3872 */ 3878 3873 write_lock(&ih->ih_lock); 3879 3874 spin_lock(&ip->i_flags_lock); 3880 - if ((ip->i_flags & XFS_IRECLAIM) || 3881 - (!(ip->i_flags & XFS_IRECLAIMABLE) && vp == NULL)) { 3875 + if (__xfs_iflags_test(ip, XFS_IRECLAIM) || 3876 + (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) && vp == NULL)) { 3882 3877 spin_unlock(&ip->i_flags_lock); 3883 3878 write_unlock(&ih->ih_lock); 3884 3879 if (locked) { ··· 3887 3882 } 3888 3883 return 1; 3889 3884 } 3890 - ip->i_flags |= XFS_IRECLAIM; 3885 + __xfs_iflags_set(ip, XFS_IRECLAIM); 3891 3886 spin_unlock(&ip->i_flags_lock); 3892 3887 write_unlock(&ih->ih_lock);