Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at c9a28fa7b9ac19b676deefa0a171ce7df8755c08 242 lines 5.3 kB view raw
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_vnodeops.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"

/*
 * And this gunk is needed for xfs_mount.h
 */
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_dmapi.h"
#include "xfs_inum.h"
#include "xfs_ag.h"
#include "xfs_mount.h"


/*
 * Dedicated vnode inactive/reclaim sync wait queues.
 * Prime number of hash buckets since address is used as the key:
 * a prime modulus spreads pointer values (which share low-bit
 * alignment patterns) more evenly across the table.
 */
#define NVSYNC                  37
#define vptosync(v)             (&vsync[((unsigned long)v) % NVSYNC])
static wait_queue_head_t vsync[NVSYNC];

/*
 * One-time initialisation of the vsync wait-queue table.
 * Must run before any vn_iowait()/vn_iowake() caller can block or
 * wake on these queues.
 */
void
vn_init(void)
{
	int i;

	for (i = 0; i < NVSYNC; i++)
		init_waitqueue_head(&vsync[i]);
}

/*
 * Block until all outstanding I/O on @ip has drained, i.e. until
 * ip->i_iocount reaches zero.  The wait queue is selected by hashing
 * the inode pointer into the shared vsync table, so unrelated inodes
 * may share a queue (spurious wakeups are handled by wait_event()'s
 * condition re-check).
 */
void
vn_iowait(
	xfs_inode_t	*ip)
{
	wait_queue_head_t *wq = vptosync(ip);

	wait_event(*wq, (atomic_read(&ip->i_iocount) == 0));
}

/*
 * Drop one I/O reference on @ip; if this was the last one, wake any
 * vn_iowait() sleeper hashed to the same queue.
 */
void
vn_iowake(
	xfs_inode_t	*ip)
{
	if (atomic_dec_and_test(&ip->i_iocount))
		wake_up(vptosync(ip));
}

/*
 * Volume managers supporting multiple paths can send back ENODEV when the
 * final path disappears.  In this case continuing to fill the page cache
 * with dirty data which cannot be written out is evil, so prevent that.
 *
 * @f/@l are the calling file name and line, forwarded to the shutdown
 * path for diagnostics.
 */
void
vn_ioerror(
	xfs_inode_t	*ip,
	int		error,
	char		*f,
	int		l)
{
	if (unlikely(error == -ENODEV))
		xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ, f, l);
}

/*
 * Obtain the vnode backing a freshly set-up Linux inode and bump the
 * vnode statistics counters.  Asserts that the inode has no cached
 * pages yet.  Returns the vnode (never NULL as used here).
 */
bhv_vnode_t *
vn_initialize(
	struct inode	*inode)
{
	bhv_vnode_t	*vp = vn_from_inode(inode);

	XFS_STATS_INC(vn_active);
	XFS_STATS_INC(vn_alloc);

	ASSERT(VN_CACHED(vp) == 0);

	return vp;
}

/*
 * Revalidate the Linux inode from the vattr.
 * Note: i_size _not_ updated; we must hold the inode
 * semaphore when doing that - callers responsibility.
 *
 * Copies ownership, link count, block count and timestamps from @vap
 * into the Linux inode, then translates the XFS extended flags
 * (immutable / append-only / sync / noatime) into the corresponding
 * VFS i_flags bits, clearing each bit when its xflag is absent.
 */
void
vn_revalidate_core(
	bhv_vnode_t	*vp,
	bhv_vattr_t	*vap)
{
	struct inode	*inode = vn_to_inode(vp);

	inode->i_mode	    = vap->va_mode;
	inode->i_nlink	    = vap->va_nlink;
	inode->i_uid	    = vap->va_uid;
	inode->i_gid	    = vap->va_gid;
	inode->i_blocks	    = vap->va_nblocks;
	inode->i_mtime	    = vap->va_mtime;
	inode->i_ctime	    = vap->va_ctime;
	if (vap->va_xflags & XFS_XFLAG_IMMUTABLE)
		inode->i_flags |= S_IMMUTABLE;
	else
		inode->i_flags &= ~S_IMMUTABLE;
	if (vap->va_xflags & XFS_XFLAG_APPEND)
		inode->i_flags |= S_APPEND;
	else
		inode->i_flags &= ~S_APPEND;
	if (vap->va_xflags & XFS_XFLAG_SYNC)
		inode->i_flags |= S_SYNC;
	else
		inode->i_flags &= ~S_SYNC;
	if (vap->va_xflags & XFS_XFLAG_NOATIME)
		inode->i_flags |= S_NOATIME;
	else
		inode->i_flags &= ~S_NOATIME;
}

/*
 * Revalidate the Linux inode from the vnode.
 *
 * Fetches the stat attributes plus extended flags via xfs_getattr()
 * into the caller-supplied @vattr, and on success pushes them into
 * the Linux inode and clears XFS_IMODIFIED.  Returns a negated error
 * code (0 on success); note xfs_getattr() evidently returns positive
 * errors which are negated here for the VFS convention.
 */
int
__vn_revalidate(
	bhv_vnode_t	*vp,
	bhv_vattr_t	*vattr)
{
	int		error;

	vn_trace_entry(xfs_vtoi(vp), __FUNCTION__, (inst_t *)__return_address);
	vattr->va_mask = XFS_AT_STAT | XFS_AT_XFLAGS;
	error = xfs_getattr(xfs_vtoi(vp), vattr, 0);
	if (likely(!error)) {
		vn_revalidate_core(vp, vattr);
		xfs_iflags_clear(xfs_vtoi(vp), XFS_IMODIFIED);
	}
	return -error;
}

/*
 * Convenience wrapper around __vn_revalidate() for callers that do
 * not need the attribute buffer afterwards.
 */
int
vn_revalidate(
	bhv_vnode_t	*vp)
{
	bhv_vattr_t	vattr;

	return __vn_revalidate(vp, &vattr);
}

/*
 * Add a reference to a referenced vnode.
 *
 * Uses igrab() on the backing Linux inode; the ASSERT documents the
 * precondition that the vnode is already referenced, so igrab()
 * cannot fail here.  Returns @vp for call-chaining.
 */
bhv_vnode_t *
vn_hold(
	bhv_vnode_t	*vp)
{
	struct inode	*inode;

	XFS_STATS_INC(vn_hold);

	inode = igrab(vn_to_inode(vp));
	ASSERT(inode);

	return vp;
}

#ifdef	XFS_VNODE_TRACE

/*
 * Reference count of Linux inode if present, -1 if the xfs_inode
 * has no associated Linux inode.
 */
static inline int xfs_icount(struct xfs_inode *ip)
{
	bhv_vnode_t *vp = XFS_ITOV_NULL(ip);

	if (vp)
		return vn_count(vp);
	return -1;
}

/*
 * Record one vnode-trace event in the inode's ktrace buffer.
 * Each slot of the 16-entry record is annotated below; unused slots
 * are NULL.
 */
#define KTRACE_ENTER(ip, vk, s, line, ra)			\
	ktrace_enter(	(ip)->i_trace,				\
/* 0 */			(void *)(__psint_t)(vk),		\
/* 1 */			(void *)(s),				\
/* 2 */			(void *)(__psint_t) line,		\
/* 3 */			(void *)(__psint_t)xfs_icount(ip),	\
/* 4 */			(void *)(ra),				\
/* 5 */			NULL,					\
/* 6 */			(void *)(__psint_t)current_cpu(),	\
/* 7 */			(void *)(__psint_t)current_pid(),	\
/* 8 */			(void *)__return_address,		\
/* 9 */			NULL, NULL, NULL, NULL, NULL, NULL, NULL)

/*
 * Vnode tracing code.
 *
 * Thin wrappers tagging each trace record with its event type.  The
 * entry/exit variants log a function name with line 0; the
 * hold/ref/rele variants log the call site's file and line.
 */
void
vn_trace_entry(xfs_inode_t *ip, const char *func, inst_t *ra)
{
	KTRACE_ENTER(ip, VNODE_KTRACE_ENTRY, func, 0, ra);
}

void
vn_trace_exit(xfs_inode_t *ip, const char *func, inst_t *ra)
{
	KTRACE_ENTER(ip, VNODE_KTRACE_EXIT, func, 0, ra);
}

void
vn_trace_hold(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, VNODE_KTRACE_HOLD, file, line, ra);
}

void
vn_trace_ref(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, VNODE_KTRACE_REF, file, line, ra);
}

void
vn_trace_rele(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, VNODE_KTRACE_RELE, file, line, ra);
}
#endif	/* XFS_VNODE_TRACE */