Merge branch 'for-linus' of git://oss.sgi.com:8090/xfs/xfs-2.6

* 'for-linus' of git://oss.sgi.com:8090/xfs/xfs-2.6: (62 commits)
[XFS] add __init/__exit mark to specific init/cleanup functions
[XFS] Fix oops in xfs_file_readdir()
[XFS] kill xfs_root
[XFS] keep i_nlink updated and use proper accessors
[XFS] stop updating inode->i_blocks
[XFS] Make xfs_ail_check check less by default
[XFS] Move AIL pushing into it's own thread
[XFS] use generic_permission
[XFS] stop re-checking permissions in xfs_swapext
[XFS] clean up xfs_swapext
[XFS] remove permission check from xfs_change_file_space
[XFS] prevent panic during log recovery due to bogus op_hdr length
[XFS] Cleanup various fid related bits:
[XFS] Fix xfs_lowbit64
[XFS] Remove CFORK macros and use code directly in IFORK and DFORK macros.
[XFS] kill superflous buffer locking (2nd attempt)
[XFS] Use kernel-supplied "roundup_pow_of_two" for simplicity
[XFS] Remove the BPCSHIFT and NB* based macros from XFS.
[XFS] Remove bogus assert
[XFS] optimize XFS_IS_REALTIME_INODE w/o realtime config
...

+2298 -3179
-1
fs/xfs/Makefile-linux-2.6
··· 70 70 xfs_iget.o \ 71 71 xfs_inode.o \ 72 72 xfs_inode_item.o \ 73 - xfs_iocore.o \ 74 73 xfs_iomap.o \ 75 74 xfs_itable.o \ 76 75 xfs_dfrag.o \
-45
fs/xfs/linux-2.6/spin.h
··· 1 - /* 2 - * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc. 3 - * All Rights Reserved. 4 - * 5 - * This program is free software; you can redistribute it and/or 6 - * modify it under the terms of the GNU General Public License as 7 - * published by the Free Software Foundation. 8 - * 9 - * This program is distributed in the hope that it would be useful, 10 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 - * GNU General Public License for more details. 13 - * 14 - * You should have received a copy of the GNU General Public License 15 - * along with this program; if not, write the Free Software Foundation, 16 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 - */ 18 - #ifndef __XFS_SUPPORT_SPIN_H__ 19 - #define __XFS_SUPPORT_SPIN_H__ 20 - 21 - #include <linux/sched.h> /* preempt needs this */ 22 - #include <linux/spinlock.h> 23 - 24 - /* 25 - * Map lock_t from IRIX to Linux spinlocks. 26 - * 27 - * We do not make use of lock_t from interrupt context, so we do not 28 - * have to worry about disabling interrupts at all (unlike IRIX). 29 - */ 30 - 31 - typedef spinlock_t lock_t; 32 - 33 - #define SPLDECL(s) unsigned long s 34 - #ifndef DEFINE_SPINLOCK 35 - #define DEFINE_SPINLOCK(s) spinlock_t s = SPIN_LOCK_UNLOCKED 36 - #endif 37 - 38 - #define spinlock_init(lock, name) spin_lock_init(lock) 39 - #define spinlock_destroy(lock) 40 - #define mutex_spinlock(lock) ({ spin_lock(lock); 0; }) 41 - #define mutex_spinunlock(lock, s) do { spin_unlock(lock); (void)s; } while (0) 42 - #define nested_spinlock(lock) spin_lock(lock) 43 - #define nested_spinunlock(lock) spin_unlock(lock) 44 - 45 - #endif /* __XFS_SUPPORT_SPIN_H__ */
+24 -19
fs/xfs/linux-2.6/xfs_aops.c
··· 107 107 #define xfs_page_trace(tag, inode, page, pgoff) 108 108 #endif 109 109 110 + STATIC struct block_device * 111 + xfs_find_bdev_for_inode( 112 + struct xfs_inode *ip) 113 + { 114 + struct xfs_mount *mp = ip->i_mount; 115 + 116 + if (XFS_IS_REALTIME_INODE(ip)) 117 + return mp->m_rtdev_targp->bt_bdev; 118 + else 119 + return mp->m_ddev_targp->bt_bdev; 120 + } 121 + 110 122 /* 111 123 * Schedule IO completion handling on a xfsdatad if this was 112 124 * the final hold on this ioend. If we are asked to wait, ··· 163 151 /* 164 152 * Update on-disk file size now that data has been written to disk. 165 153 * The current in-memory file size is i_size. If a write is beyond 166 - * eof io_new_size will be the intended file size until i_size is 154 + * eof i_new_size will be the intended file size until i_size is 167 155 * updated. If this write does not extend all the way to the valid 168 156 * file size then restrict this update to the end of the write. 169 157 */ ··· 185 173 186 174 xfs_ilock(ip, XFS_ILOCK_EXCL); 187 175 188 - isize = MAX(ip->i_size, ip->i_iocore.io_new_size); 176 + isize = MAX(ip->i_size, ip->i_new_size); 189 177 isize = MIN(isize, bsize); 190 178 191 179 if (ip->i_d.di_size < isize) { ··· 238 226 { 239 227 xfs_ioend_t *ioend = 240 228 container_of(work, xfs_ioend_t, io_work); 229 + struct xfs_inode *ip = XFS_I(ioend->io_inode); 241 230 xfs_off_t offset = ioend->io_offset; 242 231 size_t size = ioend->io_size; 243 232 244 233 if (likely(!ioend->io_error)) { 245 - xfs_bmap(XFS_I(ioend->io_inode), offset, size, 246 - BMAPI_UNWRITTEN, NULL, NULL); 234 + if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) 235 + xfs_iomap_write_unwritten(ip, offset, size); 247 236 xfs_setfilesize(ioend); 248 237 } 249 238 xfs_destroy_ioend(ioend); ··· 317 304 xfs_inode_t *ip = XFS_I(inode); 318 305 int error, nmaps = 1; 319 306 320 - error = xfs_bmap(ip, offset, count, 307 + error = xfs_iomap(ip, offset, count, 321 308 flags, mapp, &nmaps); 322 309 if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE))) 323 310 xfs_iflags_set(ip, XFS_IMODIFIED); ··· 1336 1323 offset = (xfs_off_t)iblock << inode->i_blkbits; 1337 1324 ASSERT(bh_result->b_size >= (1 << inode->i_blkbits)); 1338 1325 size = bh_result->b_size; 1339 - error = xfs_bmap(XFS_I(inode), offset, size, 1326 + error = xfs_iomap(XFS_I(inode), offset, size, 1340 1327 create ? 
flags : BMAPI_READ, &iomap, &niomap); 1341 1328 if (error) 1342 1329 return -error; ··· 1484 1471 { 1485 1472 struct file *file = iocb->ki_filp; 1486 1473 struct inode *inode = file->f_mapping->host; 1487 - xfs_iomap_t iomap; 1488 - int maps = 1; 1489 - int error; 1474 + struct block_device *bdev; 1490 1475 ssize_t ret; 1491 1476 1492 - error = xfs_bmap(XFS_I(inode), offset, 0, 1493 - BMAPI_DEVICE, &iomap, &maps); 1494 - if (error) 1495 - return -error; 1477 + bdev = xfs_find_bdev_for_inode(XFS_I(inode)); 1496 1478 1497 1479 if (rw == WRITE) { 1498 1480 iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN); 1499 1481 ret = blockdev_direct_IO_own_locking(rw, iocb, inode, 1500 - iomap.iomap_target->bt_bdev, 1501 - iov, offset, nr_segs, 1482 + bdev, iov, offset, nr_segs, 1502 1483 xfs_get_blocks_direct, 1503 1484 xfs_end_io_direct); 1504 1485 } else { 1505 1486 iocb->private = xfs_alloc_ioend(inode, IOMAP_READ); 1506 1487 ret = blockdev_direct_IO_no_locking(rw, iocb, inode, 1507 - iomap.iomap_target->bt_bdev, 1508 - iov, offset, nr_segs, 1488 + bdev, iov, offset, nr_segs, 1509 1489 xfs_get_blocks_direct, 1510 1490 xfs_end_io_direct); 1511 1491 } ··· 1531 1525 struct inode *inode = (struct inode *)mapping->host; 1532 1526 struct xfs_inode *ip = XFS_I(inode); 1533 1527 1534 - vn_trace_entry(XFS_I(inode), __FUNCTION__, 1535 - (inst_t *)__return_address); 1528 + xfs_itrace_entry(XFS_I(inode)); 1536 1529 xfs_rwlock(ip, VRWLOCK_READ); 1537 1530 xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF); 1538 1531 xfs_rwunlock(ip, VRWLOCK_READ);
+6 -51
fs/xfs/linux-2.6/xfs_buf.c
··· 387 387 if (unlikely(page == NULL)) { 388 388 if (flags & XBF_READ_AHEAD) { 389 389 bp->b_page_count = i; 390 - for (i = 0; i < bp->b_page_count; i++) 391 - unlock_page(bp->b_pages[i]); 392 390 return -ENOMEM; 393 391 } 394 392 ··· 416 418 ASSERT(!PagePrivate(page)); 417 419 if (!PageUptodate(page)) { 418 420 page_count--; 419 - if (blocksize >= PAGE_CACHE_SIZE) { 420 - if (flags & XBF_READ) 421 - bp->b_locked = 1; 422 - } else if (!PagePrivate(page)) { 421 + if (blocksize < PAGE_CACHE_SIZE && !PagePrivate(page)) { 423 422 if (test_page_region(page, offset, nbytes)) 424 423 page_count++; 425 424 } 426 425 } 427 426 427 + unlock_page(page); 428 428 bp->b_pages[i] = page; 429 429 offset = 0; 430 - } 431 - 432 - if (!bp->b_locked) { 433 - for (i = 0; i < bp->b_page_count; i++) 434 - unlock_page(bp->b_pages[i]); 435 430 } 436 431 437 432 if (page_count == bp->b_page_count) ··· 742 751 bp->b_pages[i] = mem_to_page((void *)pageaddr); 743 752 pageaddr += PAGE_CACHE_SIZE; 744 753 } 745 - bp->b_locked = 0; 746 754 747 755 bp->b_count_desired = len; 748 756 bp->b_buffer_length = buflen; ··· 1088 1098 return status; 1089 1099 } 1090 1100 1091 - STATIC_INLINE int 1092 - _xfs_buf_iolocked( 1093 - xfs_buf_t *bp) 1094 - { 1095 - ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE)); 1096 - if (bp->b_flags & XBF_READ) 1097 - return bp->b_locked; 1098 - return 0; 1099 - } 1100 - 1101 1101 STATIC_INLINE void 1102 1102 _xfs_buf_ioend( 1103 1103 xfs_buf_t *bp, 1104 1104 int schedule) 1105 1105 { 1106 - if (atomic_dec_and_test(&bp->b_io_remaining) == 1) { 1107 - bp->b_locked = 0; 1106 + if (atomic_dec_and_test(&bp->b_io_remaining) == 1) 1108 1107 xfs_buf_ioend(bp, schedule); 1109 - } 1110 1108 } 1111 1109 1112 1110 STATIC void ··· 1125 1147 1126 1148 if (--bvec >= bio->bi_io_vec) 1127 1149 prefetchw(&bvec->bv_page->flags); 1128 - 1129 - if (_xfs_buf_iolocked(bp)) { 1130 - unlock_page(page); 1131 - } 1132 1150 } while (bvec >= bio->bi_io_vec); 1133 1151 1134 1152 _xfs_buf_ioend(bp, 1); ··· 1135 1161 _xfs_buf_ioapply( 1136 1162 xfs_buf_t *bp) 1137 1163 { 1138 - int i, rw, map_i, total_nr_pages, nr_pages; 1164 + int rw, map_i, total_nr_pages, nr_pages; 1139 1165 struct bio *bio; 1140 1166 int offset = bp->b_offset; 1141 1167 int size = bp->b_count_desired; 1142 1168 sector_t sector = bp->b_bn; 1143 1169 unsigned int blocksize = bp->b_target->bt_bsize; 1144 - int locking = _xfs_buf_iolocked(bp); 1145 1170 1146 1171 total_nr_pages = bp->b_page_count; 1147 1172 map_i = 0; ··· 1163 1190 * filesystem block size is not smaller than the page size. 
1164 1191 */ 1165 1192 if ((bp->b_buffer_length < PAGE_CACHE_SIZE) && 1166 - (bp->b_flags & XBF_READ) && locking && 1193 + (bp->b_flags & XBF_READ) && 1167 1194 (blocksize >= PAGE_CACHE_SIZE)) { 1168 1195 bio = bio_alloc(GFP_NOIO, 1); 1169 1196 ··· 1178 1205 atomic_inc(&bp->b_io_remaining); 1179 1206 1180 1207 goto submit_io; 1181 - } 1182 - 1183 - /* Lock down the pages which we need to for the request */ 1184 - if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) { 1185 - for (i = 0; size; i++) { 1186 - int nbytes = PAGE_CACHE_SIZE - offset; 1187 - struct page *page = bp->b_pages[i]; 1188 - 1189 - if (nbytes > size) 1190 - nbytes = size; 1191 - 1192 - lock_page(page); 1193 - 1194 - size -= nbytes; 1195 - offset = 0; 1196 - } 1197 - offset = bp->b_offset; 1198 - size = bp->b_count_desired; 1199 1208 } 1200 1209 1201 1210 next_chunk: ··· 1526 1571 1527 1572 INIT_LIST_HEAD(&btp->bt_list); 1528 1573 INIT_LIST_HEAD(&btp->bt_delwrite_queue); 1529 - spinlock_init(&btp->bt_delwrite_lock, "delwri_lock"); 1574 + spin_lock_init(&btp->bt_delwrite_lock); 1530 1575 btp->bt_flags = 0; 1531 1576 btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd"); 1532 1577 if (IS_ERR(btp->bt_task)) {
-1
fs/xfs/linux-2.6/xfs_buf.h
··· 143 143 void *b_fspriv2; 144 144 void *b_fspriv3; 145 145 unsigned short b_error; /* error code on I/O */ 146 - unsigned short b_locked; /* page array is locked */ 147 146 unsigned int b_page_count; /* size of page array */ 148 147 unsigned int b_offset; /* page offset in first page */ 149 148 struct page **b_pages; /* array of page pointers */
+17 -8
fs/xfs/linux-2.6/xfs_export.c
··· 118 118 u64 ino, 119 119 u32 generation) 120 120 { 121 - xfs_fid_t xfid; 122 - bhv_vnode_t *vp; 121 + xfs_mount_t *mp = XFS_M(sb); 122 + xfs_inode_t *ip; 123 123 int error; 124 124 125 - xfid.fid_len = sizeof(xfs_fid_t) - sizeof(xfid.fid_len); 126 - xfid.fid_pad = 0; 127 - xfid.fid_ino = ino; 128 - xfid.fid_gen = generation; 125 + /* 126 + * NFS can sometimes send requests for ino 0. Fail them gracefully. 127 + */ 128 + if (ino == 0) 129 + return ERR_PTR(-ESTALE); 129 130 130 - error = xfs_vget(XFS_M(sb), &vp, &xfid); 131 + error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0); 131 132 if (error) 132 133 return ERR_PTR(-error); 134 + if (!ip) 135 + return ERR_PTR(-EIO); 133 136 134 - return vp ? vn_to_inode(vp) : NULL; 137 + if (!ip->i_d.di_mode || ip->i_d.di_gen != generation) { 138 + xfs_iput_new(ip, XFS_ILOCK_SHARED); 139 + return ERR_PTR(-ENOENT); 140 + } 141 + 142 + xfs_iunlock(ip, XFS_ILOCK_SHARED); 143 + return ip->i_vnode; 135 144 } 136 145 137 146 STATIC struct dentry *
+1 -2
fs/xfs/linux-2.6/xfs_file.c
··· 350 350 351 351 size = buf.used; 352 352 de = (struct hack_dirent *)buf.dirent; 353 - curr_offset = de->offset /* & 0x7fffffff */; 354 353 while (size > 0) { 354 + curr_offset = de->offset /* & 0x7fffffff */; 355 355 if (filldir(dirent, de->name, de->namlen, 356 356 curr_offset & 0x7fffffff, 357 357 de->ino, de->d_type)) { ··· 362 362 sizeof(u64)); 363 363 size -= reclen; 364 364 de = (struct hack_dirent *)((char *)de + reclen); 365 - curr_offset = de->offset /* & 0x7fffffff */; 366 365 } 367 366 } 368 367
+2 -1
fs/xfs/linux-2.6/xfs_globals.c
··· 47 47 /* 48 48 * Global system credential structure. 49 49 */ 50 - cred_t sys_cred_val, *sys_cred = &sys_cred_val; 50 + static cred_t sys_cred_val; 51 + cred_t *sys_cred = &sys_cred_val; 51 52
+31 -55
fs/xfs/linux-2.6/xfs_ioctl.c
··· 75 75 xfs_handle_t handle; 76 76 xfs_fsop_handlereq_t hreq; 77 77 struct inode *inode; 78 - bhv_vnode_t *vp; 79 78 80 79 if (copy_from_user(&hreq, arg, sizeof(hreq))) 81 80 return -XFS_ERROR(EFAULT); ··· 133 134 return -XFS_ERROR(EBADF); 134 135 } 135 136 136 - /* we need the vnode */ 137 - vp = vn_from_inode(inode); 138 - 139 137 /* now we can grab the fsid */ 140 138 memcpy(&handle.ha_fsid, XFS_I(inode)->i_mount->m_fixedfsid, 141 139 sizeof(xfs_fsid_t)); 142 140 hsize = sizeof(xfs_fsid_t); 143 141 144 142 if (cmd != XFS_IOC_PATH_TO_FSHANDLE) { 145 - xfs_inode_t *ip; 143 + xfs_inode_t *ip = XFS_I(inode); 146 144 int lock_mode; 147 145 148 146 /* need to get access to the xfs_inode to read the generation */ 149 - ip = xfs_vtoi(vp); 150 - ASSERT(ip); 151 147 lock_mode = xfs_ilock_map_shared(ip); 152 148 153 149 /* fill in fid section of handle from inode */ ··· 170 176 171 177 172 178 /* 173 - * Convert userspace handle data into vnode (and inode). 174 - * We [ab]use the fact that all the fsop_handlereq ioctl calls 175 - * have a data structure argument whose first component is always 176 - * a xfs_fsop_handlereq_t, so we can cast to and from this type. 177 - * This allows us to optimise the copy_from_user calls and gives 178 - * a handy, shared routine. 179 + * Convert userspace handle data into inode. 179 180 * 180 - * If no error, caller must always VN_RELE the returned vp. 181 + * We use the fact that all the fsop_handlereq ioctl calls have a data 182 + * structure argument whose first component is always a xfs_fsop_handlereq_t, 183 + * so we can pass that sub structure into this handy, shared routine. 184 + * 185 + * If no error, caller must always iput the returned inode. 181 186 */ 182 187 STATIC int 183 188 xfs_vget_fsop_handlereq( 184 189 xfs_mount_t *mp, 185 190 struct inode *parinode, /* parent inode pointer */ 186 191 xfs_fsop_handlereq_t *hreq, 187 - bhv_vnode_t **vp, 188 192 struct inode **inode) 189 193 { 190 194 void __user *hanp; ··· 191 199 xfs_handle_t *handlep; 192 200 xfs_handle_t handle; 193 201 xfs_inode_t *ip; 194 - struct inode *inodep; 195 - bhv_vnode_t *vpp; 196 202 xfs_ino_t ino; 197 203 __u32 igen; 198 204 int error; ··· 231 241 } 232 242 233 243 /* 234 - * Get the XFS inode, building a vnode to go with it. 244 + * Get the XFS inode, building a Linux inode to go with it. 
235 245 */ 236 246 error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0); 237 247 if (error) ··· 243 253 return XFS_ERROR(ENOENT); 244 254 } 245 255 246 - vpp = XFS_ITOV(ip); 247 - inodep = vn_to_inode(vpp); 248 256 xfs_iunlock(ip, XFS_ILOCK_SHARED); 249 257 250 - *vp = vpp; 251 - *inode = inodep; 258 + *inode = XFS_ITOV(ip); 252 259 return 0; 253 260 } 254 261 ··· 262 275 struct file *filp; 263 276 struct inode *inode; 264 277 struct dentry *dentry; 265 - bhv_vnode_t *vp; 266 278 xfs_fsop_handlereq_t hreq; 267 279 268 280 if (!capable(CAP_SYS_ADMIN)) ··· 269 283 if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) 270 284 return -XFS_ERROR(EFAULT); 271 285 272 - error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &vp, &inode); 286 + error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &inode); 273 287 if (error) 274 288 return -error; 275 289 ··· 371 385 { 372 386 struct inode *inode; 373 387 xfs_fsop_handlereq_t hreq; 374 - bhv_vnode_t *vp; 375 388 __u32 olen; 376 389 void *link; 377 390 int error; ··· 380 395 if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) 381 396 return -XFS_ERROR(EFAULT); 382 397 383 - error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &vp, &inode); 398 + error = xfs_vget_fsop_handlereq(mp, parinode, &hreq, &inode); 384 399 if (error) 385 400 return -error; 386 401 ··· 423 438 struct fsdmidata fsd; 424 439 xfs_fsop_setdm_handlereq_t dmhreq; 425 440 struct inode *inode; 426 - bhv_vnode_t *vp; 427 441 428 442 if (!capable(CAP_MKNOD)) 429 443 return -XFS_ERROR(EPERM); 430 444 if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t))) 431 445 return -XFS_ERROR(EFAULT); 432 446 433 - error = xfs_vget_fsop_handlereq(mp, parinode, &dmhreq.hreq, &vp, &inode); 447 + error = xfs_vget_fsop_handlereq(mp, parinode, &dmhreq.hreq, &inode); 434 448 if (error) 435 449 return -error; 436 450 437 451 if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) { 438 - VN_RELE(vp); 439 - return -XFS_ERROR(EPERM); 452 + error = -XFS_ERROR(EPERM); 453 + goto out; 440 454 } 441 455 442 456 if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) { 443 - VN_RELE(vp); 444 - return -XFS_ERROR(EFAULT); 457 + error = -XFS_ERROR(EFAULT); 458 + goto out; 445 459 } 446 460 447 - error = xfs_set_dmattrs(xfs_vtoi(vp), 448 - fsd.fsd_dmevmask, fsd.fsd_dmstate); 461 + error = -xfs_set_dmattrs(XFS_I(inode), fsd.fsd_dmevmask, 462 + fsd.fsd_dmstate); 449 463 450 - VN_RELE(vp); 451 - if (error) 452 - return -error; 453 - return 0; 464 + out: 465 + iput(inode); 466 + return error; 454 467 } 455 468 456 469 STATIC int ··· 461 478 attrlist_cursor_kern_t *cursor; 462 479 xfs_fsop_attrlist_handlereq_t al_hreq; 463 480 struct inode *inode; 464 - bhv_vnode_t *vp; 465 481 char *kbuf; 466 482 467 483 if (!capable(CAP_SYS_ADMIN)) ··· 470 488 if (al_hreq.buflen > XATTR_LIST_MAX) 471 489 return -XFS_ERROR(EINVAL); 472 490 473 - error = xfs_vget_fsop_handlereq(mp, parinode, &al_hreq.hreq, 474 - &vp, &inode); 491 + error = xfs_vget_fsop_handlereq(mp, parinode, &al_hreq.hreq, &inode); 475 492 if (error) 476 493 goto out; 477 494 ··· 490 509 out_kfree: 491 510 kfree(kbuf); 492 511 out_vn_rele: 493 - VN_RELE(vp); 512 + iput(inode); 494 513 out: 495 514 return -error; 496 515 } ··· 512 531 if (!kbuf) 513 532 return ENOMEM; 514 533 515 - error = xfs_attr_get(XFS_I(inode), name, kbuf, len, flags, NULL); 534 + error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags, NULL); 516 535 if (error) 517 536 goto out_kfree; 518 537 ··· 579 598 xfs_attr_multiop_t *ops; 580 599 
xfs_fsop_attrmulti_handlereq_t am_hreq; 581 600 struct inode *inode; 582 - bhv_vnode_t *vp; 583 601 unsigned int i, size; 584 602 char *attr_name; 585 603 ··· 587 607 if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t))) 588 608 return -XFS_ERROR(EFAULT); 589 609 590 - error = xfs_vget_fsop_handlereq(mp, parinode, &am_hreq.hreq, &vp, &inode); 610 + error = xfs_vget_fsop_handlereq(mp, parinode, &am_hreq.hreq, &inode); 591 611 if (error) 592 612 goto out; 593 613 ··· 646 666 out_kfree_ops: 647 667 kfree(ops); 648 668 out_vn_rele: 649 - VN_RELE(vp); 669 + iput(inode); 650 670 out: 651 671 return -error; 652 672 } ··· 682 702 683 703 STATIC int 684 704 xfs_ioc_xattr( 685 - bhv_vnode_t *vp, 686 705 xfs_inode_t *ip, 687 706 struct file *filp, 688 707 unsigned int cmd, ··· 714 735 void __user *arg) 715 736 { 716 737 struct inode *inode = filp->f_path.dentry->d_inode; 717 - bhv_vnode_t *vp = vn_from_inode(inode); 718 738 xfs_mount_t *mp = ip->i_mount; 719 739 int error; 720 740 721 - vn_trace_entry(XFS_I(inode), "xfs_ioctl", (inst_t *)__return_address); 722 - 741 + xfs_itrace_entry(XFS_I(inode)); 723 742 switch (cmd) { 724 743 725 744 case XFS_IOC_ALLOCSP: ··· 741 764 case XFS_IOC_DIOINFO: { 742 765 struct dioattr da; 743 766 xfs_buftarg_t *target = 744 - (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ? 767 + XFS_IS_REALTIME_INODE(ip) ? 745 768 mp->m_rtdev_targp : mp->m_ddev_targp; 746 769 747 770 da.d_mem = da.d_miniosz = 1 << target->bt_sshift; ··· 773 796 case XFS_IOC_GETXFLAGS: 774 797 case XFS_IOC_SETXFLAGS: 775 798 case XFS_IOC_FSSETXATTR: 776 - return xfs_ioc_xattr(vp, ip, filp, cmd, arg); 799 + return xfs_ioc_xattr(ip, filp, cmd, arg); 777 800 778 801 case XFS_IOC_FSSETDM: { 779 802 struct fsdmidata dmi; ··· 1180 1203 1181 1204 STATIC int 1182 1205 xfs_ioc_xattr( 1183 - bhv_vnode_t *vp, 1184 1206 xfs_inode_t *ip, 1185 1207 struct file *filp, 1186 1208 unsigned int cmd, ··· 1213 1237 1214 1238 error = xfs_setattr(ip, vattr, attr_flags, NULL); 1215 1239 if (likely(!error)) 1216 - __vn_revalidate(vp, vattr); /* update flags */ 1240 + vn_revalidate(XFS_ITOV(ip)); /* update flags */ 1217 1241 error = -error; 1218 1242 break; 1219 1243 } ··· 1248 1272 1249 1273 error = xfs_setattr(ip, vattr, attr_flags, NULL); 1250 1274 if (likely(!error)) 1251 - __vn_revalidate(vp, vattr); /* update flags */ 1275 + vn_revalidate(XFS_ITOV(ip)); /* update flags */ 1252 1276 error = -error; 1253 1277 break; 1254 1278 }
+6 -3
fs/xfs/linux-2.6/xfs_ioctl32.c
··· 44 44 #include "xfs_error.h" 45 45 #include "xfs_dfrag.h" 46 46 #include "xfs_vnodeops.h" 47 + #include "xfs_ioctl32.h" 47 48 48 49 #define _NATIVE_IOC(cmd, type) \ 49 50 _IOC(_IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), sizeof(type)) ··· 380 379 switch (cmd) { 381 380 case XFS_IOC_DIOINFO: 382 381 case XFS_IOC_FSGEOMETRY: 383 - case XFS_IOC_GETVERSION: 384 - case XFS_IOC_GETXFLAGS: 385 - case XFS_IOC_SETXFLAGS: 386 382 case XFS_IOC_FSGETXATTR: 387 383 case XFS_IOC_FSSETXATTR: 388 384 case XFS_IOC_FSGETXATTRA: ··· 405 407 case XFS_IOC_ERROR_CLEARALL: 406 408 break; 407 409 410 + case XFS_IOC32_GETXFLAGS: 411 + case XFS_IOC32_SETXFLAGS: 412 + case XFS_IOC32_GETVERSION: 413 + cmd = _NATIVE_IOC(cmd, long); 414 + break; 408 415 #ifdef BROKEN_X86_ALIGNMENT 409 416 /* xfs_flock_t has wrong u32 vs u64 alignment */ 410 417 case XFS_IOC_ALLOCSP_32:
+136 -34
fs/xfs/linux-2.6/xfs_iops.c
··· 52 52 #include <linux/xattr.h> 53 53 #include <linux/namei.h> 54 54 #include <linux/security.h> 55 + #include <linux/falloc.h> 55 56 56 57 /* 57 58 * Bring the atime in the XFS inode uptodate. ··· 69 68 ip->i_d.di_atime.t_sec = (__int32_t)vp->i_atime.tv_sec; 70 69 ip->i_d.di_atime.t_nsec = (__int32_t)vp->i_atime.tv_nsec; 71 70 } 71 + } 72 + 73 + /* 74 + * If the linux inode exists, mark it dirty. 75 + * Used when commiting a dirty inode into a transaction so that 76 + * the inode will get written back by the linux code 77 + */ 78 + void 79 + xfs_mark_inode_dirty_sync( 80 + xfs_inode_t *ip) 81 + { 82 + bhv_vnode_t *vp; 83 + 84 + vp = XFS_ITOV_NULL(ip); 85 + if (vp) 86 + mark_inode_dirty_sync(vn_to_inode(vp)); 72 87 } 73 88 74 89 /* ··· 201 184 struct xfs_inode *ip = XFS_I(inode); 202 185 loff_t size; 203 186 204 - inode->i_nlink = ip->i_d.di_nlink; 205 - inode->i_blocks = 206 - XFS_FSB_TO_BB(ip->i_mount, ip->i_d.di_nblocks + 207 - ip->i_delayed_blks); 208 187 /* we're under i_sem so i_size can't change under us */ 209 188 size = XFS_ISIZE(ip); 210 189 if (i_size_read(inode) != size) ··· 555 542 556 543 #ifdef CONFIG_XFS_POSIX_ACL 557 544 STATIC int 558 - xfs_vn_permission( 559 - struct inode *inode, 560 - int mode, 561 - struct nameidata *nd) 545 + xfs_check_acl( 546 + struct inode *inode, 547 + int mask) 562 548 { 563 - return -xfs_access(XFS_I(inode), mode << 6, NULL); 549 + struct xfs_inode *ip = XFS_I(inode); 550 + int error; 551 + 552 + xfs_itrace_entry(ip); 553 + 554 + if (XFS_IFORK_Q(ip)) { 555 + error = xfs_acl_iaccess(ip, mask, NULL); 556 + if (error != -1) 557 + return -error; 558 + } 559 + 560 + return -EAGAIN; 561 + } 562 + 563 + STATIC int 564 + xfs_vn_permission( 565 + struct inode *inode, 566 + int mask, 567 + struct nameidata *nd) 568 + { 569 + return generic_permission(inode, mask, xfs_check_acl); 564 570 } 565 571 #else 566 572 #define xfs_vn_permission NULL ··· 587 555 588 556 STATIC int 589 557 xfs_vn_getattr( 590 - struct vfsmount *mnt, 591 - struct dentry *dentry, 592 - struct kstat *stat) 558 + struct vfsmount *mnt, 559 + struct dentry *dentry, 560 + struct kstat *stat) 593 561 { 594 - struct inode *inode = dentry->d_inode; 595 - bhv_vattr_t vattr = { .va_mask = XFS_AT_STAT }; 596 - int error; 562 + struct inode *inode = dentry->d_inode; 563 + struct xfs_inode *ip = XFS_I(inode); 564 + struct xfs_mount *mp = ip->i_mount; 597 565 598 - error = xfs_getattr(XFS_I(inode), &vattr, ATTR_LAZY); 599 - if (likely(!error)) { 600 - stat->size = i_size_read(inode); 601 - stat->dev = inode->i_sb->s_dev; 602 - stat->rdev = (vattr.va_rdev == 0) ? 
0 : 603 - MKDEV(sysv_major(vattr.va_rdev) & 0x1ff, 604 - sysv_minor(vattr.va_rdev)); 605 - stat->mode = vattr.va_mode; 606 - stat->nlink = vattr.va_nlink; 607 - stat->uid = vattr.va_uid; 608 - stat->gid = vattr.va_gid; 609 - stat->ino = vattr.va_nodeid; 610 - stat->atime = vattr.va_atime; 611 - stat->mtime = vattr.va_mtime; 612 - stat->ctime = vattr.va_ctime; 613 - stat->blocks = vattr.va_nblocks; 614 - stat->blksize = vattr.va_blocksize; 566 + xfs_itrace_entry(ip); 567 + 568 + if (XFS_FORCED_SHUTDOWN(mp)) 569 + return XFS_ERROR(EIO); 570 + 571 + stat->size = XFS_ISIZE(ip); 572 + stat->dev = inode->i_sb->s_dev; 573 + stat->mode = ip->i_d.di_mode; 574 + stat->nlink = ip->i_d.di_nlink; 575 + stat->uid = ip->i_d.di_uid; 576 + stat->gid = ip->i_d.di_gid; 577 + stat->ino = ip->i_ino; 578 + #if XFS_BIG_INUMS 579 + stat->ino += mp->m_inoadd; 580 + #endif 581 + stat->atime = inode->i_atime; 582 + stat->mtime.tv_sec = ip->i_d.di_mtime.t_sec; 583 + stat->mtime.tv_nsec = ip->i_d.di_mtime.t_nsec; 584 + stat->ctime.tv_sec = ip->i_d.di_ctime.t_sec; 585 + stat->ctime.tv_nsec = ip->i_d.di_ctime.t_nsec; 586 + stat->blocks = 587 + XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks); 588 + 589 + 590 + switch (inode->i_mode & S_IFMT) { 591 + case S_IFBLK: 592 + case S_IFCHR: 593 + stat->blksize = BLKDEV_IOSIZE; 594 + stat->rdev = MKDEV(sysv_major(ip->i_df.if_u2.if_rdev) & 0x1ff, 595 + sysv_minor(ip->i_df.if_u2.if_rdev)); 596 + break; 597 + default: 598 + if (XFS_IS_REALTIME_INODE(ip)) { 599 + /* 600 + * If the file blocks are being allocated from a 601 + * realtime volume, then return the inode's realtime 602 + * extent size or the realtime volume's extent size. 603 + */ 604 + stat->blksize = 605 + xfs_get_extsz_hint(ip) << mp->m_sb.sb_blocklog; 606 + } else 607 + stat->blksize = xfs_preferred_iosize(mp); 608 + stat->rdev = 0; 609 + break; 615 610 } 616 - return -error; 611 + 612 + return 0; 617 613 } 618 614 619 615 STATIC int ··· 696 636 697 637 error = xfs_setattr(XFS_I(inode), &vattr, flags, NULL); 698 638 if (likely(!error)) 699 - __vn_revalidate(vn_from_inode(inode), &vattr); 639 + vn_revalidate(vn_from_inode(inode)); 700 640 return -error; 701 641 } 702 642 ··· 810 750 return namesp->attr_remove(vp, attr, xflags); 811 751 } 812 752 753 + STATIC long 754 + xfs_vn_fallocate( 755 + struct inode *inode, 756 + int mode, 757 + loff_t offset, 758 + loff_t len) 759 + { 760 + long error; 761 + loff_t new_size = 0; 762 + xfs_flock64_t bf; 763 + xfs_inode_t *ip = XFS_I(inode); 764 + 765 + /* preallocation on directories not yet supported */ 766 + error = -ENODEV; 767 + if (S_ISDIR(inode->i_mode)) 768 + goto out_error; 769 + 770 + bf.l_whence = 0; 771 + bf.l_start = offset; 772 + bf.l_len = len; 773 + 774 + xfs_ilock(ip, XFS_IOLOCK_EXCL); 775 + error = xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf, 776 + 0, NULL, ATTR_NOLOCK); 777 + if (!error && !(mode & FALLOC_FL_KEEP_SIZE) && 778 + offset + len > i_size_read(inode)) 779 + new_size = offset + len; 780 + 781 + /* Change file size if needed */ 782 + if (new_size) { 783 + bhv_vattr_t va; 784 + 785 + va.va_mask = XFS_AT_SIZE; 786 + va.va_size = new_size; 787 + error = xfs_setattr(ip, &va, ATTR_NOLOCK, NULL); 788 + } 789 + 790 + xfs_iunlock(ip, XFS_IOLOCK_EXCL); 791 + out_error: 792 + return error; 793 + } 813 794 814 795 const struct inode_operations xfs_inode_operations = { 815 796 .permission = xfs_vn_permission, ··· 861 760 .getxattr = xfs_vn_getxattr, 862 761 .listxattr = xfs_vn_listxattr, 863 762 .removexattr = xfs_vn_removexattr, 763 + .fallocate = 
xfs_vn_fallocate, 864 764 }; 865 765 866 766 const struct inode_operations xfs_dir_inode_operations = {
+3 -31
fs/xfs/linux-2.6/xfs_linux.h
··· 43 43 44 44 #include <kmem.h> 45 45 #include <mrlock.h> 46 - #include <spin.h> 47 46 #include <sv.h> 48 47 #include <mutex.h> 49 48 #include <sema.h> ··· 74 75 #include <linux/notifier.h> 75 76 #include <linux/delay.h> 76 77 #include <linux/log2.h> 78 + #include <linux/spinlock.h> 77 79 78 80 #include <asm/page.h> 79 81 #include <asm/div64.h> ··· 136 136 #define current_restore_flags_nested(sp, f) \ 137 137 (current->flags = ((current->flags & ~(f)) | (*(sp) & (f)))) 138 138 139 - #define NBPP PAGE_SIZE 140 - #define NDPP (1 << (PAGE_SHIFT - 9)) 139 + #define spinlock_destroy(lock) 141 140 142 141 #define NBBY 8 /* number of bits per byte */ 143 - #define NBPC PAGE_SIZE /* Number of bytes per click */ 144 - #define BPCSHIFT PAGE_SHIFT /* LOG2(NBPC) if exact */ 145 142 146 143 /* 147 144 * Size of block device i/o is parameterized here. 148 145 * Currently the system supports page-sized i/o. 149 146 */ 150 - #define BLKDEV_IOSHIFT BPCSHIFT 147 + #define BLKDEV_IOSHIFT PAGE_CACHE_SHIFT 151 148 #define BLKDEV_IOSIZE (1<<BLKDEV_IOSHIFT) 152 149 /* number of BB's per block device block */ 153 150 #define BLKDEV_BB BTOBB(BLKDEV_IOSIZE) 154 - 155 - /* bytes to clicks */ 156 - #define btoc(x) (((__psunsigned_t)(x)+(NBPC-1))>>BPCSHIFT) 157 - #define btoct(x) ((__psunsigned_t)(x)>>BPCSHIFT) 158 - #define btoc64(x) (((__uint64_t)(x)+(NBPC-1))>>BPCSHIFT) 159 - #define btoct64(x) ((__uint64_t)(x)>>BPCSHIFT) 160 - 161 - /* off_t bytes to clicks */ 162 - #define offtoc(x) (((__uint64_t)(x)+(NBPC-1))>>BPCSHIFT) 163 - #define offtoct(x) ((xfs_off_t)(x)>>BPCSHIFT) 164 - 165 - /* clicks to off_t bytes */ 166 - #define ctooff(x) ((xfs_off_t)(x)<<BPCSHIFT) 167 - 168 - /* clicks to bytes */ 169 - #define ctob(x) ((__psunsigned_t)(x)<<BPCSHIFT) 170 - #define btoct(x) ((__psunsigned_t)(x)>>BPCSHIFT) 171 - #define ctob64(x) ((__uint64_t)(x)<<BPCSHIFT) 172 - 173 - /* bytes to clicks */ 174 - #define btoc(x) (((__psunsigned_t)(x)+(NBPC-1))>>BPCSHIFT) 175 151 176 152 #define ENOATTR ENODATA /* Attribute not found */ 177 153 #define EWRONGFS EINVAL /* Mount with wrong filesystem type */ ··· 181 205 #define xfs_stack_trace() dump_stack() 182 206 #define xfs_itruncate_data(ip, off) \ 183 207 (-vmtruncate(vn_to_inode(XFS_ITOV(ip)), (off))) 184 - #define xfs_statvfs_fsid(statp, mp) \ 185 - ({ u64 id = huge_encode_dev((mp)->m_ddev_targp->bt_dev); \ 186 - __kernel_fsid_t *fsid = &(statp)->f_fsid; \ 187 - (fsid->val[0] = (u32)id, fsid->val[1] = (u32)(id >> 32)); }) 188 208 189 209 190 210 /* Move the kernel do_div definition off to one side */
+46 -76
fs/xfs/linux-2.6/xfs_lrw.c
··· 58 58 void 59 59 xfs_rw_enter_trace( 60 60 int tag, 61 - xfs_iocore_t *io, 61 + xfs_inode_t *ip, 62 62 void *data, 63 63 size_t segs, 64 64 loff_t offset, 65 65 int ioflags) 66 66 { 67 - xfs_inode_t *ip = XFS_IO_INODE(io); 68 - 69 67 if (ip->i_rwtrace == NULL) 70 68 return; 71 69 ktrace_enter(ip->i_rwtrace, ··· 76 78 (void *)((unsigned long)((offset >> 32) & 0xffffffff)), 77 79 (void *)((unsigned long)(offset & 0xffffffff)), 78 80 (void *)((unsigned long)ioflags), 79 - (void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)), 80 - (void *)((unsigned long)(io->io_new_size & 0xffffffff)), 81 + (void *)((unsigned long)((ip->i_new_size >> 32) & 0xffffffff)), 82 + (void *)((unsigned long)(ip->i_new_size & 0xffffffff)), 81 83 (void *)((unsigned long)current_pid()), 82 84 (void *)NULL, 83 85 (void *)NULL, ··· 87 89 88 90 void 89 91 xfs_inval_cached_trace( 90 - xfs_iocore_t *io, 92 + xfs_inode_t *ip, 91 93 xfs_off_t offset, 92 94 xfs_off_t len, 93 95 xfs_off_t first, 94 96 xfs_off_t last) 95 97 { 96 - xfs_inode_t *ip = XFS_IO_INODE(io); 97 98 98 99 if (ip->i_rwtrace == NULL) 99 100 return; ··· 128 131 */ 129 132 STATIC int 130 133 xfs_iozero( 131 - struct inode *ip, /* inode */ 134 + struct xfs_inode *ip, /* inode */ 132 135 loff_t pos, /* offset in file */ 133 136 size_t count) /* size of data to zero */ 134 137 { ··· 136 139 struct address_space *mapping; 137 140 int status; 138 141 139 - mapping = ip->i_mapping; 142 + mapping = ip->i_vnode->i_mapping; 140 143 do { 141 144 unsigned offset, bytes; 142 145 void *fsdata; ··· 202 205 203 206 if (unlikely(ioflags & IO_ISDIRECT)) { 204 207 xfs_buftarg_t *target = 205 - (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ? 208 + XFS_IS_REALTIME_INODE(ip) ? 206 209 mp->m_rtdev_targp : mp->m_ddev_targp; 207 210 if ((*offset & target->bt_smask) || 208 211 (size & target->bt_smask)) { ··· 243 246 244 247 if (unlikely(ioflags & IO_ISDIRECT)) { 245 248 if (VN_CACHED(vp)) 246 - ret = xfs_flushinval_pages(ip, 247 - ctooff(offtoct(*offset)), 248 - -1, FI_REMAPF_LOCKED); 249 + ret = xfs_flushinval_pages(ip, (*offset & PAGE_CACHE_MASK), 250 + -1, FI_REMAPF_LOCKED); 249 251 mutex_unlock(&inode->i_mutex); 250 252 if (ret) { 251 253 xfs_iunlock(ip, XFS_IOLOCK_SHARED); ··· 252 256 } 253 257 } 254 258 255 - xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore, 259 + xfs_rw_enter_trace(XFS_READ_ENTER, ip, 256 260 (void *)iovp, segs, *offset, ioflags); 257 261 258 262 iocb->ki_pos = *offset; ··· 297 301 return -error; 298 302 } 299 303 } 300 - xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, &ip->i_iocore, 304 + xfs_rw_enter_trace(XFS_SPLICE_READ_ENTER, ip, 301 305 pipe, count, *ppos, ioflags); 302 306 ret = generic_file_splice_read(infilp, ppos, pipe, count, flags); 303 307 if (ret > 0) ··· 319 323 { 320 324 bhv_vnode_t *vp = XFS_ITOV(ip); 321 325 xfs_mount_t *mp = ip->i_mount; 322 - xfs_iocore_t *io = &ip->i_iocore; 323 326 ssize_t ret; 324 327 struct inode *inode = outfilp->f_mapping->host; 325 328 xfs_fsize_t isize, new_size; ··· 345 350 346 351 xfs_ilock(ip, XFS_ILOCK_EXCL); 347 352 if (new_size > ip->i_size) 348 - io->io_new_size = new_size; 353 + ip->i_new_size = new_size; 349 354 xfs_iunlock(ip, XFS_ILOCK_EXCL); 350 355 351 - xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, &ip->i_iocore, 356 + xfs_rw_enter_trace(XFS_SPLICE_WRITE_ENTER, ip, 352 357 pipe, count, *ppos, ioflags); 353 358 ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags); 354 359 if (ret > 0) ··· 365 370 xfs_iunlock(ip, XFS_ILOCK_EXCL); 366 371 } 367 372 368 - if (io->io_new_size) { 373 + if 
(ip->i_new_size) { 369 374 xfs_ilock(ip, XFS_ILOCK_EXCL); 370 - io->io_new_size = 0; 375 + ip->i_new_size = 0; 371 376 if (ip->i_d.di_size > ip->i_size) 372 377 ip->i_d.di_size = ip->i_size; 373 378 xfs_iunlock(ip, XFS_ILOCK_EXCL); ··· 384 389 */ 385 390 STATIC int /* error (positive) */ 386 391 xfs_zero_last_block( 387 - struct inode *ip, 388 - xfs_iocore_t *io, 392 + xfs_inode_t *ip, 389 393 xfs_fsize_t offset, 390 394 xfs_fsize_t isize) 391 395 { 392 396 xfs_fileoff_t last_fsb; 393 - xfs_mount_t *mp = io->io_mount; 397 + xfs_mount_t *mp = ip->i_mount; 394 398 int nimaps; 395 399 int zero_offset; 396 400 int zero_len; 397 401 int error = 0; 398 402 xfs_bmbt_irec_t imap; 399 403 400 - ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0); 404 + ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0); 401 405 402 406 zero_offset = XFS_B_FSB_OFFSET(mp, isize); 403 407 if (zero_offset == 0) { ··· 409 415 410 416 last_fsb = XFS_B_TO_FSBT(mp, isize); 411 417 nimaps = 1; 412 - error = XFS_BMAPI(mp, NULL, io, last_fsb, 1, 0, NULL, 0, &imap, 418 + error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap, 413 419 &nimaps, NULL, NULL); 414 420 if (error) { 415 421 return error; ··· 427 433 * out sync. We need to drop the ilock while we do this so we 428 434 * don't deadlock when the buffer cache calls back to us. 429 435 */ 430 - XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD); 436 + xfs_iunlock(ip, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD); 431 437 432 438 zero_len = mp->m_sb.sb_blocksize - zero_offset; 433 439 if (isize + zero_len > offset) 434 440 zero_len = offset - isize; 435 441 error = xfs_iozero(ip, isize, zero_len); 436 442 437 - XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); 443 + xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); 438 444 ASSERT(error >= 0); 439 445 return error; 440 446 } ··· 452 458 453 459 int /* error (positive) */ 454 460 xfs_zero_eof( 455 - bhv_vnode_t *vp, 456 - xfs_iocore_t *io, 461 + xfs_inode_t *ip, 457 462 xfs_off_t offset, /* starting I/O offset */ 458 463 xfs_fsize_t isize) /* current inode size */ 459 464 { 460 - struct inode *ip = vn_to_inode(vp); 465 + xfs_mount_t *mp = ip->i_mount; 461 466 xfs_fileoff_t start_zero_fsb; 462 467 xfs_fileoff_t end_zero_fsb; 463 468 xfs_fileoff_t zero_count_fsb; 464 469 xfs_fileoff_t last_fsb; 465 470 xfs_fileoff_t zero_off; 466 471 xfs_fsize_t zero_len; 467 - xfs_mount_t *mp = io->io_mount; 468 472 int nimaps; 469 473 int error = 0; 470 474 xfs_bmbt_irec_t imap; 471 475 472 - ASSERT(ismrlocked(io->io_lock, MR_UPDATE)); 473 - ASSERT(ismrlocked(io->io_iolock, MR_UPDATE)); 476 + ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); 477 + ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE)); 474 478 ASSERT(offset > isize); 475 479 476 480 /* 477 481 * First handle zeroing the block on which isize resides. 478 482 * We only zero a part of that block so it is handled specially. 
479 483 */ 480 - error = xfs_zero_last_block(ip, io, offset, isize); 484 + error = xfs_zero_last_block(ip, offset, isize); 481 485 if (error) { 482 - ASSERT(ismrlocked(io->io_lock, MR_UPDATE)); 483 - ASSERT(ismrlocked(io->io_iolock, MR_UPDATE)); 486 + ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); 487 + ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE)); 484 488 return error; 485 489 } 486 490 ··· 506 514 while (start_zero_fsb <= end_zero_fsb) { 507 515 nimaps = 1; 508 516 zero_count_fsb = end_zero_fsb - start_zero_fsb + 1; 509 - error = XFS_BMAPI(mp, NULL, io, start_zero_fsb, zero_count_fsb, 517 + error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb, 510 518 0, NULL, 0, &imap, &nimaps, NULL, NULL); 511 519 if (error) { 512 - ASSERT(ismrlocked(io->io_lock, MR_UPDATE)); 513 - ASSERT(ismrlocked(io->io_iolock, MR_UPDATE)); 520 + ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); 521 + ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE)); 514 522 return error; 515 523 } 516 524 ASSERT(nimaps > 0); ··· 534 542 * Drop the inode lock while we're doing the I/O. 535 543 * We'll still have the iolock to protect us. 536 544 */ 537 - XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); 545 + xfs_iunlock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); 538 546 539 547 zero_off = XFS_FSB_TO_B(mp, start_zero_fsb); 540 548 zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount); ··· 550 558 start_zero_fsb = imap.br_startoff + imap.br_blockcount; 551 559 ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); 552 560 553 - XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); 561 + xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); 554 562 } 555 563 556 564 return 0; 557 565 558 566 out_lock: 559 - 560 - XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); 567 + xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); 561 568 ASSERT(error >= 0); 562 569 return error; 563 570 } ··· 578 587 xfs_mount_t *mp; 579 588 ssize_t ret = 0, error = 0; 580 589 xfs_fsize_t isize, new_size; 581 - xfs_iocore_t *io; 582 590 int iolock; 583 591 int eventsent = 0; 584 592 bhv_vrwlock_t locktype; ··· 597 607 if (count == 0) 598 608 return 0; 599 609 600 - io = &xip->i_iocore; 601 - mp = io->io_mount; 610 + mp = xip->i_mount; 602 611 603 612 xfs_wait_for_freeze(mp, SB_FREEZE_WRITE); 604 613 ··· 656 667 657 668 if (ioflags & IO_ISDIRECT) { 658 669 xfs_buftarg_t *target = 659 - (xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ? 670 + XFS_IS_REALTIME_INODE(xip) ? 
660 671 mp->m_rtdev_targp : mp->m_ddev_targp; 661 672 662 673 if ((pos & target->bt_smask) || (count & target->bt_smask)) { ··· 677 688 678 689 new_size = pos + count; 679 690 if (new_size > xip->i_size) 680 - io->io_new_size = new_size; 691 + xip->i_new_size = new_size; 681 692 682 693 if (likely(!(ioflags & IO_INVIS))) { 683 694 file_update_time(file); ··· 695 706 */ 696 707 697 708 if (pos > xip->i_size) { 698 - error = xfs_zero_eof(vp, io, pos, xip->i_size); 709 + error = xfs_zero_eof(xip, pos, xip->i_size); 699 710 if (error) { 700 711 xfs_iunlock(xip, XFS_ILOCK_EXCL); 701 712 goto out_unlock_internal; ··· 729 740 if ((ioflags & IO_ISDIRECT)) { 730 741 if (VN_CACHED(vp)) { 731 742 WARN_ON(need_i_mutex == 0); 732 - xfs_inval_cached_trace(io, pos, -1, 733 - ctooff(offtoct(pos)), -1); 743 + xfs_inval_cached_trace(xip, pos, -1, 744 + (pos & PAGE_CACHE_MASK), -1); 734 745 error = xfs_flushinval_pages(xip, 735 - ctooff(offtoct(pos)), 746 + (pos & PAGE_CACHE_MASK), 736 747 -1, FI_REMAPF_LOCKED); 737 748 if (error) 738 749 goto out_unlock_internal; ··· 740 751 741 752 if (need_i_mutex) { 742 753 /* demote the lock now the cached pages are gone */ 743 - XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL); 754 + xfs_ilock_demote(xip, XFS_IOLOCK_EXCL); 744 755 mutex_unlock(&inode->i_mutex); 745 756 746 757 iolock = XFS_IOLOCK_SHARED; ··· 748 759 need_i_mutex = 0; 749 760 } 750 761 751 - xfs_rw_enter_trace(XFS_DIOWR_ENTER, io, (void *)iovp, segs, 762 + xfs_rw_enter_trace(XFS_DIOWR_ENTER, xip, (void *)iovp, segs, 752 763 *offset, ioflags); 753 764 ret = generic_file_direct_write(iocb, iovp, 754 765 &segs, pos, offset, count, ocount); ··· 768 779 goto relock; 769 780 } 770 781 } else { 771 - xfs_rw_enter_trace(XFS_WRITE_ENTER, io, (void *)iovp, segs, 782 + xfs_rw_enter_trace(XFS_WRITE_ENTER, xip, (void *)iovp, segs, 772 783 *offset, ioflags); 773 784 ret = generic_file_buffered_write(iocb, iovp, segs, 774 785 pos, offset, count, ret); ··· 832 843 } 833 844 834 845 out_unlock_internal: 835 - if (io->io_new_size) { 846 + if (xip->i_new_size) { 836 847 xfs_ilock(xip, XFS_ILOCK_EXCL); 837 - io->io_new_size = 0; 848 + xip->i_new_size = 0; 838 849 /* 839 850 * If this was a direct or synchronous I/O that failed (such 840 851 * as ENOSPC) then part of the I/O may have been written to ··· 881 892 else 882 893 return (xfs_bioerror(bp)); 883 894 } 884 - } 885 - 886 - 887 - int 888 - xfs_bmap( 889 - xfs_inode_t *ip, 890 - xfs_off_t offset, 891 - ssize_t count, 892 - int flags, 893 - xfs_iomap_t *iomapp, 894 - int *niomaps) 895 - { 896 - xfs_iocore_t *io = &ip->i_iocore; 897 - 898 - ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG); 899 - ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) == 900 - ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0)); 901 - 902 - return xfs_iomap(io, offset, count, flags, iomapp, niomaps); 903 895 } 904 896 905 897 /*
+7 -9
fs/xfs/linux-2.6/xfs_lrw.h
··· 19 19 #define __XFS_LRW_H__ 20 20 21 21 struct xfs_mount; 22 - struct xfs_iocore; 23 22 struct xfs_inode; 24 23 struct xfs_bmbt_irec; 25 24 struct xfs_buf; ··· 59 60 #define XFS_IOMAP_UNWRITTEN 27 60 61 #define XFS_SPLICE_READ_ENTER 28 61 62 #define XFS_SPLICE_WRITE_ENTER 29 62 - extern void xfs_rw_enter_trace(int, struct xfs_iocore *, 63 - void *, size_t, loff_t, int); 64 - extern void xfs_inval_cached_trace(struct xfs_iocore *, 65 - xfs_off_t, xfs_off_t, xfs_off_t, xfs_off_t); 63 + extern void xfs_rw_enter_trace(int, struct xfs_inode *, 64 + void *, size_t, loff_t, int); 65 + extern void xfs_inval_cached_trace(struct xfs_inode *, 66 + xfs_off_t, xfs_off_t, xfs_off_t, xfs_off_t); 66 67 #else 67 - #define xfs_rw_enter_trace(tag, io, data, size, offset, ioflags) 68 - #define xfs_inval_cached_trace(io, offset, len, first, last) 68 + #define xfs_rw_enter_trace(tag, ip, data, size, offset, ioflags) 69 + #define xfs_inval_cached_trace(ip, offset, len, first, last) 69 70 #endif 70 71 71 72 extern int xfsbdstrat(struct xfs_mount *, struct xfs_buf *); 72 73 extern int xfs_bdstrat_cb(struct xfs_buf *); 73 74 extern int xfs_dev_is_read_only(struct xfs_mount *, char *); 74 75 75 - extern int xfs_zero_eof(struct inode *, struct xfs_iocore *, xfs_off_t, 76 - xfs_fsize_t); 76 + extern int xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t); 77 77 78 78 #endif /* __XFS_LRW_H__ */
+549 -23
fs/xfs/linux-2.6/xfs_super.c
··· 41 41 #include "xfs_rtalloc.h" 42 42 #include "xfs_error.h" 43 43 #include "xfs_itable.h" 44 + #include "xfs_fsops.h" 44 45 #include "xfs_rw.h" 45 46 #include "xfs_acl.h" 46 47 #include "xfs_attr.h" ··· 50 49 #include "xfs_vnodeops.h" 51 50 #include "xfs_vfsops.h" 52 51 #include "xfs_version.h" 52 + #include "xfs_log_priv.h" 53 + #include "xfs_trans_priv.h" 53 54 54 55 #include <linux/namei.h> 55 56 #include <linux/init.h> ··· 90 87 return args; 91 88 } 92 89 90 + #define MNTOPT_LOGBUFS "logbufs" /* number of XFS log buffers */ 91 + #define MNTOPT_LOGBSIZE "logbsize" /* size of XFS log buffers */ 92 + #define MNTOPT_LOGDEV "logdev" /* log device */ 93 + #define MNTOPT_RTDEV "rtdev" /* realtime I/O device */ 94 + #define MNTOPT_BIOSIZE "biosize" /* log2 of preferred buffered io size */ 95 + #define MNTOPT_WSYNC "wsync" /* safe-mode nfs compatible mount */ 96 + #define MNTOPT_INO64 "ino64" /* force inodes into 64-bit range */ 97 + #define MNTOPT_NOALIGN "noalign" /* turn off stripe alignment */ 98 + #define MNTOPT_SWALLOC "swalloc" /* turn on stripe width allocation */ 99 + #define MNTOPT_SUNIT "sunit" /* data volume stripe unit */ 100 + #define MNTOPT_SWIDTH "swidth" /* data volume stripe width */ 101 + #define MNTOPT_NOUUID "nouuid" /* ignore filesystem UUID */ 102 + #define MNTOPT_MTPT "mtpt" /* filesystem mount point */ 103 + #define MNTOPT_GRPID "grpid" /* group-ID from parent directory */ 104 + #define MNTOPT_NOGRPID "nogrpid" /* group-ID from current process */ 105 + #define MNTOPT_BSDGROUPS "bsdgroups" /* group-ID from parent directory */ 106 + #define MNTOPT_SYSVGROUPS "sysvgroups" /* group-ID from current process */ 107 + #define MNTOPT_ALLOCSIZE "allocsize" /* preferred allocation size */ 108 + #define MNTOPT_NORECOVERY "norecovery" /* don't run XFS recovery */ 109 + #define MNTOPT_BARRIER "barrier" /* use writer barriers for log write and 110 + * unwritten extent conversion */ 111 + #define MNTOPT_NOBARRIER "nobarrier" /* .. disable */ 112 + #define MNTOPT_OSYNCISOSYNC "osyncisosync" /* o_sync is REALLY o_sync */ 113 + #define MNTOPT_64BITINODE "inode64" /* inodes can be allocated anywhere */ 114 + #define MNTOPT_IKEEP "ikeep" /* do not free empty inode clusters */ 115 + #define MNTOPT_NOIKEEP "noikeep" /* free empty inode clusters */ 116 + #define MNTOPT_LARGEIO "largeio" /* report large I/O sizes in stat() */ 117 + #define MNTOPT_NOLARGEIO "nolargeio" /* do not report large I/O sizes 118 + * in stat(). 
*/ 119 + #define MNTOPT_ATTR2 "attr2" /* do use attr2 attribute format */ 120 + #define MNTOPT_NOATTR2 "noattr2" /* do not use attr2 attribute format */ 121 + #define MNTOPT_FILESTREAM "filestreams" /* use filestreams allocator */ 122 + #define MNTOPT_QUOTA "quota" /* disk quotas (user) */ 123 + #define MNTOPT_NOQUOTA "noquota" /* no quotas */ 124 + #define MNTOPT_USRQUOTA "usrquota" /* user quota enabled */ 125 + #define MNTOPT_GRPQUOTA "grpquota" /* group quota enabled */ 126 + #define MNTOPT_PRJQUOTA "prjquota" /* project quota enabled */ 127 + #define MNTOPT_UQUOTA "uquota" /* user quota (IRIX variant) */ 128 + #define MNTOPT_GQUOTA "gquota" /* group quota (IRIX variant) */ 129 + #define MNTOPT_PQUOTA "pquota" /* project quota (IRIX variant) */ 130 + #define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota limit enforcement */ 131 + #define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */ 132 + #define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */ 133 + #define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */ 134 + #define MNTOPT_DMAPI "dmapi" /* DMI enabled (DMAPI / XDSM) */ 135 + #define MNTOPT_XDSM "xdsm" /* DMI enabled (DMAPI / XDSM) */ 136 + #define MNTOPT_DMI "dmi" /* DMI enabled (DMAPI / XDSM) */ 137 + 138 + STATIC unsigned long 139 + suffix_strtoul(char *s, char **endp, unsigned int base) 140 + { 141 + int last, shift_left_factor = 0; 142 + char *value = s; 143 + 144 + last = strlen(value) - 1; 145 + if (value[last] == 'K' || value[last] == 'k') { 146 + shift_left_factor = 10; 147 + value[last] = '\0'; 148 + } 149 + if (value[last] == 'M' || value[last] == 'm') { 150 + shift_left_factor = 20; 151 + value[last] = '\0'; 152 + } 153 + if (value[last] == 'G' || value[last] == 'g') { 154 + shift_left_factor = 30; 155 + value[last] = '\0'; 156 + } 157 + 158 + return simple_strtoul((const char *)s, endp, base) << shift_left_factor; 159 + } 160 + 161 + STATIC int 162 + xfs_parseargs( 163 + struct xfs_mount *mp, 164 + char *options, 165 + struct xfs_mount_args *args, 166 + int update) 167 + { 168 + char *this_char, *value, *eov; 169 + int dsunit, dswidth, vol_dsunit, vol_dswidth; 170 + int iosize; 171 + int ikeep = 0; 172 + 173 + args->flags |= XFSMNT_BARRIER; 174 + args->flags2 |= XFSMNT2_COMPAT_IOSIZE; 175 + 176 + if (!options) 177 + goto done; 178 + 179 + iosize = dsunit = dswidth = vol_dsunit = vol_dswidth = 0; 180 + 181 + while ((this_char = strsep(&options, ",")) != NULL) { 182 + if (!*this_char) 183 + continue; 184 + if ((value = strchr(this_char, '=')) != NULL) 185 + *value++ = 0; 186 + 187 + if (!strcmp(this_char, MNTOPT_LOGBUFS)) { 188 + if (!value || !*value) { 189 + cmn_err(CE_WARN, 190 + "XFS: %s option requires an argument", 191 + this_char); 192 + return EINVAL; 193 + } 194 + args->logbufs = simple_strtoul(value, &eov, 10); 195 + } else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) { 196 + if (!value || !*value) { 197 + cmn_err(CE_WARN, 198 + "XFS: %s option requires an argument", 199 + this_char); 200 + return EINVAL; 201 + } 202 + args->logbufsize = suffix_strtoul(value, &eov, 10); 203 + } else if (!strcmp(this_char, MNTOPT_LOGDEV)) { 204 + if (!value || !*value) { 205 + cmn_err(CE_WARN, 206 + "XFS: %s option requires an argument", 207 + this_char); 208 + return EINVAL; 209 + } 210 + strncpy(args->logname, value, MAXNAMELEN); 211 + } else if (!strcmp(this_char, MNTOPT_MTPT)) { 212 + if (!value || !*value) { 213 + cmn_err(CE_WARN, 214 + "XFS: %s option requires an argument", 215 + this_char); 216 + return EINVAL; 217 + } 218 + 
strncpy(args->mtpt, value, MAXNAMELEN); 219 + } else if (!strcmp(this_char, MNTOPT_RTDEV)) { 220 + if (!value || !*value) { 221 + cmn_err(CE_WARN, 222 + "XFS: %s option requires an argument", 223 + this_char); 224 + return EINVAL; 225 + } 226 + strncpy(args->rtname, value, MAXNAMELEN); 227 + } else if (!strcmp(this_char, MNTOPT_BIOSIZE)) { 228 + if (!value || !*value) { 229 + cmn_err(CE_WARN, 230 + "XFS: %s option requires an argument", 231 + this_char); 232 + return EINVAL; 233 + } 234 + iosize = simple_strtoul(value, &eov, 10); 235 + args->flags |= XFSMNT_IOSIZE; 236 + args->iosizelog = (uint8_t) iosize; 237 + } else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) { 238 + if (!value || !*value) { 239 + cmn_err(CE_WARN, 240 + "XFS: %s option requires an argument", 241 + this_char); 242 + return EINVAL; 243 + } 244 + iosize = suffix_strtoul(value, &eov, 10); 245 + args->flags |= XFSMNT_IOSIZE; 246 + args->iosizelog = ffs(iosize) - 1; 247 + } else if (!strcmp(this_char, MNTOPT_GRPID) || 248 + !strcmp(this_char, MNTOPT_BSDGROUPS)) { 249 + mp->m_flags |= XFS_MOUNT_GRPID; 250 + } else if (!strcmp(this_char, MNTOPT_NOGRPID) || 251 + !strcmp(this_char, MNTOPT_SYSVGROUPS)) { 252 + mp->m_flags &= ~XFS_MOUNT_GRPID; 253 + } else if (!strcmp(this_char, MNTOPT_WSYNC)) { 254 + args->flags |= XFSMNT_WSYNC; 255 + } else if (!strcmp(this_char, MNTOPT_OSYNCISOSYNC)) { 256 + args->flags |= XFSMNT_OSYNCISOSYNC; 257 + } else if (!strcmp(this_char, MNTOPT_NORECOVERY)) { 258 + args->flags |= XFSMNT_NORECOVERY; 259 + } else if (!strcmp(this_char, MNTOPT_INO64)) { 260 + args->flags |= XFSMNT_INO64; 261 + #if !XFS_BIG_INUMS 262 + cmn_err(CE_WARN, 263 + "XFS: %s option not allowed on this system", 264 + this_char); 265 + return EINVAL; 266 + #endif 267 + } else if (!strcmp(this_char, MNTOPT_NOALIGN)) { 268 + args->flags |= XFSMNT_NOALIGN; 269 + } else if (!strcmp(this_char, MNTOPT_SWALLOC)) { 270 + args->flags |= XFSMNT_SWALLOC; 271 + } else if (!strcmp(this_char, MNTOPT_SUNIT)) { 272 + if (!value || !*value) { 273 + cmn_err(CE_WARN, 274 + "XFS: %s option requires an argument", 275 + this_char); 276 + return EINVAL; 277 + } 278 + dsunit = simple_strtoul(value, &eov, 10); 279 + } else if (!strcmp(this_char, MNTOPT_SWIDTH)) { 280 + if (!value || !*value) { 281 + cmn_err(CE_WARN, 282 + "XFS: %s option requires an argument", 283 + this_char); 284 + return EINVAL; 285 + } 286 + dswidth = simple_strtoul(value, &eov, 10); 287 + } else if (!strcmp(this_char, MNTOPT_64BITINODE)) { 288 + args->flags &= ~XFSMNT_32BITINODES; 289 + #if !XFS_BIG_INUMS 290 + cmn_err(CE_WARN, 291 + "XFS: %s option not allowed on this system", 292 + this_char); 293 + return EINVAL; 294 + #endif 295 + } else if (!strcmp(this_char, MNTOPT_NOUUID)) { 296 + args->flags |= XFSMNT_NOUUID; 297 + } else if (!strcmp(this_char, MNTOPT_BARRIER)) { 298 + args->flags |= XFSMNT_BARRIER; 299 + } else if (!strcmp(this_char, MNTOPT_NOBARRIER)) { 300 + args->flags &= ~XFSMNT_BARRIER; 301 + } else if (!strcmp(this_char, MNTOPT_IKEEP)) { 302 + ikeep = 1; 303 + args->flags &= ~XFSMNT_IDELETE; 304 + } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) { 305 + args->flags |= XFSMNT_IDELETE; 306 + } else if (!strcmp(this_char, MNTOPT_LARGEIO)) { 307 + args->flags2 &= ~XFSMNT2_COMPAT_IOSIZE; 308 + } else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) { 309 + args->flags2 |= XFSMNT2_COMPAT_IOSIZE; 310 + } else if (!strcmp(this_char, MNTOPT_ATTR2)) { 311 + args->flags |= XFSMNT_ATTR2; 312 + } else if (!strcmp(this_char, MNTOPT_NOATTR2)) { 313 + args->flags &= ~XFSMNT_ATTR2; 314 + } else if 
(!strcmp(this_char, MNTOPT_FILESTREAM)) { 315 + args->flags2 |= XFSMNT2_FILESTREAMS; 316 + } else if (!strcmp(this_char, MNTOPT_NOQUOTA)) { 317 + args->flags &= ~(XFSMNT_UQUOTAENF|XFSMNT_UQUOTA); 318 + args->flags &= ~(XFSMNT_GQUOTAENF|XFSMNT_GQUOTA); 319 + } else if (!strcmp(this_char, MNTOPT_QUOTA) || 320 + !strcmp(this_char, MNTOPT_UQUOTA) || 321 + !strcmp(this_char, MNTOPT_USRQUOTA)) { 322 + args->flags |= XFSMNT_UQUOTA | XFSMNT_UQUOTAENF; 323 + } else if (!strcmp(this_char, MNTOPT_QUOTANOENF) || 324 + !strcmp(this_char, MNTOPT_UQUOTANOENF)) { 325 + args->flags |= XFSMNT_UQUOTA; 326 + args->flags &= ~XFSMNT_UQUOTAENF; 327 + } else if (!strcmp(this_char, MNTOPT_PQUOTA) || 328 + !strcmp(this_char, MNTOPT_PRJQUOTA)) { 329 + args->flags |= XFSMNT_PQUOTA | XFSMNT_PQUOTAENF; 330 + } else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) { 331 + args->flags |= XFSMNT_PQUOTA; 332 + args->flags &= ~XFSMNT_PQUOTAENF; 333 + } else if (!strcmp(this_char, MNTOPT_GQUOTA) || 334 + !strcmp(this_char, MNTOPT_GRPQUOTA)) { 335 + args->flags |= XFSMNT_GQUOTA | XFSMNT_GQUOTAENF; 336 + } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) { 337 + args->flags |= XFSMNT_GQUOTA; 338 + args->flags &= ~XFSMNT_GQUOTAENF; 339 + } else if (!strcmp(this_char, MNTOPT_DMAPI)) { 340 + args->flags |= XFSMNT_DMAPI; 341 + } else if (!strcmp(this_char, MNTOPT_XDSM)) { 342 + args->flags |= XFSMNT_DMAPI; 343 + } else if (!strcmp(this_char, MNTOPT_DMI)) { 344 + args->flags |= XFSMNT_DMAPI; 345 + } else if (!strcmp(this_char, "ihashsize")) { 346 + cmn_err(CE_WARN, 347 + "XFS: ihashsize no longer used, option is deprecated."); 348 + } else if (!strcmp(this_char, "osyncisdsync")) { 349 + /* no-op, this is now the default */ 350 + cmn_err(CE_WARN, 351 + "XFS: osyncisdsync is now the default, option is deprecated."); 352 + } else if (!strcmp(this_char, "irixsgid")) { 353 + cmn_err(CE_WARN, 354 + "XFS: irixsgid is now a sysctl(2) variable, option is deprecated."); 355 + } else { 356 + cmn_err(CE_WARN, 357 + "XFS: unknown mount option [%s].", this_char); 358 + return EINVAL; 359 + } 360 + } 361 + 362 + if (args->flags & XFSMNT_NORECOVERY) { 363 + if ((mp->m_flags & XFS_MOUNT_RDONLY) == 0) { 364 + cmn_err(CE_WARN, 365 + "XFS: no-recovery mounts must be read-only."); 366 + return EINVAL; 367 + } 368 + } 369 + 370 + if ((args->flags & XFSMNT_NOALIGN) && (dsunit || dswidth)) { 371 + cmn_err(CE_WARN, 372 + "XFS: sunit and swidth options incompatible with the noalign option"); 373 + return EINVAL; 374 + } 375 + 376 + if ((args->flags & XFSMNT_GQUOTA) && (args->flags & XFSMNT_PQUOTA)) { 377 + cmn_err(CE_WARN, 378 + "XFS: cannot mount with both project and group quota"); 379 + return EINVAL; 380 + } 381 + 382 + if ((args->flags & XFSMNT_DMAPI) && *args->mtpt == '\0') { 383 + printk("XFS: %s option needs the mount point option as well\n", 384 + MNTOPT_DMAPI); 385 + return EINVAL; 386 + } 387 + 388 + if ((dsunit && !dswidth) || (!dsunit && dswidth)) { 389 + cmn_err(CE_WARN, 390 + "XFS: sunit and swidth must be specified together"); 391 + return EINVAL; 392 + } 393 + 394 + if (dsunit && (dswidth % dsunit != 0)) { 395 + cmn_err(CE_WARN, 396 + "XFS: stripe width (%d) must be a multiple of the stripe unit (%d)", 397 + dswidth, dsunit); 398 + return EINVAL; 399 + } 400 + 401 + /* 402 + * Applications using DMI filesystems often expect the 403 + * inode generation number to be monotonically increasing. 
404 + * If we delete inode chunks we break this assumption, so 405 + * keep unused inode chunks on disk for DMI filesystems 406 + * until we come up with a better solution. 407 + * Note that if "ikeep" or "noikeep" mount options are 408 + * supplied, then they are honored. 409 + */ 410 + if (!(args->flags & XFSMNT_DMAPI) && !ikeep) 411 + args->flags |= XFSMNT_IDELETE; 412 + 413 + if ((args->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) { 414 + if (dsunit) { 415 + args->sunit = dsunit; 416 + args->flags |= XFSMNT_RETERR; 417 + } else { 418 + args->sunit = vol_dsunit; 419 + } 420 + dswidth ? (args->swidth = dswidth) : 421 + (args->swidth = vol_dswidth); 422 + } else { 423 + args->sunit = args->swidth = 0; 424 + } 425 + 426 + done: 427 + if (args->flags & XFSMNT_32BITINODES) 428 + mp->m_flags |= XFS_MOUNT_SMALL_INUMS; 429 + if (args->flags2) 430 + args->flags |= XFSMNT_FLAGS2; 431 + return 0; 432 + } 433 + 434 + struct proc_xfs_info { 435 + int flag; 436 + char *str; 437 + }; 438 + 439 + STATIC int 440 + xfs_showargs( 441 + struct xfs_mount *mp, 442 + struct seq_file *m) 443 + { 444 + static struct proc_xfs_info xfs_info_set[] = { 445 + /* the few simple ones we can get from the mount struct */ 446 + { XFS_MOUNT_WSYNC, "," MNTOPT_WSYNC }, 447 + { XFS_MOUNT_INO64, "," MNTOPT_INO64 }, 448 + { XFS_MOUNT_NOALIGN, "," MNTOPT_NOALIGN }, 449 + { XFS_MOUNT_SWALLOC, "," MNTOPT_SWALLOC }, 450 + { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID }, 451 + { XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY }, 452 + { XFS_MOUNT_OSYNCISOSYNC, "," MNTOPT_OSYNCISOSYNC }, 453 + { XFS_MOUNT_ATTR2, "," MNTOPT_ATTR2 }, 454 + { XFS_MOUNT_FILESTREAMS, "," MNTOPT_FILESTREAM }, 455 + { XFS_MOUNT_DMAPI, "," MNTOPT_DMAPI }, 456 + { XFS_MOUNT_GRPID, "," MNTOPT_GRPID }, 457 + { 0, NULL } 458 + }; 459 + static struct proc_xfs_info xfs_info_unset[] = { 460 + /* the few simple ones we can get from the mount struct */ 461 + { XFS_MOUNT_IDELETE, "," MNTOPT_IKEEP }, 462 + { XFS_MOUNT_COMPAT_IOSIZE, "," MNTOPT_LARGEIO }, 463 + { XFS_MOUNT_BARRIER, "," MNTOPT_NOBARRIER }, 464 + { XFS_MOUNT_SMALL_INUMS, "," MNTOPT_64BITINODE }, 465 + { 0, NULL } 466 + }; 467 + struct proc_xfs_info *xfs_infop; 468 + 469 + for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) { 470 + if (mp->m_flags & xfs_infop->flag) 471 + seq_puts(m, xfs_infop->str); 472 + } 473 + for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) { 474 + if (!(mp->m_flags & xfs_infop->flag)) 475 + seq_puts(m, xfs_infop->str); 476 + } 477 + 478 + if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) 479 + seq_printf(m, "," MNTOPT_ALLOCSIZE "=%dk", 480 + (int)(1 << mp->m_writeio_log) >> 10); 481 + 482 + if (mp->m_logbufs > 0) 483 + seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs); 484 + if (mp->m_logbsize > 0) 485 + seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10); 486 + 487 + if (mp->m_logname) 488 + seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname); 489 + if (mp->m_rtname) 490 + seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname); 491 + 492 + if (mp->m_dalign > 0) 493 + seq_printf(m, "," MNTOPT_SUNIT "=%d", 494 + (int)XFS_FSB_TO_BB(mp, mp->m_dalign)); 495 + if (mp->m_swidth > 0) 496 + seq_printf(m, "," MNTOPT_SWIDTH "=%d", 497 + (int)XFS_FSB_TO_BB(mp, mp->m_swidth)); 498 + 499 + if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD)) 500 + seq_puts(m, "," MNTOPT_USRQUOTA); 501 + else if (mp->m_qflags & XFS_UQUOTA_ACCT) 502 + seq_puts(m, "," MNTOPT_UQUOTANOENF); 503 + 504 + if (mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD)) 505 + seq_puts(m, "," MNTOPT_PRJQUOTA); 506 + 
else if (mp->m_qflags & XFS_PQUOTA_ACCT) 507 + seq_puts(m, "," MNTOPT_PQUOTANOENF); 508 + 509 + if (mp->m_qflags & (XFS_GQUOTA_ACCT|XFS_OQUOTA_ENFD)) 510 + seq_puts(m, "," MNTOPT_GRPQUOTA); 511 + else if (mp->m_qflags & XFS_GQUOTA_ACCT) 512 + seq_puts(m, "," MNTOPT_GQUOTANOENF); 513 + 514 + if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT)) 515 + seq_puts(m, "," MNTOPT_NOQUOTA); 516 + 517 + return 0; 518 + } 93 519 __uint64_t 94 520 xfs_max_file_offset( 95 521 unsigned int blockshift) ··· 569 137 break; 570 138 case S_IFLNK: 571 139 inode->i_op = &xfs_symlink_inode_operations; 572 - if (inode->i_blocks) 140 + if (!(XFS_I(inode)->i_df.if_flags & XFS_IFINLINE)) 573 141 inode->i_mapping->a_ops = &xfs_address_space_operations; 574 142 break; 575 143 default: ··· 606 174 607 175 inode->i_generation = ip->i_d.di_gen; 608 176 i_size_write(inode, ip->i_d.di_size); 609 - inode->i_blocks = 610 - XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks); 611 177 inode->i_atime.tv_sec = ip->i_d.di_atime.t_sec; 612 178 inode->i_atime.tv_nsec = ip->i_d.di_atime.t_nsec; 613 179 inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec; ··· 764 334 blkdev_issue_flush(buftarg->bt_bdev, NULL); 765 335 } 766 336 337 + /* 338 + * XFS AIL push thread support 339 + */ 340 + void 341 + xfsaild_wakeup( 342 + xfs_mount_t *mp, 343 + xfs_lsn_t threshold_lsn) 344 + { 345 + mp->m_ail.xa_target = threshold_lsn; 346 + wake_up_process(mp->m_ail.xa_task); 347 + } 348 + 349 + int 350 + xfsaild( 351 + void *data) 352 + { 353 + xfs_mount_t *mp = (xfs_mount_t *)data; 354 + xfs_lsn_t last_pushed_lsn = 0; 355 + long tout = 0; 356 + 357 + while (!kthread_should_stop()) { 358 + if (tout) 359 + schedule_timeout_interruptible(msecs_to_jiffies(tout)); 360 + tout = 1000; 361 + 362 + /* swsusp */ 363 + try_to_freeze(); 364 + 365 + ASSERT(mp->m_log); 366 + if (XFS_FORCED_SHUTDOWN(mp)) 367 + continue; 368 + 369 + tout = xfsaild_push(mp, &last_pushed_lsn); 370 + } 371 + 372 + return 0; 373 + } /* xfsaild */ 374 + 375 + int 376 + xfsaild_start( 377 + xfs_mount_t *mp) 378 + { 379 + mp->m_ail.xa_target = 0; 380 + mp->m_ail.xa_task = kthread_run(xfsaild, mp, "xfsaild"); 381 + if (IS_ERR(mp->m_ail.xa_task)) 382 + return -PTR_ERR(mp->m_ail.xa_task); 383 + return 0; 384 + } 385 + 386 + void 387 + xfsaild_stop( 388 + xfs_mount_t *mp) 389 + { 390 + kthread_stop(mp->m_ail.xa_task); 391 + } 392 + 393 + 394 + 767 395 STATIC struct inode * 768 396 xfs_fs_alloc_inode( 769 397 struct super_block *sb) ··· 849 361 inode_init_once(vn_to_inode((bhv_vnode_t *)vnode)); 850 362 } 851 363 852 - STATIC int 364 + STATIC int __init 853 365 xfs_init_zones(void) 854 366 { 855 367 xfs_vnode_zone = kmem_zone_init_flags(sizeof(bhv_vnode_t), "xfs_vnode", ··· 898 410 { 899 411 int error = 0, flags = FLUSH_INODE; 900 412 901 - vn_trace_entry(XFS_I(inode), __FUNCTION__, 902 - (inst_t *)__return_address); 413 + xfs_itrace_entry(XFS_I(inode)); 903 414 if (sync) { 904 415 filemap_fdatawait(inode->i_mapping); 905 416 flags |= FLUSH_SYNC; ··· 925 438 * find an inode with di_mode == 0 but without IGET_CREATE set. 
926 439 */ 927 440 if (ip) { 928 - vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); 929 - 441 + xfs_itrace_entry(ip); 930 442 XFS_STATS_INC(vn_rele); 931 443 XFS_STATS_INC(vn_remove); 932 444 XFS_STATS_INC(vn_reclaim); ··· 1169 683 struct dentry *dentry, 1170 684 struct kstatfs *statp) 1171 685 { 1172 - return -xfs_statvfs(XFS_M(dentry->d_sb), statp, 1173 - vn_from_inode(dentry->d_inode)); 686 + struct xfs_mount *mp = XFS_M(dentry->d_sb); 687 + xfs_sb_t *sbp = &mp->m_sb; 688 + __uint64_t fakeinos, id; 689 + xfs_extlen_t lsize; 690 + 691 + statp->f_type = XFS_SB_MAGIC; 692 + statp->f_namelen = MAXNAMELEN - 1; 693 + 694 + id = huge_encode_dev(mp->m_ddev_targp->bt_dev); 695 + statp->f_fsid.val[0] = (u32)id; 696 + statp->f_fsid.val[1] = (u32)(id >> 32); 697 + 698 + xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT); 699 + 700 + spin_lock(&mp->m_sb_lock); 701 + statp->f_bsize = sbp->sb_blocksize; 702 + lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0; 703 + statp->f_blocks = sbp->sb_dblocks - lsize; 704 + statp->f_bfree = statp->f_bavail = 705 + sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); 706 + fakeinos = statp->f_bfree << sbp->sb_inopblog; 707 + #if XFS_BIG_INUMS 708 + fakeinos += mp->m_inoadd; 709 + #endif 710 + statp->f_files = 711 + MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER); 712 + if (mp->m_maxicount) 713 + #if XFS_BIG_INUMS 714 + if (!mp->m_inoadd) 715 + #endif 716 + statp->f_files = min_t(typeof(statp->f_files), 717 + statp->f_files, 718 + mp->m_maxicount); 719 + statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree); 720 + spin_unlock(&mp->m_sb_lock); 721 + 722 + XFS_QM_DQSTATVFS(XFS_I(dentry->d_inode), statp); 723 + return 0; 1174 724 } 1175 725 1176 726 STATIC int ··· 1226 704 return -error; 1227 705 } 1228 706 707 + /* 708 + * Second stage of a freeze. The data is already frozen so we only 709 + * need to take care of themetadata. Once that's done write a dummy 710 + * record to dirty the log in case of a crash while frozen. 
711 + */ 1229 712 STATIC void 1230 713 xfs_fs_lockfs( 1231 714 struct super_block *sb) 1232 715 { 1233 - xfs_freeze(XFS_M(sb)); 716 + struct xfs_mount *mp = XFS_M(sb); 717 + 718 + xfs_attr_quiesce(mp); 719 + xfs_fs_log_dummy(mp); 1234 720 } 1235 721 1236 722 STATIC int ··· 1309 779 struct inode *rootvp; 1310 780 struct xfs_mount *mp = NULL; 1311 781 struct xfs_mount_args *args = xfs_args_allocate(sb, silent); 1312 - struct kstatfs statvfs; 1313 782 int error; 1314 783 1315 784 mp = xfs_mount_init(); ··· 1336 807 if (error) 1337 808 goto fail_vfsop; 1338 809 1339 - error = xfs_statvfs(mp, &statvfs, NULL); 1340 - if (error) 1341 - goto fail_unmount; 1342 - 1343 810 sb->s_dirt = 1; 1344 - sb->s_magic = statvfs.f_type; 1345 - sb->s_blocksize = statvfs.f_bsize; 1346 - sb->s_blocksize_bits = ffs(statvfs.f_bsize) - 1; 811 + sb->s_magic = XFS_SB_MAGIC; 812 + sb->s_blocksize = mp->m_sb.sb_blocksize; 813 + sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1; 1347 814 sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits); 1348 815 sb->s_time_gran = 1; 1349 816 set_posix_acl_flag(sb); 1350 817 1351 - error = xfs_root(mp, &rootvp); 1352 - if (error) 818 + rootvp = igrab(mp->m_rootip->i_vnode); 819 + if (!rootvp) { 820 + error = ENOENT; 1353 821 goto fail_unmount; 822 + } 1354 823 1355 824 sb->s_root = d_alloc_root(vn_to_inode(rootvp)); 1356 825 if (!sb->s_root) { ··· 1368 841 goto fail_vnrele; 1369 842 } 1370 843 1371 - vn_trace_exit(XFS_I(sb->s_root->d_inode), __FUNCTION__, 1372 - (inst_t *)__return_address); 844 + xfs_itrace_exit(XFS_I(sb->s_root->d_inode)); 1373 845 1374 846 kmem_free(args, sizeof(*args)); 1375 847 return 0;
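
The superblock changes above replace the old xfs_freeze/xfs_statvfs indirection and also wire up the new AIL push daemon (xfsaild) via the generic kthread API. A minimal sketch of that start/loop/stop pattern, using hypothetical names (my_ail, my_aild) rather than the XFS ones, and assuming only the stock kthread and freezer interfaces:

	#include <linux/kthread.h>
	#include <linux/freezer.h>
	#include <linux/sched.h>
	#include <linux/jiffies.h>
	#include <linux/err.h>

	struct my_ail {
		struct task_struct	*task;
	};

	static int my_aild(void *data)
	{
		long	tout = 0;	/* milliseconds to sleep before the next push */

		while (!kthread_should_stop()) {
			if (tout)
				schedule_timeout_interruptible(msecs_to_jiffies(tout));
			try_to_freeze();	/* cooperate with suspend/hibernate */
			tout = 1000;		/* a real push routine would adjust this */
			/* ... push work goes here ... */
		}
		return 0;
	}

	static int my_aild_start(struct my_ail *ail)
	{
		ail->task = kthread_run(my_aild, ail, "my_aild");
		if (IS_ERR(ail->task))
			return PTR_ERR(ail->task);
		return 0;
	}

	static void my_aild_stop(struct my_ail *ail)
	{
		kthread_stop(ail->task);	/* wakes the thread and waits for it to exit */
	}
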
+43 -72
fs/xfs/linux-2.6/xfs_vnode.c
··· 40 40 #define vptosync(v) (&vsync[((unsigned long)v) % NVSYNC]) 41 41 static wait_queue_head_t vsync[NVSYNC]; 42 42 43 - void 43 + void __init 44 44 vn_init(void) 45 45 { 46 46 int i; ··· 82 82 xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ, f, l); 83 83 } 84 84 85 - bhv_vnode_t * 86 - vn_initialize( 87 - struct inode *inode) 88 - { 89 - bhv_vnode_t *vp = vn_from_inode(inode); 90 - 91 - XFS_STATS_INC(vn_active); 92 - XFS_STATS_INC(vn_alloc); 93 - 94 - ASSERT(VN_CACHED(vp) == 0); 95 - 96 - return vp; 97 - } 98 - 99 85 /* 100 - * Revalidate the Linux inode from the vattr. 86 + * Revalidate the Linux inode from the XFS inode. 101 87 * Note: i_size _not_ updated; we must hold the inode 102 88 * semaphore when doing that - callers responsibility. 103 89 */ 104 - void 105 - vn_revalidate_core( 106 - bhv_vnode_t *vp, 107 - bhv_vattr_t *vap) 90 + int 91 + vn_revalidate( 92 + bhv_vnode_t *vp) 108 93 { 109 - struct inode *inode = vn_to_inode(vp); 94 + struct inode *inode = vn_to_inode(vp); 95 + struct xfs_inode *ip = XFS_I(inode); 96 + struct xfs_mount *mp = ip->i_mount; 97 + unsigned long xflags; 110 98 111 - inode->i_mode = vap->va_mode; 112 - inode->i_nlink = vap->va_nlink; 113 - inode->i_uid = vap->va_uid; 114 - inode->i_gid = vap->va_gid; 115 - inode->i_blocks = vap->va_nblocks; 116 - inode->i_mtime = vap->va_mtime; 117 - inode->i_ctime = vap->va_ctime; 118 - if (vap->va_xflags & XFS_XFLAG_IMMUTABLE) 99 + xfs_itrace_entry(ip); 100 + 101 + if (XFS_FORCED_SHUTDOWN(mp)) 102 + return -EIO; 103 + 104 + xfs_ilock(ip, XFS_ILOCK_SHARED); 105 + inode->i_mode = ip->i_d.di_mode; 106 + inode->i_uid = ip->i_d.di_uid; 107 + inode->i_gid = ip->i_d.di_gid; 108 + inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec; 109 + inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec; 110 + inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec; 111 + inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec; 112 + 113 + xflags = xfs_ip2xflags(ip); 114 + if (xflags & XFS_XFLAG_IMMUTABLE) 119 115 inode->i_flags |= S_IMMUTABLE; 120 116 else 121 117 inode->i_flags &= ~S_IMMUTABLE; 122 - if (vap->va_xflags & XFS_XFLAG_APPEND) 118 + if (xflags & XFS_XFLAG_APPEND) 123 119 inode->i_flags |= S_APPEND; 124 120 else 125 121 inode->i_flags &= ~S_APPEND; 126 - if (vap->va_xflags & XFS_XFLAG_SYNC) 122 + if (xflags & XFS_XFLAG_SYNC) 127 123 inode->i_flags |= S_SYNC; 128 124 else 129 125 inode->i_flags &= ~S_SYNC; 130 - if (vap->va_xflags & XFS_XFLAG_NOATIME) 126 + if (xflags & XFS_XFLAG_NOATIME) 131 127 inode->i_flags |= S_NOATIME; 132 128 else 133 129 inode->i_flags &= ~S_NOATIME; 134 - } 130 + xfs_iunlock(ip, XFS_ILOCK_SHARED); 135 131 136 - /* 137 - * Revalidate the Linux inode from the vnode. 
138 - */ 139 - int 140 - __vn_revalidate( 141 - bhv_vnode_t *vp, 142 - bhv_vattr_t *vattr) 143 - { 144 - int error; 145 - 146 - vn_trace_entry(xfs_vtoi(vp), __FUNCTION__, (inst_t *)__return_address); 147 - vattr->va_mask = XFS_AT_STAT | XFS_AT_XFLAGS; 148 - error = xfs_getattr(xfs_vtoi(vp), vattr, 0); 149 - if (likely(!error)) { 150 - vn_revalidate_core(vp, vattr); 151 - xfs_iflags_clear(xfs_vtoi(vp), XFS_IMODIFIED); 152 - } 153 - return -error; 154 - } 155 - 156 - int 157 - vn_revalidate( 158 - bhv_vnode_t *vp) 159 - { 160 - bhv_vattr_t vattr; 161 - 162 - return __vn_revalidate(vp, &vattr); 132 + xfs_iflags_clear(ip, XFS_IMODIFIED); 133 + return 0; 163 134 } 164 135 165 136 /* ··· 150 179 return vp; 151 180 } 152 181 153 - #ifdef XFS_VNODE_TRACE 182 + #ifdef XFS_INODE_TRACE 154 183 155 184 /* 156 185 * Reference count of Linux inode if present, -1 if the xfs_inode ··· 182 211 * Vnode tracing code. 183 212 */ 184 213 void 185 - vn_trace_entry(xfs_inode_t *ip, const char *func, inst_t *ra) 214 + _xfs_itrace_entry(xfs_inode_t *ip, const char *func, inst_t *ra) 186 215 { 187 - KTRACE_ENTER(ip, VNODE_KTRACE_ENTRY, func, 0, ra); 216 + KTRACE_ENTER(ip, INODE_KTRACE_ENTRY, func, 0, ra); 188 217 } 189 218 190 219 void 191 - vn_trace_exit(xfs_inode_t *ip, const char *func, inst_t *ra) 220 + _xfs_itrace_exit(xfs_inode_t *ip, const char *func, inst_t *ra) 192 221 { 193 - KTRACE_ENTER(ip, VNODE_KTRACE_EXIT, func, 0, ra); 222 + KTRACE_ENTER(ip, INODE_KTRACE_EXIT, func, 0, ra); 194 223 } 195 224 196 225 void 197 - vn_trace_hold(xfs_inode_t *ip, char *file, int line, inst_t *ra) 226 + xfs_itrace_hold(xfs_inode_t *ip, char *file, int line, inst_t *ra) 198 227 { 199 - KTRACE_ENTER(ip, VNODE_KTRACE_HOLD, file, line, ra); 228 + KTRACE_ENTER(ip, INODE_KTRACE_HOLD, file, line, ra); 200 229 } 201 230 202 231 void 203 - vn_trace_ref(xfs_inode_t *ip, char *file, int line, inst_t *ra) 232 + _xfs_itrace_ref(xfs_inode_t *ip, char *file, int line, inst_t *ra) 204 233 { 205 - KTRACE_ENTER(ip, VNODE_KTRACE_REF, file, line, ra); 234 + KTRACE_ENTER(ip, INODE_KTRACE_REF, file, line, ra); 206 235 } 207 236 208 237 void 209 - vn_trace_rele(xfs_inode_t *ip, char *file, int line, inst_t *ra) 238 + xfs_itrace_rele(xfs_inode_t *ip, char *file, int line, inst_t *ra) 210 239 { 211 - KTRACE_ENTER(ip, VNODE_KTRACE_RELE, file, line, ra); 240 + KTRACE_ENTER(ip, INODE_KTRACE_RELE, file, line, ra); 212 241 } 213 - #endif /* XFS_VNODE_TRACE */ 242 + #endif /* XFS_INODE_TRACE */
+30 -28
fs/xfs/linux-2.6/xfs_vnode.h
··· 187 187 (VN_ISREG(vp) && ((mode) & (VSGID|(VEXEC>>3))) == VSGID) 188 188 189 189 extern void vn_init(void); 190 - extern bhv_vnode_t *vn_initialize(struct inode *); 191 190 extern int vn_revalidate(bhv_vnode_t *); 192 - extern int __vn_revalidate(bhv_vnode_t *, bhv_vattr_t *); 193 - extern void vn_revalidate_core(bhv_vnode_t *, bhv_vattr_t *); 194 191 195 192 /* 196 193 * Yeah, these don't take vnode anymore at all, all this should be ··· 207 210 */ 208 211 extern bhv_vnode_t *vn_hold(bhv_vnode_t *); 209 212 210 - #if defined(XFS_VNODE_TRACE) 213 + #if defined(XFS_INODE_TRACE) 211 214 #define VN_HOLD(vp) \ 212 215 ((void)vn_hold(vp), \ 213 - vn_trace_hold(xfs_vtoi(vp), __FILE__, __LINE__, (inst_t *)__return_address)) 216 + xfs_itrace_hold(xfs_vtoi(vp), __FILE__, __LINE__, (inst_t *)__return_address)) 214 217 #define VN_RELE(vp) \ 215 - (vn_trace_rele(xfs_vtoi(vp), __FILE__, __LINE__, (inst_t *)__return_address), \ 218 + (xfs_itrace_rele(xfs_vtoi(vp), __FILE__, __LINE__, (inst_t *)__return_address), \ 216 219 iput(vn_to_inode(vp))) 217 220 #else 218 221 #define VN_HOLD(vp) ((void)vn_hold(vp)) ··· 235 238 /* 236 239 * Dealing with bad inodes 237 240 */ 238 - static inline void vn_mark_bad(bhv_vnode_t *vp) 239 - { 240 - make_bad_inode(vn_to_inode(vp)); 241 - } 242 - 243 241 static inline int VN_BAD(bhv_vnode_t *vp) 244 242 { 245 243 return is_bad_inode(vn_to_inode(vp)); ··· 288 296 /* 289 297 * Tracking vnode activity. 290 298 */ 291 - #if defined(XFS_VNODE_TRACE) 299 + #if defined(XFS_INODE_TRACE) 292 300 293 - #define VNODE_TRACE_SIZE 16 /* number of trace entries */ 294 - #define VNODE_KTRACE_ENTRY 1 295 - #define VNODE_KTRACE_EXIT 2 296 - #define VNODE_KTRACE_HOLD 3 297 - #define VNODE_KTRACE_REF 4 298 - #define VNODE_KTRACE_RELE 5 301 + #define INODE_TRACE_SIZE 16 /* number of trace entries */ 302 + #define INODE_KTRACE_ENTRY 1 303 + #define INODE_KTRACE_EXIT 2 304 + #define INODE_KTRACE_HOLD 3 305 + #define INODE_KTRACE_REF 4 306 + #define INODE_KTRACE_RELE 5 299 307 300 - extern void vn_trace_entry(struct xfs_inode *, const char *, inst_t *); 301 - extern void vn_trace_exit(struct xfs_inode *, const char *, inst_t *); 302 - extern void vn_trace_hold(struct xfs_inode *, char *, int, inst_t *); 303 - extern void vn_trace_ref(struct xfs_inode *, char *, int, inst_t *); 304 - extern void vn_trace_rele(struct xfs_inode *, char *, int, inst_t *); 308 + extern void _xfs_itrace_entry(struct xfs_inode *, const char *, inst_t *); 309 + extern void _xfs_itrace_exit(struct xfs_inode *, const char *, inst_t *); 310 + extern void xfs_itrace_hold(struct xfs_inode *, char *, int, inst_t *); 311 + extern void _xfs_itrace_ref(struct xfs_inode *, char *, int, inst_t *); 312 + extern void xfs_itrace_rele(struct xfs_inode *, char *, int, inst_t *); 313 + #define xfs_itrace_entry(ip) \ 314 + _xfs_itrace_entry(ip, __FUNCTION__, (inst_t *)__return_address) 315 + #define xfs_itrace_exit(ip) \ 316 + _xfs_itrace_exit(ip, __FUNCTION__, (inst_t *)__return_address) 317 + #define xfs_itrace_exit_tag(ip, tag) \ 318 + _xfs_itrace_exit(ip, tag, (inst_t *)__return_address) 319 + #define xfs_itrace_ref(ip) \ 320 + _xfs_itrace_ref(ip, __FILE__, __LINE__, (inst_t *)__return_address) 321 + 305 322 #else 306 - #define vn_trace_entry(a,b,c) 307 - #define vn_trace_exit(a,b,c) 308 - #define vn_trace_hold(a,b,c,d) 309 - #define vn_trace_ref(a,b,c,d) 310 - #define vn_trace_rele(a,b,c,d) 323 + #define xfs_itrace_entry(a) 324 + #define xfs_itrace_exit(a) 325 + #define xfs_itrace_exit_tag(a, b) 326 + #define xfs_itrace_hold(a, 
b, c, d) 327 + #define xfs_itrace_ref(a) 328 + #define xfs_itrace_rele(a, b, c, d) 311 329 #endif 312 330 313 331 #endif /* __XFS_VNODE_H__ */
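
The renamed trace macros above capture the caller's function name and return address at each call site and compile away when tracing is off. A sketch of that macro-wrapping pattern with hypothetical names; note it substitutes the GCC builtin __builtin_return_address(0) for XFS's own __return_address helper:

	#ifdef MY_TRACE
	struct my_obj;
	extern void _my_trace_entry(struct my_obj *obj, const char *func, void *ra);

	/* record the calling function and its return address at the call site */
	#define my_trace_entry(obj) \
		_my_trace_entry((obj), __FUNCTION__, __builtin_return_address(0))
	#else
	/* no-op when tracing is disabled */
	#define my_trace_entry(obj)	((void)0)
	#endif
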
+5 -7
fs/xfs/quota/xfs_dquot.c
··· 1209 1209 xfs_buf_t *bp; 1210 1210 xfs_disk_dquot_t *ddqp; 1211 1211 int error; 1212 - SPLDECL(s); 1213 1212 1214 1213 ASSERT(XFS_DQ_IS_LOCKED(dqp)); 1215 1214 ASSERT(XFS_DQ_IS_FLUSH_LOCKED(dqp)); ··· 1269 1270 mp = dqp->q_mount; 1270 1271 1271 1272 /* lsn is 64 bits */ 1272 - AIL_LOCK(mp, s); 1273 + spin_lock(&mp->m_ail_lock); 1273 1274 dqp->q_logitem.qli_flush_lsn = dqp->q_logitem.qli_item.li_lsn; 1274 - AIL_UNLOCK(mp, s); 1275 + spin_unlock(&mp->m_ail_lock); 1275 1276 1276 1277 /* 1277 1278 * Attach an iodone routine so that we can remove this dquot from the ··· 1317 1318 xfs_dq_logitem_t *qip) 1318 1319 { 1319 1320 xfs_dquot_t *dqp; 1320 - SPLDECL(s); 1321 1321 1322 1322 dqp = qip->qli_dquot; 1323 1323 ··· 1331 1333 if ((qip->qli_item.li_flags & XFS_LI_IN_AIL) && 1332 1334 qip->qli_item.li_lsn == qip->qli_flush_lsn) { 1333 1335 1334 - AIL_LOCK(dqp->q_mount, s); 1336 + spin_lock(&dqp->q_mount->m_ail_lock); 1335 1337 /* 1336 1338 * xfs_trans_delete_ail() drops the AIL lock. 1337 1339 */ 1338 1340 if (qip->qli_item.li_lsn == qip->qli_flush_lsn) 1339 1341 xfs_trans_delete_ail(dqp->q_mount, 1340 - (xfs_log_item_t*)qip, s); 1342 + (xfs_log_item_t*)qip); 1341 1343 else 1342 - AIL_UNLOCK(dqp->q_mount, s); 1344 + spin_unlock(&dqp->q_mount->m_ail_lock); 1343 1345 } 1344 1346 1345 1347 /*
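
This dquot flush path, and most of the quota and log-item files that follow, apply the same mechanical conversion: the IRIX-derived SPLDECL/AIL_LOCK/XFS_SB_LOCK wrappers become plain Linux spinlock calls with no saved-flags variable. A minimal sketch of the pattern, with a made-up structure standing in for xfs_mount:

	#include <linux/spinlock.h>

	struct my_mount {
		spinlock_t	sb_lock;	/* was lock_t; spin_lock_init() at setup time */
		unsigned int	qflags;
	};

	/* before: SPLDECL(s); s = XFS_SB_LOCK(mp); ...; XFS_SB_UNLOCK(mp, s); */
	static void my_set_qflags(struct my_mount *mp, unsigned int qflags)
	{
		spin_lock(&mp->sb_lock);	/* no flags argument needed */
		mp->qflags = qflags;
		spin_unlock(&mp->sb_lock);
	}
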
-5
fs/xfs/quota/xfs_dquot.h
··· 123 123 vsema(&((dqp)->q_flock)); \ 124 124 (dqp)->dq_flags &= ~(XFS_DQ_FLOCKED); } 125 125 126 - #define XFS_DQ_PINLOCK(dqp) mutex_spinlock( \ 127 - &(XFS_DQ_TO_QINF(dqp)->qi_pinlock)) 128 - #define XFS_DQ_PINUNLOCK(dqp, s) mutex_spinunlock( \ 129 - &(XFS_DQ_TO_QINF(dqp)->qi_pinlock), s) 130 - 131 126 #define XFS_DQ_IS_FLUSH_LOCKED(dqp) (issemalocked(&((dqp)->q_flock))) 132 127 #define XFS_DQ_IS_ON_FREELIST(dqp) ((dqp)->dq_flnext != (dqp)) 133 128 #define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY)
+11 -16
fs/xfs/quota/xfs_dquot_item.c
··· 94 94 xfs_qm_dquot_logitem_pin( 95 95 xfs_dq_logitem_t *logitem) 96 96 { 97 - unsigned long s; 98 97 xfs_dquot_t *dqp; 99 98 100 99 dqp = logitem->qli_dquot; 101 100 ASSERT(XFS_DQ_IS_LOCKED(dqp)); 102 - s = XFS_DQ_PINLOCK(dqp); 101 + spin_lock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock)); 103 102 dqp->q_pincount++; 104 - XFS_DQ_PINUNLOCK(dqp, s); 103 + spin_unlock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock)); 105 104 } 106 105 107 106 /* ··· 114 115 xfs_dq_logitem_t *logitem, 115 116 int stale) 116 117 { 117 - unsigned long s; 118 118 xfs_dquot_t *dqp; 119 119 120 120 dqp = logitem->qli_dquot; 121 121 ASSERT(dqp->q_pincount > 0); 122 - s = XFS_DQ_PINLOCK(dqp); 122 + spin_lock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock)); 123 123 dqp->q_pincount--; 124 124 if (dqp->q_pincount == 0) { 125 125 sv_broadcast(&dqp->q_pinwait); 126 126 } 127 - XFS_DQ_PINUNLOCK(dqp, s); 127 + spin_unlock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock)); 128 128 } 129 129 130 130 /* ARGSUSED */ ··· 187 189 xfs_qm_dqunpin_wait( 188 190 xfs_dquot_t *dqp) 189 191 { 190 - SPLDECL(s); 191 - 192 192 ASSERT(XFS_DQ_IS_LOCKED(dqp)); 193 193 if (dqp->q_pincount == 0) { 194 194 return; ··· 196 200 * Give the log a push so we don't wait here too long. 197 201 */ 198 202 xfs_log_force(dqp->q_mount, (xfs_lsn_t)0, XFS_LOG_FORCE); 199 - s = XFS_DQ_PINLOCK(dqp); 203 + spin_lock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock)); 200 204 if (dqp->q_pincount == 0) { 201 - XFS_DQ_PINUNLOCK(dqp, s); 205 + spin_unlock(&(XFS_DQ_TO_QINF(dqp)->qi_pinlock)); 202 206 return; 203 207 } 204 208 sv_wait(&(dqp->q_pinwait), PINOD, ··· 212 216 * If so, we want to push it out to help us take this item off the AIL as soon 213 217 * as possible. 214 218 * 215 - * We must not be holding the AIL_LOCK at this point. Calling incore() to 216 - * search the buffer cache can be a time consuming thing, and AIL_LOCK is a 219 + * We must not be holding the AIL lock at this point. Calling incore() to 220 + * search the buffer cache can be a time consuming thing, and AIL lock is a 217 221 * spinlock. 218 222 */ 219 223 STATIC void ··· 318 322 * want to do that now since we might sleep in the device 319 323 * strategy routine. We also don't want to grab the buffer lock 320 324 * here because we'd like not to call into the buffer cache 321 - * while holding the AIL_LOCK. 325 + * while holding the AIL lock. 322 326 * Make sure to only return PUSHBUF if we set pushbuf_flag 323 327 * ourselves. If someone else is doing it then we don't 324 328 * want to go to the push routine and duplicate their efforts. ··· 558 562 xfs_lsn_t lsn) 559 563 { 560 564 xfs_qoff_logitem_t *qfs; 561 - SPLDECL(s); 562 565 563 566 qfs = qfe->qql_start_lip; 564 - AIL_LOCK(qfs->qql_item.li_mountp,s); 567 + spin_lock(&qfs->qql_item.li_mountp->m_ail_lock); 565 568 /* 566 569 * Delete the qoff-start logitem from the AIL. 567 570 * xfs_trans_delete_ail() drops the AIL lock. 568 571 */ 569 - xfs_trans_delete_ail(qfs->qql_item.li_mountp, (xfs_log_item_t *)qfs, s); 572 + xfs_trans_delete_ail(qfs->qql_item.li_mountp, (xfs_log_item_t *)qfs); 570 573 kmem_free(qfs, sizeof(xfs_qoff_logitem_t)); 571 574 kmem_free(qfe, sizeof(xfs_qoff_logitem_t)); 572 575 return (xfs_lsn_t)-1;
+6 -8
fs/xfs/quota/xfs_qm.c
··· 310 310 xfs_mount_t *mp, 311 311 int mfsi_flags) 312 312 { 313 - unsigned long s; 314 313 int error = 0; 315 314 uint sbf; 316 315 ··· 366 367 367 368 write_changes: 368 369 /* 369 - * We actually don't have to acquire the SB_LOCK at all. 370 + * We actually don't have to acquire the m_sb_lock at all. 370 371 * This can only be called from mount, and that's single threaded. XXX 371 372 */ 372 - s = XFS_SB_LOCK(mp); 373 + spin_lock(&mp->m_sb_lock); 373 374 sbf = mp->m_sb.sb_qflags; 374 375 mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL; 375 - XFS_SB_UNLOCK(mp, s); 376 + spin_unlock(&mp->m_sb_lock); 376 377 377 378 if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) { 378 379 if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) { ··· 1138 1139 return error; 1139 1140 } 1140 1141 1141 - spinlock_init(&qinf->qi_pinlock, "xfs_qinf_pin"); 1142 + spin_lock_init(&qinf->qi_pinlock); 1142 1143 xfs_qm_list_init(&qinf->qi_dqlist, "mpdqlist", 0); 1143 1144 qinf->qi_dqreclaims = 0; 1144 1145 ··· 1369 1370 { 1370 1371 xfs_trans_t *tp; 1371 1372 int error; 1372 - unsigned long s; 1373 1373 int committed; 1374 1374 1375 1375 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE); ··· 1400 1402 * sbfields arg may contain fields other than *QUOTINO; 1401 1403 * VERSIONNUM for example. 1402 1404 */ 1403 - s = XFS_SB_LOCK(mp); 1405 + spin_lock(&mp->m_sb_lock); 1404 1406 if (flags & XFS_QMOPT_SBVERSION) { 1405 1407 #if defined(DEBUG) && defined(XFS_LOUD_RECOVERY) 1406 1408 unsigned oldv = mp->m_sb.sb_versionnum; ··· 1427 1429 mp->m_sb.sb_uquotino = (*ip)->i_ino; 1428 1430 else 1429 1431 mp->m_sb.sb_gquotino = (*ip)->i_ino; 1430 - XFS_SB_UNLOCK(mp, s); 1432 + spin_unlock(&mp->m_sb_lock); 1431 1433 xfs_mod_sb(tp, sbfields); 1432 1434 1433 1435 if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
+3 -3
fs/xfs/quota/xfs_qm.h
··· 52 52 /* 53 53 * Dquot hashtable constants/threshold values. 54 54 */ 55 - #define XFS_QM_HASHSIZE_LOW (NBPP / sizeof(xfs_dqhash_t)) 56 - #define XFS_QM_HASHSIZE_HIGH ((NBPP * 4) / sizeof(xfs_dqhash_t)) 55 + #define XFS_QM_HASHSIZE_LOW (PAGE_SIZE / sizeof(xfs_dqhash_t)) 56 + #define XFS_QM_HASHSIZE_HIGH ((PAGE_SIZE * 4) / sizeof(xfs_dqhash_t)) 57 57 58 58 /* 59 59 * This defines the unit of allocation of dquots. ··· 106 106 typedef struct xfs_quotainfo { 107 107 xfs_inode_t *qi_uquotaip; /* user quota inode */ 108 108 xfs_inode_t *qi_gquotaip; /* group quota inode */ 109 - lock_t qi_pinlock; /* dquot pinning mutex */ 109 + spinlock_t qi_pinlock; /* dquot pinning lock */ 110 110 xfs_dqlist_t qi_dqlist; /* all dquots in filesys */ 111 111 int qi_dqreclaims; /* a change here indicates 112 112 a removal in the dqlist */
+8 -11
fs/xfs/quota/xfs_qm_syscalls.c
··· 200 200 boolean_t force) 201 201 { 202 202 uint dqtype; 203 - unsigned long s; 204 203 int error; 205 204 uint inactivate_flags; 206 205 xfs_qoff_logitem_t *qoffstart; ··· 236 237 if ((flags & XFS_ALL_QUOTA_ACCT) == 0) { 237 238 mp->m_qflags &= ~(flags); 238 239 239 - s = XFS_SB_LOCK(mp); 240 + spin_lock(&mp->m_sb_lock); 240 241 mp->m_sb.sb_qflags = mp->m_qflags; 241 - XFS_SB_UNLOCK(mp, s); 242 + spin_unlock(&mp->m_sb_lock); 242 243 mutex_unlock(&(XFS_QI_QOFFLOCK(mp))); 243 244 244 245 /* XXX what to do if error ? Revert back to old vals incore ? */ ··· 414 415 uint flags) 415 416 { 416 417 int error; 417 - unsigned long s; 418 418 uint qf; 419 419 uint accflags; 420 420 __int64_t sbflags; ··· 466 468 * Change sb_qflags on disk but not incore mp->qflags 467 469 * if this is the root filesystem. 468 470 */ 469 - s = XFS_SB_LOCK(mp); 471 + spin_lock(&mp->m_sb_lock); 470 472 qf = mp->m_sb.sb_qflags; 471 473 mp->m_sb.sb_qflags = qf | flags; 472 - XFS_SB_UNLOCK(mp, s); 474 + spin_unlock(&mp->m_sb_lock); 473 475 474 476 /* 475 477 * There's nothing to change if it's the same. ··· 813 815 { 814 816 xfs_trans_t *tp; 815 817 int error; 816 - unsigned long s; 817 818 xfs_qoff_logitem_t *qoffi=NULL; 818 819 uint oldsbqflag=0; 819 820 ··· 829 832 qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT); 830 833 xfs_trans_log_quotaoff_item(tp, qoffi); 831 834 832 - s = XFS_SB_LOCK(mp); 835 + spin_lock(&mp->m_sb_lock); 833 836 oldsbqflag = mp->m_sb.sb_qflags; 834 837 mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL; 835 - XFS_SB_UNLOCK(mp, s); 838 + spin_unlock(&mp->m_sb_lock); 836 839 837 840 xfs_mod_sb(tp, XFS_SB_QFLAGS); 838 841 ··· 851 854 * No one else is modifying sb_qflags, so this is OK. 852 855 * We still hold the quotaofflock. 853 856 */ 854 - s = XFS_SB_LOCK(mp); 857 + spin_lock(&mp->m_sb_lock); 855 858 mp->m_sb.sb_qflags = oldsbqflag; 856 - XFS_SB_UNLOCK(mp, s); 859 + spin_unlock(&mp->m_sb_lock); 857 860 } 858 861 *qoffstartp = qoffi; 859 862 return (error);
+6 -1
fs/xfs/support/debug.c
··· 17 17 */ 18 18 #include <xfs.h> 19 19 #include "debug.h" 20 - #include "spin.h" 21 20 22 21 static char message[1024]; /* keep it off the stack */ 23 22 static DEFINE_SPINLOCK(xfs_err_lock); ··· 79 80 { 80 81 printk("Assertion failed: %s, file: %s, line: %d\n", expr, file, line); 81 82 BUG(); 83 + } 84 + 85 + void 86 + xfs_hex_dump(void *p, int length) 87 + { 88 + print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_OFFSET, 16, 1, p, length, 1); 82 89 }
+2 -6
fs/xfs/support/ktrace.c
··· 21 21 static kmem_zone_t *ktrace_ent_zone; 22 22 static int ktrace_zentries; 23 23 24 - void 24 + void __init 25 25 ktrace_init(int zentries) 26 26 { 27 27 ktrace_zentries = zentries; ··· 36 36 ASSERT(ktrace_ent_zone); 37 37 } 38 38 39 - void 39 + void __exit 40 40 ktrace_uninit(void) 41 41 { 42 42 kmem_zone_destroy(ktrace_hdr_zone); ··· 90 90 return NULL; 91 91 } 92 92 93 - spinlock_init(&(ktp->kt_lock), "kt_lock"); 94 - 95 93 ktp->kt_entries = ktep; 96 94 ktp->kt_nentries = nentries; 97 95 ktp->kt_index = 0; ··· 111 113 112 114 if (ktp == (ktrace_t *)NULL) 113 115 return; 114 - 115 - spinlock_destroy(&ktp->kt_lock); 116 116 117 117 /* 118 118 * Special treatment for the Vnode trace buffer.
-3
fs/xfs/support/ktrace.h
··· 18 18 #ifndef __XFS_SUPPORT_KTRACE_H__ 19 19 #define __XFS_SUPPORT_KTRACE_H__ 20 20 21 - #include <spin.h> 22 - 23 21 /* 24 22 * Trace buffer entry structure. 25 23 */ ··· 29 31 * Trace buffer header structure. 30 32 */ 31 33 typedef struct ktrace { 32 - lock_t kt_lock; /* mutex to guard counters */ 33 34 int kt_nentries; /* number of entries in trace buf */ 34 35 int kt_index; /* current index in entries */ 35 36 int kt_rollover;
+1 -1
fs/xfs/support/uuid.c
··· 133 133 mutex_unlock(&uuid_monitor); 134 134 } 135 135 136 - void 136 + void __init 137 137 uuid_init(void) 138 138 { 139 139 mutex_init(&uuid_monitor);
+1 -1
fs/xfs/xfs.h
··· 37 37 #define XFS_LOG_TRACE 1 38 38 #define XFS_RW_TRACE 1 39 39 #define XFS_BUF_TRACE 1 40 - #define XFS_VNODE_TRACE 1 40 + #define XFS_INODE_TRACE 1 41 41 #define XFS_FILESTREAMS_TRACE 1 42 42 #endif 43 43
+2 -28
fs/xfs/xfs_acl.c
··· 392 392 } 393 393 394 394 /* 395 - * The access control process to determine the access permission: 396 - * if uid == file owner id, use the file owner bits. 397 - * if gid == file owner group id, use the file group bits. 398 - * scan ACL for a matching user or group, and use matched entry 399 - * permission. Use total permissions of all matching group entries, 400 - * until all acl entries are exhausted. The final permission produced 401 - * by matching acl entry or entries needs to be & with group permission. 402 - * if not owner, owning group, or matching entry in ACL, use file 403 - * other bits. 404 - */ 405 - STATIC int 406 - xfs_acl_capability_check( 407 - mode_t mode, 408 - cred_t *cr) 409 - { 410 - if ((mode & ACL_READ) && !capable_cred(cr, CAP_DAC_READ_SEARCH)) 411 - return EACCES; 412 - if ((mode & ACL_WRITE) && !capable_cred(cr, CAP_DAC_OVERRIDE)) 413 - return EACCES; 414 - if ((mode & ACL_EXECUTE) && !capable_cred(cr, CAP_DAC_OVERRIDE)) 415 - return EACCES; 416 - 417 - return 0; 418 - } 419 - 420 - /* 421 395 * Note: cr is only used here for the capability check if the ACL test fails. 422 396 * It is not used to find out the credentials uid or groups etc, as was 423 397 * done in IRIX. It is assumed that the uid and groups for the current ··· 412 438 413 439 matched.ae_tag = 0; /* Invalid type */ 414 440 matched.ae_perm = 0; 415 - md >>= 6; /* Normalize the bits for comparison */ 416 441 417 442 for (i = 0; i < fap->acl_cnt; i++) { 418 443 /* ··· 493 520 break; 494 521 } 495 522 496 - return xfs_acl_capability_check(md, cr); 523 + /* EACCES tells generic_permission to check for capability overrides */ 524 + return EACCES; 497 525 } 498 526 499 527 /*
-2
fs/xfs/xfs_acl.h
··· 75 75 #define _ACL_GET_DEFAULT(pv,pd) (xfs_acl_vtoacl(pv,NULL,pd) == 0) 76 76 #define _ACL_ACCESS_EXISTS xfs_acl_vhasacl_access 77 77 #define _ACL_DEFAULT_EXISTS xfs_acl_vhasacl_default 78 - #define _ACL_XFS_IACCESS(i,m,c) (XFS_IFORK_Q(i) ? xfs_acl_iaccess(i,m,c) : -1) 79 78 80 79 #define _ACL_ALLOC(a) ((a) = kmem_zone_alloc(xfs_acl_zone, KM_SLEEP)) 81 80 #define _ACL_FREE(a) ((a)? kmem_zone_free(xfs_acl_zone, (a)):(void)0) ··· 94 95 #define _ACL_GET_DEFAULT(pv,pd) (0) 95 96 #define _ACL_ACCESS_EXISTS (NULL) 96 97 #define _ACL_DEFAULT_EXISTS (NULL) 97 - #define _ACL_XFS_IACCESS(i,m,c) (-1) 98 98 #endif 99 99 100 100 #endif /* __XFS_ACL_H__ */
+1 -1
fs/xfs/xfs_ag.h
··· 193 193 xfs_agino_t pagi_count; /* number of allocated inodes */ 194 194 int pagb_count; /* pagb slots in use */ 195 195 #ifdef __KERNEL__ 196 - lock_t pagb_lock; /* lock for pagb_list */ 196 + spinlock_t pagb_lock; /* lock for pagb_list */ 197 197 #endif 198 198 xfs_perag_busy_t *pagb_list; /* unstable blocks */ 199 199 atomic_t pagf_fstrms; /* # of filestreams active in this AG */
+8 -11
fs/xfs/xfs_alloc.c
··· 2206 2206 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]); 2207 2207 pag->pagf_levels[XFS_BTNUM_CNTi] = 2208 2208 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]); 2209 - spinlock_init(&pag->pagb_lock, "xfspagb"); 2209 + spin_lock_init(&pag->pagb_lock); 2210 2210 pag->pagb_list = kmem_zalloc(XFS_PAGB_NUM_SLOTS * 2211 2211 sizeof(xfs_perag_busy_t), KM_SLEEP); 2212 2212 pag->pagf_init = 1; ··· 2500 2500 xfs_mount_t *mp; 2501 2501 xfs_perag_busy_t *bsy; 2502 2502 int n; 2503 - SPLDECL(s); 2504 2503 2505 2504 mp = tp->t_mountp; 2506 - s = mutex_spinlock(&mp->m_perag[agno].pagb_lock); 2505 + spin_lock(&mp->m_perag[agno].pagb_lock); 2507 2506 2508 2507 /* search pagb_list for an open slot */ 2509 2508 for (bsy = mp->m_perag[agno].pagb_list, n = 0; ··· 2532 2533 xfs_trans_set_sync(tp); 2533 2534 } 2534 2535 2535 - mutex_spinunlock(&mp->m_perag[agno].pagb_lock, s); 2536 + spin_unlock(&mp->m_perag[agno].pagb_lock); 2536 2537 } 2537 2538 2538 2539 void ··· 2542 2543 { 2543 2544 xfs_mount_t *mp; 2544 2545 xfs_perag_busy_t *list; 2545 - SPLDECL(s); 2546 2546 2547 2547 mp = tp->t_mountp; 2548 2548 2549 - s = mutex_spinlock(&mp->m_perag[agno].pagb_lock); 2549 + spin_lock(&mp->m_perag[agno].pagb_lock); 2550 2550 list = mp->m_perag[agno].pagb_list; 2551 2551 2552 2552 ASSERT(idx < XFS_PAGB_NUM_SLOTS); ··· 2557 2559 TRACE_UNBUSY("xfs_alloc_clear_busy", "missing", agno, idx, tp); 2558 2560 } 2559 2561 2560 - mutex_spinunlock(&mp->m_perag[agno].pagb_lock, s); 2562 + spin_unlock(&mp->m_perag[agno].pagb_lock); 2561 2563 } 2562 2564 2563 2565 ··· 2576 2578 xfs_agblock_t uend, bend; 2577 2579 xfs_lsn_t lsn; 2578 2580 int cnt; 2579 - SPLDECL(s); 2580 2581 2581 2582 mp = tp->t_mountp; 2582 2583 2583 - s = mutex_spinlock(&mp->m_perag[agno].pagb_lock); 2584 + spin_lock(&mp->m_perag[agno].pagb_lock); 2584 2585 cnt = mp->m_perag[agno].pagb_count; 2585 2586 2586 2587 uend = bno + len - 1; ··· 2612 2615 if (cnt) { 2613 2616 TRACE_BUSYSEARCH("xfs_alloc_search_busy", "found", agno, bno, len, n, tp); 2614 2617 lsn = bsy->busy_tp->t_commit_lsn; 2615 - mutex_spinunlock(&mp->m_perag[agno].pagb_lock, s); 2618 + spin_unlock(&mp->m_perag[agno].pagb_lock); 2616 2619 xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC); 2617 2620 } else { 2618 2621 TRACE_BUSYSEARCH("xfs_alloc_search_busy", "not-found", agno, bno, len, n, tp); 2619 2622 n = -1; 2620 - mutex_spinunlock(&mp->m_perag[agno].pagb_lock, s); 2623 + spin_unlock(&mp->m_perag[agno].pagb_lock); 2621 2624 } 2622 2625 2623 2626 return n;
+1 -1
fs/xfs/xfs_attr.c
··· 929 929 * This leaf block cannot have a "remote" value, we only call this routine 930 930 * if bmap_one_block() says there is only one block (ie: no remote blks). 931 931 */ 932 - int 932 + STATIC int 933 933 xfs_attr_leaf_addname(xfs_da_args_t *args) 934 934 { 935 935 xfs_inode_t *dp;
+3 -5
fs/xfs/xfs_attr_leaf.c
··· 226 226 STATIC void 227 227 xfs_sbversion_add_attr2(xfs_mount_t *mp, xfs_trans_t *tp) 228 228 { 229 - unsigned long s; 230 - 231 229 if ((mp->m_flags & XFS_MOUNT_ATTR2) && 232 230 !(XFS_SB_VERSION_HASATTR2(&mp->m_sb))) { 233 - s = XFS_SB_LOCK(mp); 231 + spin_lock(&mp->m_sb_lock); 234 232 if (!XFS_SB_VERSION_HASATTR2(&mp->m_sb)) { 235 233 XFS_SB_VERSION_ADDATTR2(&mp->m_sb); 236 - XFS_SB_UNLOCK(mp, s); 234 + spin_unlock(&mp->m_sb_lock); 237 235 xfs_mod_sb(tp, XFS_SB_VERSIONNUM | XFS_SB_FEATURES2); 238 236 } else 239 - XFS_SB_UNLOCK(mp, s); 237 + spin_unlock(&mp->m_sb_lock); 240 238 } 241 239 } 242 240
-103
fs/xfs/xfs_bit.c
··· 25 25 * XFS bit manipulation routines, used in non-realtime code. 26 26 */ 27 27 28 - #ifndef HAVE_ARCH_HIGHBIT 29 - /* 30 - * Index of high bit number in byte, -1 for none set, 0..7 otherwise. 31 - */ 32 - static const char xfs_highbit[256] = { 33 - -1, 0, 1, 1, 2, 2, 2, 2, /* 00 .. 07 */ 34 - 3, 3, 3, 3, 3, 3, 3, 3, /* 08 .. 0f */ 35 - 4, 4, 4, 4, 4, 4, 4, 4, /* 10 .. 17 */ 36 - 4, 4, 4, 4, 4, 4, 4, 4, /* 18 .. 1f */ 37 - 5, 5, 5, 5, 5, 5, 5, 5, /* 20 .. 27 */ 38 - 5, 5, 5, 5, 5, 5, 5, 5, /* 28 .. 2f */ 39 - 5, 5, 5, 5, 5, 5, 5, 5, /* 30 .. 37 */ 40 - 5, 5, 5, 5, 5, 5, 5, 5, /* 38 .. 3f */ 41 - 6, 6, 6, 6, 6, 6, 6, 6, /* 40 .. 47 */ 42 - 6, 6, 6, 6, 6, 6, 6, 6, /* 48 .. 4f */ 43 - 6, 6, 6, 6, 6, 6, 6, 6, /* 50 .. 57 */ 44 - 6, 6, 6, 6, 6, 6, 6, 6, /* 58 .. 5f */ 45 - 6, 6, 6, 6, 6, 6, 6, 6, /* 60 .. 67 */ 46 - 6, 6, 6, 6, 6, 6, 6, 6, /* 68 .. 6f */ 47 - 6, 6, 6, 6, 6, 6, 6, 6, /* 70 .. 77 */ 48 - 6, 6, 6, 6, 6, 6, 6, 6, /* 78 .. 7f */ 49 - 7, 7, 7, 7, 7, 7, 7, 7, /* 80 .. 87 */ 50 - 7, 7, 7, 7, 7, 7, 7, 7, /* 88 .. 8f */ 51 - 7, 7, 7, 7, 7, 7, 7, 7, /* 90 .. 97 */ 52 - 7, 7, 7, 7, 7, 7, 7, 7, /* 98 .. 9f */ 53 - 7, 7, 7, 7, 7, 7, 7, 7, /* a0 .. a7 */ 54 - 7, 7, 7, 7, 7, 7, 7, 7, /* a8 .. af */ 55 - 7, 7, 7, 7, 7, 7, 7, 7, /* b0 .. b7 */ 56 - 7, 7, 7, 7, 7, 7, 7, 7, /* b8 .. bf */ 57 - 7, 7, 7, 7, 7, 7, 7, 7, /* c0 .. c7 */ 58 - 7, 7, 7, 7, 7, 7, 7, 7, /* c8 .. cf */ 59 - 7, 7, 7, 7, 7, 7, 7, 7, /* d0 .. d7 */ 60 - 7, 7, 7, 7, 7, 7, 7, 7, /* d8 .. df */ 61 - 7, 7, 7, 7, 7, 7, 7, 7, /* e0 .. e7 */ 62 - 7, 7, 7, 7, 7, 7, 7, 7, /* e8 .. ef */ 63 - 7, 7, 7, 7, 7, 7, 7, 7, /* f0 .. f7 */ 64 - 7, 7, 7, 7, 7, 7, 7, 7, /* f8 .. ff */ 65 - }; 66 - #endif 67 - 68 - /* 69 - * xfs_highbit32: get high bit set out of 32-bit argument, -1 if none set. 70 - */ 71 - inline int 72 - xfs_highbit32( 73 - __uint32_t v) 74 - { 75 - #ifdef HAVE_ARCH_HIGHBIT 76 - return highbit32(v); 77 - #else 78 - int i; 79 - 80 - if (v & 0xffff0000) 81 - if (v & 0xff000000) 82 - i = 24; 83 - else 84 - i = 16; 85 - else if (v & 0x0000ffff) 86 - if (v & 0x0000ff00) 87 - i = 8; 88 - else 89 - i = 0; 90 - else 91 - return -1; 92 - return i + xfs_highbit[(v >> i) & 0xff]; 93 - #endif 94 - } 95 - 96 - /* 97 - * xfs_lowbit64: get low bit set out of 64-bit argument, -1 if none set. 98 - */ 99 - int 100 - xfs_lowbit64( 101 - __uint64_t v) 102 - { 103 - __uint32_t w = (__uint32_t)v; 104 - int n = 0; 105 - 106 - if (w) { /* lower bits */ 107 - n = ffs(w); 108 - } else { /* upper bits */ 109 - w = (__uint32_t)(v >> 32); 110 - if (w && (n = ffs(w))) 111 - n += 32; 112 - } 113 - return n - 1; 114 - } 115 - 116 - /* 117 - * xfs_highbit64: get high bit set out of 64-bit argument, -1 if none set. 118 - */ 119 - int 120 - xfs_highbit64( 121 - __uint64_t v) 122 - { 123 - __uint32_t h = (__uint32_t)(v >> 32); 124 - 125 - if (h) 126 - return xfs_highbit32(h) + 32; 127 - return xfs_highbit32((__uint32_t)v); 128 - } 129 - 130 - 131 28 /* 132 29 * Return whether bitmap is empty. 133 30 * Size is number of words in the bitmap, which is padded to word boundary
+22 -5
fs/xfs/xfs_bit.h
··· 47 47 } 48 48 49 49 /* Get high bit set out of 32-bit argument, -1 if none set */ 50 - extern int xfs_highbit32(__uint32_t v); 51 - 52 - /* Get low bit set out of 64-bit argument, -1 if none set */ 53 - extern int xfs_lowbit64(__uint64_t v); 50 + static inline int xfs_highbit32(__uint32_t v) 51 + { 52 + return fls(v) - 1; 53 + } 54 54 55 55 /* Get high bit set out of 64-bit argument, -1 if none set */ 56 - extern int xfs_highbit64(__uint64_t); 56 + static inline int xfs_highbit64(__uint64_t v) 57 + { 58 + return fls64(v) - 1; 59 + } 60 + 61 + /* Get low bit set out of 32-bit argument, -1 if none set */ 62 + static inline int xfs_lowbit32(__uint32_t v) 63 + { 64 + __uint32_t t = v; 65 + return (t) ? find_first_bit((unsigned long *)&t, 32) : -1; 66 + } 67 + 68 + /* Get low bit set out of 64-bit argument, -1 if none set */ 69 + static inline int xfs_lowbit64(__uint64_t v) 70 + { 71 + __uint64_t t = v; 72 + return (t) ? find_first_bit((unsigned long *)&t, 64) : -1; 73 + } 57 74 58 75 /* Return whether bitmap is empty (1 == empty) */ 59 76 extern int xfs_bitmap_empty(uint *map, uint size);
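
With the lookup-table helpers removed from xfs_bit.c, xfs_bit.h builds the bit routines directly on the kernel's fls()/fls64()/find_first_bit(). A quick illustration of the expected semantics using hypothetical wrapper names (the ffs()-based lowbit shown here is an equivalent formulation, not the exact header code):

	#include <linux/types.h>
	#include <linux/bitops.h>

	static inline int my_highbit32(u32 v) { return fls(v) - 1; }	/* -1 when v == 0 */
	static inline int my_lowbit32(u32 v)  { return ffs(v) - 1; }	/* -1 when v == 0 */

	/*
	 * e.g. v = 0x18 (binary 11000):
	 *   my_lowbit32(v)  == 3
	 *   my_highbit32(v) == 4
	 *   both return -1 for v == 0
	 */
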
+10 -12
fs/xfs/xfs_bmap.c
··· 2830 2830 args.prod = align; 2831 2831 if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod))) 2832 2832 args.mod = (xfs_extlen_t)(args.prod - args.mod); 2833 - } else if (mp->m_sb.sb_blocksize >= NBPP) { 2833 + } else if (mp->m_sb.sb_blocksize >= PAGE_CACHE_SIZE) { 2834 2834 args.prod = 1; 2835 2835 args.mod = 0; 2836 2836 } else { 2837 - args.prod = NBPP >> mp->m_sb.sb_blocklog; 2837 + args.prod = PAGE_CACHE_SIZE >> mp->m_sb.sb_blocklog; 2838 2838 if ((args.mod = (xfs_extlen_t)(do_mod(ap->off, args.prod)))) 2839 2839 args.mod = (xfs_extlen_t)(args.prod - args.mod); 2840 2840 } ··· 2969 2969 xfs_bmap_alloc( 2970 2970 xfs_bmalloca_t *ap) /* bmap alloc argument struct */ 2971 2971 { 2972 - if ((ap->ip->i_d.di_flags & XFS_DIFLAG_REALTIME) && ap->userdata) 2972 + if (XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata) 2973 2973 return xfs_bmap_rtalloc(ap); 2974 2974 return xfs_bmap_btalloc(ap); 2975 2975 } ··· 3096 3096 /* 3097 3097 * Realtime allocation. Free it and record di_nblocks update. 3098 3098 */ 3099 - if (whichfork == XFS_DATA_FORK && 3100 - (ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) { 3099 + if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) { 3101 3100 xfs_fsblock_t bno; 3102 3101 xfs_filblks_t len; 3103 3102 ··· 3955 3956 xfs_bmap_free_t flist; /* freed extent records */ 3956 3957 xfs_mount_t *mp; /* mount structure */ 3957 3958 xfs_trans_t *tp; /* transaction pointer */ 3958 - unsigned long s; /* spinlock spl value */ 3959 3959 int blks; /* space reservation */ 3960 3960 int version = 1; /* superblock attr version */ 3961 3961 int committed; /* xaction was committed */ ··· 4051 4053 (!XFS_SB_VERSION_HASATTR2(&mp->m_sb) && version == 2)) { 4052 4054 __int64_t sbfields = 0; 4053 4055 4054 - s = XFS_SB_LOCK(mp); 4056 + spin_lock(&mp->m_sb_lock); 4055 4057 if (!XFS_SB_VERSION_HASATTR(&mp->m_sb)) { 4056 4058 XFS_SB_VERSION_ADDATTR(&mp->m_sb); 4057 4059 sbfields |= XFS_SB_VERSIONNUM; ··· 4061 4063 sbfields |= (XFS_SB_VERSIONNUM | XFS_SB_FEATURES2); 4062 4064 } 4063 4065 if (sbfields) { 4064 - XFS_SB_UNLOCK(mp, s); 4066 + spin_unlock(&mp->m_sb_lock); 4065 4067 xfs_mod_sb(tp, sbfields); 4066 4068 } else 4067 - XFS_SB_UNLOCK(mp, s); 4069 + spin_unlock(&mp->m_sb_lock); 4068 4070 } 4069 4071 if ((error = xfs_bmap_finish(&tp, &flist, &committed))) 4070 4072 goto error2; ··· 6392 6394 * Recursively walks each level of a btree 6393 6395 * to count total fsblocks is use. 6394 6396 */ 6395 - int /* error */ 6397 + STATIC int /* error */ 6396 6398 xfs_bmap_count_tree( 6397 6399 xfs_mount_t *mp, /* file system mount point */ 6398 6400 xfs_trans_t *tp, /* transaction pointer */ ··· 6468 6470 /* 6469 6471 * Count leaf blocks given a range of extent records. 6470 6472 */ 6471 - int 6473 + STATIC int 6472 6474 xfs_bmap_count_leaves( 6473 6475 xfs_ifork_t *ifp, 6474 6476 xfs_extnum_t idx, ··· 6488 6490 * Count leaf blocks given a range of extent records originally 6489 6491 * in btree format. 6490 6492 */ 6491 - int 6493 + STATIC int 6492 6494 xfs_bmap_disk_count_leaves( 6493 6495 xfs_extnum_t idx, 6494 6496 xfs_bmbt_block_t *block,
+2
fs/xfs/xfs_bmap.h
··· 25 25 struct xfs_mount; 26 26 struct xfs_trans; 27 27 28 + extern kmem_zone_t *xfs_bmap_free_item_zone; 29 + 28 30 /* 29 31 * DELTA: describe a change to the in-core extent list. 30 32 *
+1 -2
fs/xfs/xfs_bmap_btree.c
··· 2062 2062 pcur->bc_private.b.allocated; 2063 2063 pcur->bc_private.b.allocated = 0; 2064 2064 ASSERT((cur->bc_private.b.firstblock != NULLFSBLOCK) || 2065 - (cur->bc_private.b.ip->i_d.di_flags & 2066 - XFS_DIFLAG_REALTIME)); 2065 + XFS_IS_REALTIME_INODE(cur->bc_private.b.ip)); 2067 2066 cur->bc_private.b.firstblock = 2068 2067 pcur->bc_private.b.firstblock; 2069 2068 ASSERT(cur->bc_private.b.flist ==
+2
fs/xfs/xfs_btree.h
··· 24 24 struct xfs_mount; 25 25 struct xfs_trans; 26 26 27 + extern kmem_zone_t *xfs_btree_cur_zone; 28 + 27 29 /* 28 30 * This nonsense is to make -wlint happy. 29 31 */
+4 -6
fs/xfs/xfs_buf_item.c
··· 378 378 xfs_mount_t *mp; 379 379 xfs_buf_t *bp; 380 380 int freed; 381 - SPLDECL(s); 382 381 383 382 bp = bip->bli_buf; 384 383 ASSERT(bp != NULL); ··· 408 409 XFS_BUF_SET_FSPRIVATE(bp, NULL); 409 410 XFS_BUF_CLR_IODONE_FUNC(bp); 410 411 } else { 411 - AIL_LOCK(mp,s); 412 - xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip, s); 412 + spin_lock(&mp->m_ail_lock); 413 + xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip); 413 414 xfs_buf_item_relse(bp); 414 415 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL); 415 416 } ··· 1112 1113 xfs_buf_log_item_t *bip) 1113 1114 { 1114 1115 struct xfs_mount *mp; 1115 - SPLDECL(s); 1116 1116 1117 1117 ASSERT(bip->bli_buf == bp); 1118 1118 ··· 1126 1128 * 1127 1129 * Either way, AIL is useless if we're forcing a shutdown. 1128 1130 */ 1129 - AIL_LOCK(mp,s); 1131 + spin_lock(&mp->m_ail_lock); 1130 1132 /* 1131 1133 * xfs_trans_delete_ail() drops the AIL lock. 1132 1134 */ 1133 - xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip, s); 1135 + xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip); 1134 1136 1135 1137 #ifdef XFS_TRANS_DEBUG 1136 1138 kmem_free(bip->bli_orig, XFS_BUF_COUNT(bp));
+2
fs/xfs/xfs_buf_item.h
··· 18 18 #ifndef __XFS_BUF_ITEM_H__ 19 19 #define __XFS_BUF_ITEM_H__ 20 20 21 + extern kmem_zone_t *xfs_buf_item_zone; 22 + 21 23 /* 22 24 * This is the structure used to lay out a buf log item in the 23 25 * log. The data map describes which 128 byte chunks of the buffer
+5 -8
fs/xfs/xfs_da_btree.c
··· 2218 2218 2219 2219 #ifdef XFS_DABUF_DEBUG 2220 2220 xfs_dabuf_t *xfs_dabuf_global_list; 2221 - lock_t xfs_dabuf_global_lock; 2221 + spinlock_t xfs_dabuf_global_lock; 2222 2222 #endif 2223 2223 2224 2224 /* ··· 2264 2264 } 2265 2265 #ifdef XFS_DABUF_DEBUG 2266 2266 { 2267 - SPLDECL(s); 2268 2267 xfs_dabuf_t *p; 2269 2268 2270 - s = mutex_spinlock(&xfs_dabuf_global_lock); 2269 + spin_lock(&xfs_dabuf_global_lock); 2271 2270 for (p = xfs_dabuf_global_list; p; p = p->next) { 2272 2271 ASSERT(p->blkno != dabuf->blkno || 2273 2272 p->target != dabuf->target); ··· 2276 2277 xfs_dabuf_global_list->prev = dabuf; 2277 2278 dabuf->next = xfs_dabuf_global_list; 2278 2279 xfs_dabuf_global_list = dabuf; 2279 - mutex_spinunlock(&xfs_dabuf_global_lock, s); 2280 + spin_unlock(&xfs_dabuf_global_lock); 2280 2281 } 2281 2282 #endif 2282 2283 return dabuf; ··· 2318 2319 kmem_free(dabuf->data, BBTOB(dabuf->bbcount)); 2319 2320 #ifdef XFS_DABUF_DEBUG 2320 2321 { 2321 - SPLDECL(s); 2322 - 2323 - s = mutex_spinlock(&xfs_dabuf_global_lock); 2322 + spin_lock(&xfs_dabuf_global_lock); 2324 2323 if (dabuf->prev) 2325 2324 dabuf->prev->next = dabuf->next; 2326 2325 else 2327 2326 xfs_dabuf_global_list = dabuf->next; 2328 2327 if (dabuf->next) 2329 2328 dabuf->next->prev = dabuf->prev; 2330 - mutex_spinunlock(&xfs_dabuf_global_lock, s); 2329 + spin_unlock(&xfs_dabuf_global_lock); 2331 2330 } 2332 2331 memset(dabuf, 0, XFS_DA_BUF_SIZE(dabuf->nbuf)); 2333 2332 #endif
+1
fs/xfs/xfs_da_btree.h
··· 260 260 xfs_daddr_t xfs_da_blkno(xfs_dabuf_t *dabuf); 261 261 262 262 extern struct kmem_zone *xfs_da_state_zone; 263 + extern struct kmem_zone *xfs_dabuf_zone; 263 264 #endif /* __KERNEL__ */ 264 265 265 266 #endif /* __XFS_DA_BTREE_H__ */
+35 -49
fs/xfs/xfs_dfrag.c
··· 52 52 xfs_swapext_t __user *sxu) 53 53 { 54 54 xfs_swapext_t *sxp; 55 - xfs_inode_t *ip=NULL, *tip=NULL; 56 - xfs_mount_t *mp; 57 - struct file *fp = NULL, *tfp = NULL; 58 - bhv_vnode_t *vp, *tvp; 55 + xfs_inode_t *ip, *tip; 56 + struct file *file, *target_file; 59 57 int error = 0; 60 58 61 59 sxp = kmem_alloc(sizeof(xfs_swapext_t), KM_MAYFAIL); 62 60 if (!sxp) { 63 61 error = XFS_ERROR(ENOMEM); 64 - goto error0; 62 + goto out; 65 63 } 66 64 67 65 if (copy_from_user(sxp, sxu, sizeof(xfs_swapext_t))) { 68 66 error = XFS_ERROR(EFAULT); 69 - goto error0; 67 + goto out_free_sxp; 70 68 } 71 69 72 70 /* Pull information for the target fd */ 73 - if (((fp = fget((int)sxp->sx_fdtarget)) == NULL) || 74 - ((vp = vn_from_inode(fp->f_path.dentry->d_inode)) == NULL)) { 71 + file = fget((int)sxp->sx_fdtarget); 72 + if (!file) { 75 73 error = XFS_ERROR(EINVAL); 76 - goto error0; 74 + goto out_free_sxp; 77 75 } 78 76 79 - ip = xfs_vtoi(vp); 80 - if (ip == NULL) { 77 + if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND)) { 81 78 error = XFS_ERROR(EBADF); 82 - goto error0; 79 + goto out_put_file; 83 80 } 84 81 85 - if (((tfp = fget((int)sxp->sx_fdtmp)) == NULL) || 86 - ((tvp = vn_from_inode(tfp->f_path.dentry->d_inode)) == NULL)) { 82 + target_file = fget((int)sxp->sx_fdtmp); 83 + if (!target_file) { 87 84 error = XFS_ERROR(EINVAL); 88 - goto error0; 85 + goto out_put_file; 89 86 } 90 87 91 - tip = xfs_vtoi(tvp); 92 - if (tip == NULL) { 88 + if (!(target_file->f_mode & FMODE_WRITE) || 89 + (target_file->f_flags & O_APPEND)) { 93 90 error = XFS_ERROR(EBADF); 94 - goto error0; 91 + goto out_put_target_file; 95 92 } 93 + 94 + ip = XFS_I(file->f_path.dentry->d_inode); 95 + tip = XFS_I(target_file->f_path.dentry->d_inode); 96 96 97 97 if (ip->i_mount != tip->i_mount) { 98 - error = XFS_ERROR(EINVAL); 99 - goto error0; 98 + error = XFS_ERROR(EINVAL); 99 + goto out_put_target_file; 100 100 } 101 101 102 102 if (ip->i_ino == tip->i_ino) { 103 - error = XFS_ERROR(EINVAL); 104 - goto error0; 103 + error = XFS_ERROR(EINVAL); 104 + goto out_put_target_file; 105 105 } 106 106 107 - mp = ip->i_mount; 108 - 109 - if (XFS_FORCED_SHUTDOWN(mp)) { 110 - error = XFS_ERROR(EIO); 111 - goto error0; 107 + if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { 108 + error = XFS_ERROR(EIO); 109 + goto out_put_target_file; 112 110 } 113 111 114 - error = XFS_SWAP_EXTENTS(mp, &ip->i_iocore, &tip->i_iocore, sxp); 112 + error = xfs_swap_extents(ip, tip, sxp); 115 113 116 - error0: 117 - if (fp != NULL) 118 - fput(fp); 119 - if (tfp != NULL) 120 - fput(tfp); 121 - 122 - if (sxp != NULL) 123 - kmem_free(sxp, sizeof(xfs_swapext_t)); 124 - 114 + out_put_target_file: 115 + fput(target_file); 116 + out_put_file: 117 + fput(file); 118 + out_free_sxp: 119 + kmem_free(sxp, sizeof(xfs_swapext_t)); 120 + out: 125 121 return error; 126 122 } 127 123 ··· 165 169 xfs_lock_inodes(ips, 2, 0, lock_flags); 166 170 locked = 1; 167 171 168 - /* Check permissions */ 169 - error = xfs_iaccess(ip, S_IWUSR, NULL); 170 - if (error) 171 - goto error0; 172 - 173 - error = xfs_iaccess(tip, S_IWUSR, NULL); 174 - if (error) 175 - goto error0; 176 - 177 172 /* Verify that both files have the same format */ 178 173 if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) { 179 174 error = XFS_ERROR(EINVAL); ··· 172 185 } 173 186 174 187 /* Verify both files are either real-time or non-realtime */ 175 - if ((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 176 - (tip->i_d.di_flags & XFS_DIFLAG_REALTIME)) { 188 + if (XFS_IS_REALTIME_INODE(ip) != 
XFS_IS_REALTIME_INODE(tip)) { 177 189 error = XFS_ERROR(EINVAL); 178 190 goto error0; 179 191 } ··· 185 199 } 186 200 187 201 if (VN_CACHED(tvp) != 0) { 188 - xfs_inval_cached_trace(&tip->i_iocore, 0, -1, 0, -1); 202 + xfs_inval_cached_trace(tip, 0, -1, 0, -1); 189 203 error = xfs_flushinval_pages(tip, 0, -1, 190 204 FI_REMAPF_LOCKED); 191 205 if (error)
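
The reworked xfs_swapext() above drops the bhv_vnode indirection and uses the usual fget/fput plus goto-unwind shape for its error handling. A stripped-down sketch of that shape, with placeholder names and without the XFS-specific validation or the positive-errno convention:

	#include <linux/fs.h>
	#include <linux/file.h>

	static int my_swap_by_fds(int fd, int tmp_fd)
	{
		struct file	*file, *target_file;
		int		error = 0;

		file = fget(fd);
		if (!file)
			return -EINVAL;

		if (!(file->f_mode & FMODE_WRITE)) {
			error = -EBADF;
			goto out_put_file;
		}

		target_file = fget(tmp_fd);
		if (!target_file) {
			error = -EINVAL;
			goto out_put_file;
		}

		/* ... validate both inodes and perform the swap here ... */

		fput(target_file);
	out_put_file:
		fput(file);
		return error;
	}
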
+27 -55
fs/xfs/xfs_dinode.h
··· 171 171 /* 172 172 * Inode data & attribute fork sizes, per inode. 173 173 */ 174 - #define XFS_CFORK_Q(dcp) ((dcp)->di_forkoff != 0) 175 - #define XFS_CFORK_Q_DISK(dcp) ((dcp)->di_forkoff != 0) 176 - 177 - #define XFS_CFORK_BOFF(dcp) ((int)((dcp)->di_forkoff << 3)) 178 - #define XFS_CFORK_BOFF_DISK(dcp) ((int)((dcp)->di_forkoff << 3)) 179 - 180 - #define XFS_CFORK_DSIZE_DISK(dcp,mp) \ 181 - (XFS_CFORK_Q_DISK(dcp) ? XFS_CFORK_BOFF_DISK(dcp) : XFS_LITINO(mp)) 182 - #define XFS_CFORK_DSIZE(dcp,mp) \ 183 - (XFS_CFORK_Q(dcp) ? XFS_CFORK_BOFF(dcp) : XFS_LITINO(mp)) 184 - 185 - #define XFS_CFORK_ASIZE_DISK(dcp,mp) \ 186 - (XFS_CFORK_Q_DISK(dcp) ? XFS_LITINO(mp) - XFS_CFORK_BOFF_DISK(dcp) : 0) 187 - #define XFS_CFORK_ASIZE(dcp,mp) \ 188 - (XFS_CFORK_Q(dcp) ? XFS_LITINO(mp) - XFS_CFORK_BOFF(dcp) : 0) 189 - 190 - #define XFS_CFORK_SIZE_DISK(dcp,mp,w) \ 191 - ((w) == XFS_DATA_FORK ? \ 192 - XFS_CFORK_DSIZE_DISK(dcp, mp) : \ 193 - XFS_CFORK_ASIZE_DISK(dcp, mp)) 194 - #define XFS_CFORK_SIZE(dcp,mp,w) \ 195 - ((w) == XFS_DATA_FORK ? \ 196 - XFS_CFORK_DSIZE(dcp, mp) : XFS_CFORK_ASIZE(dcp, mp)) 174 + #define XFS_DFORK_Q(dip) ((dip)->di_core.di_forkoff != 0) 175 + #define XFS_DFORK_BOFF(dip) ((int)((dip)->di_core.di_forkoff << 3)) 197 176 198 177 #define XFS_DFORK_DSIZE(dip,mp) \ 199 - XFS_CFORK_DSIZE_DISK(&(dip)->di_core, mp) 200 - #define XFS_DFORK_DSIZE_HOST(dip,mp) \ 201 - XFS_CFORK_DSIZE(&(dip)->di_core, mp) 178 + (XFS_DFORK_Q(dip) ? \ 179 + XFS_DFORK_BOFF(dip) : \ 180 + XFS_LITINO(mp)) 202 181 #define XFS_DFORK_ASIZE(dip,mp) \ 203 - XFS_CFORK_ASIZE_DISK(&(dip)->di_core, mp) 204 - #define XFS_DFORK_ASIZE_HOST(dip,mp) \ 205 - XFS_CFORK_ASIZE(&(dip)->di_core, mp) 206 - #define XFS_DFORK_SIZE(dip,mp,w) \ 207 - XFS_CFORK_SIZE_DISK(&(dip)->di_core, mp, w) 208 - #define XFS_DFORK_SIZE_HOST(dip,mp,w) \ 209 - XFS_CFORK_SIZE(&(dip)->di_core, mp, w) 182 + (XFS_DFORK_Q(dip) ? \ 183 + XFS_LITINO(mp) - XFS_DFORK_BOFF(dip) : \ 184 + 0) 185 + #define XFS_DFORK_SIZE(dip,mp,w) \ 186 + ((w) == XFS_DATA_FORK ? \ 187 + XFS_DFORK_DSIZE(dip, mp) : \ 188 + XFS_DFORK_ASIZE(dip, mp)) 210 189 211 - #define XFS_DFORK_Q(dip) XFS_CFORK_Q_DISK(&(dip)->di_core) 212 - #define XFS_DFORK_BOFF(dip) XFS_CFORK_BOFF_DISK(&(dip)->di_core) 213 - #define XFS_DFORK_DPTR(dip) ((dip)->di_u.di_c) 214 - #define XFS_DFORK_APTR(dip) \ 190 + #define XFS_DFORK_DPTR(dip) ((dip)->di_u.di_c) 191 + #define XFS_DFORK_APTR(dip) \ 215 192 ((dip)->di_u.di_c + XFS_DFORK_BOFF(dip)) 216 - #define XFS_DFORK_PTR(dip,w) \ 193 + #define XFS_DFORK_PTR(dip,w) \ 217 194 ((w) == XFS_DATA_FORK ? XFS_DFORK_DPTR(dip) : XFS_DFORK_APTR(dip)) 218 - #define XFS_CFORK_FORMAT(dcp,w) \ 219 - ((w) == XFS_DATA_FORK ? (dcp)->di_format : (dcp)->di_aformat) 220 - #define XFS_CFORK_FMT_SET(dcp,w,n) \ 195 + #define XFS_DFORK_FORMAT(dip,w) \ 221 196 ((w) == XFS_DATA_FORK ? \ 222 - ((dcp)->di_format = (n)) : ((dcp)->di_aformat = (n))) 223 - #define XFS_DFORK_FORMAT(dip,w) XFS_CFORK_FORMAT(&(dip)->di_core, w) 224 - 225 - #define XFS_CFORK_NEXTENTS_DISK(dcp,w) \ 197 + (dip)->di_core.di_format : \ 198 + (dip)->di_core.di_aformat) 199 + #define XFS_DFORK_NEXTENTS(dip,w) \ 226 200 ((w) == XFS_DATA_FORK ? \ 227 - be32_to_cpu((dcp)->di_nextents) : \ 228 - be16_to_cpu((dcp)->di_anextents)) 229 - #define XFS_CFORK_NEXTENTS(dcp,w) \ 230 - ((w) == XFS_DATA_FORK ? 
(dcp)->di_nextents : (dcp)->di_anextents) 231 - #define XFS_DFORK_NEXTENTS(dip,w) XFS_CFORK_NEXTENTS_DISK(&(dip)->di_core, w) 232 - #define XFS_DFORK_NEXTENTS_HOST(dip,w) XFS_CFORK_NEXTENTS(&(dip)->di_core, w) 233 - 234 - #define XFS_CFORK_NEXT_SET(dcp,w,n) \ 235 - ((w) == XFS_DATA_FORK ? \ 236 - ((dcp)->di_nextents = (n)) : ((dcp)->di_anextents = (n))) 201 + be32_to_cpu((dip)->di_core.di_nextents) : \ 202 + be16_to_cpu((dip)->di_core.di_anextents)) 237 203 238 204 #define XFS_BUF_TO_DINODE(bp) ((xfs_dinode_t *)XFS_BUF_PTR(bp)) 239 205 ··· 238 272 #define XFS_DIFLAG_EXTSZINHERIT (1 << XFS_DIFLAG_EXTSZINHERIT_BIT) 239 273 #define XFS_DIFLAG_NODEFRAG (1 << XFS_DIFLAG_NODEFRAG_BIT) 240 274 #define XFS_DIFLAG_FILESTREAM (1 << XFS_DIFLAG_FILESTREAM_BIT) 275 + 276 + #ifdef CONFIG_XFS_RT 277 + #define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) 278 + #else 279 + #define XFS_IS_REALTIME_INODE(ip) (0) 280 + #endif 241 281 242 282 #define XFS_DIFLAG_ANY \ 243 283 (XFS_DIFLAG_REALTIME | XFS_DIFLAG_PREALLOC | XFS_DIFLAG_NEWRTBM | \
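
With the CFORK layer gone, the DFORK macros above compute the data/attr fork split straight from di_forkoff. A worked example with made-up sizes (the real literal-inode area depends on the mount):

	/*
	 * Hypothetical numbers, not from any real filesystem: assume
	 * XFS_LITINO(mp) == 256 bytes and dip->di_core.di_forkoff == 15.
	 *
	 *   XFS_DFORK_Q(dip)          -> true             (forkoff != 0, attr fork present)
	 *   XFS_DFORK_BOFF(dip)       -> 15 << 3 == 120   (attr fork byte offset)
	 *   XFS_DFORK_DSIZE(dip, mp)  -> 120              (data fork gets the first 120 bytes)
	 *   XFS_DFORK_ASIZE(dip, mp)  -> 256 - 120 == 136 (attr fork gets the rest)
	 */
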
+2 -1
fs/xfs/xfs_dir2.c
··· 42 42 #include "xfs_dir2_node.h"
43 43 #include "xfs_dir2_trace.h"
44 44 #include "xfs_error.h"
45 + #include "xfs_vnodeops.h"
45 46 
46 47 
47 48 void
··· 302 301 int rval; /* return value */
303 302 int v; /* type-checking value */
304 303 
305 - vn_trace_entry(dp, __FUNCTION__, (inst_t *)__return_address);
304 + xfs_itrace_entry(dp);
306 305 
307 306 if (XFS_FORCED_SHUTDOWN(dp->i_mount))
308 307 return XFS_ERROR(EIO);
-31
fs/xfs/xfs_error.c
··· 230 230 }
231 231 }
232 232 
233 - STATIC void
234 - xfs_hex_dump(void *p, int length)
235 - {
236 - __uint8_t *uip = (__uint8_t*)p;
237 - int i;
238 - char sbuf[128], *s;
239 - 
240 - s = sbuf;
241 - *s = '\0';
242 - for (i=0; i<length; i++, uip++) {
243 - if ((i % 16) == 0) {
244 - if (*s != '\0')
245 - cmn_err(CE_ALERT, "%s\n", sbuf);
246 - s = sbuf;
247 - sprintf(s, "0x%x: ", i);
248 - while( *s != '\0')
249 - s++;
250 - }
251 - sprintf(s, "%02x ", *uip);
252 - 
253 - /*
254 - * the kernel sprintf is a void; user sprintf returns
255 - * the sprintf'ed string's length. Find the new end-
256 - * of-string
257 - */
258 - while( *s != '\0')
259 - s++;
260 - }
261 - cmn_err(CE_ALERT, "%s\n", sbuf);
262 - }
263 - 
264 233 void
265 234 xfs_corruption_error(
266 235 char *tag,
+2
fs/xfs/xfs_error.h
··· 174 174 /* PRINTFLIKE3 */
175 175 extern void xfs_fs_cmn_err(int level, struct xfs_mount *mp, char *fmt, ...);
176 176 
177 + extern void xfs_hex_dump(void *p, int length);
178 + 
177 179 #define xfs_fs_repair_cmn_err(level, mp, fmt, args...) \
178 180 xfs_fs_cmn_err(level, mp, fmt " Unmount and run xfs_repair.", ## args)
179 181 
+9 -12
fs/xfs/xfs_extfree_item.c
··· 110 110 xfs_efi_item_unpin(xfs_efi_log_item_t *efip, int stale) 111 111 { 112 112 xfs_mount_t *mp; 113 - SPLDECL(s); 114 113 115 114 mp = efip->efi_item.li_mountp; 116 - AIL_LOCK(mp, s); 115 + spin_lock(&mp->m_ail_lock); 117 116 if (efip->efi_flags & XFS_EFI_CANCELED) { 118 117 /* 119 118 * xfs_trans_delete_ail() drops the AIL lock. 120 119 */ 121 - xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s); 120 + xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip); 122 121 xfs_efi_item_free(efip); 123 122 } else { 124 123 efip->efi_flags |= XFS_EFI_COMMITTED; 125 - AIL_UNLOCK(mp, s); 124 + spin_unlock(&mp->m_ail_lock); 126 125 } 127 126 } 128 127 ··· 137 138 { 138 139 xfs_mount_t *mp; 139 140 xfs_log_item_desc_t *lidp; 140 - SPLDECL(s); 141 141 142 142 mp = efip->efi_item.li_mountp; 143 - AIL_LOCK(mp, s); 143 + spin_lock(&mp->m_ail_lock); 144 144 if (efip->efi_flags & XFS_EFI_CANCELED) { 145 145 /* 146 146 * free the xaction descriptor pointing to this item ··· 150 152 * pull the item off the AIL. 151 153 * xfs_trans_delete_ail() drops the AIL lock. 152 154 */ 153 - xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s); 155 + xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip); 154 156 xfs_efi_item_free(efip); 155 157 } else { 156 158 efip->efi_flags |= XFS_EFI_COMMITTED; 157 - AIL_UNLOCK(mp, s); 159 + spin_unlock(&mp->m_ail_lock); 158 160 } 159 161 } 160 162 ··· 348 350 { 349 351 xfs_mount_t *mp; 350 352 int extents_left; 351 - SPLDECL(s); 352 353 353 354 mp = efip->efi_item.li_mountp; 354 355 ASSERT(efip->efi_next_extent > 0); 355 356 ASSERT(efip->efi_flags & XFS_EFI_COMMITTED); 356 357 357 - AIL_LOCK(mp, s); 358 + spin_lock(&mp->m_ail_lock); 358 359 ASSERT(efip->efi_next_extent >= nextents); 359 360 efip->efi_next_extent -= nextents; 360 361 extents_left = efip->efi_next_extent; ··· 361 364 /* 362 365 * xfs_trans_delete_ail() drops the AIL lock. 363 366 */ 364 - xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s); 367 + xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip); 365 368 xfs_efi_item_free(efip); 366 369 } else { 367 - AIL_UNLOCK(mp, s); 370 + spin_unlock(&mp->m_ail_lock); 368 371 } 369 372 } 370 373
+1 -1
fs/xfs/xfs_filestream.c
··· 348 348 }
349 349 
350 350 /* xfs_fstrm_free_func(): callback for freeing cached stream items. */
351 - void
351 + STATIC void
352 352 xfs_fstrm_free_func(
353 353 unsigned long ino,
354 354 void *data)
+7 -3
fs/xfs/xfs_fs.h
··· 419 419 /*
420 420 * ioctl commands that are used by Linux filesystems
421 421 */
422 - #define XFS_IOC_GETXFLAGS _IOR('f', 1, long)
423 - #define XFS_IOC_SETXFLAGS _IOW('f', 2, long)
424 - #define XFS_IOC_GETVERSION _IOR('v', 1, long)
422 + #define XFS_IOC_GETXFLAGS FS_IOC_GETFLAGS
423 + #define XFS_IOC_SETXFLAGS FS_IOC_SETFLAGS
424 + #define XFS_IOC_GETVERSION FS_IOC_GETVERSION
425 + /* 32-bit compat counterparts */
426 + #define XFS_IOC32_GETXFLAGS FS_IOC32_GETFLAGS
427 + #define XFS_IOC32_SETXFLAGS FS_IOC32_SETFLAGS
428 + #define XFS_IOC32_GETVERSION FS_IOC32_GETVERSION
425 429 
426 430 /*
427 431 * ioctl commands that replace IRIX fcntl()'s
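
(Editor's note: with the definitions above, the XFS flag ioctls share request numbers with the generic FS_IOC_* ones, so either name reaches the same handler. A tiny hypothetical userspace probe, for illustration only:)

	#include <sys/ioctl.h>
	#include <linux/fs.h>

	/* FS_IOC_GETFLAGS is now the same request number as XFS_IOC_GETXFLAGS. */
	static int get_inode_flags(int fd, long *flags)
	{
		return ioctl(fd, FS_IOC_GETFLAGS, flags);
	}
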
+5 -8
fs/xfs/xfs_fsops.c
··· 462 462 xfs_mount_t *mp, 463 463 xfs_fsop_counts_t *cnt) 464 464 { 465 - unsigned long s; 466 - 467 465 xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT); 468 - s = XFS_SB_LOCK(mp); 466 + spin_lock(&mp->m_sb_lock); 469 467 cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); 470 468 cnt->freertx = mp->m_sb.sb_frextents; 471 469 cnt->freeino = mp->m_sb.sb_ifree; 472 470 cnt->allocino = mp->m_sb.sb_icount; 473 - XFS_SB_UNLOCK(mp, s); 471 + spin_unlock(&mp->m_sb_lock); 474 472 return 0; 475 473 } 476 474 ··· 495 497 { 496 498 __int64_t lcounter, delta, fdblks_delta; 497 499 __uint64_t request; 498 - unsigned long s; 499 500 500 501 /* If inval is null, report current values and return */ 501 502 if (inval == (__uint64_t *)NULL) { ··· 512 515 * problem. we needto work out if we are freeing or allocation 513 516 * blocks first, then we can do the modification as necessary. 514 517 * 515 - * We do this under the XFS_SB_LOCK so that if we are near 518 + * We do this under the m_sb_lock so that if we are near 516 519 * ENOSPC, we will hold out any changes while we work out 517 520 * what to do. This means that the amount of free space can 518 521 * change while we do this, so we need to retry if we end up ··· 523 526 * enabled, disabled or even compiled in.... 524 527 */ 525 528 retry: 526 - s = XFS_SB_LOCK(mp); 529 + spin_lock(&mp->m_sb_lock); 527 530 xfs_icsb_sync_counters_flags(mp, XFS_ICSB_SB_LOCKED); 528 531 529 532 /* ··· 566 569 outval->resblks = mp->m_resblks; 567 570 outval->resblks_avail = mp->m_resblks_avail; 568 571 } 569 - XFS_SB_UNLOCK(mp, s); 572 + spin_unlock(&mp->m_sb_lock); 570 573 571 574 if (fdblks_delta) { 572 575 /*
-2
fs/xfs/xfs_ialloc_btree.h
··· 81 81 #define XFS_INOBT_MASK(i) ((xfs_inofree_t)1 << (i))
82 82 #define XFS_INOBT_IS_FREE(rp,i) \
83 83 (((rp)->ir_free & XFS_INOBT_MASK(i)) != 0)
84 - #define XFS_INOBT_IS_FREE_DISK(rp,i) \
85 - ((be64_to_cpu((rp)->ir_free) & XFS_INOBT_MASK(i)) != 0)
86 84 #define XFS_INOBT_SET_FREE(rp,i) ((rp)->ir_free |= XFS_INOBT_MASK(i))
87 85 #define XFS_INOBT_CLR_FREE(rp,i) ((rp)->ir_free &= ~XFS_INOBT_MASK(i))
88 86 
+71 -110
fs/xfs/xfs_iget.c
··· 65 65 */ 66 66 STATIC int 67 67 xfs_iget_core( 68 - bhv_vnode_t *vp, 68 + struct inode *inode, 69 69 xfs_mount_t *mp, 70 70 xfs_trans_t *tp, 71 71 xfs_ino_t ino, ··· 74 74 xfs_inode_t **ipp, 75 75 xfs_daddr_t bno) 76 76 { 77 + struct inode *old_inode; 77 78 xfs_inode_t *ip; 78 79 xfs_inode_t *iq; 79 - bhv_vnode_t *inode_vp; 80 80 int error; 81 81 xfs_icluster_t *icl, *new_icl = NULL; 82 82 unsigned long first_index, mask; ··· 111 111 goto again; 112 112 } 113 113 114 - inode_vp = XFS_ITOV_NULL(ip); 115 - if (inode_vp == NULL) { 114 + old_inode = ip->i_vnode; 115 + if (old_inode == NULL) { 116 116 /* 117 117 * If IRECLAIM is set this inode is 118 118 * on its way out of the system, ··· 140 140 return ENOENT; 141 141 } 142 142 143 - /* 144 - * There may be transactions sitting in the 145 - * incore log buffers or being flushed to disk 146 - * at this time. We can't clear the 147 - * XFS_IRECLAIMABLE flag until these 148 - * transactions have hit the disk, otherwise we 149 - * will void the guarantee the flag provides 150 - * xfs_iunpin() 151 - */ 152 - if (xfs_ipincount(ip)) { 153 - read_unlock(&pag->pag_ici_lock); 154 - xfs_log_force(mp, 0, 155 - XFS_LOG_FORCE|XFS_LOG_SYNC); 156 - XFS_STATS_INC(xs_ig_frecycle); 157 - goto again; 158 - } 159 - 160 - vn_trace_exit(ip, "xfs_iget.alloc", 161 - (inst_t *)__return_address); 143 + xfs_itrace_exit_tag(ip, "xfs_iget.alloc"); 162 144 163 145 XFS_STATS_INC(xs_ig_found); 164 - 165 146 xfs_iflags_clear(ip, XFS_IRECLAIMABLE); 166 147 read_unlock(&pag->pag_ici_lock); 167 148 ··· 152 171 153 172 goto finish_inode; 154 173 155 - } else if (vp != inode_vp) { 156 - struct inode *inode = vn_to_inode(inode_vp); 157 - 174 + } else if (inode != old_inode) { 158 175 /* The inode is being torn down, pause and 159 176 * try again. 
160 177 */ 161 - if (inode->i_state & (I_FREEING | I_CLEAR)) { 178 + if (old_inode->i_state & (I_FREEING | I_CLEAR)) { 162 179 read_unlock(&pag->pag_ici_lock); 163 180 delay(1); 164 181 XFS_STATS_INC(xs_ig_frecycle); ··· 169 190 */ 170 191 cmn_err(CE_PANIC, 171 192 "xfs_iget_core: ambiguous vns: vp/0x%p, invp/0x%p", 172 - inode_vp, vp); 193 + old_inode, inode); 173 194 } 174 195 175 196 /* ··· 179 200 XFS_STATS_INC(xs_ig_found); 180 201 181 202 finish_inode: 182 - if (ip->i_d.di_mode == 0) { 183 - if (!(flags & XFS_IGET_CREATE)) { 184 - xfs_put_perag(mp, pag); 185 - return ENOENT; 186 - } 187 - xfs_iocore_inode_reinit(ip); 203 + if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) { 204 + xfs_put_perag(mp, pag); 205 + return ENOENT; 188 206 } 189 207 190 208 if (lock_flags != 0) 191 209 xfs_ilock(ip, lock_flags); 192 210 193 211 xfs_iflags_clear(ip, XFS_ISTALE); 194 - vn_trace_exit(ip, "xfs_iget.found", 195 - (inst_t *)__return_address); 212 + xfs_itrace_exit_tag(ip, "xfs_iget.found"); 196 213 goto return_ip; 197 214 } 198 215 ··· 209 234 return error; 210 235 } 211 236 212 - vn_trace_exit(ip, "xfs_iget.alloc", (inst_t *)__return_address); 237 + xfs_itrace_exit_tag(ip, "xfs_iget.alloc"); 213 238 214 - xfs_inode_lock_init(ip, vp); 215 - xfs_iocore_inode_init(ip); 239 + 240 + mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER, 241 + "xfsino", ip->i_ino); 242 + mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); 243 + init_waitqueue_head(&ip->i_ipin_wait); 244 + atomic_set(&ip->i_pincount, 0); 245 + initnsema(&ip->i_flock, 1, "xfsfino"); 246 + 216 247 if (lock_flags) 217 248 xfs_ilock(ip, lock_flags); 218 249 ··· 314 333 ASSERT(ip->i_df.if_ext_max == 315 334 XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t)); 316 335 317 - ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) == 318 - ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0)); 319 - 320 336 xfs_iflags_set(ip, XFS_IMODIFIED); 321 337 *ipp = ip; 322 338 ··· 321 343 * If we have a real type for an on-disk inode, we can set ops(&unlock) 322 344 * now. If it's a new inode being created, xfs_ialloc will handle it. 323 345 */ 324 - xfs_initialize_vnode(mp, vp, ip); 346 + xfs_initialize_vnode(mp, inode, ip); 325 347 return 0; 326 348 } 327 349 ··· 341 363 xfs_daddr_t bno) 342 364 { 343 365 struct inode *inode; 344 - bhv_vnode_t *vp = NULL; 366 + xfs_inode_t *ip; 345 367 int error; 346 368 347 369 XFS_STATS_INC(xs_ig_attempts); 348 370 349 371 retry: 350 372 inode = iget_locked(mp->m_super, ino); 351 - if (inode) { 352 - xfs_inode_t *ip; 373 + if (!inode) 374 + /* If we got no inode we are out of memory */ 375 + return ENOMEM; 353 376 354 - vp = vn_from_inode(inode); 355 - if (inode->i_state & I_NEW) { 356 - vn_initialize(inode); 357 - error = xfs_iget_core(vp, mp, tp, ino, flags, 358 - lock_flags, ipp, bno); 359 - if (error) { 360 - vn_mark_bad(vp); 361 - if (inode->i_state & I_NEW) 362 - unlock_new_inode(inode); 363 - iput(inode); 364 - } 365 - } else { 366 - /* 367 - * If the inode is not fully constructed due to 368 - * filehandle mismatches wait for the inode to go 369 - * away and try again. 370 - * 371 - * iget_locked will call __wait_on_freeing_inode 372 - * to wait for the inode to go away. 
373 - */ 374 - if (is_bad_inode(inode) || 375 - ((ip = xfs_vtoi(vp)) == NULL)) { 376 - iput(inode); 377 - delay(1); 378 - goto retry; 379 - } 377 + if (inode->i_state & I_NEW) { 378 + XFS_STATS_INC(vn_active); 379 + XFS_STATS_INC(vn_alloc); 380 380 381 - if (lock_flags != 0) 382 - xfs_ilock(ip, lock_flags); 383 - XFS_STATS_INC(xs_ig_found); 384 - *ipp = ip; 385 - error = 0; 381 + error = xfs_iget_core(inode, mp, tp, ino, flags, 382 + lock_flags, ipp, bno); 383 + if (error) { 384 + make_bad_inode(inode); 385 + if (inode->i_state & I_NEW) 386 + unlock_new_inode(inode); 387 + iput(inode); 386 388 } 387 - } else 388 - error = ENOMEM; /* If we got no inode we are out of memory */ 389 + return error; 390 + } 389 391 390 - return error; 391 - } 392 + /* 393 + * If the inode is not fully constructed due to 394 + * filehandle mismatches wait for the inode to go 395 + * away and try again. 396 + * 397 + * iget_locked will call __wait_on_freeing_inode 398 + * to wait for the inode to go away. 399 + */ 400 + if (is_bad_inode(inode)) { 401 + iput(inode); 402 + delay(1); 403 + goto retry; 404 + } 392 405 393 - /* 394 - * Do the setup for the various locks within the incore inode. 395 - */ 396 - void 397 - xfs_inode_lock_init( 398 - xfs_inode_t *ip, 399 - bhv_vnode_t *vp) 400 - { 401 - mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER, 402 - "xfsino", ip->i_ino); 403 - mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); 404 - init_waitqueue_head(&ip->i_ipin_wait); 405 - atomic_set(&ip->i_pincount, 0); 406 - initnsema(&ip->i_flock, 1, "xfsfino"); 406 + ip = XFS_I(inode); 407 + if (!ip) { 408 + iput(inode); 409 + delay(1); 410 + goto retry; 411 + } 412 + 413 + if (lock_flags != 0) 414 + xfs_ilock(ip, lock_flags); 415 + XFS_STATS_INC(xs_ig_found); 416 + *ipp = ip; 417 + return 0; 407 418 } 408 419 409 420 /* ··· 432 465 xfs_iput(xfs_inode_t *ip, 433 466 uint lock_flags) 434 467 { 435 - bhv_vnode_t *vp = XFS_ITOV(ip); 436 - 437 - vn_trace_entry(ip, "xfs_iput", (inst_t *)__return_address); 468 + xfs_itrace_entry(ip); 438 469 xfs_iunlock(ip, lock_flags); 439 - VN_RELE(vp); 470 + IRELE(ip); 440 471 } 441 472 442 473 /* ··· 444 479 xfs_iput_new(xfs_inode_t *ip, 445 480 uint lock_flags) 446 481 { 447 - bhv_vnode_t *vp = XFS_ITOV(ip); 448 - struct inode *inode = vn_to_inode(vp); 482 + struct inode *inode = ip->i_vnode; 449 483 450 - vn_trace_entry(ip, "xfs_iput_new", (inst_t *)__return_address); 484 + xfs_itrace_entry(ip); 451 485 452 486 if ((ip->i_d.di_mode == 0)) { 453 487 ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE)); 454 - vn_mark_bad(vp); 488 + make_bad_inode(inode); 455 489 } 456 490 if (inode->i_state & I_NEW) 457 491 unlock_new_inode(inode); 458 492 if (lock_flags) 459 493 xfs_iunlock(ip, lock_flags); 460 - VN_RELE(vp); 494 + IRELE(ip); 461 495 } 462 496 463 497 ··· 469 505 void 470 506 xfs_ireclaim(xfs_inode_t *ip) 471 507 { 472 - bhv_vnode_t *vp; 473 - 474 508 /* 475 509 * Remove from old hash list and mount list. 476 510 */ ··· 497 535 /* 498 536 * Pull our behavior descriptor from the vnode chain. 499 537 */ 500 - vp = XFS_ITOV_NULL(ip); 501 - if (vp) { 502 - vn_to_inode(vp)->i_private = NULL; 538 + if (ip->i_vnode) { 539 + ip->i_vnode->i_private = NULL; 503 540 ip->i_vnode = NULL; 504 541 } 505 542
+54 -171
fs/xfs/xfs_inode.c
··· 15 15 * along with this program; if not, write the Free Software Foundation, 16 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 17 */ 18 + #include <linux/log2.h> 19 + 18 20 #include "xfs.h" 19 21 #include "xfs_fs.h" 20 22 #include "xfs_types.h" ··· 828 826 xfs_icdinode_t *dic = &ip->i_d; 829 827 830 828 return _xfs_dic2xflags(dic->di_flags) | 831 - (XFS_CFORK_Q(dic) ? XFS_XFLAG_HASATTR : 0); 829 + (XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0); 832 830 } 833 831 834 832 uint 835 833 xfs_dic2xflags( 836 - xfs_dinode_core_t *dic) 834 + xfs_dinode_t *dip) 837 835 { 836 + xfs_dinode_core_t *dic = &dip->di_core; 837 + 838 838 return _xfs_dic2xflags(be16_to_cpu(dic->di_flags)) | 839 - (XFS_CFORK_Q_DISK(dic) ? XFS_XFLAG_HASATTR : 0); 839 + (XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0); 840 840 } 841 841 842 842 /* ··· 888 884 * Initialize inode's trace buffers. 889 885 * Do this before xfs_iformat in case it adds entries. 890 886 */ 891 - #ifdef XFS_VNODE_TRACE 892 - ip->i_trace = ktrace_alloc(VNODE_TRACE_SIZE, KM_SLEEP); 887 + #ifdef XFS_INODE_TRACE 888 + ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_SLEEP); 893 889 #endif 894 890 #ifdef XFS_BMAP_TRACE 895 891 ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_SLEEP); ··· 1224 1220 ip->i_d.di_extsize = pip->i_d.di_extsize; 1225 1221 } 1226 1222 } else if ((mode & S_IFMT) == S_IFREG) { 1227 - if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) { 1223 + if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) 1228 1224 di_flags |= XFS_DIFLAG_REALTIME; 1229 - ip->i_iocore.io_flags |= XFS_IOCORE_RT; 1230 - } 1231 1225 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) { 1232 1226 di_flags |= XFS_DIFLAG_EXTSIZE; 1233 1227 ip->i_d.di_extsize = pip->i_d.di_extsize; ··· 1300 1298 if ((ip->i_d.di_mode & S_IFMT) != S_IFREG) 1301 1299 return; 1302 1300 1303 - if (ip->i_d.di_flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_EXTSIZE)) 1301 + if (XFS_IS_REALTIME_INODE(ip)) 1302 + return; 1303 + 1304 + if (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) 1304 1305 return; 1305 1306 1306 1307 nimaps = 2; ··· 1716 1711 * runs. 1717 1712 */ 1718 1713 XFS_BMAP_INIT(&free_list, &first_block); 1719 - error = XFS_BUNMAPI(mp, ntp, &ip->i_iocore, 1714 + error = xfs_bunmapi(ntp, ip, 1720 1715 first_unmap_block, unmap_len, 1721 1716 XFS_BMAPI_AFLAG(fork) | 1722 1717 (sync ? 0 : XFS_BMAPI_ASYNC), ··· 1849 1844 xfs_fsize_t new_size, 1850 1845 cred_t *credp) 1851 1846 { 1852 - int error; 1853 - 1854 1847 ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0); 1855 1848 ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0); 1856 1849 ASSERT(new_size > ip->i_size); ··· 1858 1855 * xfs_write_file() beyond the end of the file 1859 1856 * and any blocks between the old and new file sizes. 1860 1857 */ 1861 - error = xfs_zero_eof(XFS_ITOV(ip), &ip->i_iocore, new_size, 1862 - ip->i_size); 1863 - return error; 1858 + return xfs_zero_eof(ip, new_size, ip->i_size); 1864 1859 } 1865 1860 1866 1861 /* ··· 1960 1959 ASSERT(agi->agi_unlinked[bucket_index]); 1961 1960 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino); 1962 1961 1963 - error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0); 1964 - if (error) 1965 - return error; 1966 - 1967 - /* 1968 - * Clear the on-disk di_nlink. This is to prevent xfs_bulkstat 1969 - * from picking up this inode when it is reclaimed (its incore state 1970 - * initialzed but not flushed to disk yet). The in-core di_nlink is 1971 - * already cleared in xfs_droplink() and a corresponding transaction 1972 - * logged. 
The hack here just synchronizes the in-core to on-disk 1973 - * di_nlink value in advance before the actual inode sync to disk. 1974 - * This is OK because the inode is already unlinked and would never 1975 - * change its di_nlink again for this inode generation. 1976 - * This is a temporary hack that would require a proper fix 1977 - * in the future. 1978 - */ 1979 - dip->di_core.di_nlink = 0; 1980 - 1981 1962 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO) { 1982 1963 /* 1983 1964 * There is already another inode in the bucket we need ··· 1967 1984 * Here we put the head pointer into our next pointer, 1968 1985 * and then we fall through to point the head at us. 1969 1986 */ 1987 + error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0); 1988 + if (error) 1989 + return error; 1990 + 1970 1991 ASSERT(be32_to_cpu(dip->di_next_unlinked) == NULLAGINO); 1971 1992 /* both on-disk, don't endian flip twice */ 1972 1993 dip->di_next_unlinked = agi->agi_unlinked[bucket_index]; ··· 2196 2209 xfs_inode_log_item_t *iip; 2197 2210 xfs_log_item_t *lip; 2198 2211 xfs_perag_t *pag = xfs_get_perag(mp, inum); 2199 - SPLDECL(s); 2200 2212 2201 2213 if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) { 2202 2214 blks_per_cluster = 1; ··· 2297 2311 iip = (xfs_inode_log_item_t *)lip; 2298 2312 ASSERT(iip->ili_logged == 1); 2299 2313 lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done; 2300 - AIL_LOCK(mp,s); 2314 + spin_lock(&mp->m_ail_lock); 2301 2315 iip->ili_flush_lsn = iip->ili_item.li_lsn; 2302 - AIL_UNLOCK(mp, s); 2316 + spin_unlock(&mp->m_ail_lock); 2303 2317 xfs_iflags_set(iip->ili_inode, XFS_ISTALE); 2304 2318 pre_flushed++; 2305 2319 } ··· 2320 2334 iip->ili_last_fields = iip->ili_format.ilf_fields; 2321 2335 iip->ili_format.ilf_fields = 0; 2322 2336 iip->ili_logged = 1; 2323 - AIL_LOCK(mp,s); 2337 + spin_lock(&mp->m_ail_lock); 2324 2338 iip->ili_flush_lsn = iip->ili_item.li_lsn; 2325 - AIL_UNLOCK(mp, s); 2339 + spin_unlock(&mp->m_ail_lock); 2326 2340 2327 2341 xfs_buf_attach_iodone(bp, 2328 2342 (void(*)(xfs_buf_t*,xfs_log_item_t*)) ··· 2360 2374 int error; 2361 2375 int delete; 2362 2376 xfs_ino_t first_ino; 2377 + xfs_dinode_t *dip; 2378 + xfs_buf_t *ibp; 2363 2379 2364 2380 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); 2365 2381 ASSERT(ip->i_transp == tp); ··· 2397 2409 * by reincarnations of this inode. 2398 2410 */ 2399 2411 ip->i_d.di_gen++; 2412 + 2400 2413 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 2414 + 2415 + error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, 0, 0); 2416 + if (error) 2417 + return error; 2418 + 2419 + /* 2420 + * Clear the on-disk di_mode. This is to prevent xfs_bulkstat 2421 + * from picking up this inode when it is reclaimed (its incore state 2422 + * initialzed but not flushed to disk yet). The in-core di_mode is 2423 + * already cleared and a corresponding transaction logged. 2424 + * The hack here just synchronizes the in-core to on-disk 2425 + * di_mode value in advance before the actual inode sync to disk. 2426 + * This is OK because the inode is already unlinked and would never 2427 + * change its di_mode again for this inode generation. 2428 + * This is a temporary hack that would require a proper fix 2429 + * in the future. 
2430 + */ 2431 + dip->di_core.di_mode = 0; 2401 2432 2402 2433 if (delete) { 2403 2434 xfs_ifree_cluster(ip, tp, first_ino); ··· 2742 2735 xfs_idestroy( 2743 2736 xfs_inode_t *ip) 2744 2737 { 2745 - 2746 2738 switch (ip->i_d.di_mode & S_IFMT) { 2747 2739 case S_IFREG: 2748 2740 case S_IFDIR: ··· 2755 2749 mrfree(&ip->i_iolock); 2756 2750 freesema(&ip->i_flock); 2757 2751 2758 - #ifdef XFS_VNODE_TRACE 2752 + #ifdef XFS_INODE_TRACE 2759 2753 ktrace_free(ip->i_trace); 2760 2754 #endif 2761 2755 #ifdef XFS_BMAP_TRACE ··· 2781 2775 */ 2782 2776 xfs_mount_t *mp = ip->i_mount; 2783 2777 xfs_log_item_t *lip = &ip->i_itemp->ili_item; 2784 - int s; 2785 2778 2786 2779 ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) || 2787 2780 XFS_FORCED_SHUTDOWN(ip->i_mount)); 2788 2781 if (lip->li_flags & XFS_LI_IN_AIL) { 2789 - AIL_LOCK(mp, s); 2782 + spin_lock(&mp->m_ail_lock); 2790 2783 if (lip->li_flags & XFS_LI_IN_AIL) 2791 - xfs_trans_delete_ail(mp, lip, s); 2784 + xfs_trans_delete_ail(mp, lip); 2792 2785 else 2793 - AIL_UNLOCK(mp, s); 2786 + spin_unlock(&mp->m_ail_lock); 2794 2787 } 2795 2788 xfs_inode_item_destroy(ip); 2796 2789 } ··· 2821 2816 { 2822 2817 ASSERT(atomic_read(&ip->i_pincount) > 0); 2823 2818 2824 - if (atomic_dec_and_lock(&ip->i_pincount, &ip->i_flags_lock)) { 2825 - 2826 - /* 2827 - * If the inode is currently being reclaimed, the link between 2828 - * the bhv_vnode and the xfs_inode will be broken after the 2829 - * XFS_IRECLAIM* flag is set. Hence, if these flags are not 2830 - * set, then we can move forward and mark the linux inode dirty 2831 - * knowing that it is still valid as it won't freed until after 2832 - * the bhv_vnode<->xfs_inode link is broken in xfs_reclaim. The 2833 - * i_flags_lock is used to synchronise the setting of the 2834 - * XFS_IRECLAIM* flags and the breaking of the link, and so we 2835 - * can execute atomically w.r.t to reclaim by holding this lock 2836 - * here. 2837 - * 2838 - * However, we still need to issue the unpin wakeup call as the 2839 - * inode reclaim may be blocked waiting for the inode to become 2840 - * unpinned. 2841 - */ 2842 - 2843 - if (!__xfs_iflags_test(ip, XFS_IRECLAIM|XFS_IRECLAIMABLE)) { 2844 - bhv_vnode_t *vp = XFS_ITOV_NULL(ip); 2845 - struct inode *inode = NULL; 2846 - 2847 - BUG_ON(vp == NULL); 2848 - inode = vn_to_inode(vp); 2849 - BUG_ON(inode->i_state & I_CLEAR); 2850 - 2851 - /* make sync come back and flush this inode */ 2852 - if (!(inode->i_state & (I_NEW|I_FREEING))) 2853 - mark_inode_dirty_sync(inode); 2854 - } 2855 - spin_unlock(&ip->i_flags_lock); 2819 + if (atomic_dec_and_test(&ip->i_pincount)) 2856 2820 wake_up(&ip->i_ipin_wait); 2857 - } 2858 2821 } 2859 2822 2860 2823 /* ··· 3311 3338 #ifdef XFS_TRANS_DEBUG 3312 3339 int first; 3313 3340 #endif 3314 - SPLDECL(s); 3315 3341 3316 3342 ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS)); 3317 3343 ASSERT(issemalocked(&(ip->i_flock))); ··· 3505 3533 iip->ili_logged = 1; 3506 3534 3507 3535 ASSERT(sizeof(xfs_lsn_t) == 8); /* don't lock if it shrinks */ 3508 - AIL_LOCK(mp,s); 3536 + spin_lock(&mp->m_ail_lock); 3509 3537 iip->ili_flush_lsn = iip->ili_item.li_lsn; 3510 - AIL_UNLOCK(mp, s); 3538 + spin_unlock(&mp->m_ail_lock); 3511 3539 3512 3540 /* 3513 3541 * Attach the function xfs_iflush_done to the inode's ··· 3581 3609 } while (ip != mp->m_inodes); 3582 3610 out: 3583 3611 XFS_MOUNT_IUNLOCK(mp); 3584 - } 3585 - 3586 - /* 3587 - * xfs_iaccess: check accessibility of inode for mode. 
3588 - */ 3589 - int 3590 - xfs_iaccess( 3591 - xfs_inode_t *ip, 3592 - mode_t mode, 3593 - cred_t *cr) 3594 - { 3595 - int error; 3596 - mode_t orgmode = mode; 3597 - struct inode *inode = vn_to_inode(XFS_ITOV(ip)); 3598 - 3599 - if (mode & S_IWUSR) { 3600 - umode_t imode = inode->i_mode; 3601 - 3602 - if (IS_RDONLY(inode) && 3603 - (S_ISREG(imode) || S_ISDIR(imode) || S_ISLNK(imode))) 3604 - return XFS_ERROR(EROFS); 3605 - 3606 - if (IS_IMMUTABLE(inode)) 3607 - return XFS_ERROR(EACCES); 3608 - } 3609 - 3610 - /* 3611 - * If there's an Access Control List it's used instead of 3612 - * the mode bits. 3613 - */ 3614 - if ((error = _ACL_XFS_IACCESS(ip, mode, cr)) != -1) 3615 - return error ? XFS_ERROR(error) : 0; 3616 - 3617 - if (current_fsuid(cr) != ip->i_d.di_uid) { 3618 - mode >>= 3; 3619 - if (!in_group_p((gid_t)ip->i_d.di_gid)) 3620 - mode >>= 3; 3621 - } 3622 - 3623 - /* 3624 - * If the DACs are ok we don't need any capability check. 3625 - */ 3626 - if ((ip->i_d.di_mode & mode) == mode) 3627 - return 0; 3628 - /* 3629 - * Read/write DACs are always overridable. 3630 - * Executable DACs are overridable if at least one exec bit is set. 3631 - */ 3632 - if (!(orgmode & S_IXUSR) || 3633 - (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode)) 3634 - if (capable_cred(cr, CAP_DAC_OVERRIDE)) 3635 - return 0; 3636 - 3637 - if ((orgmode == S_IRUSR) || 3638 - (S_ISDIR(inode->i_mode) && (!(orgmode & S_IWUSR)))) { 3639 - if (capable_cred(cr, CAP_DAC_READ_SEARCH)) 3640 - return 0; 3641 - #ifdef NOISE 3642 - cmn_err(CE_NOTE, "Ick: mode=%o, orgmode=%o", mode, orgmode); 3643 - #endif /* NOISE */ 3644 - return XFS_ERROR(EACCES); 3645 - } 3646 - return XFS_ERROR(EACCES); 3647 - } 3648 - 3649 - /* 3650 - * xfs_iroundup: round up argument to next power of two 3651 - */ 3652 - uint 3653 - xfs_iroundup( 3654 - uint v) 3655 - { 3656 - int i; 3657 - uint m; 3658 - 3659 - if ((v & (v - 1)) == 0) 3660 - return v; 3661 - ASSERT((v & 0x80000000) == 0); 3662 - if ((v & (v + 1)) == 0) 3663 - return v + 1; 3664 - for (i = 0, m = 1; i < 31; i++, m <<= 1) { 3665 - if (v & m) 3666 - continue; 3667 - v |= m; 3668 - if ((v & (v + 1)) == 0) 3669 - return v + 1; 3670 - } 3671 - ASSERT(0); 3672 - return( 0 ); 3673 3612 } 3674 3613 3675 3614 #ifdef XFS_ILOCK_TRACE ··· 4089 4206 return; 4090 4207 } 4091 4208 if (!is_power_of_2(new_size)){ 4092 - rnew_size = xfs_iroundup(new_size); 4209 + rnew_size = roundup_pow_of_two(new_size); 4093 4210 } 4094 4211 if (rnew_size != ifp->if_real_bytes) { 4095 4212 ifp->if_u1.if_extents = ··· 4112 4229 else { 4113 4230 new_size += ifp->if_bytes; 4114 4231 if (!is_power_of_2(new_size)) { 4115 - rnew_size = xfs_iroundup(new_size); 4232 + rnew_size = roundup_pow_of_two(new_size); 4116 4233 } 4117 4234 xfs_iext_inline_to_direct(ifp, rnew_size); 4118 4235 }
+40 -58
fs/xfs/xfs_inode.h
··· 132 132 __uint16_t da_pad; /* DMIG extra padding */ 133 133 } dm_attrs_t; 134 134 135 - typedef struct xfs_iocore { 136 - void *io_obj; /* pointer to container 137 - * inode or dcxvn structure */ 138 - struct xfs_mount *io_mount; /* fs mount struct ptr */ 139 - #ifdef DEBUG 140 - mrlock_t *io_lock; /* inode IO lock */ 141 - mrlock_t *io_iolock; /* inode IO lock */ 142 - #endif 143 - 144 - /* I/O state */ 145 - xfs_fsize_t io_new_size; /* sz when write completes */ 146 - 147 - /* Miscellaneous state. */ 148 - unsigned int io_flags; /* IO related flags */ 149 - 150 - /* DMAPI state */ 151 - dm_attrs_t io_dmattrs; 152 - 153 - } xfs_iocore_t; 154 - 155 - #define io_dmevmask io_dmattrs.da_dmevmask 156 - #define io_dmstate io_dmattrs.da_dmstate 157 - 158 - #define XFS_IO_INODE(io) ((xfs_inode_t *) ((io)->io_obj)) 159 - #define XFS_IO_DCXVN(io) ((dcxvn_t *) ((io)->io_obj)) 160 - 161 - /* 162 - * Flags in the flags field 163 - */ 164 - 165 - #define XFS_IOCORE_RT 0x1 166 - 167 - /* 168 - * xfs_iocore prototypes 169 - */ 170 - 171 - extern void xfs_iocore_inode_init(struct xfs_inode *); 172 - extern void xfs_iocore_inode_reinit(struct xfs_inode *); 173 - 174 135 /* 175 136 * This is the xfs inode cluster structure. This structure is used by 176 137 * xfs_iflush to find inodes that share a cluster and can be flushed to disk at ··· 142 181 xfs_daddr_t icl_blkno; /* starting block number of 143 182 * the cluster */ 144 183 struct xfs_buf *icl_buf; /* the inode buffer */ 145 - lock_t icl_lock; /* inode list lock */ 184 + spinlock_t icl_lock; /* inode list lock */ 146 185 } xfs_icluster_t; 147 186 148 187 /* ··· 244 283 struct xfs_inode **i_refcache; /* ptr to entry in ref cache */ 245 284 struct xfs_inode *i_release; /* inode to unref */ 246 285 #endif 247 - /* I/O state */ 248 - xfs_iocore_t i_iocore; /* I/O core */ 249 - 250 286 /* Miscellaneous state. */ 251 287 unsigned short i_flags; /* see defined flags below */ 252 288 unsigned char i_update_core; /* timestamps/size is dirty */ ··· 256 298 struct hlist_node i_cnode; /* cluster link node */ 257 299 258 300 xfs_fsize_t i_size; /* in-memory size */ 301 + xfs_fsize_t i_new_size; /* size when write completes */ 259 302 atomic_t i_iocount; /* outstanding I/O count */ 260 303 /* Trace buffers per inode. */ 261 - #ifdef XFS_VNODE_TRACE 304 + #ifdef XFS_INODE_TRACE 262 305 struct ktrace *i_trace; /* general inode trace */ 263 306 #endif 264 307 #ifdef XFS_BMAP_TRACE ··· 341 382 /* 342 383 * Fork handling. 343 384 */ 344 - #define XFS_IFORK_PTR(ip,w) \ 345 - ((w) == XFS_DATA_FORK ? &(ip)->i_df : (ip)->i_afp) 346 - #define XFS_IFORK_Q(ip) XFS_CFORK_Q(&(ip)->i_d) 347 - #define XFS_IFORK_DSIZE(ip) XFS_CFORK_DSIZE(&ip->i_d, ip->i_mount) 348 - #define XFS_IFORK_ASIZE(ip) XFS_CFORK_ASIZE(&ip->i_d, ip->i_mount) 349 - #define XFS_IFORK_SIZE(ip,w) XFS_CFORK_SIZE(&ip->i_d, ip->i_mount, w) 350 - #define XFS_IFORK_FORMAT(ip,w) XFS_CFORK_FORMAT(&ip->i_d, w) 351 - #define XFS_IFORK_FMT_SET(ip,w,n) XFS_CFORK_FMT_SET(&ip->i_d, w, n) 352 - #define XFS_IFORK_NEXTENTS(ip,w) XFS_CFORK_NEXTENTS(&ip->i_d, w) 353 - #define XFS_IFORK_NEXT_SET(ip,w,n) XFS_CFORK_NEXT_SET(&ip->i_d, w, n) 354 385 386 + #define XFS_IFORK_Q(ip) ((ip)->i_d.di_forkoff != 0) 387 + #define XFS_IFORK_BOFF(ip) ((int)((ip)->i_d.di_forkoff << 3)) 388 + 389 + #define XFS_IFORK_PTR(ip,w) \ 390 + ((w) == XFS_DATA_FORK ? \ 391 + &(ip)->i_df : \ 392 + (ip)->i_afp) 393 + #define XFS_IFORK_DSIZE(ip) \ 394 + (XFS_IFORK_Q(ip) ? 
\ 395 + XFS_IFORK_BOFF(ip) : \ 396 + XFS_LITINO((ip)->i_mount)) 397 + #define XFS_IFORK_ASIZE(ip) \ 398 + (XFS_IFORK_Q(ip) ? \ 399 + XFS_LITINO((ip)->i_mount) - XFS_IFORK_BOFF(ip) : \ 400 + 0) 401 + #define XFS_IFORK_SIZE(ip,w) \ 402 + ((w) == XFS_DATA_FORK ? \ 403 + XFS_IFORK_DSIZE(ip) : \ 404 + XFS_IFORK_ASIZE(ip)) 405 + #define XFS_IFORK_FORMAT(ip,w) \ 406 + ((w) == XFS_DATA_FORK ? \ 407 + (ip)->i_d.di_format : \ 408 + (ip)->i_d.di_aformat) 409 + #define XFS_IFORK_FMT_SET(ip,w,n) \ 410 + ((w) == XFS_DATA_FORK ? \ 411 + ((ip)->i_d.di_format = (n)) : \ 412 + ((ip)->i_d.di_aformat = (n))) 413 + #define XFS_IFORK_NEXTENTS(ip,w) \ 414 + ((w) == XFS_DATA_FORK ? \ 415 + (ip)->i_d.di_nextents : \ 416 + (ip)->i_d.di_anextents) 417 + #define XFS_IFORK_NEXT_SET(ip,w,n) \ 418 + ((w) == XFS_DATA_FORK ? \ 419 + ((ip)->i_d.di_nextents = (n)) : \ 420 + ((ip)->i_d.di_anextents = (n))) 355 421 356 422 #ifdef __KERNEL__ 357 423 ··· 493 509 void xfs_ihash_free(struct xfs_mount *); 494 510 xfs_inode_t *xfs_inode_incore(struct xfs_mount *, xfs_ino_t, 495 511 struct xfs_trans *); 496 - void xfs_inode_lock_init(xfs_inode_t *, bhv_vnode_t *); 497 512 int xfs_iget(struct xfs_mount *, struct xfs_trans *, xfs_ino_t, 498 513 uint, uint, xfs_inode_t **, xfs_daddr_t); 499 514 void xfs_iput(xfs_inode_t *, uint); ··· 528 545 struct xfs_icdinode *); 529 546 530 547 uint xfs_ip2xflags(struct xfs_inode *); 531 - uint xfs_dic2xflags(struct xfs_dinode_core *); 548 + uint xfs_dic2xflags(struct xfs_dinode *); 532 549 int xfs_ifree(struct xfs_trans *, xfs_inode_t *, 533 550 struct xfs_bmap_free *); 534 551 int xfs_itruncate_start(xfs_inode_t *, uint, xfs_fsize_t); ··· 550 567 int xfs_iextents_copy(xfs_inode_t *, xfs_bmbt_rec_t *, int); 551 568 int xfs_iflush(xfs_inode_t *, uint); 552 569 void xfs_iflush_all(struct xfs_mount *); 553 - int xfs_iaccess(xfs_inode_t *, mode_t, cred_t *); 554 - uint xfs_iroundup(uint); 555 570 void xfs_ichgtime(xfs_inode_t *, int); 556 571 xfs_fsize_t xfs_file_last_byte(xfs_inode_t *); 557 572 void xfs_lock_inodes(xfs_inode_t **, int, int, uint); 558 573 559 574 void xfs_synchronize_atime(xfs_inode_t *); 575 + void xfs_mark_inode_dirty_sync(xfs_inode_t *); 560 576 561 577 xfs_bmbt_rec_host_t *xfs_iext_get_ext(xfs_ifork_t *, xfs_extnum_t); 562 578 void xfs_iext_insert(xfs_ifork_t *, xfs_extnum_t, xfs_extnum_t,
+14 -12
fs/xfs/xfs_inode_item.c
··· 274 274 */ 275 275 xfs_synchronize_atime(ip); 276 276 277 + /* 278 + * make sure the linux inode is dirty 279 + */ 280 + xfs_mark_inode_dirty_sync(ip); 281 + 277 282 vecp->i_addr = (xfs_caddr_t)&ip->i_d; 278 283 vecp->i_len = sizeof(xfs_dinode_core_t); 279 284 XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_ICORE); ··· 620 615 return XFS_ITEM_PUSHBUF; 621 616 } else { 622 617 /* 623 - * We hold the AIL_LOCK, so we must specify the 618 + * We hold the AIL lock, so we must specify the 624 619 * NONOTIFY flag so that we won't double trip. 625 620 */ 626 621 xfs_iunlock(ip, XFS_ILOCK_SHARED|XFS_IUNLOCK_NONOTIFY); ··· 754 749 * marked delayed write. If that's the case, we'll initiate a bawrite on that 755 750 * buffer to expedite the process. 756 751 * 757 - * We aren't holding the AIL_LOCK (or the flush lock) when this gets called, 752 + * We aren't holding the AIL lock (or the flush lock) when this gets called, 758 753 * so it is inherently race-y. 759 754 */ 760 755 STATIC void ··· 797 792 if (XFS_BUF_ISDELAYWRITE(bp)) { 798 793 /* 799 794 * We were racing with iflush because we don't hold 800 - * the AIL_LOCK or the flush lock. However, at this point, 795 + * the AIL lock or the flush lock. However, at this point, 801 796 * we have the buffer, and we know that it's dirty. 802 797 * So, it's possible that iflush raced with us, and 803 798 * this item is already taken off the AIL. ··· 973 968 xfs_inode_log_item_t *iip) 974 969 { 975 970 xfs_inode_t *ip; 976 - SPLDECL(s); 977 971 978 972 ip = iip->ili_inode; 979 973 ··· 987 983 */ 988 984 if (iip->ili_logged && 989 985 (iip->ili_item.li_lsn == iip->ili_flush_lsn)) { 990 - AIL_LOCK(ip->i_mount, s); 986 + spin_lock(&ip->i_mount->m_ail_lock); 991 987 if (iip->ili_item.li_lsn == iip->ili_flush_lsn) { 992 988 /* 993 989 * xfs_trans_delete_ail() drops the AIL lock. 994 990 */ 995 991 xfs_trans_delete_ail(ip->i_mount, 996 - (xfs_log_item_t*)iip, s); 992 + (xfs_log_item_t*)iip); 997 993 } else { 998 - AIL_UNLOCK(ip->i_mount, s); 994 + spin_unlock(&ip->i_mount->m_ail_lock); 999 995 } 1000 996 } 1001 997 ··· 1029 1025 { 1030 1026 xfs_inode_log_item_t *iip; 1031 1027 xfs_mount_t *mp; 1032 - SPLDECL(s); 1033 1028 1034 1029 iip = ip->i_itemp; 1035 1030 mp = ip->i_mount; 1036 1031 if (iip) { 1037 1032 if (iip->ili_item.li_flags & XFS_LI_IN_AIL) { 1038 - AIL_LOCK(mp, s); 1033 + spin_lock(&mp->m_ail_lock); 1039 1034 if (iip->ili_item.li_flags & XFS_LI_IN_AIL) { 1040 1035 /* 1041 1036 * xfs_trans_delete_ail() drops the AIL lock. 1042 1037 */ 1043 - xfs_trans_delete_ail(mp, (xfs_log_item_t *)iip, 1044 - s); 1038 + xfs_trans_delete_ail(mp, (xfs_log_item_t *)iip); 1045 1039 } else 1046 - AIL_UNLOCK(mp, s); 1040 + spin_unlock(&mp->m_ail_lock); 1047 1041 } 1048 1042 iip->ili_logged = 0; 1049 1043 /*
-119
fs/xfs/xfs_iocore.c
··· 1 - /* 2 - * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc. 3 - * All Rights Reserved. 4 - * 5 - * This program is free software; you can redistribute it and/or 6 - * modify it under the terms of the GNU General Public License as 7 - * published by the Free Software Foundation. 8 - * 9 - * This program is distributed in the hope that it would be useful, 10 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 - * GNU General Public License for more details. 13 - * 14 - * You should have received a copy of the GNU General Public License 15 - * along with this program; if not, write the Free Software Foundation, 16 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 - */ 18 - #include "xfs.h" 19 - #include "xfs_fs.h" 20 - #include "xfs_types.h" 21 - #include "xfs_bit.h" 22 - #include "xfs_log.h" 23 - #include "xfs_inum.h" 24 - #include "xfs_trans.h" 25 - #include "xfs_sb.h" 26 - #include "xfs_ag.h" 27 - #include "xfs_dir2.h" 28 - #include "xfs_dfrag.h" 29 - #include "xfs_dmapi.h" 30 - #include "xfs_mount.h" 31 - #include "xfs_bmap_btree.h" 32 - #include "xfs_alloc_btree.h" 33 - #include "xfs_ialloc_btree.h" 34 - #include "xfs_dir2_sf.h" 35 - #include "xfs_attr_sf.h" 36 - #include "xfs_dinode.h" 37 - #include "xfs_inode.h" 38 - #include "xfs_inode_item.h" 39 - #include "xfs_itable.h" 40 - #include "xfs_btree.h" 41 - #include "xfs_alloc.h" 42 - #include "xfs_ialloc.h" 43 - #include "xfs_bmap.h" 44 - #include "xfs_error.h" 45 - #include "xfs_rw.h" 46 - #include "xfs_quota.h" 47 - #include "xfs_trans_space.h" 48 - #include "xfs_iomap.h" 49 - 50 - 51 - STATIC xfs_fsize_t 52 - xfs_size_fn( 53 - xfs_inode_t *ip) 54 - { 55 - return XFS_ISIZE(ip); 56 - } 57 - 58 - STATIC int 59 - xfs_ioinit( 60 - struct xfs_mount *mp, 61 - struct xfs_mount_args *mntargs, 62 - int flags) 63 - { 64 - return xfs_mountfs(mp, flags); 65 - } 66 - 67 - xfs_ioops_t xfs_iocore_xfs = { 68 - .xfs_ioinit = (xfs_ioinit_t) xfs_ioinit, 69 - .xfs_bmapi_func = (xfs_bmapi_t) xfs_bmapi, 70 - .xfs_bunmapi_func = (xfs_bunmapi_t) xfs_bunmapi, 71 - .xfs_bmap_eof_func = (xfs_bmap_eof_t) xfs_bmap_eof, 72 - .xfs_iomap_write_direct = 73 - (xfs_iomap_write_direct_t) xfs_iomap_write_direct, 74 - .xfs_iomap_write_delay = 75 - (xfs_iomap_write_delay_t) xfs_iomap_write_delay, 76 - .xfs_iomap_write_allocate = 77 - (xfs_iomap_write_allocate_t) xfs_iomap_write_allocate, 78 - .xfs_iomap_write_unwritten = 79 - (xfs_iomap_write_unwritten_t) xfs_iomap_write_unwritten, 80 - .xfs_ilock = (xfs_lock_t) xfs_ilock, 81 - .xfs_lck_map_shared = (xfs_lck_map_shared_t) xfs_ilock_map_shared, 82 - .xfs_ilock_demote = (xfs_lock_demote_t) xfs_ilock_demote, 83 - .xfs_ilock_nowait = (xfs_lock_nowait_t) xfs_ilock_nowait, 84 - .xfs_unlock = (xfs_unlk_t) xfs_iunlock, 85 - .xfs_size_func = (xfs_size_t) xfs_size_fn, 86 - .xfs_iodone = (xfs_iodone_t) fs_noerr, 87 - .xfs_swap_extents_func = (xfs_swap_extents_t) xfs_swap_extents, 88 - }; 89 - 90 - void 91 - xfs_iocore_inode_reinit( 92 - xfs_inode_t *ip) 93 - { 94 - xfs_iocore_t *io = &ip->i_iocore; 95 - 96 - io->io_flags = 0; 97 - if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) 98 - io->io_flags |= XFS_IOCORE_RT; 99 - io->io_dmevmask = ip->i_d.di_dmevmask; 100 - io->io_dmstate = ip->i_d.di_dmstate; 101 - } 102 - 103 - void 104 - xfs_iocore_inode_init( 105 - xfs_inode_t *ip) 106 - { 107 - xfs_iocore_t *io = &ip->i_iocore; 108 - xfs_mount_t *mp = ip->i_mount; 109 - 110 - io->io_mount = mp; 111 - #ifdef DEBUG 112 - io->io_lock = 
&ip->i_lock; 113 - io->io_iolock = &ip->i_iolock; 114 - #endif 115 - 116 - io->io_obj = (void *)ip; 117 - 118 - xfs_iocore_inode_reinit(ip); 119 - }
+108 -104
fs/xfs/xfs_iomap.c
··· 53 53 void 54 54 xfs_iomap_enter_trace( 55 55 int tag, 56 - xfs_iocore_t *io, 56 + xfs_inode_t *ip, 57 57 xfs_off_t offset, 58 58 ssize_t count) 59 59 { 60 - xfs_inode_t *ip = XFS_IO_INODE(io); 61 - 62 60 if (!ip->i_rwtrace) 63 61 return; 64 62 ··· 68 70 (void *)((unsigned long)((offset >> 32) & 0xffffffff)), 69 71 (void *)((unsigned long)(offset & 0xffffffff)), 70 72 (void *)((unsigned long)count), 71 - (void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)), 72 - (void *)((unsigned long)(io->io_new_size & 0xffffffff)), 73 + (void *)((unsigned long)((ip->i_new_size >> 32) & 0xffffffff)), 74 + (void *)((unsigned long)(ip->i_new_size & 0xffffffff)), 73 75 (void *)((unsigned long)current_pid()), 74 76 (void *)NULL, 75 77 (void *)NULL, ··· 82 84 void 83 85 xfs_iomap_map_trace( 84 86 int tag, 85 - xfs_iocore_t *io, 87 + xfs_inode_t *ip, 86 88 xfs_off_t offset, 87 89 ssize_t count, 88 90 xfs_iomap_t *iomapp, 89 91 xfs_bmbt_irec_t *imapp, 90 92 int flags) 91 93 { 92 - xfs_inode_t *ip = XFS_IO_INODE(io); 93 - 94 94 if (!ip->i_rwtrace) 95 95 return; 96 96 ··· 122 126 123 127 STATIC int 124 128 xfs_imap_to_bmap( 125 - xfs_iocore_t *io, 129 + xfs_inode_t *ip, 126 130 xfs_off_t offset, 127 131 xfs_bmbt_irec_t *imap, 128 132 xfs_iomap_t *iomapp, ··· 130 134 int iomaps, /* Number of iomap entries */ 131 135 int flags) 132 136 { 133 - xfs_mount_t *mp; 137 + xfs_mount_t *mp = ip->i_mount; 134 138 int pbm; 135 139 xfs_fsblock_t start_block; 136 140 137 - mp = io->io_mount; 138 141 139 142 for (pbm = 0; imaps && pbm < iomaps; imaps--, iomapp++, imap++, pbm++) { 140 143 iomapp->iomap_offset = XFS_FSB_TO_B(mp, imap->br_startoff); ··· 141 146 iomapp->iomap_bsize = XFS_FSB_TO_B(mp, imap->br_blockcount); 142 147 iomapp->iomap_flags = flags; 143 148 144 - if (io->io_flags & XFS_IOCORE_RT) { 149 + if (XFS_IS_REALTIME_INODE(ip)) { 145 150 iomapp->iomap_flags |= IOMAP_REALTIME; 146 151 iomapp->iomap_target = mp->m_rtdev_targp; 147 152 } else { ··· 155 160 iomapp->iomap_bn = IOMAP_DADDR_NULL; 156 161 iomapp->iomap_flags |= IOMAP_DELAY; 157 162 } else { 158 - iomapp->iomap_bn = XFS_FSB_TO_DB_IO(io, start_block); 163 + iomapp->iomap_bn = XFS_FSB_TO_DB(ip, start_block); 159 164 if (ISUNWRITTEN(imap)) 160 165 iomapp->iomap_flags |= IOMAP_UNWRITTEN; 161 166 } ··· 167 172 168 173 int 169 174 xfs_iomap( 170 - xfs_iocore_t *io, 175 + xfs_inode_t *ip, 171 176 xfs_off_t offset, 172 177 ssize_t count, 173 178 int flags, 174 179 xfs_iomap_t *iomapp, 175 180 int *niomaps) 176 181 { 177 - xfs_mount_t *mp = io->io_mount; 182 + xfs_mount_t *mp = ip->i_mount; 178 183 xfs_fileoff_t offset_fsb, end_fsb; 179 184 int error = 0; 180 185 int lockmode = 0; ··· 183 188 int bmapi_flags = 0; 184 189 int iomap_flags = 0; 185 190 191 + ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG); 192 + 186 193 if (XFS_FORCED_SHUTDOWN(mp)) 187 194 return XFS_ERROR(EIO); 188 195 189 - switch (flags & 190 - (BMAPI_READ | BMAPI_WRITE | BMAPI_ALLOCATE | 191 - BMAPI_UNWRITTEN | BMAPI_DEVICE)) { 196 + switch (flags & (BMAPI_READ | BMAPI_WRITE | BMAPI_ALLOCATE)) { 192 197 case BMAPI_READ: 193 - xfs_iomap_enter_trace(XFS_IOMAP_READ_ENTER, io, offset, count); 194 - lockmode = XFS_LCK_MAP_SHARED(mp, io); 198 + xfs_iomap_enter_trace(XFS_IOMAP_READ_ENTER, ip, offset, count); 199 + lockmode = xfs_ilock_map_shared(ip); 195 200 bmapi_flags = XFS_BMAPI_ENTIRE; 196 201 break; 197 202 case BMAPI_WRITE: 198 - xfs_iomap_enter_trace(XFS_IOMAP_WRITE_ENTER, io, offset, count); 203 + xfs_iomap_enter_trace(XFS_IOMAP_WRITE_ENTER, ip, offset, count); 199 204 lockmode = 
XFS_ILOCK_EXCL|XFS_EXTSIZE_WR; 200 205 if (flags & BMAPI_IGNSTATE) 201 206 bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE; 202 - XFS_ILOCK(mp, io, lockmode); 207 + xfs_ilock(ip, lockmode); 203 208 break; 204 209 case BMAPI_ALLOCATE: 205 - xfs_iomap_enter_trace(XFS_IOMAP_ALLOC_ENTER, io, offset, count); 210 + xfs_iomap_enter_trace(XFS_IOMAP_ALLOC_ENTER, ip, offset, count); 206 211 lockmode = XFS_ILOCK_SHARED|XFS_EXTSIZE_RD; 207 212 bmapi_flags = XFS_BMAPI_ENTIRE; 213 + 208 214 /* Attempt non-blocking lock */ 209 215 if (flags & BMAPI_TRYLOCK) { 210 - if (!XFS_ILOCK_NOWAIT(mp, io, lockmode)) 216 + if (!xfs_ilock_nowait(ip, lockmode)) 211 217 return XFS_ERROR(EAGAIN); 212 218 } else { 213 - XFS_ILOCK(mp, io, lockmode); 219 + xfs_ilock(ip, lockmode); 214 220 } 215 221 break; 216 - case BMAPI_UNWRITTEN: 217 - goto phase2; 218 - case BMAPI_DEVICE: 219 - lockmode = XFS_LCK_MAP_SHARED(mp, io); 220 - iomapp->iomap_target = io->io_flags & XFS_IOCORE_RT ? 221 - mp->m_rtdev_targp : mp->m_ddev_targp; 222 - error = 0; 223 - *niomaps = 1; 224 - goto out; 225 222 default: 226 223 BUG(); 227 224 } ··· 224 237 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); 225 238 offset_fsb = XFS_B_TO_FSBT(mp, offset); 226 239 227 - error = XFS_BMAPI(mp, NULL, io, offset_fsb, 240 + error = xfs_bmapi(NULL, ip, offset_fsb, 228 241 (xfs_filblks_t)(end_fsb - offset_fsb), 229 242 bmapi_flags, NULL, 0, &imap, 230 243 &nimaps, NULL, NULL); ··· 232 245 if (error) 233 246 goto out; 234 247 235 - phase2: 236 - switch (flags & (BMAPI_WRITE|BMAPI_ALLOCATE|BMAPI_UNWRITTEN)) { 248 + switch (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)) { 237 249 case BMAPI_WRITE: 238 250 /* If we found an extent, return it */ 239 251 if (nimaps && 240 252 (imap.br_startblock != HOLESTARTBLOCK) && 241 253 (imap.br_startblock != DELAYSTARTBLOCK)) { 242 - xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, io, 254 + xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, ip, 243 255 offset, count, iomapp, &imap, flags); 244 256 break; 245 257 } 246 258 247 259 if (flags & (BMAPI_DIRECT|BMAPI_MMAP)) { 248 - error = XFS_IOMAP_WRITE_DIRECT(mp, io, offset, 249 - count, flags, &imap, &nimaps, nimaps); 260 + error = xfs_iomap_write_direct(ip, offset, count, flags, 261 + &imap, &nimaps, nimaps); 250 262 } else { 251 - error = XFS_IOMAP_WRITE_DELAY(mp, io, offset, count, 252 - flags, &imap, &nimaps); 263 + error = xfs_iomap_write_delay(ip, offset, count, flags, 264 + &imap, &nimaps); 253 265 } 254 266 if (!error) { 255 - xfs_iomap_map_trace(XFS_IOMAP_ALLOC_MAP, io, 267 + xfs_iomap_map_trace(XFS_IOMAP_ALLOC_MAP, ip, 256 268 offset, count, iomapp, &imap, flags); 257 269 } 258 270 iomap_flags = IOMAP_NEW; 259 271 break; 260 272 case BMAPI_ALLOCATE: 261 273 /* If we found an extent, return it */ 262 - XFS_IUNLOCK(mp, io, lockmode); 274 + xfs_iunlock(ip, lockmode); 263 275 lockmode = 0; 264 276 265 277 if (nimaps && !ISNULLSTARTBLOCK(imap.br_startblock)) { 266 - xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, io, 278 + xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, ip, 267 279 offset, count, iomapp, &imap, flags); 268 280 break; 269 281 } 270 282 271 - error = XFS_IOMAP_WRITE_ALLOCATE(mp, io, offset, count, 283 + error = xfs_iomap_write_allocate(ip, offset, count, 272 284 &imap, &nimaps); 273 - break; 274 - case BMAPI_UNWRITTEN: 275 - lockmode = 0; 276 - error = XFS_IOMAP_WRITE_UNWRITTEN(mp, io, offset, count); 277 - nimaps = 0; 278 285 break; 279 286 } 280 287 281 288 if (nimaps) { 282 - *niomaps = xfs_imap_to_bmap(io, offset, &imap, 289 + *niomaps = xfs_imap_to_bmap(ip, offset, &imap, 283 290 
iomapp, nimaps, *niomaps, iomap_flags); 284 291 } else if (niomaps) { 285 292 *niomaps = 0; ··· 281 300 282 301 out: 283 302 if (lockmode) 284 - XFS_IUNLOCK(mp, io, lockmode); 303 + xfs_iunlock(ip, lockmode); 285 304 return XFS_ERROR(error); 286 305 } 306 + 287 307 288 308 STATIC int 289 309 xfs_iomap_eof_align_last_fsb( 290 310 xfs_mount_t *mp, 291 - xfs_iocore_t *io, 311 + xfs_inode_t *ip, 292 312 xfs_fsize_t isize, 293 313 xfs_extlen_t extsize, 294 314 xfs_fileoff_t *last_fsb) ··· 298 316 xfs_extlen_t align; 299 317 int eof, error; 300 318 301 - if (io->io_flags & XFS_IOCORE_RT) 319 + if (XFS_IS_REALTIME_INODE(ip)) 302 320 ; 303 321 /* 304 322 * If mounted with the "-o swalloc" option, roundup the allocation ··· 329 347 } 330 348 331 349 if (new_last_fsb) { 332 - error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof); 350 + error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof); 333 351 if (error) 334 352 return error; 335 353 if (eof) ··· 398 416 int found) 399 417 { 400 418 xfs_mount_t *mp = ip->i_mount; 401 - xfs_iocore_t *io = &ip->i_iocore; 402 419 xfs_fileoff_t offset_fsb; 403 420 xfs_fileoff_t last_fsb; 404 421 xfs_filblks_t count_fsb, resaligned; ··· 427 446 extsz = xfs_get_extsz_hint(ip); 428 447 429 448 isize = ip->i_size; 430 - if (io->io_new_size > isize) 431 - isize = io->io_new_size; 449 + if (ip->i_new_size > isize) 450 + isize = ip->i_new_size; 432 451 433 452 offset_fsb = XFS_B_TO_FSBT(mp, offset); 434 453 last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count))); 435 454 if ((offset + count) > isize) { 436 - error = xfs_iomap_eof_align_last_fsb(mp, io, isize, extsz, 455 + error = xfs_iomap_eof_align_last_fsb(mp, ip, isize, extsz, 437 456 &last_fsb); 438 457 if (error) 439 458 goto error_out; ··· 500 519 */ 501 520 XFS_BMAP_INIT(&free_list, &firstfsb); 502 521 nimaps = 1; 503 - error = XFS_BMAPI(mp, tp, io, offset_fsb, count_fsb, bmapi_flag, 522 + error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, bmapi_flag, 504 523 &firstfsb, 0, &imap, &nimaps, &free_list, NULL); 505 524 if (error) 506 525 goto error0; ··· 523 542 goto error_out; 524 543 } 525 544 526 - if (unlikely(!imap.br_startblock && !(io->io_flags & XFS_IOCORE_RT))) { 545 + if (unlikely(!imap.br_startblock && 546 + !(XFS_IS_REALTIME_INODE(ip)))) { 527 547 error = xfs_cmn_err_fsblock_zero(ip, &imap); 528 548 goto error_out; 529 549 } ··· 559 577 STATIC int 560 578 xfs_iomap_eof_want_preallocate( 561 579 xfs_mount_t *mp, 562 - xfs_iocore_t *io, 580 + xfs_inode_t *ip, 563 581 xfs_fsize_t isize, 564 582 xfs_off_t offset, 565 583 size_t count, ··· 586 604 while (count_fsb > 0) { 587 605 imaps = nimaps; 588 606 firstblock = NULLFSBLOCK; 589 - error = XFS_BMAPI(mp, NULL, io, start_fsb, count_fsb, 0, 607 + error = xfs_bmapi(NULL, ip, start_fsb, count_fsb, 0, 590 608 &firstblock, 0, imap, &imaps, NULL, NULL); 591 609 if (error) 592 610 return error; ··· 612 630 int *nmaps) 613 631 { 614 632 xfs_mount_t *mp = ip->i_mount; 615 - xfs_iocore_t *io = &ip->i_iocore; 616 633 xfs_fileoff_t offset_fsb; 617 634 xfs_fileoff_t last_fsb; 618 635 xfs_off_t aligned_offset; ··· 639 658 640 659 retry: 641 660 isize = ip->i_size; 642 - if (io->io_new_size > isize) 643 - isize = io->io_new_size; 661 + if (ip->i_new_size > isize) 662 + isize = ip->i_new_size; 644 663 645 - error = xfs_iomap_eof_want_preallocate(mp, io, isize, offset, count, 664 + error = xfs_iomap_eof_want_preallocate(mp, ip, isize, offset, count, 646 665 ioflag, imap, XFS_WRITE_IMAPS, &prealloc); 647 666 if (error) 648 667 return error; ··· 656 675 } 657 
676 658 677 if (prealloc || extsz) { 659 - error = xfs_iomap_eof_align_last_fsb(mp, io, isize, extsz, 678 + error = xfs_iomap_eof_align_last_fsb(mp, ip, isize, extsz, 660 679 &last_fsb); 661 680 if (error) 662 681 return error; ··· 664 683 665 684 nimaps = XFS_WRITE_IMAPS; 666 685 firstblock = NULLFSBLOCK; 667 - error = XFS_BMAPI(mp, NULL, io, offset_fsb, 686 + error = xfs_bmapi(NULL, ip, offset_fsb, 668 687 (xfs_filblks_t)(last_fsb - offset_fsb), 669 688 XFS_BMAPI_DELAY | XFS_BMAPI_WRITE | 670 689 XFS_BMAPI_ENTIRE, &firstblock, 1, imap, ··· 678 697 */ 679 698 if (nimaps == 0) { 680 699 xfs_iomap_enter_trace(XFS_IOMAP_WRITE_NOSPACE, 681 - io, offset, count); 700 + ip, offset, count); 682 701 if (xfs_flush_space(ip, &fsynced, &ioflag)) 683 702 return XFS_ERROR(ENOSPC); 684 703 ··· 686 705 goto retry; 687 706 } 688 707 689 - if (unlikely(!imap[0].br_startblock && !(io->io_flags & XFS_IOCORE_RT))) 708 + if (unlikely(!imap[0].br_startblock && 709 + !(XFS_IS_REALTIME_INODE(ip)))) 690 710 return xfs_cmn_err_fsblock_zero(ip, &imap[0]); 691 711 692 712 *ret_imap = imap[0]; ··· 702 720 * the originating callers request. 703 721 * 704 722 * Called without a lock on the inode. 723 + * 724 + * We no longer bother to look at the incoming map - all we have to 725 + * guarantee is that whatever we allocate fills the required range. 705 726 */ 706 727 int 707 728 xfs_iomap_write_allocate( ··· 715 730 int *retmap) 716 731 { 717 732 xfs_mount_t *mp = ip->i_mount; 718 - xfs_iocore_t *io = &ip->i_iocore; 719 733 xfs_fileoff_t offset_fsb, last_block; 720 734 xfs_fileoff_t end_fsb, map_start_fsb; 721 735 xfs_fsblock_t first_block; 722 736 xfs_bmap_free_t free_list; 723 737 xfs_filblks_t count_fsb; 724 - xfs_bmbt_irec_t imap[XFS_STRAT_WRITE_IMAPS]; 738 + xfs_bmbt_irec_t imap; 725 739 xfs_trans_t *tp; 726 - int i, nimaps, committed; 740 + int nimaps, committed; 727 741 int error = 0; 728 742 int nres; 729 743 ··· 769 785 770 786 XFS_BMAP_INIT(&free_list, &first_block); 771 787 772 - nimaps = XFS_STRAT_WRITE_IMAPS; 773 788 /* 774 - * Ensure we don't go beyond eof - it is possible 775 - * the extents changed since we did the read call, 776 - * we dropped the ilock in the interim. 789 + * it is possible that the extents have changed since 790 + * we did the read call as we dropped the ilock for a 791 + * while. We have to be careful about truncates or hole 792 + * punchs here - we are not allowed to allocate 793 + * non-delalloc blocks here. 794 + * 795 + * The only protection against truncation is the pages 796 + * for the range we are being asked to convert are 797 + * locked and hence a truncate will block on them 798 + * first. 799 + * 800 + * As a result, if we go beyond the range we really 801 + * need and hit an delalloc extent boundary followed by 802 + * a hole while we have excess blocks in the map, we 803 + * will fill the hole incorrectly and overrun the 804 + * transaction reservation. 805 + * 806 + * Using a single map prevents this as we are forced to 807 + * check each map we look for overlap with the desired 808 + * range and abort as soon as we find it. Also, given 809 + * that we only return a single map, having one beyond 810 + * what we can return is probably a bit silly. 811 + * 812 + * We also need to check that we don't go beyond EOF; 813 + * this is a truncate optimisation as a truncate sets 814 + * the new file size before block on the pages we 815 + * currently have locked under writeback. Because they 816 + * are about to be tossed, we don't need to write them 817 + * back.... 
777 818 */ 778 - 819 + nimaps = 1; 779 820 end_fsb = XFS_B_TO_FSB(mp, ip->i_size); 780 821 xfs_bmap_last_offset(NULL, ip, &last_block, 781 822 XFS_DATA_FORK); ··· 814 805 } 815 806 816 807 /* Go get the actual blocks */ 817 - error = XFS_BMAPI(mp, tp, io, map_start_fsb, count_fsb, 808 + error = xfs_bmapi(tp, ip, map_start_fsb, count_fsb, 818 809 XFS_BMAPI_WRITE, &first_block, 1, 819 - imap, &nimaps, &free_list, NULL); 810 + &imap, &nimaps, &free_list, NULL); 820 811 if (error) 821 812 goto trans_cancel; 822 813 ··· 835 826 * See if we were able to allocate an extent that 836 827 * covers at least part of the callers request 837 828 */ 838 - for (i = 0; i < nimaps; i++) { 839 - if (unlikely(!imap[i].br_startblock && 840 - !(io->io_flags & XFS_IOCORE_RT))) 841 - return xfs_cmn_err_fsblock_zero(ip, &imap[i]); 842 - if ((offset_fsb >= imap[i].br_startoff) && 843 - (offset_fsb < (imap[i].br_startoff + 844 - imap[i].br_blockcount))) { 845 - *map = imap[i]; 846 - *retmap = 1; 847 - XFS_STATS_INC(xs_xstrat_quick); 848 - return 0; 849 - } 850 - count_fsb -= imap[i].br_blockcount; 829 + if (unlikely(!imap.br_startblock && 830 + XFS_IS_REALTIME_INODE(ip))) 831 + return xfs_cmn_err_fsblock_zero(ip, &imap); 832 + if ((offset_fsb >= imap.br_startoff) && 833 + (offset_fsb < (imap.br_startoff + 834 + imap.br_blockcount))) { 835 + *map = imap; 836 + *retmap = 1; 837 + XFS_STATS_INC(xs_xstrat_quick); 838 + return 0; 851 839 } 852 840 853 - /* So far we have not mapped the requested part of the 841 + /* 842 + * So far we have not mapped the requested part of the 854 843 * file, just surrounding data, try again. 855 844 */ 856 - nimaps--; 857 - map_start_fsb = imap[nimaps].br_startoff + 858 - imap[nimaps].br_blockcount; 845 + count_fsb -= imap.br_blockcount; 846 + map_start_fsb = imap.br_startoff + imap.br_blockcount; 859 847 } 860 848 861 849 trans_cancel: ··· 870 864 size_t count) 871 865 { 872 866 xfs_mount_t *mp = ip->i_mount; 873 - xfs_iocore_t *io = &ip->i_iocore; 874 867 xfs_fileoff_t offset_fsb; 875 868 xfs_filblks_t count_fsb; 876 869 xfs_filblks_t numblks_fsb; ··· 882 877 int committed; 883 878 int error; 884 879 885 - xfs_iomap_enter_trace(XFS_IOMAP_UNWRITTEN, 886 - &ip->i_iocore, offset, count); 880 + xfs_iomap_enter_trace(XFS_IOMAP_UNWRITTEN, ip, offset, count); 887 881 888 882 offset_fsb = XFS_B_TO_FSBT(mp, offset); 889 883 count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count); ··· 916 912 */ 917 913 XFS_BMAP_INIT(&free_list, &firstfsb); 918 914 nimaps = 1; 919 - error = XFS_BMAPI(mp, tp, io, offset_fsb, count_fsb, 915 + error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, 920 916 XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb, 921 917 1, &imap, &nimaps, &free_list, NULL); 922 918 if (error) ··· 932 928 return XFS_ERROR(error); 933 929 934 930 if (unlikely(!imap.br_startblock && 935 - !(io->io_flags & XFS_IOCORE_RT))) 931 + !(XFS_IS_REALTIME_INODE(ip)))) 936 932 return xfs_cmn_err_fsblock_zero(ip, &imap); 937 933 938 934 if ((numblks_fsb = imap.br_blockcount) == 0) {
+1 -4
fs/xfs/xfs_iomap.h
··· 36 36 BMAPI_READ = (1 << 0), /* read extents */ 37 37 BMAPI_WRITE = (1 << 1), /* create extents */ 38 38 BMAPI_ALLOCATE = (1 << 2), /* delayed allocate to real extents */ 39 - BMAPI_UNWRITTEN = (1 << 3), /* unwritten extents to real extents */ 40 39 /* modifiers */ 41 40 BMAPI_IGNSTATE = (1 << 4), /* ignore unwritten state on read */ 42 41 BMAPI_DIRECT = (1 << 5), /* direct instead of buffered write */ 43 42 BMAPI_MMAP = (1 << 6), /* allocate for mmap write */ 44 43 BMAPI_SYNC = (1 << 7), /* sync write to flush delalloc space */ 45 44 BMAPI_TRYLOCK = (1 << 8), /* non-blocking request */ 46 - BMAPI_DEVICE = (1 << 9), /* we only want to know the device */ 47 45 } bmapi_flags_t; 48 46 49 47 ··· 71 73 iomap_flags_t iomap_flags; 72 74 } xfs_iomap_t; 73 75 74 - struct xfs_iocore; 75 76 struct xfs_inode; 76 77 struct xfs_bmbt_irec; 77 78 78 - extern int xfs_iomap(struct xfs_iocore *, xfs_off_t, ssize_t, int, 79 + extern int xfs_iomap(struct xfs_inode *, xfs_off_t, ssize_t, int, 79 80 struct xfs_iomap *, int *); 80 81 extern int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t, 81 82 int, struct xfs_bmbt_irec *, int *, int);
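With BMAPI_UNWRITTEN and BMAPI_DEVICE gone and xfs_iocore dropped from the prototype, callers hand xfs_iomap() the xfs_inode itself. A minimal caller sketch against the reworked signature; the helper name is hypothetical and not part of the patch:

	/* Hypothetical wrapper illustrating the new xfs_iomap() prototype. */
	STATIC int
	example_iomap_read(
		struct xfs_inode	*ip,
		xfs_off_t		offset,
		ssize_t			count,
		struct xfs_iomap	*iomapp)
	{
		int			niomaps = 1;

		return xfs_iomap(ip, offset, count, BMAPI_READ,
				 iomapp, &niomaps);
	}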
+6 -6
fs/xfs/xfs_itable.c
··· 170 170 buf->bs_mtime.tv_nsec = be32_to_cpu(dic->di_mtime.t_nsec); 171 171 buf->bs_ctime.tv_sec = be32_to_cpu(dic->di_ctime.t_sec); 172 172 buf->bs_ctime.tv_nsec = be32_to_cpu(dic->di_ctime.t_nsec); 173 - buf->bs_xflags = xfs_dic2xflags(dic); 173 + buf->bs_xflags = xfs_dic2xflags(dip); 174 174 buf->bs_extsize = be32_to_cpu(dic->di_extsize) << mp->m_sb.sb_blocklog; 175 175 buf->bs_extents = be32_to_cpu(dic->di_nextents); 176 176 buf->bs_gen = be32_to_cpu(dic->di_gen); ··· 291 291 dip = (xfs_dinode_t *) 292 292 xfs_buf_offset(bp, clustidx << mp->m_sb.sb_inodelog); 293 293 /* 294 - * Check the buffer containing the on-disk inode for di_nlink == 0. 294 + * Check the buffer containing the on-disk inode for di_mode == 0. 295 295 * This is to prevent xfs_bulkstat from picking up just reclaimed 296 296 * inodes that have their in-core state initialized but not flushed 297 297 * to disk yet. This is a temporary hack that would require a proper ··· 299 299 */ 300 300 if (be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC || 301 301 !XFS_DINODE_GOOD_VERSION(dip->di_core.di_version) || 302 - !dip->di_core.di_nlink) 302 + !dip->di_core.di_mode) 303 303 return 0; 304 304 if (flags & BULKSTAT_FG_QUICK) { 305 305 *dipp = dip; ··· 307 307 } 308 308 /* BULKSTAT_FG_INLINE: if attr fork is local, or not there, use it */ 309 309 aformat = dip->di_core.di_aformat; 310 - if ((XFS_CFORK_Q(&dip->di_core) == 0) || 310 + if ((XFS_DFORK_Q(dip) == 0) || 311 311 (aformat == XFS_DINODE_FMT_LOCAL) || 312 312 (aformat == XFS_DINODE_FMT_EXTENTS && !dip->di_core.di_anextents)) { 313 313 *dipp = dip; ··· 399 399 (XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog); 400 400 nimask = ~(nicluster - 1); 401 401 nbcluster = nicluster >> mp->m_sb.sb_inopblog; 402 - irbuf = kmem_zalloc_greedy(&irbsize, NBPC, NBPC * 4, 402 + irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4, 403 403 KM_SLEEP | KM_MAYFAIL | KM_LARGE); 404 404 nirbuf = irbsize / sizeof(*irbuf); 405 405 ··· 830 830 agino = XFS_INO_TO_AGINO(mp, ino); 831 831 left = *count; 832 832 *count = 0; 833 - bcount = MIN(left, (int)(NBPP / sizeof(*buffer))); 833 + bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer))); 834 834 buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP); 835 835 error = bufidx = 0; 836 836 cur = NULL;
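The bulkstat change above screens on-disk inodes by di_mode rather than di_nlink, so freshly reclaimed inodes whose in-core state has not yet been flushed are skipped. A condensed sketch of that predicate, with a hypothetical helper name that does not appear in the patch:

	/*
	 * Illustrative only: the three checks bulkstat now applies before
	 * trusting an on-disk inode - magic number, version, and a non-zero
	 * di_mode (zero mode means the inode is free or not yet on disk).
	 */
	STATIC int
	example_dinode_usable(
		xfs_dinode_t	*dip)
	{
		return be16_to_cpu(dip->di_core.di_magic) == XFS_DINODE_MAGIC &&
		       XFS_DINODE_GOOD_VERSION(dip->di_core.di_version) &&
		       dip->di_core.di_mode != 0;
	}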
+200 -216
fs/xfs/xfs_log.c
··· 399 399 { 400 400 xlog_t *log = mp->m_log; 401 401 xlog_in_core_t *iclog = (xlog_in_core_t *)iclog_hndl; 402 - int abortflg, spl; 402 + int abortflg; 403 403 404 404 cb->cb_next = NULL; 405 - spl = LOG_LOCK(log); 405 + spin_lock(&log->l_icloglock); 406 406 abortflg = (iclog->ic_state & XLOG_STATE_IOERROR); 407 407 if (!abortflg) { 408 408 ASSERT_ALWAYS((iclog->ic_state == XLOG_STATE_ACTIVE) || ··· 411 411 *(iclog->ic_callback_tail) = cb; 412 412 iclog->ic_callback_tail = &(cb->cb_next); 413 413 } 414 - LOG_UNLOCK(log, spl); 414 + spin_unlock(&log->l_icloglock); 415 415 return abortflg; 416 416 } /* xfs_log_notify */ 417 417 ··· 498 498 * Return error or zero. 499 499 */ 500 500 int 501 - xfs_log_mount(xfs_mount_t *mp, 502 - xfs_buftarg_t *log_target, 503 - xfs_daddr_t blk_offset, 504 - int num_bblks) 501 + xfs_log_mount( 502 + xfs_mount_t *mp, 503 + xfs_buftarg_t *log_target, 504 + xfs_daddr_t blk_offset, 505 + int num_bblks) 505 506 { 507 + int error; 508 + 506 509 if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) 507 510 cmn_err(CE_NOTE, "XFS mounting filesystem %s", mp->m_fsname); 508 511 else { ··· 518 515 mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks); 519 516 520 517 /* 518 + * Initialize the AIL now we have a log. 519 + */ 520 + spin_lock_init(&mp->m_ail_lock); 521 + error = xfs_trans_ail_init(mp); 522 + if (error) { 523 + cmn_err(CE_WARN, "XFS: AIL initialisation failed: error %d", error); 524 + goto error; 525 + } 526 + 527 + /* 521 528 * skip log recovery on a norecovery mount. pretend it all 522 529 * just worked. 523 530 */ 524 531 if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) { 525 - int error, readonly = (mp->m_flags & XFS_MOUNT_RDONLY); 532 + int readonly = (mp->m_flags & XFS_MOUNT_RDONLY); 526 533 527 534 if (readonly) 528 535 mp->m_flags &= ~XFS_MOUNT_RDONLY; ··· 543 530 mp->m_flags |= XFS_MOUNT_RDONLY; 544 531 if (error) { 545 532 cmn_err(CE_WARN, "XFS: log mount/recovery failed: error %d", error); 546 - xlog_dealloc_log(mp->m_log); 547 - return error; 533 + goto error; 548 534 } 549 535 } 550 536 ··· 552 540 553 541 /* End mounting message in xfs_log_mount_finish */ 554 542 return 0; 543 + error: 544 + xfs_log_unmount_dealloc(mp); 545 + return error; 555 546 } /* xfs_log_mount */ 556 547 557 548 /* ··· 621 606 xfs_log_ticket_t tic = NULL; 622 607 xfs_lsn_t lsn; 623 608 int error; 624 - SPLDECL(s); 625 609 626 610 /* the data section must be 32 bit size aligned */ 627 611 struct { ··· 673 659 } 674 660 675 661 676 - s = LOG_LOCK(log); 662 + spin_lock(&log->l_icloglock); 677 663 iclog = log->l_iclog; 678 664 iclog->ic_refcnt++; 679 - LOG_UNLOCK(log, s); 665 + spin_unlock(&log->l_icloglock); 680 666 xlog_state_want_sync(log, iclog); 681 667 (void) xlog_state_release_iclog(log, iclog); 682 668 683 - s = LOG_LOCK(log); 669 + spin_lock(&log->l_icloglock); 684 670 if (!(iclog->ic_state == XLOG_STATE_ACTIVE || 685 671 iclog->ic_state == XLOG_STATE_DIRTY)) { 686 672 if (!XLOG_FORCED_SHUTDOWN(log)) { 687 673 sv_wait(&iclog->ic_forcesema, PMEM, 688 674 &log->l_icloglock, s); 689 675 } else { 690 - LOG_UNLOCK(log, s); 676 + spin_unlock(&log->l_icloglock); 691 677 } 692 678 } else { 693 - LOG_UNLOCK(log, s); 679 + spin_unlock(&log->l_icloglock); 694 680 } 695 681 if (tic) { 696 682 xlog_trace_loggrant(log, tic, "unmount rec"); ··· 711 697 * a file system that went into forced_shutdown as 712 698 * the result of an unmount.. 
713 699 */ 714 - s = LOG_LOCK(log); 700 + spin_lock(&log->l_icloglock); 715 701 iclog = log->l_iclog; 716 702 iclog->ic_refcnt++; 717 - LOG_UNLOCK(log, s); 703 + spin_unlock(&log->l_icloglock); 718 704 719 705 xlog_state_want_sync(log, iclog); 720 706 (void) xlog_state_release_iclog(log, iclog); 721 707 722 - s = LOG_LOCK(log); 708 + spin_lock(&log->l_icloglock); 723 709 724 710 if ( ! ( iclog->ic_state == XLOG_STATE_ACTIVE 725 711 || iclog->ic_state == XLOG_STATE_DIRTY ··· 728 714 sv_wait(&iclog->ic_forcesema, PMEM, 729 715 &log->l_icloglock, s); 730 716 } else { 731 - LOG_UNLOCK(log, s); 717 + spin_unlock(&log->l_icloglock); 732 718 } 733 719 } 734 720 ··· 737 723 738 724 /* 739 725 * Deallocate log structures for unmount/relocation. 726 + * 727 + * We need to stop the aild from running before we destroy 728 + * and deallocate the log as the aild references the log. 740 729 */ 741 730 void 742 731 xfs_log_unmount_dealloc(xfs_mount_t *mp) 743 732 { 733 + xfs_trans_ail_destroy(mp); 744 734 xlog_dealloc_log(mp->m_log); 745 735 } 746 736 ··· 780 762 xlog_ticket_t *tic; 781 763 xlog_t *log = mp->m_log; 782 764 int need_bytes, free_bytes, cycle, bytes; 783 - SPLDECL(s); 784 765 785 766 if (XLOG_FORCED_SHUTDOWN(log)) 786 767 return; 787 - ASSERT(!XFS_FORCED_SHUTDOWN(mp)); 788 768 789 769 if (tail_lsn == 0) { 790 770 /* needed since sync_lsn is 64 bits */ 791 - s = LOG_LOCK(log); 771 + spin_lock(&log->l_icloglock); 792 772 tail_lsn = log->l_last_sync_lsn; 793 - LOG_UNLOCK(log, s); 773 + spin_unlock(&log->l_icloglock); 794 774 } 795 775 796 - s = GRANT_LOCK(log); 776 + spin_lock(&log->l_grant_lock); 797 777 798 778 /* Also an invalid lsn. 1 implies that we aren't passing in a valid 799 779 * tail_lsn. ··· 840 824 tic = tic->t_next; 841 825 } while (tic != log->l_reserve_headq); 842 826 } 843 - GRANT_UNLOCK(log, s); 827 + spin_unlock(&log->l_grant_lock); 844 828 } /* xfs_log_move_tail */ 845 829 846 830 /* ··· 852 836 int 853 837 xfs_log_need_covered(xfs_mount_t *mp) 854 838 { 855 - SPLDECL(s); 856 839 int needed = 0, gen; 857 840 xlog_t *log = mp->m_log; 858 841 859 842 if (!xfs_fs_writable(mp)) 860 843 return 0; 861 844 862 - s = LOG_LOCK(log); 845 + spin_lock(&log->l_icloglock); 863 846 if (((log->l_covered_state == XLOG_STATE_COVER_NEED) || 864 847 (log->l_covered_state == XLOG_STATE_COVER_NEED2)) 865 848 && !xfs_trans_first_ail(mp, &gen) ··· 871 856 } 872 857 needed = 1; 873 858 } 874 - LOG_UNLOCK(log, s); 859 + spin_unlock(&log->l_icloglock); 875 860 return needed; 876 861 } 877 862 ··· 896 881 xlog_assign_tail_lsn(xfs_mount_t *mp) 897 882 { 898 883 xfs_lsn_t tail_lsn; 899 - SPLDECL(s); 900 884 xlog_t *log = mp->m_log; 901 885 902 886 tail_lsn = xfs_trans_tail_ail(mp); 903 - s = GRANT_LOCK(log); 887 + spin_lock(&log->l_grant_lock); 904 888 if (tail_lsn != 0) { 905 889 log->l_tail_lsn = tail_lsn; 906 890 } else { 907 891 tail_lsn = log->l_tail_lsn = log->l_last_sync_lsn; 908 892 } 909 - GRANT_UNLOCK(log, s); 893 + spin_unlock(&log->l_grant_lock); 910 894 911 895 return tail_lsn; 912 896 } /* xlog_assign_tail_lsn */ ··· 925 911 * the tail. The details of this case are described below, but the end 926 912 * result is that we return the size of the log as the amount of space left. 
927 913 */ 928 - int 914 + STATIC int 929 915 xlog_space_left(xlog_t *log, int cycle, int bytes) 930 916 { 931 917 int free_bytes; ··· 1179 1165 log->l_flags |= XLOG_ACTIVE_RECOVERY; 1180 1166 1181 1167 log->l_prev_block = -1; 1182 - ASSIGN_ANY_LSN_HOST(log->l_tail_lsn, 1, 0); 1168 + log->l_tail_lsn = xlog_assign_lsn(1, 0); 1183 1169 /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */ 1184 1170 log->l_last_sync_lsn = log->l_tail_lsn; 1185 1171 log->l_curr_cycle = 1; /* 0 is bad since this is initial value */ ··· 1207 1193 ASSERT(XFS_BUF_VALUSEMA(bp) <= 0); 1208 1194 log->l_xbuf = bp; 1209 1195 1210 - spinlock_init(&log->l_icloglock, "iclog"); 1211 - spinlock_init(&log->l_grant_lock, "grhead_iclog"); 1196 + spin_lock_init(&log->l_icloglock); 1197 + spin_lock_init(&log->l_grant_lock); 1212 1198 initnsema(&log->l_flushsema, 0, "ic-flush"); 1213 1199 xlog_state_ticket_alloc(log); /* wait until after icloglock inited */ 1214 1200 ··· 1245 1231 1246 1232 head = &iclog->ic_header; 1247 1233 memset(head, 0, sizeof(xlog_rec_header_t)); 1248 - INT_SET(head->h_magicno, ARCH_CONVERT, XLOG_HEADER_MAGIC_NUM); 1249 - INT_SET(head->h_version, ARCH_CONVERT, 1234 + head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM); 1235 + head->h_version = cpu_to_be32( 1250 1236 XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1); 1251 - INT_SET(head->h_size, ARCH_CONVERT, log->l_iclog_size); 1237 + head->h_size = cpu_to_be32(log->l_iclog_size); 1252 1238 /* new fields */ 1253 - INT_SET(head->h_fmt, ARCH_CONVERT, XLOG_FMT); 1239 + head->h_fmt = cpu_to_be32(XLOG_FMT); 1254 1240 memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t)); 1255 1241 1256 1242 ··· 1307 1293 * pushes on an lsn which is further along in the log once we reach the high 1308 1294 * water mark. In this manner, we would be creating a low water mark. 1309 1295 */ 1310 - void 1296 + STATIC void 1311 1297 xlog_grant_push_ail(xfs_mount_t *mp, 1312 1298 int need_bytes) 1313 1299 { ··· 1319 1305 int threshold_block; /* block in lsn we'd like to be at */ 1320 1306 int threshold_cycle; /* lsn cycle we'd like to be at */ 1321 1307 int free_threshold; 1322 - SPLDECL(s); 1323 1308 1324 1309 ASSERT(BTOBB(need_bytes) < log->l_logBBsize); 1325 1310 1326 - s = GRANT_LOCK(log); 1311 + spin_lock(&log->l_grant_lock); 1327 1312 free_bytes = xlog_space_left(log, 1328 1313 log->l_grant_reserve_cycle, 1329 1314 log->l_grant_reserve_bytes); ··· 1344 1331 threshold_block -= log->l_logBBsize; 1345 1332 threshold_cycle += 1; 1346 1333 } 1347 - ASSIGN_ANY_LSN_HOST(threshold_lsn, threshold_cycle, 1348 - threshold_block); 1334 + threshold_lsn = xlog_assign_lsn(threshold_cycle, threshold_block); 1349 1335 1350 1336 /* Don't pass in an lsn greater than the lsn of the last 1351 1337 * log record known to be on disk. ··· 1352 1340 if (XFS_LSN_CMP(threshold_lsn, log->l_last_sync_lsn) > 0) 1353 1341 threshold_lsn = log->l_last_sync_lsn; 1354 1342 } 1355 - GRANT_UNLOCK(log, s); 1343 + spin_unlock(&log->l_grant_lock); 1356 1344 1357 1345 /* 1358 1346 * Get the transaction layer to kick the dirty buffers out to ··· 1390 1378 * is added immediately before calling bwrite(). 
1391 1379 */ 1392 1380 1393 - int 1381 + STATIC int 1394 1382 xlog_sync(xlog_t *log, 1395 1383 xlog_in_core_t *iclog) 1396 1384 { 1397 1385 xfs_caddr_t dptr; /* pointer to byte sized element */ 1398 1386 xfs_buf_t *bp; 1399 - int i, ops; 1387 + int i; 1400 1388 uint count; /* byte count of bwrite */ 1401 1389 uint count_init; /* initial count before roundup */ 1402 1390 int roundoff; /* roundoff to BB or stripe */ 1403 1391 int split = 0; /* split write into two regions */ 1404 1392 int error; 1405 - SPLDECL(s); 1406 1393 int v2 = XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb); 1407 1394 1408 1395 XFS_STATS_INC(xs_log_writes); ··· 1426 1415 roundoff < BBTOB(1))); 1427 1416 1428 1417 /* move grant heads by roundoff in sync */ 1429 - s = GRANT_LOCK(log); 1418 + spin_lock(&log->l_grant_lock); 1430 1419 xlog_grant_add_space(log, roundoff); 1431 - GRANT_UNLOCK(log, s); 1420 + spin_unlock(&log->l_grant_lock); 1432 1421 1433 1422 /* put cycle number in every block */ 1434 1423 xlog_pack_data(log, iclog, roundoff); 1435 1424 1436 1425 /* real byte length */ 1437 1426 if (v2) { 1438 - INT_SET(iclog->ic_header.h_len, 1439 - ARCH_CONVERT, 1440 - iclog->ic_offset + roundoff); 1427 + iclog->ic_header.h_len = 1428 + cpu_to_be32(iclog->ic_offset + roundoff); 1441 1429 } else { 1442 - INT_SET(iclog->ic_header.h_len, ARCH_CONVERT, iclog->ic_offset); 1430 + iclog->ic_header.h_len = 1431 + cpu_to_be32(iclog->ic_offset); 1443 1432 } 1444 - 1445 - /* put ops count in correct order */ 1446 - ops = iclog->ic_header.h_num_logops; 1447 - INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops); 1448 1433 1449 1434 bp = iclog->ic_bp; 1450 1435 ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1); 1451 1436 XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2); 1452 - XFS_BUF_SET_ADDR(bp, BLOCK_LSN(INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT))); 1437 + XFS_BUF_SET_ADDR(bp, BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn))); 1453 1438 1454 1439 XFS_STATS_ADD(xs_log_blocks, BTOBB(count)); 1455 1440 ··· 1508 1501 * a new cycle. Watch out for the header magic number 1509 1502 * case, though. 
1510 1503 */ 1511 - for (i=0; i<split; i += BBSIZE) { 1512 - INT_MOD(*(uint *)dptr, ARCH_CONVERT, +1); 1513 - if (INT_GET(*(uint *)dptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM) 1514 - INT_MOD(*(uint *)dptr, ARCH_CONVERT, +1); 1504 + for (i = 0; i < split; i += BBSIZE) { 1505 + be32_add((__be32 *)dptr, 1); 1506 + if (be32_to_cpu(*(__be32 *)dptr) == XLOG_HEADER_MAGIC_NUM) 1507 + be32_add((__be32 *)dptr, 1); 1515 1508 dptr += BBSIZE; 1516 1509 } 1517 1510 ··· 1534 1527 /* 1535 1528 * Deallocate a log structure 1536 1529 */ 1537 - void 1530 + STATIC void 1538 1531 xlog_dealloc_log(xlog_t *log) 1539 1532 { 1540 1533 xlog_in_core_t *iclog, *next_iclog; 1541 1534 xlog_ticket_t *tic, *next_tic; 1542 1535 int i; 1543 - 1544 1536 1545 1537 iclog = log->l_iclog; 1546 1538 for (i=0; i<log->l_iclog_bufs; i++) { ··· 1571 1565 tic = log->l_unmount_free; 1572 1566 while (tic) { 1573 1567 next_tic = tic->t_next; 1574 - kmem_free(tic, NBPP); 1568 + kmem_free(tic, PAGE_SIZE); 1575 1569 tic = next_tic; 1576 1570 } 1577 1571 } ··· 1598 1592 int record_cnt, 1599 1593 int copy_bytes) 1600 1594 { 1601 - SPLDECL(s); 1595 + spin_lock(&log->l_icloglock); 1602 1596 1603 - s = LOG_LOCK(log); 1604 - 1605 - iclog->ic_header.h_num_logops += record_cnt; 1597 + be32_add(&iclog->ic_header.h_num_logops, record_cnt); 1606 1598 iclog->ic_offset += copy_bytes; 1607 1599 1608 - LOG_UNLOCK(log, s); 1600 + spin_unlock(&log->l_icloglock); 1609 1601 } /* xlog_state_finish_copy */ 1610 1602 1611 1603 ··· 1756 1752 * we don't update ic_offset until the end when we know exactly how many 1757 1753 * bytes have been written out. 1758 1754 */ 1759 - int 1755 + STATIC int 1760 1756 xlog_write(xfs_mount_t * mp, 1761 1757 xfs_log_iovec_t reg[], 1762 1758 int nentries, ··· 1827 1823 1828 1824 /* start_lsn is the first lsn written to. That's all we need. */ 1829 1825 if (! *start_lsn) 1830 - *start_lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT); 1826 + *start_lsn = be64_to_cpu(iclog->ic_header.h_lsn); 1831 1827 1832 1828 /* This loop writes out as many regions as can fit in the amount 1833 1829 * of space which was allocated by xlog_state_get_iclog_space(). 
··· 1843 1839 */ 1844 1840 if (ticket->t_flags & XLOG_TIC_INITED) { 1845 1841 logop_head = (xlog_op_header_t *)ptr; 1846 - INT_SET(logop_head->oh_tid, ARCH_CONVERT, ticket->t_tid); 1842 + logop_head->oh_tid = cpu_to_be32(ticket->t_tid); 1847 1843 logop_head->oh_clientid = ticket->t_clientid; 1848 1844 logop_head->oh_len = 0; 1849 1845 logop_head->oh_flags = XLOG_START_TRANS; ··· 1857 1853 1858 1854 /* Copy log operation header directly into data section */ 1859 1855 logop_head = (xlog_op_header_t *)ptr; 1860 - INT_SET(logop_head->oh_tid, ARCH_CONVERT, ticket->t_tid); 1856 + logop_head->oh_tid = cpu_to_be32(ticket->t_tid); 1861 1857 logop_head->oh_clientid = ticket->t_clientid; 1862 1858 logop_head->oh_res2 = 0; 1863 1859 ··· 1892 1888 1893 1889 copy_off = partial_copy_len; 1894 1890 if (need_copy <= iclog->ic_size - log_offset) { /*complete write */ 1895 - INT_SET(logop_head->oh_len, ARCH_CONVERT, copy_len = need_copy); 1891 + copy_len = need_copy; 1892 + logop_head->oh_len = cpu_to_be32(copy_len); 1896 1893 if (partial_copy) 1897 1894 logop_head->oh_flags|= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS); 1898 1895 partial_copy_len = partial_copy = 0; 1899 1896 } else { /* partial write */ 1900 1897 copy_len = iclog->ic_size - log_offset; 1901 - INT_SET(logop_head->oh_len, ARCH_CONVERT, copy_len); 1898 + logop_head->oh_len = cpu_to_be32(copy_len); 1902 1899 logop_head->oh_flags |= XLOG_CONTINUE_TRANS; 1903 1900 if (partial_copy) 1904 1901 logop_head->oh_flags |= XLOG_WAS_CONT_TRANS; ··· 1997 1992 * We don't need to cover the dummy. 1998 1993 */ 1999 1994 if (!changed && 2000 - (INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT) == XLOG_COVER_OPS)) { 1995 + (be32_to_cpu(iclog->ic_header.h_num_logops) == 1996 + XLOG_COVER_OPS)) { 2001 1997 changed = 1; 2002 1998 } else { 2003 1999 /* ··· 2066 2060 lowest_lsn = 0; 2067 2061 do { 2068 2062 if (!(lsn_log->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_DIRTY))) { 2069 - lsn = INT_GET(lsn_log->ic_header.h_lsn, ARCH_CONVERT); 2063 + lsn = be64_to_cpu(lsn_log->ic_header.h_lsn); 2070 2064 if ((lsn && !lowest_lsn) || 2071 2065 (XFS_LSN_CMP(lsn, lowest_lsn) < 0)) { 2072 2066 lowest_lsn = lsn; ··· 2095 2089 int funcdidcallbacks; /* flag: function did callbacks */ 2096 2090 int repeats; /* for issuing console warnings if 2097 2091 * looping too many times */ 2098 - SPLDECL(s); 2099 2092 2100 - s = LOG_LOCK(log); 2093 + spin_lock(&log->l_icloglock); 2101 2094 first_iclog = iclog = log->l_iclog; 2102 2095 ioerrors = 0; 2103 2096 funcdidcallbacks = 0; ··· 2141 2136 * to DO_CALLBACK, we will not process it when 2142 2137 * we retry since a previous iclog is in the 2143 2138 * CALLBACK and the state cannot change since 2144 - * we are holding the LOG_LOCK. 2139 + * we are holding the l_icloglock. 2145 2140 */ 2146 2141 if (!(iclog->ic_state & 2147 2142 (XLOG_STATE_DONE_SYNC | ··· 2167 2162 */ 2168 2163 2169 2164 lowest_lsn = xlog_get_lowest_lsn(log); 2170 - if (lowest_lsn && ( 2171 - XFS_LSN_CMP( 2172 - lowest_lsn, 2173 - INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) 2174 - )<0)) { 2165 + if (lowest_lsn && 2166 + XFS_LSN_CMP(lowest_lsn, 2167 + be64_to_cpu(iclog->ic_header.h_lsn)) < 0) { 2175 2168 iclog = iclog->ic_next; 2176 2169 continue; /* Leave this iclog for 2177 2170 * another thread */ ··· 2177 2174 2178 2175 iclog->ic_state = XLOG_STATE_CALLBACK; 2179 2176 2180 - LOG_UNLOCK(log, s); 2177 + spin_unlock(&log->l_icloglock); 2181 2178 2182 2179 /* l_last_sync_lsn field protected by 2183 - * GRANT_LOCK. Don't worry about iclog's lsn. 2180 + * l_grant_lock. 
Don't worry about iclog's lsn. 2184 2181 * No one else can be here except us. 2185 2182 */ 2186 - s = GRANT_LOCK(log); 2187 - ASSERT(XFS_LSN_CMP( 2188 - log->l_last_sync_lsn, 2189 - INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) 2190 - )<=0); 2191 - log->l_last_sync_lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT); 2192 - GRANT_UNLOCK(log, s); 2183 + spin_lock(&log->l_grant_lock); 2184 + ASSERT(XFS_LSN_CMP(log->l_last_sync_lsn, 2185 + be64_to_cpu(iclog->ic_header.h_lsn)) <= 0); 2186 + log->l_last_sync_lsn = 2187 + be64_to_cpu(iclog->ic_header.h_lsn); 2188 + spin_unlock(&log->l_grant_lock); 2193 2189 2194 2190 /* 2195 2191 * Keep processing entries in the callback list ··· 2197 2195 * empty and change the state to DIRTY so that 2198 2196 * we don't miss any more callbacks being added. 2199 2197 */ 2200 - s = LOG_LOCK(log); 2198 + spin_lock(&log->l_icloglock); 2201 2199 } else { 2202 2200 ioerrors++; 2203 2201 } ··· 2206 2204 while (cb) { 2207 2205 iclog->ic_callback_tail = &(iclog->ic_callback); 2208 2206 iclog->ic_callback = NULL; 2209 - LOG_UNLOCK(log, s); 2207 + spin_unlock(&log->l_icloglock); 2210 2208 2211 2209 /* perform callbacks in the order given */ 2212 2210 for (; cb; cb = cb_next) { 2213 2211 cb_next = cb->cb_next; 2214 2212 cb->cb_func(cb->cb_arg, aborted); 2215 2213 } 2216 - s = LOG_LOCK(log); 2214 + spin_lock(&log->l_icloglock); 2217 2215 cb = iclog->ic_callback; 2218 2216 } 2219 2217 ··· 2260 2258 * 2261 2259 * SYNCING - i/o completion will go through logs 2262 2260 * DONE_SYNC - interrupt thread should be waiting for 2263 - * LOG_LOCK 2261 + * l_icloglock 2264 2262 * IOERROR - give up hope all ye who enter here 2265 2263 */ 2266 2264 if (iclog->ic_state == XLOG_STATE_WANT_SYNC || ··· 2278 2276 flushcnt = log->l_flushcnt; 2279 2277 log->l_flushcnt = 0; 2280 2278 } 2281 - LOG_UNLOCK(log, s); 2279 + spin_unlock(&log->l_icloglock); 2282 2280 while (flushcnt--) 2283 2281 vsema(&log->l_flushsema); 2284 2282 } /* xlog_state_do_callback */ ··· 2298 2296 * global state machine log lock. Assume that the calls to cvsema won't 2299 2297 * take a long time. At least we know it won't sleep. 2300 2298 */ 2301 - void 2299 + STATIC void 2302 2300 xlog_state_done_syncing( 2303 2301 xlog_in_core_t *iclog, 2304 2302 int aborted) 2305 2303 { 2306 2304 xlog_t *log = iclog->ic_log; 2307 - SPLDECL(s); 2308 2305 2309 - s = LOG_LOCK(log); 2306 + spin_lock(&log->l_icloglock); 2310 2307 2311 2308 ASSERT(iclog->ic_state == XLOG_STATE_SYNCING || 2312 2309 iclog->ic_state == XLOG_STATE_IOERROR); ··· 2321 2320 */ 2322 2321 if (iclog->ic_state != XLOG_STATE_IOERROR) { 2323 2322 if (--iclog->ic_bwritecnt == 1) { 2324 - LOG_UNLOCK(log, s); 2323 + spin_unlock(&log->l_icloglock); 2325 2324 return; 2326 2325 } 2327 2326 iclog->ic_state = XLOG_STATE_DONE_SYNC; ··· 2333 2332 * I/O, the others get to wait for the result. 2334 2333 */ 2335 2334 sv_broadcast(&iclog->ic_writesema); 2336 - LOG_UNLOCK(log, s); 2335 + spin_unlock(&log->l_icloglock); 2337 2336 xlog_state_do_callback(log, aborted, iclog); /* also cleans log */ 2338 2337 } /* xlog_state_done_syncing */ 2339 2338 ··· 2358 2357 * needs to be incremented, depending on the amount of data which 2359 2358 * is copied. 
2360 2359 */ 2361 - int 2360 + STATIC int 2362 2361 xlog_state_get_iclog_space(xlog_t *log, 2363 2362 int len, 2364 2363 xlog_in_core_t **iclogp, ··· 2366 2365 int *continued_write, 2367 2366 int *logoffsetp) 2368 2367 { 2369 - SPLDECL(s); 2370 2368 int log_offset; 2371 2369 xlog_rec_header_t *head; 2372 2370 xlog_in_core_t *iclog; 2373 2371 int error; 2374 2372 2375 2373 restart: 2376 - s = LOG_LOCK(log); 2374 + spin_lock(&log->l_icloglock); 2377 2375 if (XLOG_FORCED_SHUTDOWN(log)) { 2378 - LOG_UNLOCK(log, s); 2376 + spin_unlock(&log->l_icloglock); 2379 2377 return XFS_ERROR(EIO); 2380 2378 } 2381 2379 2382 2380 iclog = log->l_iclog; 2383 2381 if (! (iclog->ic_state == XLOG_STATE_ACTIVE)) { 2384 2382 log->l_flushcnt++; 2385 - LOG_UNLOCK(log, s); 2383 + spin_unlock(&log->l_icloglock); 2386 2384 xlog_trace_iclog(iclog, XLOG_TRACE_SLEEP_FLUSH); 2387 2385 XFS_STATS_INC(xs_log_noiclogs); 2388 2386 /* Ensure that log writes happen */ ··· 2404 2404 xlog_tic_add_region(ticket, 2405 2405 log->l_iclog_hsize, 2406 2406 XLOG_REG_TYPE_LRHEADER); 2407 - INT_SET(head->h_cycle, ARCH_CONVERT, log->l_curr_cycle); 2408 - ASSIGN_LSN(head->h_lsn, log); 2407 + head->h_cycle = cpu_to_be32(log->l_curr_cycle); 2408 + head->h_lsn = cpu_to_be64( 2409 + xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block)); 2409 2410 ASSERT(log->l_curr_block >= 0); 2410 2411 } 2411 2412 ··· 2424 2423 2425 2424 /* If I'm the only one writing to this iclog, sync it to disk */ 2426 2425 if (iclog->ic_refcnt == 1) { 2427 - LOG_UNLOCK(log, s); 2426 + spin_unlock(&log->l_icloglock); 2428 2427 if ((error = xlog_state_release_iclog(log, iclog))) 2429 2428 return error; 2430 2429 } else { 2431 2430 iclog->ic_refcnt--; 2432 - LOG_UNLOCK(log, s); 2431 + spin_unlock(&log->l_icloglock); 2433 2432 } 2434 2433 goto restart; 2435 2434 } ··· 2450 2449 *iclogp = iclog; 2451 2450 2452 2451 ASSERT(iclog->ic_offset <= iclog->ic_size); 2453 - LOG_UNLOCK(log, s); 2452 + spin_unlock(&log->l_icloglock); 2454 2453 2455 2454 *logoffsetp = log_offset; 2456 2455 return 0; ··· 2468 2467 { 2469 2468 int free_bytes; 2470 2469 int need_bytes; 2471 - SPLDECL(s); 2472 2470 #ifdef DEBUG 2473 2471 xfs_lsn_t tail_lsn; 2474 2472 #endif ··· 2479 2479 #endif 2480 2480 2481 2481 /* Is there space or do we need to sleep? 
*/ 2482 - s = GRANT_LOCK(log); 2482 + spin_lock(&log->l_grant_lock); 2483 2483 xlog_trace_loggrant(log, tic, "xlog_grant_log_space: enter"); 2484 2484 2485 2485 /* something is already sleeping; insert new transaction at end */ ··· 2502 2502 */ 2503 2503 xlog_trace_loggrant(log, tic, 2504 2504 "xlog_grant_log_space: wake 1"); 2505 - s = GRANT_LOCK(log); 2505 + spin_lock(&log->l_grant_lock); 2506 2506 } 2507 2507 if (tic->t_flags & XFS_LOG_PERM_RESERV) 2508 2508 need_bytes = tic->t_unit_res*tic->t_ocnt; ··· 2524 2524 sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s); 2525 2525 2526 2526 if (XLOG_FORCED_SHUTDOWN(log)) { 2527 - s = GRANT_LOCK(log); 2527 + spin_lock(&log->l_grant_lock); 2528 2528 goto error_return; 2529 2529 } 2530 2530 2531 2531 xlog_trace_loggrant(log, tic, 2532 2532 "xlog_grant_log_space: wake 2"); 2533 2533 xlog_grant_push_ail(log->l_mp, need_bytes); 2534 - s = GRANT_LOCK(log); 2534 + spin_lock(&log->l_grant_lock); 2535 2535 goto redo; 2536 2536 } else if (tic->t_flags & XLOG_TIC_IN_Q) 2537 2537 xlog_del_ticketq(&log->l_reserve_headq, tic); ··· 2553 2553 #endif 2554 2554 xlog_trace_loggrant(log, tic, "xlog_grant_log_space: exit"); 2555 2555 xlog_verify_grant_head(log, 1); 2556 - GRANT_UNLOCK(log, s); 2556 + spin_unlock(&log->l_grant_lock); 2557 2557 return 0; 2558 2558 2559 2559 error_return: ··· 2567 2567 */ 2568 2568 tic->t_curr_res = 0; 2569 2569 tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */ 2570 - GRANT_UNLOCK(log, s); 2570 + spin_unlock(&log->l_grant_lock); 2571 2571 return XFS_ERROR(EIO); 2572 2572 } /* xlog_grant_log_space */ 2573 2573 ··· 2581 2581 xlog_regrant_write_log_space(xlog_t *log, 2582 2582 xlog_ticket_t *tic) 2583 2583 { 2584 - SPLDECL(s); 2585 2584 int free_bytes, need_bytes; 2586 2585 xlog_ticket_t *ntic; 2587 2586 #ifdef DEBUG ··· 2598 2599 panic("regrant Recovery problem"); 2599 2600 #endif 2600 2601 2601 - s = GRANT_LOCK(log); 2602 + spin_lock(&log->l_grant_lock); 2602 2603 xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: enter"); 2603 2604 2604 2605 if (XLOG_FORCED_SHUTDOWN(log)) ··· 2637 2638 /* If we're shutting down, this tic is already 2638 2639 * off the queue */ 2639 2640 if (XLOG_FORCED_SHUTDOWN(log)) { 2640 - s = GRANT_LOCK(log); 2641 + spin_lock(&log->l_grant_lock); 2641 2642 goto error_return; 2642 2643 } 2643 2644 2644 2645 xlog_trace_loggrant(log, tic, 2645 2646 "xlog_regrant_write_log_space: wake 1"); 2646 2647 xlog_grant_push_ail(log->l_mp, tic->t_unit_res); 2647 - s = GRANT_LOCK(log); 2648 + spin_lock(&log->l_grant_lock); 2648 2649 } 2649 2650 } 2650 2651 ··· 2664 2665 2665 2666 /* If we're shutting down, this tic is already off the queue */ 2666 2667 if (XLOG_FORCED_SHUTDOWN(log)) { 2667 - s = GRANT_LOCK(log); 2668 + spin_lock(&log->l_grant_lock); 2668 2669 goto error_return; 2669 2670 } 2670 2671 2671 2672 xlog_trace_loggrant(log, tic, 2672 2673 "xlog_regrant_write_log_space: wake 2"); 2673 2674 xlog_grant_push_ail(log->l_mp, need_bytes); 2674 - s = GRANT_LOCK(log); 2675 + spin_lock(&log->l_grant_lock); 2675 2676 goto redo; 2676 2677 } else if (tic->t_flags & XLOG_TIC_IN_Q) 2677 2678 xlog_del_ticketq(&log->l_write_headq, tic); ··· 2688 2689 2689 2690 xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: exit"); 2690 2691 xlog_verify_grant_head(log, 1); 2691 - GRANT_UNLOCK(log, s); 2692 + spin_unlock(&log->l_grant_lock); 2692 2693 return 0; 2693 2694 2694 2695 ··· 2703 2704 */ 2704 2705 tic->t_curr_res = 0; 2705 2706 tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. 
*/ 2706 - GRANT_UNLOCK(log, s); 2707 + spin_unlock(&log->l_grant_lock); 2707 2708 return XFS_ERROR(EIO); 2708 2709 } /* xlog_regrant_write_log_space */ 2709 2710 ··· 2719 2720 xlog_regrant_reserve_log_space(xlog_t *log, 2720 2721 xlog_ticket_t *ticket) 2721 2722 { 2722 - SPLDECL(s); 2723 - 2724 2723 xlog_trace_loggrant(log, ticket, 2725 2724 "xlog_regrant_reserve_log_space: enter"); 2726 2725 if (ticket->t_cnt > 0) 2727 2726 ticket->t_cnt--; 2728 2727 2729 - s = GRANT_LOCK(log); 2728 + spin_lock(&log->l_grant_lock); 2730 2729 xlog_grant_sub_space(log, ticket->t_curr_res); 2731 2730 ticket->t_curr_res = ticket->t_unit_res; 2732 2731 xlog_tic_reset_res(ticket); ··· 2734 2737 2735 2738 /* just return if we still have some of the pre-reserved space */ 2736 2739 if (ticket->t_cnt > 0) { 2737 - GRANT_UNLOCK(log, s); 2740 + spin_unlock(&log->l_grant_lock); 2738 2741 return; 2739 2742 } 2740 2743 ··· 2742 2745 xlog_trace_loggrant(log, ticket, 2743 2746 "xlog_regrant_reserve_log_space: exit"); 2744 2747 xlog_verify_grant_head(log, 0); 2745 - GRANT_UNLOCK(log, s); 2748 + spin_unlock(&log->l_grant_lock); 2746 2749 ticket->t_curr_res = ticket->t_unit_res; 2747 2750 xlog_tic_reset_res(ticket); 2748 2751 } /* xlog_regrant_reserve_log_space */ ··· 2766 2769 xlog_ungrant_log_space(xlog_t *log, 2767 2770 xlog_ticket_t *ticket) 2768 2771 { 2769 - SPLDECL(s); 2770 - 2771 2772 if (ticket->t_cnt > 0) 2772 2773 ticket->t_cnt--; 2773 2774 2774 - s = GRANT_LOCK(log); 2775 + spin_lock(&log->l_grant_lock); 2775 2776 xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: enter"); 2776 2777 2777 2778 xlog_grant_sub_space(log, ticket->t_curr_res); ··· 2786 2791 2787 2792 xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: exit"); 2788 2793 xlog_verify_grant_head(log, 1); 2789 - GRANT_UNLOCK(log, s); 2794 + spin_unlock(&log->l_grant_lock); 2790 2795 xfs_log_move_tail(log->l_mp, 1); 2791 2796 } /* xlog_ungrant_log_space */ 2792 2797 ··· 2794 2799 /* 2795 2800 * Atomically put back used ticket. 2796 2801 */ 2797 - void 2802 + STATIC void 2798 2803 xlog_state_put_ticket(xlog_t *log, 2799 2804 xlog_ticket_t *tic) 2800 2805 { 2801 - unsigned long s; 2802 - 2803 - s = LOG_LOCK(log); 2806 + spin_lock(&log->l_icloglock); 2804 2807 xlog_ticket_put(log, tic); 2805 - LOG_UNLOCK(log, s); 2808 + spin_unlock(&log->l_icloglock); 2806 2809 } /* xlog_state_put_ticket */ 2807 2810 2808 2811 /* ··· 2812 2819 * 2813 2820 * 2814 2821 */ 2815 - int 2822 + STATIC int 2816 2823 xlog_state_release_iclog(xlog_t *log, 2817 2824 xlog_in_core_t *iclog) 2818 2825 { 2819 - SPLDECL(s); 2820 2826 int sync = 0; /* do we sync? 
*/ 2821 2827 2822 2828 xlog_assign_tail_lsn(log->l_mp); 2823 2829 2824 - s = LOG_LOCK(log); 2830 + spin_lock(&log->l_icloglock); 2825 2831 2826 2832 if (iclog->ic_state & XLOG_STATE_IOERROR) { 2827 - LOG_UNLOCK(log, s); 2833 + spin_unlock(&log->l_icloglock); 2828 2834 return XFS_ERROR(EIO); 2829 2835 } 2830 2836 ··· 2835 2843 iclog->ic_state == XLOG_STATE_WANT_SYNC) { 2836 2844 sync++; 2837 2845 iclog->ic_state = XLOG_STATE_SYNCING; 2838 - INT_SET(iclog->ic_header.h_tail_lsn, ARCH_CONVERT, log->l_tail_lsn); 2846 + iclog->ic_header.h_tail_lsn = cpu_to_be64(log->l_tail_lsn); 2839 2847 xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn); 2840 2848 /* cycle incremented when incrementing curr_block */ 2841 2849 } 2842 2850 2843 - LOG_UNLOCK(log, s); 2851 + spin_unlock(&log->l_icloglock); 2844 2852 2845 2853 /* 2846 2854 * We let the log lock go, so it's possible that we hit a log I/O ··· 2873 2881 if (!eventual_size) 2874 2882 eventual_size = iclog->ic_offset; 2875 2883 iclog->ic_state = XLOG_STATE_WANT_SYNC; 2876 - INT_SET(iclog->ic_header.h_prev_block, ARCH_CONVERT, log->l_prev_block); 2884 + iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block); 2877 2885 log->l_prev_block = log->l_curr_block; 2878 2886 log->l_prev_cycle = log->l_curr_cycle; 2879 2887 ··· 2931 2939 { 2932 2940 xlog_in_core_t *iclog; 2933 2941 xfs_lsn_t lsn; 2934 - SPLDECL(s); 2935 2942 2936 - s = LOG_LOCK(log); 2943 + spin_lock(&log->l_icloglock); 2937 2944 2938 2945 iclog = log->l_iclog; 2939 2946 if (iclog->ic_state & XLOG_STATE_IOERROR) { 2940 - LOG_UNLOCK(log, s); 2947 + spin_unlock(&log->l_icloglock); 2941 2948 return XFS_ERROR(EIO); 2942 2949 } 2943 2950 ··· 2969 2978 * the previous sync. 2970 2979 */ 2971 2980 iclog->ic_refcnt++; 2972 - lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT); 2981 + lsn = be64_to_cpu(iclog->ic_header.h_lsn); 2973 2982 xlog_state_switch_iclogs(log, iclog, 0); 2974 - LOG_UNLOCK(log, s); 2983 + spin_unlock(&log->l_icloglock); 2975 2984 2976 2985 if (xlog_state_release_iclog(log, iclog)) 2977 2986 return XFS_ERROR(EIO); 2978 2987 *log_flushed = 1; 2979 - s = LOG_LOCK(log); 2980 - if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) == lsn && 2988 + spin_lock(&log->l_icloglock); 2989 + if (be64_to_cpu(iclog->ic_header.h_lsn) == lsn && 2981 2990 iclog->ic_state != XLOG_STATE_DIRTY) 2982 2991 goto maybe_sleep; 2983 2992 else ··· 3002 3011 if (flags & XFS_LOG_SYNC) { 3003 3012 /* 3004 3013 * We must check if we're shutting down here, before 3005 - * we wait, while we're holding the LOG_LOCK. 3014 + * we wait, while we're holding the l_icloglock. 3006 3015 * Then we check again after waking up, in case our 3007 3016 * sleep was disturbed by a bad news. 3008 3017 */ 3009 3018 if (iclog->ic_state & XLOG_STATE_IOERROR) { 3010 - LOG_UNLOCK(log, s); 3019 + spin_unlock(&log->l_icloglock); 3011 3020 return XFS_ERROR(EIO); 3012 3021 } 3013 3022 XFS_STATS_INC(xs_log_force_sleep); ··· 3024 3033 } else { 3025 3034 3026 3035 no_sleep: 3027 - LOG_UNLOCK(log, s); 3036 + spin_unlock(&log->l_icloglock); 3028 3037 } 3029 3038 return 0; 3030 3039 } /* xlog_state_sync_all */ ··· 3042 3051 * If filesystem activity goes to zero, the iclog will get flushed only by 3043 3052 * bdflush(). 
3044 3053 */ 3045 - int 3054 + STATIC int 3046 3055 xlog_state_sync(xlog_t *log, 3047 3056 xfs_lsn_t lsn, 3048 3057 uint flags, ··· 3050 3059 { 3051 3060 xlog_in_core_t *iclog; 3052 3061 int already_slept = 0; 3053 - SPLDECL(s); 3054 - 3055 3062 3056 3063 try_again: 3057 - s = LOG_LOCK(log); 3064 + spin_lock(&log->l_icloglock); 3058 3065 iclog = log->l_iclog; 3059 3066 3060 3067 if (iclog->ic_state & XLOG_STATE_IOERROR) { 3061 - LOG_UNLOCK(log, s); 3068 + spin_unlock(&log->l_icloglock); 3062 3069 return XFS_ERROR(EIO); 3063 3070 } 3064 3071 3065 3072 do { 3066 - if (INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT) != lsn) { 3067 - iclog = iclog->ic_next; 3068 - continue; 3073 + if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) { 3074 + iclog = iclog->ic_next; 3075 + continue; 3069 3076 } 3070 3077 3071 3078 if (iclog->ic_state == XLOG_STATE_DIRTY) { 3072 - LOG_UNLOCK(log, s); 3079 + spin_unlock(&log->l_icloglock); 3073 3080 return 0; 3074 3081 } 3075 3082 ··· 3102 3113 } else { 3103 3114 iclog->ic_refcnt++; 3104 3115 xlog_state_switch_iclogs(log, iclog, 0); 3105 - LOG_UNLOCK(log, s); 3116 + spin_unlock(&log->l_icloglock); 3106 3117 if (xlog_state_release_iclog(log, iclog)) 3107 3118 return XFS_ERROR(EIO); 3108 3119 *log_flushed = 1; 3109 - s = LOG_LOCK(log); 3120 + spin_lock(&log->l_icloglock); 3110 3121 } 3111 3122 } 3112 3123 ··· 3118 3129 * gotten a log write error. 3119 3130 */ 3120 3131 if (iclog->ic_state & XLOG_STATE_IOERROR) { 3121 - LOG_UNLOCK(log, s); 3132 + spin_unlock(&log->l_icloglock); 3122 3133 return XFS_ERROR(EIO); 3123 3134 } 3124 3135 XFS_STATS_INC(xs_log_force_sleep); ··· 3132 3143 return XFS_ERROR(EIO); 3133 3144 *log_flushed = 1; 3134 3145 } else { /* just return */ 3135 - LOG_UNLOCK(log, s); 3146 + spin_unlock(&log->l_icloglock); 3136 3147 } 3137 3148 return 0; 3138 3149 3139 3150 } while (iclog != log->l_iclog); 3140 3151 3141 - LOG_UNLOCK(log, s); 3152 + spin_unlock(&log->l_icloglock); 3142 3153 return 0; 3143 3154 } /* xlog_state_sync */ 3144 3155 ··· 3147 3158 * Called when we want to mark the current iclog as being ready to sync to 3148 3159 * disk. 3149 3160 */ 3150 - void 3161 + STATIC void 3151 3162 xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog) 3152 3163 { 3153 - SPLDECL(s); 3154 - 3155 - s = LOG_LOCK(log); 3164 + spin_lock(&log->l_icloglock); 3156 3165 3157 3166 if (iclog->ic_state == XLOG_STATE_ACTIVE) { 3158 3167 xlog_state_switch_iclogs(log, iclog, 0); ··· 3159 3172 (XLOG_STATE_WANT_SYNC|XLOG_STATE_IOERROR)); 3160 3173 } 3161 3174 3162 - LOG_UNLOCK(log, s); 3175 + spin_unlock(&log->l_icloglock); 3163 3176 } /* xlog_state_want_sync */ 3164 3177 3165 3178 ··· 3180 3193 xlog_ticket_t *t_list; 3181 3194 xlog_ticket_t *next; 3182 3195 xfs_caddr_t buf; 3183 - uint i = (NBPP / sizeof(xlog_ticket_t)) - 2; 3184 - SPLDECL(s); 3196 + uint i = (PAGE_SIZE / sizeof(xlog_ticket_t)) - 2; 3185 3197 3186 3198 /* 3187 3199 * The kmem_zalloc may sleep, so we shouldn't be holding the 3188 3200 * global lock. XXXmiken: may want to use zone allocator. 
3189 3201 */ 3190 - buf = (xfs_caddr_t) kmem_zalloc(NBPP, KM_SLEEP); 3202 + buf = (xfs_caddr_t) kmem_zalloc(PAGE_SIZE, KM_SLEEP); 3191 3203 3192 - s = LOG_LOCK(log); 3204 + spin_lock(&log->l_icloglock); 3193 3205 3194 3206 /* Attach 1st ticket to Q, so we can keep track of allocated memory */ 3195 3207 t_list = (xlog_ticket_t *)buf; ··· 3217 3231 } 3218 3232 t_list->t_next = NULL; 3219 3233 log->l_tail = t_list; 3220 - LOG_UNLOCK(log, s); 3234 + spin_unlock(&log->l_icloglock); 3221 3235 } /* xlog_state_ticket_alloc */ 3222 3236 3223 3237 ··· 3259 3273 /* 3260 3274 * Grab ticket off freelist or allocation some more 3261 3275 */ 3262 - xlog_ticket_t * 3276 + STATIC xlog_ticket_t * 3263 3277 xlog_ticket_get(xlog_t *log, 3264 3278 int unit_bytes, 3265 3279 int cnt, ··· 3268 3282 { 3269 3283 xlog_ticket_t *tic; 3270 3284 uint num_headers; 3271 - SPLDECL(s); 3272 3285 3273 3286 alloc: 3274 3287 if (log->l_freelist == NULL) 3275 3288 xlog_state_ticket_alloc(log); /* potentially sleep */ 3276 3289 3277 - s = LOG_LOCK(log); 3290 + spin_lock(&log->l_icloglock); 3278 3291 if (log->l_freelist == NULL) { 3279 - LOG_UNLOCK(log, s); 3292 + spin_unlock(&log->l_icloglock); 3280 3293 goto alloc; 3281 3294 } 3282 3295 tic = log->l_freelist; ··· 3283 3298 if (log->l_freelist == NULL) 3284 3299 log->l_tail = NULL; 3285 3300 log->l_ticket_cnt--; 3286 - LOG_UNLOCK(log, s); 3301 + spin_unlock(&log->l_icloglock); 3287 3302 3288 3303 /* 3289 3304 * Permanent reservations have up to 'cnt'-1 active log operations ··· 3458 3473 __uint8_t clientid; 3459 3474 int len, i, j, k, op_len; 3460 3475 int idx; 3461 - SPLDECL(s); 3462 3476 3463 3477 /* check validity of iclog pointers */ 3464 - s = LOG_LOCK(log); 3478 + spin_lock(&log->l_icloglock); 3465 3479 icptr = log->l_iclog; 3466 3480 for (i=0; i < log->l_iclog_bufs; i++) { 3467 3481 if (icptr == NULL) ··· 3469 3485 } 3470 3486 if (icptr != log->l_iclog) 3471 3487 xlog_panic("xlog_verify_iclog: corrupt iclog ring"); 3472 - LOG_UNLOCK(log, s); 3488 + spin_unlock(&log->l_icloglock); 3473 3489 3474 3490 /* check log magic numbers */ 3475 - ptr = (xfs_caddr_t) &(iclog->ic_header); 3476 - if (INT_GET(*(uint *)ptr, ARCH_CONVERT) != XLOG_HEADER_MAGIC_NUM) 3491 + if (be32_to_cpu(iclog->ic_header.h_magicno) != XLOG_HEADER_MAGIC_NUM) 3477 3492 xlog_panic("xlog_verify_iclog: invalid magic num"); 3478 3493 3479 - for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&(iclog->ic_header))+count; 3494 + ptr = (xfs_caddr_t) &iclog->ic_header; 3495 + for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&iclog->ic_header) + count; 3480 3496 ptr += BBSIZE) { 3481 - if (INT_GET(*(uint *)ptr, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM) 3497 + if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM) 3482 3498 xlog_panic("xlog_verify_iclog: unexpected magic num"); 3483 3499 } 3484 3500 3485 3501 /* check fields */ 3486 - len = INT_GET(iclog->ic_header.h_num_logops, ARCH_CONVERT); 3502 + len = be32_to_cpu(iclog->ic_header.h_num_logops); 3487 3503 ptr = iclog->ic_datap; 3488 3504 base_ptr = ptr; 3489 3505 ophead = (xlog_op_header_t *)ptr; ··· 3501 3517 if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) { 3502 3518 j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 3503 3519 k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 3504 - clientid = GET_CLIENT_ID(xhdr[j].hic_xheader.xh_cycle_data[k], ARCH_CONVERT); 3520 + clientid = xlog_get_client_id( 3521 + xhdr[j].hic_xheader.xh_cycle_data[k]); 3505 3522 } else { 3506 - clientid = GET_CLIENT_ID(iclog->ic_header.h_cycle_data[idx], ARCH_CONVERT); 3523 + clientid = xlog_get_client_id( 3524 + 
iclog->ic_header.h_cycle_data[idx]); 3507 3525 } 3508 3526 } 3509 3527 if (clientid != XFS_TRANSACTION && clientid != XFS_LOG) ··· 3517 3531 field_offset = (__psint_t) 3518 3532 ((xfs_caddr_t)&(ophead->oh_len) - base_ptr); 3519 3533 if (syncing == B_FALSE || (field_offset & 0x1ff)) { 3520 - op_len = INT_GET(ophead->oh_len, ARCH_CONVERT); 3534 + op_len = be32_to_cpu(ophead->oh_len); 3521 3535 } else { 3522 3536 idx = BTOBBT((__psint_t)&ophead->oh_len - 3523 3537 (__psint_t)iclog->ic_datap); 3524 3538 if (idx >= (XLOG_HEADER_CYCLE_SIZE / BBSIZE)) { 3525 3539 j = idx / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 3526 3540 k = idx % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 3527 - op_len = INT_GET(xhdr[j].hic_xheader.xh_cycle_data[k], ARCH_CONVERT); 3541 + op_len = be32_to_cpu(xhdr[j].hic_xheader.xh_cycle_data[k]); 3528 3542 } else { 3529 - op_len = INT_GET(iclog->ic_header.h_cycle_data[idx], ARCH_CONVERT); 3543 + op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]); 3530 3544 } 3531 3545 } 3532 3546 ptr += sizeof(xlog_op_header_t) + op_len; ··· 3535 3549 #endif 3536 3550 3537 3551 /* 3538 - * Mark all iclogs IOERROR. LOG_LOCK is held by the caller. 3552 + * Mark all iclogs IOERROR. l_icloglock is held by the caller. 3539 3553 */ 3540 3554 STATIC int 3541 3555 xlog_state_ioerror( ··· 3583 3597 xlog_t *log; 3584 3598 int retval; 3585 3599 int dummy; 3586 - SPLDECL(s); 3587 - SPLDECL(s2); 3588 3600 3589 3601 log = mp->m_log; 3590 3602 ··· 3611 3627 * before we mark the filesystem SHUTDOWN and wake 3612 3628 * everybody up to tell the bad news. 3613 3629 */ 3614 - s = GRANT_LOCK(log); 3615 - s2 = LOG_LOCK(log); 3630 + spin_lock(&log->l_grant_lock); 3631 + spin_lock(&log->l_icloglock); 3616 3632 mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN; 3617 3633 XFS_BUF_DONE(mp->m_sb_bp); 3618 3634 /* ··· 3628 3644 */ 3629 3645 if (logerror) 3630 3646 retval = xlog_state_ioerror(log); 3631 - LOG_UNLOCK(log, s2); 3647 + spin_unlock(&log->l_icloglock); 3632 3648 3633 3649 /* 3634 3650 * We don't want anybody waiting for log reservations ··· 3651 3667 tic = tic->t_next; 3652 3668 } while (tic != log->l_write_headq); 3653 3669 } 3654 - GRANT_UNLOCK(log, s); 3670 + spin_unlock(&log->l_grant_lock); 3655 3671 3656 3672 if (! (log->l_iclog->ic_state & XLOG_STATE_IOERROR)) { 3657 3673 ASSERT(!logerror); ··· 3660 3676 * log down completely. 3661 3677 */ 3662 3678 xlog_state_sync_all(log, XFS_LOG_FORCE|XFS_LOG_SYNC, &dummy); 3663 - s2 = LOG_LOCK(log); 3679 + spin_lock(&log->l_icloglock); 3664 3680 retval = xlog_state_ioerror(log); 3665 - LOG_UNLOCK(log, s2); 3681 + spin_unlock(&log->l_icloglock); 3666 3682 } 3667 3683 /* 3668 3684 * Wake up everybody waiting on xfs_log_force. ··· 3675 3691 { 3676 3692 xlog_in_core_t *iclog; 3677 3693 3678 - s = LOG_LOCK(log); 3694 + spin_lock(&log->l_icloglock); 3679 3695 iclog = log->l_iclog; 3680 3696 do { 3681 3697 ASSERT(iclog->ic_callback == 0); 3682 3698 iclog = iclog->ic_next; 3683 3699 } while (iclog != log->l_iclog); 3684 - LOG_UNLOCK(log, s); 3700 + spin_unlock(&log->l_icloglock); 3685 3701 } 3686 3702 #endif 3687 3703 /* return non-zero if log IOERROR transition had already happened */
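Two idioms recur through the whole xfs_log.c conversion above: the SPLDECL()/LOG_LOCK()/GRANT_LOCK() wrappers become plain spin_lock()/spin_unlock() calls on l_icloglock and l_grant_lock, and the INT_GET()/INT_SET(..., ARCH_CONVERT) accessors become sparse-checkable cpu_to_be32()/be64_to_cpu() conversions on the now endian-annotated header fields. A minimal fragment showing both new idioms side by side (illustrative, not lifted verbatim from the patch; log and iclog are assumed in scope):

	spin_lock(&log->l_icloglock);		/* was: s = LOG_LOCK(log); */
	iclog->ic_header.h_lsn = cpu_to_be64(
			xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
	tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn);
	spin_unlock(&log->l_icloglock);		/* was: LOG_UNLOCK(log, s); */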
+2 -1
fs/xfs/xfs_log.h
··· 22 22 23 23 #define CYCLE_LSN(lsn) ((uint)((lsn)>>32)) 24 24 #define BLOCK_LSN(lsn) ((uint)(lsn)) 25 + 25 26 /* this is used in a spot where we might otherwise double-endian-flip */ 26 - #define CYCLE_LSN_DISK(lsn) (((uint *)&(lsn))[0]) 27 + #define CYCLE_LSN_DISK(lsn) (((__be32 *)&(lsn))[0]) 27 28 28 29 #ifdef __KERNEL__ 29 30 /*
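The only functional change here is the __be32 cast in CYCLE_LSN_DISK(), which pulls the cycle word out of an LSN that is still in disk (big-endian) byte order so it can be reused without a second flip. A tiny illustration, assuming iclog is an in-core log buffer in scope (the variable names below are hypothetical):

	__be64	disk_lsn = iclog->ic_header.h_lsn;	/* big-endian, as on disk */
	__be32	cycle_disk = CYCLE_LSN_DISK(disk_lsn);	/* no byte swap done */

	/* flipping once recovers the value the host-order macro would give */
	ASSERT(be32_to_cpu(cycle_disk) == CYCLE_LSN(be64_to_cpu(disk_lsn)));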
+37 -57
fs/xfs/xfs_log_priv.h
··· 55 55 BTOBB(XLOG_MAX_ICLOGS << (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? \ 56 56 XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT)) 57 57 58 - /* 59 - * set lsns 60 - */ 61 58 62 - #define ASSIGN_ANY_LSN_HOST(lsn,cycle,block) \ 63 - { \ 64 - (lsn) = ((xfs_lsn_t)(cycle)<<32)|(block); \ 65 - } 66 - #define ASSIGN_ANY_LSN_DISK(lsn,cycle,block) \ 67 - { \ 68 - INT_SET(((uint *)&(lsn))[0], ARCH_CONVERT, (cycle)); \ 69 - INT_SET(((uint *)&(lsn))[1], ARCH_CONVERT, (block)); \ 70 - } 71 - #define ASSIGN_LSN(lsn,log) \ 72 - ASSIGN_ANY_LSN_DISK(lsn,(log)->l_curr_cycle,(log)->l_curr_block); 59 + static inline xfs_lsn_t xlog_assign_lsn(uint cycle, uint block) 60 + { 61 + return ((xfs_lsn_t)cycle << 32) | block; 62 + } 73 63 74 - #define XLOG_SET(f,b) (((f) & (b)) == (b)) 75 - 76 - #define GET_CYCLE(ptr, arch) \ 77 - (INT_GET(*(uint *)(ptr), arch) == XLOG_HEADER_MAGIC_NUM ? \ 78 - INT_GET(*((uint *)(ptr)+1), arch) : \ 79 - INT_GET(*(uint *)(ptr), arch) \ 80 - ) 64 + static inline uint xlog_get_cycle(char *ptr) 65 + { 66 + if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM) 67 + return be32_to_cpu(*((__be32 *)ptr + 1)); 68 + else 69 + return be32_to_cpu(*(__be32 *)ptr); 70 + } 81 71 82 72 #define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1) 83 - 84 73 85 74 #ifdef __KERNEL__ 86 75 ··· 85 96 * 86 97 * this has endian issues, of course. 87 98 */ 88 - 89 - #ifndef XFS_NATIVE_HOST 90 - #define GET_CLIENT_ID(i,arch) \ 91 - ((i) & 0xff) 92 - #else 93 - #define GET_CLIENT_ID(i,arch) \ 94 - ((i) >> 24) 95 - #endif 96 - 97 - #define GRANT_LOCK(log) mutex_spinlock(&(log)->l_grant_lock) 98 - #define GRANT_UNLOCK(log, s) mutex_spinunlock(&(log)->l_grant_lock, s) 99 - #define LOG_LOCK(log) mutex_spinlock(&(log)->l_icloglock) 100 - #define LOG_UNLOCK(log, s) mutex_spinunlock(&(log)->l_icloglock, s) 99 + static inline uint xlog_get_client_id(__be32 i) 100 + { 101 + return be32_to_cpu(i) >> 24; 102 + } 101 103 102 104 #define xlog_panic(args...) cmn_err(CE_PANIC, ## args) 103 105 #define xlog_exit(args...) 
cmn_err(CE_PANIC, ## args) ··· 265 285 266 286 267 287 typedef struct xlog_op_header { 268 - xlog_tid_t oh_tid; /* transaction id of operation : 4 b */ 269 - int oh_len; /* bytes in data region : 4 b */ 270 - __uint8_t oh_clientid; /* who sent me this : 1 b */ 271 - __uint8_t oh_flags; /* : 1 b */ 272 - ushort oh_res2; /* 32 bit align : 2 b */ 288 + __be32 oh_tid; /* transaction id of operation : 4 b */ 289 + __be32 oh_len; /* bytes in data region : 4 b */ 290 + __u8 oh_clientid; /* who sent me this : 1 b */ 291 + __u8 oh_flags; /* : 1 b */ 292 + __u16 oh_res2; /* 32 bit align : 2 b */ 273 293 } xlog_op_header_t; 274 294 275 295 ··· 287 307 #endif 288 308 289 309 typedef struct xlog_rec_header { 290 - uint h_magicno; /* log record (LR) identifier : 4 */ 291 - uint h_cycle; /* write cycle of log : 4 */ 292 - int h_version; /* LR version : 4 */ 293 - int h_len; /* len in bytes; should be 64-bit aligned: 4 */ 294 - xfs_lsn_t h_lsn; /* lsn of this LR : 8 */ 295 - xfs_lsn_t h_tail_lsn; /* lsn of 1st LR w/ buffers not committed: 8 */ 296 - uint h_chksum; /* may not be used; non-zero if used : 4 */ 297 - int h_prev_block; /* block number to previous LR : 4 */ 298 - int h_num_logops; /* number of log operations in this LR : 4 */ 299 - uint h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; 310 + __be32 h_magicno; /* log record (LR) identifier : 4 */ 311 + __be32 h_cycle; /* write cycle of log : 4 */ 312 + __be32 h_version; /* LR version : 4 */ 313 + __be32 h_len; /* len in bytes; should be 64-bit aligned: 4 */ 314 + __be64 h_lsn; /* lsn of this LR : 8 */ 315 + __be64 h_tail_lsn; /* lsn of 1st LR w/ buffers not committed: 8 */ 316 + __be32 h_chksum; /* may not be used; non-zero if used : 4 */ 317 + __be32 h_prev_block; /* block number to previous LR : 4 */ 318 + __be32 h_num_logops; /* number of log operations in this LR : 4 */ 319 + __be32 h_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; 300 320 /* new fields */ 301 - int h_fmt; /* format of log record : 4 */ 302 - uuid_t h_fs_uuid; /* uuid of FS : 16 */ 303 - int h_size; /* iclog size : 4 */ 321 + __be32 h_fmt; /* format of log record : 4 */ 322 + uuid_t h_fs_uuid; /* uuid of FS : 16 */ 323 + __be32 h_size; /* iclog size : 4 */ 304 324 } xlog_rec_header_t; 305 325 306 326 typedef struct xlog_rec_ext_header { 307 - uint xh_cycle; /* write cycle of log : 4 */ 308 - uint xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; /* : 256 */ 327 + __be32 xh_cycle; /* write cycle of log : 4 */ 328 + __be32 xh_cycle_data[XLOG_HEADER_CYCLE_SIZE / BBSIZE]; /* : 256 */ 309 329 } xlog_rec_ext_header_t; 310 330 311 331 #ifdef __KERNEL__ ··· 395 415 xlog_ticket_t *l_unmount_free;/* kmem_free these addresses */ 396 416 xlog_ticket_t *l_tail; /* free list of tickets */ 397 417 xlog_in_core_t *l_iclog; /* head log queue */ 398 - lock_t l_icloglock; /* grab to change iclog state */ 418 + spinlock_t l_icloglock; /* grab to change iclog state */ 399 419 xfs_lsn_t l_tail_lsn; /* lsn of 1st LR with unflushed 400 420 * buffers */ 401 421 xfs_lsn_t l_last_sync_lsn;/* lsn of last LR on disk */ ··· 419 439 char *l_iclog_bak[XLOG_MAX_ICLOGS]; 420 440 421 441 /* The following block of fields are changed while holding grant_lock */ 422 - lock_t l_grant_lock; 442 + spinlock_t l_grant_lock; 423 443 xlog_ticket_t *l_reserve_headq; 424 444 xlog_ticket_t *l_write_headq; 425 445 int l_grant_reserve_cycle;
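The xfs_log_priv.h changes above replace the GET_CYCLE/ASSIGN_ANY_LSN_HOST/GET_CLIENT_ID macros with typed static inlines and give the on-disk record header explicit __be32/__be64 fields. A short illustrative fragment of how the new helpers compose, assuming offset points at a log block just read with xlog_bread() and blk_no is its block number, as in the recovery code that follows:

	uint		cycle = xlog_get_cycle(offset);	/* header-aware read */
	xfs_lsn_t	lsn = xlog_assign_lsn(cycle, blk_no);

	/* the packing is the inverse of the xfs_log.h extraction macros */
	ASSERT(CYCLE_LSN(lsn) == cycle && BLOCK_LSN(lsn) == blk_no);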
+92 -105
fs/xfs/xfs_log_recover.c
··· 198 198 cmn_err(CE_DEBUG, " log : uuid = "); 199 199 for (b = 0; b < 16; b++) 200 200 cmn_err(CE_DEBUG, "%02x",((uchar_t *)&head->h_fs_uuid)[b]); 201 - cmn_err(CE_DEBUG, ", fmt = %d\n", INT_GET(head->h_fmt, ARCH_CONVERT)); 201 + cmn_err(CE_DEBUG, ", fmt = %d\n", be32_to_cpu(head->h_fmt)); 202 202 } 203 203 #else 204 204 #define xlog_header_check_dump(mp, head) ··· 212 212 xfs_mount_t *mp, 213 213 xlog_rec_header_t *head) 214 214 { 215 - ASSERT(INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM); 215 + ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM); 216 216 217 217 /* 218 218 * IRIX doesn't write the h_fmt field and leaves it zeroed 219 219 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover 220 220 * a dirty log created in IRIX. 221 221 */ 222 - if (unlikely(INT_GET(head->h_fmt, ARCH_CONVERT) != XLOG_FMT)) { 222 + if (unlikely(be32_to_cpu(head->h_fmt) != XLOG_FMT)) { 223 223 xlog_warn( 224 224 "XFS: dirty log written in incompatible format - can't recover"); 225 225 xlog_header_check_dump(mp, head); ··· 245 245 xfs_mount_t *mp, 246 246 xlog_rec_header_t *head) 247 247 { 248 - ASSERT(INT_GET(head->h_magicno, ARCH_CONVERT) == XLOG_HEADER_MAGIC_NUM); 248 + ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM); 249 249 250 250 if (uuid_is_nil(&head->h_fs_uuid)) { 251 251 /* ··· 293 293 * Note that the algorithm can not be perfect because the disk will not 294 294 * necessarily be perfect. 295 295 */ 296 - int 296 + STATIC int 297 297 xlog_find_cycle_start( 298 298 xlog_t *log, 299 299 xfs_buf_t *bp, ··· 311 311 if ((error = xlog_bread(log, mid_blk, 1, bp))) 312 312 return error; 313 313 offset = xlog_align(log, mid_blk, 1, bp); 314 - mid_cycle = GET_CYCLE(offset, ARCH_CONVERT); 314 + mid_cycle = xlog_get_cycle(offset); 315 315 if (mid_cycle == cycle) { 316 316 *last_blk = mid_blk; 317 317 /* last_half_cycle == mid_cycle */ ··· 371 371 372 372 buf = xlog_align(log, i, bcount, bp); 373 373 for (j = 0; j < bcount; j++) { 374 - cycle = GET_CYCLE(buf, ARCH_CONVERT); 374 + cycle = xlog_get_cycle(buf); 375 375 if (cycle == stop_on_cycle_no) { 376 376 *new_blk = i+j; 377 377 goto out; ··· 447 447 448 448 head = (xlog_rec_header_t *)offset; 449 449 450 - if (XLOG_HEADER_MAGIC_NUM == 451 - INT_GET(head->h_magicno, ARCH_CONVERT)) 450 + if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(head->h_magicno)) 452 451 break; 453 452 454 453 if (!smallmem) ··· 479 480 * record do we update last_blk. 
480 481 */ 481 482 if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { 482 - uint h_size = INT_GET(head->h_size, ARCH_CONVERT); 483 + uint h_size = be32_to_cpu(head->h_size); 483 484 484 485 xhdrs = h_size / XLOG_HEADER_CYCLE_SIZE; 485 486 if (h_size % XLOG_HEADER_CYCLE_SIZE) ··· 488 489 xhdrs = 1; 489 490 } 490 491 491 - if (*last_blk - i + extra_bblks 492 - != BTOBB(INT_GET(head->h_len, ARCH_CONVERT)) + xhdrs) 492 + if (*last_blk - i + extra_bblks != 493 + BTOBB(be32_to_cpu(head->h_len)) + xhdrs) 493 494 *last_blk = i; 494 495 495 496 out: ··· 549 550 if ((error = xlog_bread(log, 0, 1, bp))) 550 551 goto bp_err; 551 552 offset = xlog_align(log, 0, 1, bp); 552 - first_half_cycle = GET_CYCLE(offset, ARCH_CONVERT); 553 + first_half_cycle = xlog_get_cycle(offset); 553 554 554 555 last_blk = head_blk = log_bbnum - 1; /* get cycle # of last block */ 555 556 if ((error = xlog_bread(log, last_blk, 1, bp))) 556 557 goto bp_err; 557 558 offset = xlog_align(log, last_blk, 1, bp); 558 - last_half_cycle = GET_CYCLE(offset, ARCH_CONVERT); 559 + last_half_cycle = xlog_get_cycle(offset); 559 560 ASSERT(last_half_cycle != 0); 560 561 561 562 /* ··· 807 808 if ((error = xlog_bread(log, 0, 1, bp))) 808 809 goto bread_err; 809 810 offset = xlog_align(log, 0, 1, bp); 810 - if (GET_CYCLE(offset, ARCH_CONVERT) == 0) { 811 + if (xlog_get_cycle(offset) == 0) { 811 812 *tail_blk = 0; 812 813 /* leave all other log inited values alone */ 813 814 goto exit; ··· 822 823 if ((error = xlog_bread(log, i, 1, bp))) 823 824 goto bread_err; 824 825 offset = xlog_align(log, i, 1, bp); 825 - if (XLOG_HEADER_MAGIC_NUM == 826 - INT_GET(*(uint *)offset, ARCH_CONVERT)) { 826 + if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(*(__be32 *)offset)) { 827 827 found = 1; 828 828 break; 829 829 } ··· 839 841 goto bread_err; 840 842 offset = xlog_align(log, i, 1, bp); 841 843 if (XLOG_HEADER_MAGIC_NUM == 842 - INT_GET(*(uint*)offset, ARCH_CONVERT)) { 844 + be32_to_cpu(*(__be32 *)offset)) { 843 845 found = 2; 844 846 break; 845 847 } ··· 853 855 854 856 /* find blk_no of tail of log */ 855 857 rhead = (xlog_rec_header_t *)offset; 856 - *tail_blk = BLOCK_LSN(INT_GET(rhead->h_tail_lsn, ARCH_CONVERT)); 858 + *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn)); 857 859 858 860 /* 859 861 * Reset log values according to the state of the log when we ··· 867 869 */ 868 870 log->l_prev_block = i; 869 871 log->l_curr_block = (int)*head_blk; 870 - log->l_curr_cycle = INT_GET(rhead->h_cycle, ARCH_CONVERT); 872 + log->l_curr_cycle = be32_to_cpu(rhead->h_cycle); 871 873 if (found == 2) 872 874 log->l_curr_cycle++; 873 - log->l_tail_lsn = INT_GET(rhead->h_tail_lsn, ARCH_CONVERT); 874 - log->l_last_sync_lsn = INT_GET(rhead->h_lsn, ARCH_CONVERT); 875 + log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn); 876 + log->l_last_sync_lsn = be64_to_cpu(rhead->h_lsn); 875 877 log->l_grant_reserve_cycle = log->l_curr_cycle; 876 878 log->l_grant_reserve_bytes = BBTOB(log->l_curr_block); 877 879 log->l_grant_write_cycle = log->l_curr_cycle; ··· 889 891 * unmount record rather than the block after it. 
890 892 */ 891 893 if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { 892 - int h_size = INT_GET(rhead->h_size, ARCH_CONVERT); 893 - int h_version = INT_GET(rhead->h_version, ARCH_CONVERT); 894 + int h_size = be32_to_cpu(rhead->h_size); 895 + int h_version = be32_to_cpu(rhead->h_version); 894 896 895 897 if ((h_version & XLOG_VERSION_2) && 896 898 (h_size > XLOG_HEADER_CYCLE_SIZE)) { ··· 904 906 hblks = 1; 905 907 } 906 908 after_umount_blk = (i + hblks + (int) 907 - BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT))) % log->l_logBBsize; 909 + BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize; 908 910 tail_lsn = log->l_tail_lsn; 909 911 if (*head_blk == after_umount_blk && 910 - INT_GET(rhead->h_num_logops, ARCH_CONVERT) == 1) { 912 + be32_to_cpu(rhead->h_num_logops) == 1) { 911 913 umount_data_blk = (i + hblks) % log->l_logBBsize; 912 914 if ((error = xlog_bread(log, umount_data_blk, 1, bp))) { 913 915 goto bread_err; ··· 920 922 * log records will point recovery to after the 921 923 * current unmount record. 922 924 */ 923 - ASSIGN_ANY_LSN_HOST(log->l_tail_lsn, log->l_curr_cycle, 924 - after_umount_blk); 925 - ASSIGN_ANY_LSN_HOST(log->l_last_sync_lsn, log->l_curr_cycle, 926 - after_umount_blk); 925 + log->l_tail_lsn = 926 + xlog_assign_lsn(log->l_curr_cycle, 927 + after_umount_blk); 928 + log->l_last_sync_lsn = 929 + xlog_assign_lsn(log->l_curr_cycle, 930 + after_umount_blk); 927 931 *tail_blk = after_umount_blk; 928 932 929 933 /* ··· 986 986 * -1 => use *blk_no as the first block of the log 987 987 * >0 => error has occurred 988 988 */ 989 - int 989 + STATIC int 990 990 xlog_find_zeroed( 991 991 xlog_t *log, 992 992 xfs_daddr_t *blk_no) ··· 1007 1007 if ((error = xlog_bread(log, 0, 1, bp))) 1008 1008 goto bp_err; 1009 1009 offset = xlog_align(log, 0, 1, bp); 1010 - first_cycle = GET_CYCLE(offset, ARCH_CONVERT); 1010 + first_cycle = xlog_get_cycle(offset); 1011 1011 if (first_cycle == 0) { /* completely zeroed log */ 1012 1012 *blk_no = 0; 1013 1013 xlog_put_bp(bp); ··· 1018 1018 if ((error = xlog_bread(log, log_bbnum-1, 1, bp))) 1019 1019 goto bp_err; 1020 1020 offset = xlog_align(log, log_bbnum-1, 1, bp); 1021 - last_cycle = GET_CYCLE(offset, ARCH_CONVERT); 1021 + last_cycle = xlog_get_cycle(offset); 1022 1022 if (last_cycle != 0) { /* log completely written to */ 1023 1023 xlog_put_bp(bp); 1024 1024 return 0; ··· 1098 1098 xlog_rec_header_t *recp = (xlog_rec_header_t *)buf; 1099 1099 1100 1100 memset(buf, 0, BBSIZE); 1101 - INT_SET(recp->h_magicno, ARCH_CONVERT, XLOG_HEADER_MAGIC_NUM); 1102 - INT_SET(recp->h_cycle, ARCH_CONVERT, cycle); 1103 - INT_SET(recp->h_version, ARCH_CONVERT, 1101 + recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM); 1102 + recp->h_cycle = cpu_to_be32(cycle); 1103 + recp->h_version = cpu_to_be32( 1104 1104 XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) ? 2 : 1); 1105 - ASSIGN_ANY_LSN_DISK(recp->h_lsn, cycle, block); 1106 - ASSIGN_ANY_LSN_DISK(recp->h_tail_lsn, tail_cycle, tail_block); 1107 - INT_SET(recp->h_fmt, ARCH_CONVERT, XLOG_FMT); 1105 + recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block)); 1106 + recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block)); 1107 + recp->h_fmt = cpu_to_be32(XLOG_FMT); 1108 1108 memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t)); 1109 1109 } 1110 1110 ··· 2211 2211 * overlap with future reads of those inodes. 
2212 2212 */ 2213 2213 if (XFS_DINODE_MAGIC == 2214 - INT_GET(*((__uint16_t *)(xfs_buf_offset(bp, 0))), ARCH_CONVERT) && 2214 + be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) && 2215 2215 (XFS_BUF_COUNT(bp) != MAX(log->l_mp->m_sb.sb_blocksize, 2216 2216 (__uint32_t)XFS_INODE_CLUSTER_SIZE(log->l_mp)))) { 2217 2217 XFS_BUF_STALE(bp); ··· 2581 2581 /* 2582 2582 * This type of quotas was turned off, so ignore this record. 2583 2583 */ 2584 - type = INT_GET(recddq->d_flags, ARCH_CONVERT) & 2585 - (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP); 2584 + type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP); 2586 2585 ASSERT(type); 2587 2586 if (log->l_quotaoffs_flag & type) 2588 2587 return (0); ··· 2659 2660 xfs_mount_t *mp; 2660 2661 xfs_efi_log_item_t *efip; 2661 2662 xfs_efi_log_format_t *efi_formatp; 2662 - SPLDECL(s); 2663 2663 2664 2664 if (pass == XLOG_RECOVER_PASS1) { 2665 2665 return 0; ··· 2676 2678 efip->efi_next_extent = efi_formatp->efi_nextents; 2677 2679 efip->efi_flags |= XFS_EFI_COMMITTED; 2678 2680 2679 - AIL_LOCK(mp,s); 2681 + spin_lock(&mp->m_ail_lock); 2680 2682 /* 2681 2683 * xfs_trans_update_ail() drops the AIL lock. 2682 2684 */ 2683 - xfs_trans_update_ail(mp, (xfs_log_item_t *)efip, lsn, s); 2685 + xfs_trans_update_ail(mp, (xfs_log_item_t *)efip, lsn); 2684 2686 return 0; 2685 2687 } 2686 2688 ··· 2705 2707 xfs_log_item_t *lip; 2706 2708 int gen; 2707 2709 __uint64_t efi_id; 2708 - SPLDECL(s); 2709 2710 2710 2711 if (pass == XLOG_RECOVER_PASS1) { 2711 2712 return; ··· 2722 2725 * in the AIL. 2723 2726 */ 2724 2727 mp = log->l_mp; 2725 - AIL_LOCK(mp,s); 2728 + spin_lock(&mp->m_ail_lock); 2726 2729 lip = xfs_trans_first_ail(mp, &gen); 2727 2730 while (lip != NULL) { 2728 2731 if (lip->li_type == XFS_LI_EFI) { ··· 2732 2735 * xfs_trans_delete_ail() drops the 2733 2736 * AIL lock. 2734 2737 */ 2735 - xfs_trans_delete_ail(mp, lip, s); 2736 - break; 2738 + xfs_trans_delete_ail(mp, lip); 2739 + xfs_efi_item_free(efip); 2740 + return; 2737 2741 } 2738 2742 } 2739 2743 lip = xfs_trans_next_ail(mp, lip, &gen, NULL); 2740 2744 } 2741 - 2742 - /* 2743 - * If we found it, then free it up. If it wasn't there, it 2744 - * must have been overwritten in the log. Oh well. 
2745 - */ 2746 - if (lip != NULL) { 2747 - xfs_efi_item_free(efip); 2748 - } else { 2749 - AIL_UNLOCK(mp, s); 2750 - } 2745 + spin_unlock(&mp->m_ail_lock); 2751 2746 } 2752 2747 2753 2748 /* ··· 2886 2897 unsigned long hash; 2887 2898 uint flags; 2888 2899 2889 - lp = dp + INT_GET(rhead->h_len, ARCH_CONVERT); 2890 - num_logops = INT_GET(rhead->h_num_logops, ARCH_CONVERT); 2900 + lp = dp + be32_to_cpu(rhead->h_len); 2901 + num_logops = be32_to_cpu(rhead->h_num_logops); 2891 2902 2892 2903 /* check the log format matches our own - else we can't recover */ 2893 2904 if (xlog_header_check_recover(log->l_mp, rhead)) ··· 2904 2915 ASSERT(0); 2905 2916 return (XFS_ERROR(EIO)); 2906 2917 } 2907 - tid = INT_GET(ohead->oh_tid, ARCH_CONVERT); 2918 + tid = be32_to_cpu(ohead->oh_tid); 2908 2919 hash = XLOG_RHASH(tid); 2909 2920 trans = xlog_recover_find_tid(rhash[hash], tid); 2910 2921 if (trans == NULL) { /* not found; add new tid */ 2911 2922 if (ohead->oh_flags & XLOG_START_TRANS) 2912 2923 xlog_recover_new_tid(&rhash[hash], tid, 2913 - INT_GET(rhead->h_lsn, ARCH_CONVERT)); 2924 + be64_to_cpu(rhead->h_lsn)); 2914 2925 } else { 2915 - ASSERT(dp+INT_GET(ohead->oh_len, ARCH_CONVERT) <= lp); 2926 + if (dp + be32_to_cpu(ohead->oh_len) > lp) { 2927 + xlog_warn( 2928 + "XFS: xlog_recover_process_data: bad length"); 2929 + WARN_ON(1); 2930 + return (XFS_ERROR(EIO)); 2931 + } 2916 2932 flags = ohead->oh_flags & ~XLOG_END_TRANS; 2917 2933 if (flags & XLOG_WAS_CONT_TRANS) 2918 2934 flags &= ~XLOG_CONTINUE_TRANS; ··· 2931 2937 break; 2932 2938 case XLOG_WAS_CONT_TRANS: 2933 2939 error = xlog_recover_add_to_cont_trans(trans, 2934 - dp, INT_GET(ohead->oh_len, 2935 - ARCH_CONVERT)); 2940 + dp, be32_to_cpu(ohead->oh_len)); 2936 2941 break; 2937 2942 case XLOG_START_TRANS: 2938 2943 xlog_warn( ··· 2942 2949 case 0: 2943 2950 case XLOG_CONTINUE_TRANS: 2944 2951 error = xlog_recover_add_to_trans(trans, 2945 - dp, INT_GET(ohead->oh_len, 2946 - ARCH_CONVERT)); 2952 + dp, be32_to_cpu(ohead->oh_len)); 2947 2953 break; 2948 2954 default: 2949 2955 xlog_warn( ··· 2954 2962 if (error) 2955 2963 return error; 2956 2964 } 2957 - dp += INT_GET(ohead->oh_len, ARCH_CONVERT); 2965 + dp += be32_to_cpu(ohead->oh_len); 2958 2966 num_logops--; 2959 2967 } 2960 2968 return 0; ··· 3067 3075 xfs_efi_log_item_t *efip; 3068 3076 int gen; 3069 3077 xfs_mount_t *mp; 3070 - SPLDECL(s); 3071 3078 3072 3079 mp = log->l_mp; 3073 - AIL_LOCK(mp,s); 3080 + spin_lock(&mp->m_ail_lock); 3074 3081 3075 3082 lip = xfs_trans_first_ail(mp, &gen); 3076 3083 while (lip != NULL) { ··· 3090 3099 continue; 3091 3100 } 3092 3101 3093 - AIL_UNLOCK(mp, s); 3102 + spin_unlock(&mp->m_ail_lock); 3094 3103 xlog_recover_process_efi(mp, efip); 3095 - AIL_LOCK(mp,s); 3104 + spin_lock(&mp->m_ail_lock); 3096 3105 lip = xfs_trans_next_ail(mp, lip, &gen, NULL); 3097 3106 } 3098 - AIL_UNLOCK(mp, s); 3107 + spin_unlock(&mp->m_ail_lock); 3099 3108 } 3100 3109 3101 3110 /* ··· 3306 3315 int size) 3307 3316 { 3308 3317 int i; 3309 - uint *up; 3318 + __be32 *up; 3310 3319 uint chksum = 0; 3311 3320 3312 - up = (uint *)iclog->ic_datap; 3321 + up = (__be32 *)iclog->ic_datap; 3313 3322 /* divide length by 4 to get # words */ 3314 3323 for (i = 0; i < (size >> 2); i++) { 3315 - chksum ^= INT_GET(*up, ARCH_CONVERT); 3324 + chksum ^= be32_to_cpu(*up); 3316 3325 up++; 3317 3326 } 3318 - INT_SET(iclog->ic_header.h_chksum, ARCH_CONVERT, chksum); 3327 + iclog->ic_header.h_chksum = cpu_to_be32(chksum); 3319 3328 } 3320 3329 #else 3321 3330 #define xlog_pack_data_checksum(log, iclog, size) 
··· 3332 3341 { 3333 3342 int i, j, k; 3334 3343 int size = iclog->ic_offset + roundoff; 3335 - uint cycle_lsn; 3344 + __be32 cycle_lsn; 3336 3345 xfs_caddr_t dp; 3337 3346 xlog_in_core_2_t *xhdr; 3338 3347 ··· 3343 3352 dp = iclog->ic_datap; 3344 3353 for (i = 0; i < BTOBB(size) && 3345 3354 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) { 3346 - iclog->ic_header.h_cycle_data[i] = *(uint *)dp; 3347 - *(uint *)dp = cycle_lsn; 3355 + iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp; 3356 + *(__be32 *)dp = cycle_lsn; 3348 3357 dp += BBSIZE; 3349 3358 } 3350 3359 ··· 3353 3362 for ( ; i < BTOBB(size); i++) { 3354 3363 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 3355 3364 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 3356 - xhdr[j].hic_xheader.xh_cycle_data[k] = *(uint *)dp; 3357 - *(uint *)dp = cycle_lsn; 3365 + xhdr[j].hic_xheader.xh_cycle_data[k] = *(__be32 *)dp; 3366 + *(__be32 *)dp = cycle_lsn; 3358 3367 dp += BBSIZE; 3359 3368 } 3360 3369 ··· 3371 3380 xfs_caddr_t dp, 3372 3381 xlog_t *log) 3373 3382 { 3374 - uint *up = (uint *)dp; 3383 + __be32 *up = (__be32 *)dp; 3375 3384 uint chksum = 0; 3376 3385 int i; 3377 3386 3378 3387 /* divide length by 4 to get # words */ 3379 - for (i=0; i < INT_GET(rhead->h_len, ARCH_CONVERT) >> 2; i++) { 3380 - chksum ^= INT_GET(*up, ARCH_CONVERT); 3388 + for (i=0; i < be32_to_cpu(rhead->h_len) >> 2; i++) { 3389 + chksum ^= be32_to_cpu(*up); 3381 3390 up++; 3382 3391 } 3383 - if (chksum != INT_GET(rhead->h_chksum, ARCH_CONVERT)) { 3392 + if (chksum != be32_to_cpu(rhead->h_chksum)) { 3384 3393 if (rhead->h_chksum || 3385 3394 ((log->l_flags & XLOG_CHKSUM_MISMATCH) == 0)) { 3386 3395 cmn_err(CE_DEBUG, 3387 3396 "XFS: LogR chksum mismatch: was (0x%x) is (0x%x)\n", 3388 - INT_GET(rhead->h_chksum, ARCH_CONVERT), chksum); 3397 + be32_to_cpu(rhead->h_chksum), chksum); 3389 3398 cmn_err(CE_DEBUG, 3390 3399 "XFS: Disregard message if filesystem was created with non-DEBUG kernel"); 3391 3400 if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { ··· 3409 3418 int i, j, k; 3410 3419 xlog_in_core_2_t *xhdr; 3411 3420 3412 - for (i = 0; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)) && 3421 + for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) && 3413 3422 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) { 3414 - *(uint *)dp = *(uint *)&rhead->h_cycle_data[i]; 3423 + *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i]; 3415 3424 dp += BBSIZE; 3416 3425 } 3417 3426 3418 3427 if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) { 3419 3428 xhdr = (xlog_in_core_2_t *)rhead; 3420 - for ( ; i < BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); i++) { 3429 + for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) { 3421 3430 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 3422 3431 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE); 3423 - *(uint *)dp = xhdr[j].hic_xheader.xh_cycle_data[k]; 3432 + *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k]; 3424 3433 dp += BBSIZE; 3425 3434 } 3426 3435 } ··· 3436 3445 { 3437 3446 int hlen; 3438 3447 3439 - if (unlikely( 3440 - (INT_GET(rhead->h_magicno, ARCH_CONVERT) != 3441 - XLOG_HEADER_MAGIC_NUM))) { 3448 + if (unlikely(be32_to_cpu(rhead->h_magicno) != XLOG_HEADER_MAGIC_NUM)) { 3442 3449 XFS_ERROR_REPORT("xlog_valid_rec_header(1)", 3443 3450 XFS_ERRLEVEL_LOW, log->l_mp); 3444 3451 return XFS_ERROR(EFSCORRUPTED); 3445 3452 } 3446 3453 if (unlikely( 3447 3454 (!rhead->h_version || 3448 - (INT_GET(rhead->h_version, ARCH_CONVERT) & 3449 - (~XLOG_VERSION_OKBITS)) != 0))) { 3455 + (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) { 3450 3456 xlog_warn("XFS: %s: unrecognised 
log version (%d).", 3451 - __FUNCTION__, INT_GET(rhead->h_version, ARCH_CONVERT)); 3457 + __FUNCTION__, be32_to_cpu(rhead->h_version)); 3452 3458 return XFS_ERROR(EIO); 3453 3459 } 3454 3460 3455 3461 /* LR body must have data or it wouldn't have been written */ 3456 - hlen = INT_GET(rhead->h_len, ARCH_CONVERT); 3462 + hlen = be32_to_cpu(rhead->h_len); 3457 3463 if (unlikely( hlen <= 0 || hlen > INT_MAX )) { 3458 3464 XFS_ERROR_REPORT("xlog_valid_rec_header(2)", 3459 3465 XFS_ERRLEVEL_LOW, log->l_mp); ··· 3510 3522 error = xlog_valid_rec_header(log, rhead, tail_blk); 3511 3523 if (error) 3512 3524 goto bread_err1; 3513 - h_size = INT_GET(rhead->h_size, ARCH_CONVERT); 3514 - if ((INT_GET(rhead->h_version, ARCH_CONVERT) 3515 - & XLOG_VERSION_2) && 3525 + h_size = be32_to_cpu(rhead->h_size); 3526 + if ((be32_to_cpu(rhead->h_version) & XLOG_VERSION_2) && 3516 3527 (h_size > XLOG_HEADER_CYCLE_SIZE)) { 3517 3528 hblks = h_size / XLOG_HEADER_CYCLE_SIZE; 3518 3529 if (h_size % XLOG_HEADER_CYCLE_SIZE) ··· 3548 3561 goto bread_err2; 3549 3562 3550 3563 /* blocks in data section */ 3551 - bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); 3564 + bblks = (int)BTOBB(be32_to_cpu(rhead->h_len)); 3552 3565 error = xlog_bread(log, blk_no + hblks, bblks, dbp); 3553 3566 if (error) 3554 3567 goto bread_err2; ··· 3623 3636 if (error) 3624 3637 goto bread_err2; 3625 3638 3626 - bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); 3639 + bblks = (int)BTOBB(be32_to_cpu(rhead->h_len)); 3627 3640 blk_no += hblks; 3628 3641 3629 3642 /* Read in data for log record */ ··· 3694 3707 error = xlog_valid_rec_header(log, rhead, blk_no); 3695 3708 if (error) 3696 3709 goto bread_err2; 3697 - bblks = (int)BTOBB(INT_GET(rhead->h_len, ARCH_CONVERT)); 3710 + bblks = (int)BTOBB(be32_to_cpu(rhead->h_len)); 3698 3711 if ((error = xlog_bread(log, blk_no+hblks, bblks, dbp))) 3699 3712 goto bread_err2; 3700 3713 offset = xlog_align(log, blk_no+hblks, bblks, dbp);
+195 -151
fs/xfs/xfs_mount.c
··· 136 136 mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB; 137 137 } 138 138 139 - AIL_LOCKINIT(&mp->m_ail_lock, "xfs_ail"); 140 - spinlock_init(&mp->m_sb_lock, "xfs_sb"); 139 + spin_lock_init(&mp->m_sb_lock); 141 140 mutex_init(&mp->m_ilock); 142 141 mutex_init(&mp->m_growlock); 143 - /* 144 - * Initialize the AIL. 145 - */ 146 - xfs_trans_ail_init(mp); 147 - 148 142 atomic_set(&mp->m_active_trans, 0); 149 143 150 144 return mp; ··· 165 171 sizeof(xfs_perag_t) * mp->m_sb.sb_agcount); 166 172 } 167 173 168 - AIL_LOCK_DESTROY(&mp->m_ail_lock); 174 + spinlock_destroy(&mp->m_ail_lock); 169 175 spinlock_destroy(&mp->m_sb_lock); 170 176 mutex_destroy(&mp->m_ilock); 171 177 mutex_destroy(&mp->m_growlock); ··· 610 616 int i; 611 617 612 618 mp->m_agfrotor = mp->m_agirotor = 0; 613 - spinlock_init(&mp->m_agirotor_lock, "m_agirotor_lock"); 619 + spin_lock_init(&mp->m_agirotor_lock); 614 620 mp->m_maxagi = mp->m_sb.sb_agcount; 615 621 mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG; 616 622 mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT; ··· 690 696 uint64_t bfreelst = 0; 691 697 uint64_t btree = 0; 692 698 int error; 693 - int s; 694 699 695 700 for (index = 0; index < agcount; index++) { 696 701 /* ··· 714 721 /* 715 722 * Overwrite incore superblock counters with just-read data 716 723 */ 717 - s = XFS_SB_LOCK(mp); 724 + spin_lock(&mp->m_sb_lock); 718 725 sbp->sb_ifree = ifree; 719 726 sbp->sb_icount = ialloc; 720 727 sbp->sb_fdblocks = bfree + bfreelst + btree; 721 - XFS_SB_UNLOCK(mp, s); 728 + spin_unlock(&mp->m_sb_lock); 722 729 723 730 /* Fixup the per-cpu counters as well. */ 724 731 xfs_icsb_reinit_counters(mp); ··· 727 734 } 728 735 729 736 /* 730 - * xfs_mountfs 731 - * 732 - * This function does the following on an initial mount of a file system: 733 - * - reads the superblock from disk and init the mount struct 734 - * - if we're a 32-bit kernel, do a size check on the superblock 735 - * so we don't mount terabyte filesystems 736 - * - init mount struct realtime fields 737 - * - allocate inode hash table for fs 738 - * - init directory manager 739 - * - perform recovery and init the log manager 737 + * Update alignment values based on mount options and sb values 740 738 */ 741 - int 742 - xfs_mountfs( 743 - xfs_mount_t *mp, 744 - int mfsi_flags) 739 + STATIC int 740 + xfs_update_alignment(xfs_mount_t *mp, int mfsi_flags, __uint64_t *update_flags) 745 741 { 746 - xfs_buf_t *bp; 747 742 xfs_sb_t *sbp = &(mp->m_sb); 748 - xfs_inode_t *rip; 749 - bhv_vnode_t *rvp = NULL; 750 - int readio_log, writeio_log; 751 - xfs_daddr_t d; 752 - __uint64_t resblks; 753 - __int64_t update_flags; 754 - uint quotamount, quotaflags; 755 - int agno; 756 - int uuid_mounted = 0; 757 - int error = 0; 758 743 759 - if (mp->m_sb_bp == NULL) { 760 - if ((error = xfs_readsb(mp, mfsi_flags))) { 761 - return error; 762 - } 763 - } 764 - xfs_mount_common(mp, sbp); 765 - 766 - /* 767 - * Check if sb_agblocks is aligned at stripe boundary 768 - * If sb_agblocks is NOT aligned turn off m_dalign since 769 - * allocator alignment is within an ag, therefore ag has 770 - * to be aligned at stripe boundary. 
771 - */ 772 - update_flags = 0LL; 773 744 if (mp->m_dalign && !(mfsi_flags & XFS_MFSI_SECOND)) { 774 745 /* 775 746 * If stripe unit and stripe width are not multiples ··· 744 787 if (mp->m_flags & XFS_MOUNT_RETERR) { 745 788 cmn_err(CE_WARN, 746 789 "XFS: alignment check 1 failed"); 747 - error = XFS_ERROR(EINVAL); 748 - goto error1; 790 + return XFS_ERROR(EINVAL); 749 791 } 750 792 mp->m_dalign = mp->m_swidth = 0; 751 793 } else { ··· 754 798 mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign); 755 799 if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) { 756 800 if (mp->m_flags & XFS_MOUNT_RETERR) { 757 - error = XFS_ERROR(EINVAL); 758 - goto error1; 801 + return XFS_ERROR(EINVAL); 759 802 } 760 803 xfs_fs_cmn_err(CE_WARN, mp, 761 804 "stripe alignment turned off: sunit(%d)/swidth(%d) incompatible with agsize(%d)", ··· 771 816 "stripe alignment turned off: sunit(%d) less than bsize(%d)", 772 817 mp->m_dalign, 773 818 mp->m_blockmask +1); 774 - error = XFS_ERROR(EINVAL); 775 - goto error1; 819 + return XFS_ERROR(EINVAL); 776 820 } 777 821 mp->m_swidth = 0; 778 822 } ··· 784 830 if (XFS_SB_VERSION_HASDALIGN(sbp)) { 785 831 if (sbp->sb_unit != mp->m_dalign) { 786 832 sbp->sb_unit = mp->m_dalign; 787 - update_flags |= XFS_SB_UNIT; 833 + *update_flags |= XFS_SB_UNIT; 788 834 } 789 835 if (sbp->sb_width != mp->m_swidth) { 790 836 sbp->sb_width = mp->m_swidth; 791 - update_flags |= XFS_SB_WIDTH; 837 + *update_flags |= XFS_SB_WIDTH; 792 838 } 793 839 } 794 840 } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN && ··· 797 843 mp->m_swidth = sbp->sb_width; 798 844 } 799 845 800 - xfs_alloc_compute_maxlevels(mp); 801 - xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK); 802 - xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK); 803 - xfs_ialloc_compute_maxlevels(mp); 846 + return 0; 847 + } 848 + 849 + /* 850 + * Set the maximum inode count for this filesystem 851 + */ 852 + STATIC void 853 + xfs_set_maxicount(xfs_mount_t *mp) 854 + { 855 + xfs_sb_t *sbp = &(mp->m_sb); 856 + __uint64_t icount; 804 857 805 858 if (sbp->sb_imax_pct) { 806 - __uint64_t icount; 807 - 808 - /* Make sure the maximum inode count is a multiple of the 809 - * units we allocate inodes in. 859 + /* 860 + * Make sure the maximum inode count is a multiple 861 + * of the units we allocate inodes in. 810 862 */ 811 - 812 863 icount = sbp->sb_dblocks * sbp->sb_imax_pct; 813 864 do_div(icount, 100); 814 865 do_div(icount, mp->m_ialloc_blks); 815 866 mp->m_maxicount = (icount * mp->m_ialloc_blks) << 816 867 sbp->sb_inopblog; 817 - } else 868 + } else { 818 869 mp->m_maxicount = 0; 819 - 820 - mp->m_maxioffset = xfs_max_file_offset(sbp->sb_blocklog); 821 - 822 - /* 823 - * XFS uses the uuid from the superblock as the unique 824 - * identifier for fsid. We can not use the uuid from the volume 825 - * since a single partition filesystem is identical to a single 826 - * partition volume/filesystem. 827 - */ 828 - if ((mfsi_flags & XFS_MFSI_SECOND) == 0 && 829 - (mp->m_flags & XFS_MOUNT_NOUUID) == 0) { 830 - if (xfs_uuid_mount(mp)) { 831 - error = XFS_ERROR(EINVAL); 832 - goto error1; 833 - } 834 - uuid_mounted=1; 835 870 } 871 + } 836 872 837 - /* 838 - * Set the default minimum read and write sizes unless 839 - * already specified in a mount option. 840 - * We use smaller I/O sizes when the file system 841 - * is being used for NFS service (wsync mount option). 842 - */ 873 + /* 874 + * Set the default minimum read and write sizes unless 875 + * already specified in a mount option. 
876 + * We use smaller I/O sizes when the file system 877 + * is being used for NFS service (wsync mount option). 878 + */ 879 + STATIC void 880 + xfs_set_rw_sizes(xfs_mount_t *mp) 881 + { 882 + xfs_sb_t *sbp = &(mp->m_sb); 883 + int readio_log, writeio_log; 884 + 843 885 if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) { 844 886 if (mp->m_flags & XFS_MOUNT_WSYNC) { 845 887 readio_log = XFS_WSYNC_READIO_LOG; ··· 861 911 mp->m_writeio_log = writeio_log; 862 912 } 863 913 mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog); 914 + } 864 915 865 - /* 866 - * Set the inode cluster size. 867 - * This may still be overridden by the file system 868 - * block size if it is larger than the chosen cluster size. 869 - */ 870 - mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE; 871 - 872 - /* 873 - * Set whether we're using inode alignment. 874 - */ 916 + /* 917 + * Set whether we're using inode alignment. 918 + */ 919 + STATIC void 920 + xfs_set_inoalignment(xfs_mount_t *mp) 921 + { 875 922 if (XFS_SB_VERSION_HASALIGN(&mp->m_sb) && 876 923 mp->m_sb.sb_inoalignmt >= 877 924 XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size)) ··· 884 937 mp->m_sinoalign = mp->m_dalign; 885 938 else 886 939 mp->m_sinoalign = 0; 887 - /* 888 - * Check that the data (and log if separate) are an ok size. 889 - */ 940 + } 941 + 942 + /* 943 + * Check that the data (and log if separate) are an ok size. 944 + */ 945 + STATIC int 946 + xfs_check_sizes(xfs_mount_t *mp, int mfsi_flags) 947 + { 948 + xfs_buf_t *bp; 949 + xfs_daddr_t d; 950 + int error; 951 + 890 952 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks); 891 953 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) { 892 954 cmn_err(CE_WARN, "XFS: size check 1 failed"); 893 - error = XFS_ERROR(E2BIG); 894 - goto error1; 955 + return XFS_ERROR(E2BIG); 895 956 } 896 957 error = xfs_read_buf(mp, mp->m_ddev_targp, 897 958 d - XFS_FSS_TO_BB(mp, 1), ··· 908 953 xfs_buf_relse(bp); 909 954 } else { 910 955 cmn_err(CE_WARN, "XFS: size check 2 failed"); 911 - if (error == ENOSPC) { 956 + if (error == ENOSPC) 912 957 error = XFS_ERROR(E2BIG); 913 - } 914 - goto error1; 958 + return error; 915 959 } 916 960 917 961 if (((mfsi_flags & XFS_MFSI_CLIENT) == 0) && ··· 918 964 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks); 919 965 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) { 920 966 cmn_err(CE_WARN, "XFS: size check 3 failed"); 921 - error = XFS_ERROR(E2BIG); 922 - goto error1; 967 + return XFS_ERROR(E2BIG); 923 968 } 924 969 error = xfs_read_buf(mp, mp->m_logdev_targp, 925 970 d - XFS_FSB_TO_BB(mp, 1), ··· 927 974 xfs_buf_relse(bp); 928 975 } else { 929 976 cmn_err(CE_WARN, "XFS: size check 3 failed"); 930 - if (error == ENOSPC) { 977 + if (error == ENOSPC) 931 978 error = XFS_ERROR(E2BIG); 932 - } 933 - goto error1; 979 + return error; 934 980 } 935 981 } 982 + return 0; 983 + } 984 + 985 + /* 986 + * xfs_mountfs 987 + * 988 + * This function does the following on an initial mount of a file system: 989 + * - reads the superblock from disk and init the mount struct 990 + * - if we're a 32-bit kernel, do a size check on the superblock 991 + * so we don't mount terabyte filesystems 992 + * - init mount struct realtime fields 993 + * - allocate inode hash table for fs 994 + * - init directory manager 995 + * - perform recovery and init the log manager 996 + */ 997 + int 998 + xfs_mountfs( 999 + xfs_mount_t *mp, 1000 + int mfsi_flags) 1001 + { 1002 + xfs_sb_t *sbp = &(mp->m_sb); 1003 + xfs_inode_t *rip; 1004 + bhv_vnode_t *rvp = NULL; 1005 + __uint64_t resblks; 
1006 + __int64_t update_flags = 0LL; 1007 + uint quotamount, quotaflags; 1008 + int agno; 1009 + int uuid_mounted = 0; 1010 + int error = 0; 1011 + 1012 + if (mp->m_sb_bp == NULL) { 1013 + error = xfs_readsb(mp, mfsi_flags); 1014 + if (error) 1015 + return error; 1016 + } 1017 + xfs_mount_common(mp, sbp); 1018 + 1019 + /* 1020 + * Check if sb_agblocks is aligned at stripe boundary 1021 + * If sb_agblocks is NOT aligned turn off m_dalign since 1022 + * allocator alignment is within an ag, therefore ag has 1023 + * to be aligned at stripe boundary. 1024 + */ 1025 + error = xfs_update_alignment(mp, mfsi_flags, &update_flags); 1026 + if (error) 1027 + goto error1; 1028 + 1029 + xfs_alloc_compute_maxlevels(mp); 1030 + xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK); 1031 + xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK); 1032 + xfs_ialloc_compute_maxlevels(mp); 1033 + 1034 + xfs_set_maxicount(mp); 1035 + 1036 + mp->m_maxioffset = xfs_max_file_offset(sbp->sb_blocklog); 1037 + 1038 + /* 1039 + * XFS uses the uuid from the superblock as the unique 1040 + * identifier for fsid. We can not use the uuid from the volume 1041 + * since a single partition filesystem is identical to a single 1042 + * partition volume/filesystem. 1043 + */ 1044 + if ((mfsi_flags & XFS_MFSI_SECOND) == 0 && 1045 + (mp->m_flags & XFS_MOUNT_NOUUID) == 0) { 1046 + if (xfs_uuid_mount(mp)) { 1047 + error = XFS_ERROR(EINVAL); 1048 + goto error1; 1049 + } 1050 + uuid_mounted=1; 1051 + } 1052 + 1053 + /* 1054 + * Set the minimum read and write sizes 1055 + */ 1056 + xfs_set_rw_sizes(mp); 1057 + 1058 + /* 1059 + * Set the inode cluster size. 1060 + * This may still be overridden by the file system 1061 + * block size if it is larger than the chosen cluster size. 1062 + */ 1063 + mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE; 1064 + 1065 + /* 1066 + * Set inode alignment fields 1067 + */ 1068 + xfs_set_inoalignment(mp); 1069 + 1070 + /* 1071 + * Check that the data (and log if separate) are an ok size. 1072 + */ 1073 + error = xfs_check_sizes(mp, mfsi_flags); 1074 + if (error) 1075 + goto error1; 936 1076 937 1077 /* 938 1078 * Initialize realtime fields in the mount structure 939 1079 */ 940 - if ((error = xfs_rtmount_init(mp))) { 1080 + error = xfs_rtmount_init(mp); 1081 + if (error) { 941 1082 cmn_err(CE_WARN, "XFS: RT mount failed"); 942 1083 goto error1; 943 1084 } ··· 1149 1102 /* 1150 1103 * Initialize realtime inode pointers in the mount structure 1151 1104 */ 1152 - if ((error = xfs_rtmount_inodes(mp))) { 1105 + error = xfs_rtmount_inodes(mp); 1106 + if (error) { 1153 1107 /* 1154 1108 * Free up the root inode. 1155 1109 */ ··· 1168 1120 /* 1169 1121 * Initialise the XFS quota management subsystem for this mount 1170 1122 */ 1171 - if ((error = XFS_QM_INIT(mp, &quotamount, &quotaflags))) 1123 + error = XFS_QM_INIT(mp, &quotamount, &quotaflags); 1124 + if (error) 1172 1125 goto error4; 1173 1126 1174 1127 /* ··· 1186 1137 /* 1187 1138 * Complete the quota initialisation, post-log-replay component. 
1188 1139 */ 1189 - if ((error = XFS_QM_MOUNT(mp, quotamount, quotaflags, mfsi_flags))) 1140 + error = XFS_QM_MOUNT(mp, quotamount, quotaflags, mfsi_flags); 1141 + if (error) 1190 1142 goto error4; 1191 1143 1192 1144 /* ··· 1305 1255 #if defined(DEBUG) || defined(INDUCE_IO_ERROR) 1306 1256 xfs_errortag_clearall(mp, 0); 1307 1257 #endif 1308 - XFS_IODONE(mp); 1309 1258 xfs_mount_free(mp); 1310 1259 return 0; 1311 1260 } ··· 1490 1441 * Fields are not allowed to dip below zero, so if the delta would 1491 1442 * do this do not apply it and return EINVAL. 1492 1443 * 1493 - * The SB_LOCK must be held when this routine is called. 1444 + * The m_sb_lock must be held when this routine is called. 1494 1445 */ 1495 1446 int 1496 1447 xfs_mod_incore_sb_unlocked( ··· 1655 1606 /* 1656 1607 * xfs_mod_incore_sb() is used to change a field in the in-core 1657 1608 * superblock structure by the specified delta. This modification 1658 - * is protected by the SB_LOCK. Just use the xfs_mod_incore_sb_unlocked() 1609 + * is protected by the m_sb_lock. Just use the xfs_mod_incore_sb_unlocked() 1659 1610 * routine to do the work. 1660 1611 */ 1661 1612 int ··· 1665 1616 int64_t delta, 1666 1617 int rsvd) 1667 1618 { 1668 - unsigned long s; 1669 1619 int status; 1670 1620 1671 1621 /* check for per-cpu counters */ ··· 1681 1633 /* FALLTHROUGH */ 1682 1634 #endif 1683 1635 default: 1684 - s = XFS_SB_LOCK(mp); 1636 + spin_lock(&mp->m_sb_lock); 1685 1637 status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); 1686 - XFS_SB_UNLOCK(mp, s); 1638 + spin_unlock(&mp->m_sb_lock); 1687 1639 break; 1688 1640 } 1689 1641 ··· 1704 1656 int 1705 1657 xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd) 1706 1658 { 1707 - unsigned long s; 1708 1659 int status=0; 1709 1660 xfs_mod_sb_t *msbp; 1710 1661 ··· 1711 1664 * Loop through the array of mod structures and apply each 1712 1665 * individually. If any fail, then back out all those 1713 1666 * which have already been applied. Do all of this within 1714 - * the scope of the SB_LOCK so that all of the changes will 1667 + * the scope of the m_sb_lock so that all of the changes will 1715 1668 * be atomic. 1716 1669 */ 1717 - s = XFS_SB_LOCK(mp); 1670 + spin_lock(&mp->m_sb_lock); 1718 1671 msbp = &msb[0]; 1719 1672 for (msbp = &msbp[0]; msbp < (msb + nmsb); msbp++) { 1720 1673 /* ··· 1728 1681 case XFS_SBS_IFREE: 1729 1682 case XFS_SBS_FDBLOCKS: 1730 1683 if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) { 1731 - XFS_SB_UNLOCK(mp, s); 1684 + spin_unlock(&mp->m_sb_lock); 1732 1685 status = xfs_icsb_modify_counters(mp, 1733 1686 msbp->msb_field, 1734 1687 msbp->msb_delta, rsvd); 1735 - s = XFS_SB_LOCK(mp); 1688 + spin_lock(&mp->m_sb_lock); 1736 1689 break; 1737 1690 } 1738 1691 /* FALLTHROUGH */ ··· 1766 1719 case XFS_SBS_IFREE: 1767 1720 case XFS_SBS_FDBLOCKS: 1768 1721 if (!(mp->m_flags & XFS_MOUNT_NO_PERCPU_SB)) { 1769 - XFS_SB_UNLOCK(mp, s); 1722 + spin_unlock(&mp->m_sb_lock); 1770 1723 status = xfs_icsb_modify_counters(mp, 1771 1724 msbp->msb_field, 1772 1725 -(msbp->msb_delta), 1773 1726 rsvd); 1774 - s = XFS_SB_LOCK(mp); 1727 + spin_lock(&mp->m_sb_lock); 1775 1728 break; 1776 1729 } 1777 1730 /* FALLTHROUGH */ ··· 1787 1740 msbp--; 1788 1741 } 1789 1742 } 1790 - XFS_SB_UNLOCK(mp, s); 1743 + spin_unlock(&mp->m_sb_lock); 1791 1744 return status; 1792 1745 } 1793 1746 ··· 1935 1888 * 1936 1889 * Locking rules: 1937 1890 * 1938 - * 1. XFS_SB_LOCK() before picking up per-cpu locks 1891 + * 1. m_sb_lock before picking up per-cpu locks 1939 1892 * 2. 
per-cpu locks always picked up via for_each_online_cpu() order 1940 - * 3. accurate counter sync requires XFS_SB_LOCK + per cpu locks 1893 + * 3. accurate counter sync requires m_sb_lock + per cpu locks 1941 1894 * 4. modifying per-cpu counters requires holding per-cpu lock 1942 - * 5. modifying global counters requires holding XFS_SB_LOCK 1943 - * 6. enabling or disabling a counter requires holding the XFS_SB_LOCK 1895 + * 5. modifying global counters requires holding m_sb_lock 1896 + * 6. enabling or disabling a counter requires holding the m_sb_lock 1944 1897 * and _none_ of the per-cpu locks. 1945 1898 * 1946 1899 * Disabled counters are only ever re-enabled by a balance operation ··· 1967 1920 { 1968 1921 xfs_icsb_cnts_t *cntp; 1969 1922 xfs_mount_t *mp; 1970 - int s; 1971 1923 1972 1924 mp = (xfs_mount_t *)container_of(nfb, xfs_mount_t, m_icsb_notifier); 1973 1925 cntp = (xfs_icsb_cnts_t *) ··· 1992 1946 * count into the total on the global superblock and 1993 1947 * re-enable the counters. */ 1994 1948 xfs_icsb_lock(mp); 1995 - s = XFS_SB_LOCK(mp); 1949 + spin_lock(&mp->m_sb_lock); 1996 1950 xfs_icsb_disable_counter(mp, XFS_SBS_ICOUNT); 1997 1951 xfs_icsb_disable_counter(mp, XFS_SBS_IFREE); 1998 1952 xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS); ··· 2009 1963 XFS_ICSB_SB_LOCKED, 0); 2010 1964 xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 2011 1965 XFS_ICSB_SB_LOCKED, 0); 2012 - XFS_SB_UNLOCK(mp, s); 1966 + spin_unlock(&mp->m_sb_lock); 2013 1967 xfs_icsb_unlock(mp); 2014 1968 break; 2015 1969 } ··· 2240 2194 int flags) 2241 2195 { 2242 2196 xfs_icsb_cnts_t cnt; 2243 - int s; 2244 2197 2245 2198 /* Pass 1: lock all counters */ 2246 2199 if ((flags & XFS_ICSB_SB_LOCKED) == 0) 2247 - s = XFS_SB_LOCK(mp); 2200 + spin_lock(&mp->m_sb_lock); 2248 2201 2249 2202 xfs_icsb_count(mp, &cnt, flags); 2250 2203 ··· 2256 2211 mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks; 2257 2212 2258 2213 if ((flags & XFS_ICSB_SB_LOCKED) == 0) 2259 - XFS_SB_UNLOCK(mp, s); 2214 + spin_unlock(&mp->m_sb_lock); 2260 2215 } 2261 2216 2262 2217 /* ··· 2297 2252 { 2298 2253 uint64_t count, resid; 2299 2254 int weight = num_online_cpus(); 2300 - int s; 2301 2255 uint64_t min = (uint64_t)min_per_cpu; 2302 2256 2303 2257 if (!(flags & XFS_ICSB_SB_LOCKED)) 2304 - s = XFS_SB_LOCK(mp); 2258 + spin_lock(&mp->m_sb_lock); 2305 2259 2306 2260 /* disable counter and sync counter */ 2307 2261 xfs_icsb_disable_counter(mp, field); ··· 2334 2290 xfs_icsb_enable_counter(mp, field, count, resid); 2335 2291 out: 2336 2292 if (!(flags & XFS_ICSB_SB_LOCKED)) 2337 - XFS_SB_UNLOCK(mp, s); 2293 + spin_unlock(&mp->m_sb_lock); 2338 2294 } 2339 2295 2340 - int 2296 + STATIC int 2341 2297 xfs_icsb_modify_counters( 2342 2298 xfs_mount_t *mp, 2343 2299 xfs_sb_field_t field, ··· 2346 2302 { 2347 2303 xfs_icsb_cnts_t *icsbp; 2348 2304 long long lcounter; /* long counter for 64 bit fields */ 2349 - int cpu, ret = 0, s; 2305 + int cpu, ret = 0; 2350 2306 2351 2307 might_sleep(); 2352 2308 again: ··· 2424 2380 * running atomically here, we know a rebalance cannot 2425 2381 * be in progress. Hence we can go straight to operating 2426 2382 * on the global superblock. We do not call xfs_mod_incore_sb() 2427 - * here even though we need to get the SB_LOCK. Doing so 2383 + * here even though we need to get the m_sb_lock. Doing so 2428 2384 * will cause us to re-enter this function and deadlock. 
2429 - * Hence we get the SB_LOCK ourselves and then call 2385 + * Hence we get the m_sb_lock ourselves and then call 2430 2386 * xfs_mod_incore_sb_unlocked() as the unlocked path operates 2431 2387 * directly on the global counters. 2432 2388 */ 2433 - s = XFS_SB_LOCK(mp); 2389 + spin_lock(&mp->m_sb_lock); 2434 2390 ret = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd); 2435 - XFS_SB_UNLOCK(mp, s); 2391 + spin_unlock(&mp->m_sb_lock); 2436 2392 2437 2393 /* 2438 2394 * Now that we've modified the global superblock, we
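The xfs_mount.c hunks repeat one conversion throughout: the IRIX-derived XFS_SB_LOCK(mp)/XFS_SB_UNLOCK(mp, s) wrappers, which forced every caller to carry an interrupt-level cookie via SPLDECL(s), give way to a plain spinlock_t m_sb_lock taken with spin_lock()/spin_unlock(), which is sufficient because the in-core superblock counters are never touched from interrupt context. A hedged before/after sketch with a cut-down mount structure (illustrative names, not the real xfs_mount_t):

	#include <linux/spinlock.h>

	struct example_mount {
		spinlock_t	m_sb_lock;	/* protects the in-core counters */
		long long	m_fdblocks;	/* free data blocks */
	};

	static void example_mount_init(struct example_mount *mp)
	{
		spin_lock_init(&mp->m_sb_lock);	/* was: spinlock_init(&lock, "name") */
	}

	static int example_mod_fdblocks(struct example_mount *mp, long long delta)
	{
		int ret = 0;

		spin_lock(&mp->m_sb_lock);	/* was: s = XFS_SB_LOCK(mp) */
		if (mp->m_fdblocks + delta < 0)
			ret = -1;		/* counter may not dip below zero */
		else
			mp->m_fdblocks += delta;
		spin_unlock(&mp->m_sb_lock);	/* was: XFS_SB_UNLOCK(mp, s) */
		return ret;
	}

The same mechanical change shows up again in xfs_qmops.c, xfs_trans.c and xfs_trans_ail.c below.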
+11 -116
fs/xfs/xfs_mount.h
··· 56 56 struct log; 57 57 struct xfs_mount_args; 58 58 struct xfs_inode; 59 - struct xfs_iocore; 60 59 struct xfs_bmbt_irec; 61 60 struct xfs_bmap_free; 62 61 struct xfs_extdelta; 63 62 struct xfs_swapext; 64 63 struct xfs_mru_cache; 65 - 66 - #define AIL_LOCK_T lock_t 67 - #define AIL_LOCKINIT(x,y) spinlock_init(x,y) 68 - #define AIL_LOCK_DESTROY(x) spinlock_destroy(x) 69 - #define AIL_LOCK(mp,s) s=mutex_spinlock(&(mp)->m_ail_lock) 70 - #define AIL_UNLOCK(mp,s) mutex_spinunlock(&(mp)->m_ail_lock, s) 71 - 72 64 73 65 /* 74 66 * Prototypes and functions for the Data Migration subsystem. ··· 188 196 #define XFS_QM_QUOTACTL(mp, cmd, id, addr) \ 189 197 (*(mp)->m_qm_ops->xfs_quotactl)(mp, cmd, id, addr) 190 198 191 - 192 - /* 193 - * Prototypes and functions for I/O core modularization. 194 - */ 195 - 196 - typedef int (*xfs_ioinit_t)(struct xfs_mount *, 197 - struct xfs_mount_args *, int); 198 - typedef int (*xfs_bmapi_t)(struct xfs_trans *, void *, 199 - xfs_fileoff_t, xfs_filblks_t, int, 200 - xfs_fsblock_t *, xfs_extlen_t, 201 - struct xfs_bmbt_irec *, int *, 202 - struct xfs_bmap_free *, struct xfs_extdelta *); 203 - typedef int (*xfs_bunmapi_t)(struct xfs_trans *, 204 - void *, xfs_fileoff_t, 205 - xfs_filblks_t, int, xfs_extnum_t, 206 - xfs_fsblock_t *, struct xfs_bmap_free *, 207 - struct xfs_extdelta *, int *); 208 - typedef int (*xfs_bmap_eof_t)(void *, xfs_fileoff_t, int, int *); 209 - typedef int (*xfs_iomap_write_direct_t)( 210 - void *, xfs_off_t, size_t, int, 211 - struct xfs_bmbt_irec *, int *, int); 212 - typedef int (*xfs_iomap_write_delay_t)( 213 - void *, xfs_off_t, size_t, int, 214 - struct xfs_bmbt_irec *, int *); 215 - typedef int (*xfs_iomap_write_allocate_t)( 216 - void *, xfs_off_t, size_t, 217 - struct xfs_bmbt_irec *, int *); 218 - typedef int (*xfs_iomap_write_unwritten_t)( 219 - void *, xfs_off_t, size_t); 220 - typedef uint (*xfs_lck_map_shared_t)(void *); 221 - typedef void (*xfs_lock_t)(void *, uint); 222 - typedef void (*xfs_lock_demote_t)(void *, uint); 223 - typedef int (*xfs_lock_nowait_t)(void *, uint); 224 - typedef void (*xfs_unlk_t)(void *, unsigned int); 225 - typedef xfs_fsize_t (*xfs_size_t)(void *); 226 - typedef xfs_fsize_t (*xfs_iodone_t)(struct xfs_mount *); 227 - typedef int (*xfs_swap_extents_t)(void *, void *, 228 - struct xfs_swapext*); 229 - 230 - typedef struct xfs_ioops { 231 - xfs_ioinit_t xfs_ioinit; 232 - xfs_bmapi_t xfs_bmapi_func; 233 - xfs_bunmapi_t xfs_bunmapi_func; 234 - xfs_bmap_eof_t xfs_bmap_eof_func; 235 - xfs_iomap_write_direct_t xfs_iomap_write_direct; 236 - xfs_iomap_write_delay_t xfs_iomap_write_delay; 237 - xfs_iomap_write_allocate_t xfs_iomap_write_allocate; 238 - xfs_iomap_write_unwritten_t xfs_iomap_write_unwritten; 239 - xfs_lock_t xfs_ilock; 240 - xfs_lck_map_shared_t xfs_lck_map_shared; 241 - xfs_lock_demote_t xfs_ilock_demote; 242 - xfs_lock_nowait_t xfs_ilock_nowait; 243 - xfs_unlk_t xfs_unlock; 244 - xfs_size_t xfs_size_func; 245 - xfs_iodone_t xfs_iodone; 246 - xfs_swap_extents_t xfs_swap_extents_func; 247 - } xfs_ioops_t; 248 - 249 - #define XFS_IOINIT(mp, args, flags) \ 250 - (*(mp)->m_io_ops.xfs_ioinit)(mp, args, flags) 251 - #define XFS_BMAPI(mp, trans,io,bno,len,f,first,tot,mval,nmap,flist,delta) \ 252 - (*(mp)->m_io_ops.xfs_bmapi_func) \ 253 - (trans,(io)->io_obj,bno,len,f,first,tot,mval,nmap,flist,delta) 254 - #define XFS_BUNMAPI(mp, trans,io,bno,len,f,nexts,first,flist,delta,done) \ 255 - (*(mp)->m_io_ops.xfs_bunmapi_func) \ 256 - (trans,(io)->io_obj,bno,len,f,nexts,first,flist,delta,done) 257 - #define 
XFS_BMAP_EOF(mp, io, endoff, whichfork, eof) \ 258 - (*(mp)->m_io_ops.xfs_bmap_eof_func) \ 259 - ((io)->io_obj, endoff, whichfork, eof) 260 - #define XFS_IOMAP_WRITE_DIRECT(mp, io, offset, count, flags, mval, nmap, found)\ 261 - (*(mp)->m_io_ops.xfs_iomap_write_direct) \ 262 - ((io)->io_obj, offset, count, flags, mval, nmap, found) 263 - #define XFS_IOMAP_WRITE_DELAY(mp, io, offset, count, flags, mval, nmap) \ 264 - (*(mp)->m_io_ops.xfs_iomap_write_delay) \ 265 - ((io)->io_obj, offset, count, flags, mval, nmap) 266 - #define XFS_IOMAP_WRITE_ALLOCATE(mp, io, offset, count, mval, nmap) \ 267 - (*(mp)->m_io_ops.xfs_iomap_write_allocate) \ 268 - ((io)->io_obj, offset, count, mval, nmap) 269 - #define XFS_IOMAP_WRITE_UNWRITTEN(mp, io, offset, count) \ 270 - (*(mp)->m_io_ops.xfs_iomap_write_unwritten) \ 271 - ((io)->io_obj, offset, count) 272 - #define XFS_LCK_MAP_SHARED(mp, io) \ 273 - (*(mp)->m_io_ops.xfs_lck_map_shared)((io)->io_obj) 274 - #define XFS_ILOCK(mp, io, mode) \ 275 - (*(mp)->m_io_ops.xfs_ilock)((io)->io_obj, mode) 276 - #define XFS_ILOCK_NOWAIT(mp, io, mode) \ 277 - (*(mp)->m_io_ops.xfs_ilock_nowait)((io)->io_obj, mode) 278 - #define XFS_IUNLOCK(mp, io, mode) \ 279 - (*(mp)->m_io_ops.xfs_unlock)((io)->io_obj, mode) 280 - #define XFS_ILOCK_DEMOTE(mp, io, mode) \ 281 - (*(mp)->m_io_ops.xfs_ilock_demote)((io)->io_obj, mode) 282 - #define XFS_SIZE(mp, io) \ 283 - (*(mp)->m_io_ops.xfs_size_func)((io)->io_obj) 284 - #define XFS_IODONE(mp) \ 285 - (*(mp)->m_io_ops.xfs_iodone)(mp) 286 - #define XFS_SWAP_EXTENTS(mp, io, tio, sxp) \ 287 - (*(mp)->m_io_ops.xfs_swap_extents_func) \ 288 - ((io)->io_obj, (tio)->io_obj, sxp) 289 - 290 199 #ifdef HAVE_PERCPU_SB 291 200 292 201 /* ··· 219 326 #define xfs_icsb_sync_counters_flags(mp, flags) do { } while (0) 220 327 #endif 221 328 329 + typedef struct xfs_ail { 330 + xfs_ail_entry_t xa_ail; 331 + uint xa_gen; 332 + struct task_struct *xa_task; 333 + xfs_lsn_t xa_target; 334 + } xfs_ail_t; 335 + 222 336 typedef struct xfs_mount { 223 337 struct super_block *m_super; 224 338 xfs_tid_t m_tid; /* next unused tid for fs */ 225 - AIL_LOCK_T m_ail_lock; /* fs AIL mutex */ 226 - xfs_ail_entry_t m_ail; /* fs active log item list */ 227 - uint m_ail_gen; /* fs AIL generation count */ 339 + spinlock_t m_ail_lock; /* fs AIL mutex */ 340 + xfs_ail_t m_ail; /* fs active log item list */ 228 341 xfs_sb_t m_sb; /* copy of fs superblock */ 229 - lock_t m_sb_lock; /* sb counter mutex */ 342 + spinlock_t m_sb_lock; /* sb counter lock */ 230 343 struct xfs_buf *m_sb_bp; /* buffer for superblock */ 231 344 char *m_fsname; /* filesystem name */ 232 345 int m_fsname_len; /* strlen of fs name */ ··· 241 342 int m_bsize; /* fs logical block size */ 242 343 xfs_agnumber_t m_agfrotor; /* last ag where space found */ 243 344 xfs_agnumber_t m_agirotor; /* last ag dir inode alloced */ 244 - lock_t m_agirotor_lock;/* .. and lock protecting it */ 345 + spinlock_t m_agirotor_lock;/* .. 
and lock protecting it */ 245 346 xfs_agnumber_t m_maxagi; /* highest inode alloc group */ 246 347 struct xfs_inode *m_inodes; /* active inode list */ 247 348 struct list_head m_del_inodes; /* inodes to reclaim */ ··· 322 423 * hash table */ 323 424 struct xfs_dmops *m_dm_ops; /* vector of DMI ops */ 324 425 struct xfs_qmops *m_qm_ops; /* vector of XQM ops */ 325 - struct xfs_ioops m_io_ops; /* vector of I/O ops */ 326 426 atomic_t m_active_trans; /* number trans frozen */ 327 427 #ifdef HAVE_PERCPU_SB 328 428 xfs_icsb_cnts_t *m_sb_cnts; /* per-cpu superblock counters */ ··· 508 610 509 611 #define XFS_MOUNT_ILOCK(mp) mutex_lock(&((mp)->m_ilock)) 510 612 #define XFS_MOUNT_IUNLOCK(mp) mutex_unlock(&((mp)->m_ilock)) 511 - #define XFS_SB_LOCK(mp) mutex_spinlock(&(mp)->m_sb_lock) 512 - #define XFS_SB_UNLOCK(mp,s) mutex_spinunlock(&(mp)->m_sb_lock,(s)) 513 613 514 614 extern xfs_mount_t *xfs_mount_init(void); 515 615 extern void xfs_mod_sb(xfs_trans_t *, __int64_t); ··· 542 646 extern void xfs_qmops_put(struct xfs_mount *); 543 647 544 648 extern struct xfs_dmops xfs_dmcore_xfs; 545 - extern struct xfs_ioops xfs_iocore_xfs; 546 649 547 650 extern int xfs_init(void); 548 651 extern void xfs_cleanup(void);
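Two things fall out of the xfs_mount.h hunk: the AIL state is grouped into its own xfs_ail_t (list head, generation count, and the task/target fields needed by the new push thread), and the xfs_ioops vector of per-mount function pointers disappears. The vector existed so an "iocore" object could stand in for an inode, but with only one implementation left the indirection just hides the callee. A sketch of what was removed, with illustrative names:

	/* before: every I/O operation bounced through a per-mount table */
	struct example_ioops {
		int (*write_unwritten)(void *obj, long long offset, unsigned long count);
	};

	struct example_mount {
		struct example_ioops	m_io_ops;
	};

	#define EXAMPLE_IOMAP_WRITE_UNWRITTEN(mp, io, off, cnt)	\
		((mp)->m_io_ops.write_unwritten((io), (off), (cnt)))

	/*
	 * after: callers invoke the sole implementation directly, e.g.
	 * xfs_iomap_write_unwritten(ip, offset, count), so the typedefs,
	 * the xfs_ioops vector and the wrapper macros can all be deleted.
	 */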
+32 -22
fs/xfs/xfs_mru_cache.c
··· 225 225 * list need to be deleted. For each element this involves removing it from the 226 226 * data store, removing it from the reap list, calling the client's free 227 227 * function and deleting the element from the element zone. 228 + * 229 + * We get called holding the mru->lock, which we drop and then reacquire. 230 + * Sparse need special help with this to tell it we know what we are doing. 228 231 */ 229 232 STATIC void 230 233 _xfs_mru_cache_clear_reap_list( 231 - xfs_mru_cache_t *mru) 234 + xfs_mru_cache_t *mru) __releases(mru->lock) __acquires(mru->lock) 235 + 232 236 { 233 237 xfs_mru_cache_elem_t *elem, *next; 234 238 struct list_head tmp; ··· 249 245 */ 250 246 list_move(&elem->list_node, &tmp); 251 247 } 252 - mutex_spinunlock(&mru->lock, 0); 248 + spin_unlock(&mru->lock); 253 249 254 250 list_for_each_entry_safe(elem, next, &tmp, list_node) { 255 251 ··· 263 259 kmem_zone_free(xfs_mru_elem_zone, elem); 264 260 } 265 261 266 - mutex_spinlock(&mru->lock); 262 + spin_lock(&mru->lock); 267 263 } 268 264 269 265 /* ··· 284 280 if (!mru || !mru->lists) 285 281 return; 286 282 287 - mutex_spinlock(&mru->lock); 283 + spin_lock(&mru->lock); 288 284 next = _xfs_mru_cache_migrate(mru, jiffies); 289 285 _xfs_mru_cache_clear_reap_list(mru); 290 286 ··· 298 294 queue_delayed_work(xfs_mru_reap_wq, &mru->work, next); 299 295 } 300 296 301 - mutex_spinunlock(&mru->lock, 0); 297 + spin_unlock(&mru->lock); 302 298 } 303 299 304 300 int ··· 372 368 */ 373 369 INIT_RADIX_TREE(&mru->store, GFP_ATOMIC); 374 370 INIT_LIST_HEAD(&mru->reap_list); 375 - spinlock_init(&mru->lock, "xfs_mru_cache"); 371 + spin_lock_init(&mru->lock); 376 372 INIT_DELAYED_WORK(&mru->work, _xfs_mru_cache_reap); 377 373 378 374 mru->grp_time = grp_time; ··· 402 398 if (!mru || !mru->lists) 403 399 return; 404 400 405 - mutex_spinlock(&mru->lock); 401 + spin_lock(&mru->lock); 406 402 if (mru->queued) { 407 - mutex_spinunlock(&mru->lock, 0); 403 + spin_unlock(&mru->lock); 408 404 cancel_rearming_delayed_workqueue(xfs_mru_reap_wq, &mru->work); 409 - mutex_spinlock(&mru->lock); 405 + spin_lock(&mru->lock); 410 406 } 411 407 412 408 _xfs_mru_cache_migrate(mru, jiffies + mru->grp_count * mru->grp_time); 413 409 _xfs_mru_cache_clear_reap_list(mru); 414 410 415 - mutex_spinunlock(&mru->lock, 0); 411 + spin_unlock(&mru->lock); 416 412 } 417 413 418 414 void ··· 458 454 elem->key = key; 459 455 elem->value = value; 460 456 461 - mutex_spinlock(&mru->lock); 457 + spin_lock(&mru->lock); 462 458 463 459 radix_tree_insert(&mru->store, key, elem); 464 460 radix_tree_preload_end(); 465 461 _xfs_mru_cache_list_insert(mru, elem); 466 462 467 - mutex_spinunlock(&mru->lock, 0); 463 + spin_unlock(&mru->lock); 468 464 469 465 return 0; 470 466 } ··· 487 483 if (!mru || !mru->lists) 488 484 return NULL; 489 485 490 - mutex_spinlock(&mru->lock); 486 + spin_lock(&mru->lock); 491 487 elem = radix_tree_delete(&mru->store, key); 492 488 if (elem) { 493 489 value = elem->value; 494 490 list_del(&elem->list_node); 495 491 } 496 492 497 - mutex_spinunlock(&mru->lock, 0); 493 + spin_unlock(&mru->lock); 498 494 499 495 if (elem) 500 496 kmem_zone_free(xfs_mru_elem_zone, elem); ··· 532 528 * 533 529 * If the element isn't found, this function returns NULL and the spinlock is 534 530 * released. xfs_mru_cache_done() should NOT be called when this occurs. 531 + * 532 + * Because sparse isn't smart enough to know about conditional lock return 533 + * status, we need to help it get it right by annotating the path that does 534 + * not release the lock. 
535 535 */ 536 536 void * 537 537 xfs_mru_cache_lookup( ··· 548 540 if (!mru || !mru->lists) 549 541 return NULL; 550 542 551 - mutex_spinlock(&mru->lock); 543 + spin_lock(&mru->lock); 552 544 elem = radix_tree_lookup(&mru->store, key); 553 545 if (elem) { 554 546 list_del(&elem->list_node); 555 547 _xfs_mru_cache_list_insert(mru, elem); 556 - } 557 - else 558 - mutex_spinunlock(&mru->lock, 0); 548 + __release(mru_lock); /* help sparse not be stupid */ 549 + } else 550 + spin_unlock(&mru->lock); 559 551 560 552 return elem ? elem->value : NULL; 561 553 } ··· 579 571 if (!mru || !mru->lists) 580 572 return NULL; 581 573 582 - mutex_spinlock(&mru->lock); 574 + spin_lock(&mru->lock); 583 575 elem = radix_tree_lookup(&mru->store, key); 584 576 if (!elem) 585 - mutex_spinunlock(&mru->lock, 0); 577 + spin_unlock(&mru->lock); 578 + else 579 + __release(mru_lock); /* help sparse not be stupid */ 586 580 587 581 return elem ? elem->value : NULL; 588 582 } ··· 596 586 */ 597 587 void 598 588 xfs_mru_cache_done( 599 - xfs_mru_cache_t *mru) 589 + xfs_mru_cache_t *mru) __releases(mru->lock) 600 590 { 601 - mutex_spinunlock(&mru->lock, 0); 591 + spin_unlock(&mru->lock); 602 592 }
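Beyond the straightforward mutex_spinlock() to spin_lock() conversion, the xfs_mru_cache.c hunks add sparse lock annotations: __releases()/__acquires() on functions that drop and reacquire mru->lock, and a bare __release() on the lookup path that intentionally returns with the lock held. A minimal sketch of that annotation idiom (not the XFS code itself):

	#include <linux/compiler.h>
	#include <linux/spinlock.h>

	struct example_cache {
		spinlock_t	lock;
		void		*value;
	};

	/* Returns with cache->lock held when a value is found; the caller
	 * must call example_done() afterwards to drop it. */
	static void *example_lookup(struct example_cache *cache)
	{
		spin_lock(&cache->lock);
		if (cache->value) {
			/* fake a release so sparse sees a balanced lock
			 * context on the path that keeps the lock held */
			__release(cache_lock);
			return cache->value;
		}
		spin_unlock(&cache->lock);
		return NULL;
	}

	static void example_done(struct example_cache *cache) __releases(cache->lock)
	{
		spin_unlock(&cache->lock);
	}

The annotations expand to nothing under gcc; they only feed sparse's context checking, which is why the conditional-return case needs the extra hint.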
+3 -4
fs/xfs/xfs_qmops.c
··· 49 49 { 50 50 int error; 51 51 xfs_trans_t *tp; 52 - unsigned long s; 53 52 54 53 mp->m_qflags = 0; 55 54 /* 56 55 * It is OK to look at sb_qflags here in mount path, 57 - * without SB_LOCK. 56 + * without m_sb_lock. 58 57 */ 59 58 if (mp->m_sb.sb_qflags == 0) 60 59 return 0; 61 - s = XFS_SB_LOCK(mp); 60 + spin_lock(&mp->m_sb_lock); 62 61 mp->m_sb.sb_qflags = 0; 63 - XFS_SB_UNLOCK(mp, s); 62 + spin_unlock(&mp->m_sb_lock); 64 63 65 64 /* 66 65 * if the fs is readonly, let the incore superblock run
+5 -4
fs/xfs/xfs_rename.c
··· 39 39 #include "xfs_refcache.h" 40 40 #include "xfs_utils.h" 41 41 #include "xfs_trans_space.h" 42 + #include "xfs_vnodeops.h" 42 43 43 44 44 45 /* ··· 119 118 inum1 = ip1->i_ino; 120 119 121 120 ASSERT(ip1); 122 - ITRACE(ip1); 121 + xfs_itrace_ref(ip1); 123 122 124 123 /* 125 124 * Unlock dp1 and lock dp2 if they are different. ··· 142 141 IRELE (ip1); 143 142 return error; 144 143 } else { 145 - ITRACE(ip2); 144 + xfs_itrace_ref(ip2); 146 145 } 147 146 148 147 /* ··· 248 247 int src_namelen = VNAMELEN(src_vname); 249 248 int target_namelen = VNAMELEN(target_vname); 250 249 251 - vn_trace_entry(src_dp, "xfs_rename", (inst_t *)__return_address); 252 - vn_trace_entry(xfs_vtoi(target_dir_vp), "xfs_rename", (inst_t *)__return_address); 250 + xfs_itrace_entry(src_dp); 251 + xfs_itrace_entry(xfs_vtoi(target_dir_vp)); 253 252 254 253 /* 255 254 * Find the XFS behavior descriptor for the target directory
+7 -12
fs/xfs/xfs_rtalloc.c
··· 73 73 */ 74 74 75 75 /* 76 - * xfs_lowbit32: get low bit set out of 32-bit argument, -1 if none set. 77 - */ 78 - STATIC int 79 - xfs_lowbit32( 80 - __uint32_t v) 81 - { 82 - if (v) 83 - return ffs(v) - 1; 84 - return -1; 85 - } 86 - 87 - /* 88 76 * Allocate space to the bitmap or summary file, and zero it, for growfs. 89 77 */ 90 78 STATIC int /* error */ ··· 432 444 } 433 445 bbno = XFS_BITTOBLOCK(mp, bno); 434 446 i = 0; 447 + ASSERT(minlen != 0); 435 448 log2len = xfs_highbit32(minlen); 436 449 /* 437 450 * Loop over all bitmap blocks (bbno + i is current block). ··· 601 612 xfs_suminfo_t sum; /* summary information for extents */ 602 613 603 614 ASSERT(minlen % prod == 0 && maxlen % prod == 0); 615 + ASSERT(maxlen != 0); 616 + 604 617 /* 605 618 * Loop over all the levels starting with maxlen. 606 619 * At each level, look at all the bitmap blocks, to see if there ··· 660 669 *rtblock = NULLRTBLOCK; 661 670 return 0; 662 671 } 672 + ASSERT(minlen != 0); 673 + ASSERT(maxlen != 0); 674 + 663 675 /* 664 676 * Loop over sizes, from maxlen down to minlen. 665 677 * This time, when we do the allocations, allow smaller ones ··· 1948 1954 nsbp->sb_blocksize * nsbp->sb_rextsize); 1949 1955 nsbp->sb_rextents = nsbp->sb_rblocks; 1950 1956 do_div(nsbp->sb_rextents, nsbp->sb_rextsize); 1957 + ASSERT(nsbp->sb_rextents != 0); 1951 1958 nsbp->sb_rextslog = xfs_highbit32(nsbp->sb_rextents); 1952 1959 nrsumlevels = nmp->m_rsumlevels = nsbp->sb_rextslog + 1; 1953 1960 nrsumsize =
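The xfs_rtalloc.c hunk drops a private copy of xfs_lowbit32() (it simply wrapped ffs()) and adds ASSERTs that minlen, maxlen and sb_rextents are non-zero before they reach xfs_highbit32(). The guard matters because bit-search helpers of this shape return -1 for a zero argument, which would corrupt the log2/level arithmetic that follows. A short sketch of the ffs()/fls()-based helpers being relied on (illustrative names):

	#include <linux/bitops.h>
	#include <linux/types.h>

	/* lowest set bit, -1 if none: what the removed private copy computed */
	static inline int example_lowbit32(u32 v)
	{
		return ffs(v) - 1;
	}

	/* highest set bit, -1 if none: why callers must rule out v == 0
	 * before using the result as a log2 value or shift count */
	static inline int example_highbit32(u32 v)
	{
		return fls(v) - 1;
	}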
-2
fs/xfs/xfs_rtalloc.h
··· 21 21 struct xfs_mount; 22 22 struct xfs_trans; 23 23 24 - #define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) 25 - 26 24 /* Min and max rt extent sizes, specified in bytes */ 27 25 #define XFS_MAX_RTEXTSIZE (1024 * 1024 * 1024) /* 1GB */ 28 26 #define XFS_DFL_RTEXTSIZE (64 * 1024) /* 64KB */
+2 -10
fs/xfs/xfs_rw.h
··· 32 32 static inline xfs_daddr_t 33 33 xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb) 34 34 { 35 - return (((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) ? \ 35 + return (XFS_IS_REALTIME_INODE(ip) ? \ 36 36 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \ 37 37 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb))); 38 - } 39 - #define XFS_FSB_TO_DB_IO(io,fsb) xfs_fsb_to_db_io(io,fsb) 40 - static inline xfs_daddr_t 41 - xfs_fsb_to_db_io(struct xfs_iocore *io, xfs_fsblock_t fsb) 42 - { 43 - return (((io)->io_flags & XFS_IOCORE_RT) ? \ 44 - XFS_FSB_TO_BB((io)->io_mount, (fsb)) : \ 45 - XFS_FSB_TO_DADDR((io)->io_mount, (fsb))); 46 38 } 47 39 48 40 /* ··· 53 61 { 54 62 xfs_extlen_t extsz; 55 63 56 - if (unlikely(ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) { 64 + if (unlikely(XFS_IS_REALTIME_INODE(ip))) { 57 65 extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) 58 66 ? ip->i_d.di_extsize 59 67 : ip->i_mount->m_sb.sb_rextsize;
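xfs_rw.h now tests XFS_IS_REALTIME_INODE(ip) instead of open-coding the di_flags check. One benefit of funnelling the test through a single predicate is that, when realtime support is compiled out, the predicate can be made a compile-time constant and branches such as the one in xfs_fsb_to_db() fold away. An illustrative sketch of that idea, under the assumption of a hypothetical config option (not the actual XFS macros):

	#include <linux/types.h>

	#define EXAMPLE_DIFLAG_REALTIME	0x01

	struct example_inode {
		u16	di_flags;
	};

	#ifdef CONFIG_EXAMPLE_RT
	#define EXAMPLE_IS_REALTIME_INODE(ip)	\
		((ip)->di_flags & EXAMPLE_DIFLAG_REALTIME)
	#else
	#define EXAMPLE_IS_REALTIME_INODE(ip)	(0)	/* branch is dead code */
	#endif

	static inline u64 example_fsb_to_db(struct example_inode *ip, u64 fsb)
	{
		/* mirrors the shape of xfs_fsb_to_db(): realtime inodes map
		 * filesystem blocks differently (arithmetic is illustrative) */
		return EXAMPLE_IS_REALTIME_INODE(ip) ? fsb << 3 : fsb << 9;
	}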
+3 -4
fs/xfs/xfs_trans.c
··· 1322 1322 xfs_lsn_t item_lsn; 1323 1323 struct xfs_mount *mp; 1324 1324 int i; 1325 - SPLDECL(s); 1326 1325 1327 1326 lidp = licp->lic_descs; 1328 1327 for (i = 0; i < licp->lic_unused; i++, lidp++) { ··· 1362 1363 * the test below. 1363 1364 */ 1364 1365 mp = lip->li_mountp; 1365 - AIL_LOCK(mp,s); 1366 + spin_lock(&mp->m_ail_lock); 1366 1367 if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) { 1367 1368 /* 1368 1369 * This will set the item's lsn to item_lsn ··· 1371 1372 * 1372 1373 * xfs_trans_update_ail() drops the AIL lock. 1373 1374 */ 1374 - xfs_trans_update_ail(mp, lip, item_lsn, s); 1375 + xfs_trans_update_ail(mp, lip, item_lsn); 1375 1376 } else { 1376 - AIL_UNLOCK(mp, s); 1377 + spin_unlock(&mp->m_ail_lock); 1377 1378 } 1378 1379 1379 1380 /*
+5 -2
fs/xfs/xfs_trans.h
··· 992 992 int *); 993 993 #define xfs_trans_commit(tp, flags) _xfs_trans_commit(tp, flags, NULL) 994 994 void xfs_trans_cancel(xfs_trans_t *, int); 995 - void xfs_trans_ail_init(struct xfs_mount *); 996 - xfs_lsn_t xfs_trans_push_ail(struct xfs_mount *, xfs_lsn_t); 995 + int xfs_trans_ail_init(struct xfs_mount *); 996 + void xfs_trans_ail_destroy(struct xfs_mount *); 997 + void xfs_trans_push_ail(struct xfs_mount *, xfs_lsn_t); 997 998 xfs_lsn_t xfs_trans_tail_ail(struct xfs_mount *); 998 999 void xfs_trans_unlocked_item(struct xfs_mount *, 999 1000 xfs_log_item_t *); 1000 1001 xfs_log_busy_slot_t *xfs_trans_add_busy(xfs_trans_t *tp, 1001 1002 xfs_agnumber_t ag, 1002 1003 xfs_extlen_t idx); 1004 + 1005 + extern kmem_zone_t *xfs_trans_zone; 1003 1006 1004 1007 #endif /* __KERNEL__ */ 1005 1008
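The prototype changes in xfs_trans.h reflect the move of AIL pushing out of the callers' context: xfs_trans_ail_init() can now fail and gains a matching xfs_trans_ail_destroy(), presumably because a per-mount xfsaild thread has to be started and stopped, and xfs_trans_push_ail() no longer returns a tail LSN because the push happens asynchronously. A generic sketch of the kthread start/stop pattern such a daemon typically uses; this is an assumption about the mechanism, not the actual xfsaild code:

	#include <linux/kthread.h>
	#include <linux/err.h>
	#include <linux/sched.h>
	#include <linux/jiffies.h>

	struct example_ail {
		struct task_struct	*xa_task;
	};

	static int example_aild(void *data)
	{
		while (!kthread_should_stop()) {
			/* push AIL items here, then sleep for the timeout */
			schedule_timeout_interruptible(msecs_to_jiffies(10));
		}
		return 0;
	}

	static int example_ail_init(struct example_ail *ail)
	{
		ail->xa_task = kthread_run(example_aild, ail, "example_aild");
		if (IS_ERR(ail->xa_task))
			return PTR_ERR(ail->xa_task);	/* why init can now fail */
		return 0;
	}

	static void example_ail_destroy(struct example_ail *ail)
	{
		kthread_stop(ail->xa_task);
	}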
+220 -120
fs/xfs/xfs_trans_ail.c
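The hunks below replace the old "flush everything under the AIL lock" behaviour with an asynchronous push: xfs_trans_push_ail() just wakes the push thread via xfsaild_wakeup(), and the scan itself (xfsaild_push()) drops the lock around each item, counts how many items it could not make progress on, and decides how long to back off before the next pass. The back-off decision at the end of the loop boils down to the following helper-sized logic; this is an illustrative restatement, not code from the patch:

	/*
	 * tout is the sleep time in milliseconds before the next scan;
	 * stuck counts items found pinned, locked or already flushing;
	 * count is the total number of items examined this pass.
	 */
	static long example_push_timeout(int reached_target, int restarts,
					 int stuck, int count, long tout)
	{
		if (reached_target) {
			/* everything below the target was pushed: wait a bit
			 * longer so the issued I/O can complete and the items
			 * can drop off the AIL before the next scan */
			return tout + 20;
		}
		if (restarts > 10 || (count && (stuck * 100) / count > 90)) {
			/* the list changed under us too often, or more than
			 * 90% of the items were stuck: back off rather than
			 * spinning on work that cannot progress yet */
			return tout + 10;
		}
		return tout;	/* keep pushing at the current cadence */
	}

For example, a pass that examined 120 items and found 110 of them pinned or locked is 91% stuck, so the thread sleeps an extra 10 ms instead of immediately rescanning.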
··· 34 34 STATIC xfs_log_item_t * xfs_ail_next(xfs_ail_entry_t *, xfs_log_item_t *); 35 35 36 36 #ifdef DEBUG 37 - STATIC void xfs_ail_check(xfs_ail_entry_t *); 37 + STATIC void xfs_ail_check(xfs_ail_entry_t *, xfs_log_item_t *); 38 38 #else 39 - #define xfs_ail_check(a) 39 + #define xfs_ail_check(a,l) 40 40 #endif /* DEBUG */ 41 41 42 42 ··· 55 55 { 56 56 xfs_lsn_t lsn; 57 57 xfs_log_item_t *lip; 58 - SPLDECL(s); 59 58 60 - AIL_LOCK(mp,s); 61 - lip = xfs_ail_min(&(mp->m_ail)); 59 + spin_lock(&mp->m_ail_lock); 60 + lip = xfs_ail_min(&(mp->m_ail.xa_ail)); 62 61 if (lip == NULL) { 63 62 lsn = (xfs_lsn_t)0; 64 63 } else { 65 64 lsn = lip->li_lsn; 66 65 } 67 - AIL_UNLOCK(mp, s); 66 + spin_unlock(&mp->m_ail_lock); 68 67 69 68 return lsn; 70 69 } ··· 71 72 /* 72 73 * xfs_trans_push_ail 73 74 * 74 - * This routine is called to move the tail of the AIL 75 - * forward. It does this by trying to flush items in the AIL 76 - * whose lsns are below the given threshold_lsn. 75 + * This routine is called to move the tail of the AIL forward. It does this by 76 + * trying to flush items in the AIL whose lsns are below the given 77 + * threshold_lsn. 77 78 * 78 - * The routine returns the lsn of the tail of the log. 79 + * the push is run asynchronously in a separate thread, so we return the tail 80 + * of the log right now instead of the tail after the push. This means we will 81 + * either continue right away, or we will sleep waiting on the async thread to 82 + * do it's work. 83 + * 84 + * We do this unlocked - we only need to know whether there is anything in the 85 + * AIL at the time we are called. We don't need to access the contents of 86 + * any of the objects, so the lock is not needed. 79 87 */ 80 - xfs_lsn_t 88 + void 81 89 xfs_trans_push_ail( 82 90 xfs_mount_t *mp, 83 91 xfs_lsn_t threshold_lsn) 84 92 { 85 - xfs_lsn_t lsn; 86 93 xfs_log_item_t *lip; 87 - int gen; 88 - int restarts; 89 - int lock_result; 90 - int flush_log; 91 - SPLDECL(s); 92 94 93 - #define XFS_TRANS_PUSH_AIL_RESTARTS 1000 95 + lip = xfs_ail_min(&mp->m_ail.xa_ail); 96 + if (lip && !XFS_FORCED_SHUTDOWN(mp)) { 97 + if (XFS_LSN_CMP(threshold_lsn, mp->m_ail.xa_target) > 0) 98 + xfsaild_wakeup(mp, threshold_lsn); 99 + } 100 + } 94 101 95 - AIL_LOCK(mp,s); 96 - lip = xfs_trans_first_ail(mp, &gen); 97 - if (lip == NULL || XFS_FORCED_SHUTDOWN(mp)) { 102 + /* 103 + * Return the item in the AIL with the current lsn. 104 + * Return the current tree generation number for use 105 + * in calls to xfs_trans_next_ail(). 
106 + */ 107 + STATIC xfs_log_item_t * 108 + xfs_trans_first_push_ail( 109 + xfs_mount_t *mp, 110 + int *gen, 111 + xfs_lsn_t lsn) 112 + { 113 + xfs_log_item_t *lip; 114 + 115 + lip = xfs_ail_min(&(mp->m_ail.xa_ail)); 116 + *gen = (int)mp->m_ail.xa_gen; 117 + if (lsn == 0) 118 + return lip; 119 + 120 + while (lip && (XFS_LSN_CMP(lip->li_lsn, lsn) < 0)) 121 + lip = lip->li_ail.ail_forw; 122 + 123 + return lip; 124 + } 125 + 126 + /* 127 + * Function that does the work of pushing on the AIL 128 + */ 129 + long 130 + xfsaild_push( 131 + xfs_mount_t *mp, 132 + xfs_lsn_t *last_lsn) 133 + { 134 + long tout = 1000; /* milliseconds */ 135 + xfs_lsn_t last_pushed_lsn = *last_lsn; 136 + xfs_lsn_t target = mp->m_ail.xa_target; 137 + xfs_lsn_t lsn; 138 + xfs_log_item_t *lip; 139 + int gen; 140 + int restarts; 141 + int flush_log, count, stuck; 142 + 143 + #define XFS_TRANS_PUSH_AIL_RESTARTS 10 144 + 145 + spin_lock(&mp->m_ail_lock); 146 + lip = xfs_trans_first_push_ail(mp, &gen, *last_lsn); 147 + if (!lip || XFS_FORCED_SHUTDOWN(mp)) { 98 148 /* 99 - * Just return if the AIL is empty. 149 + * AIL is empty or our push has reached the end. 100 150 */ 101 - AIL_UNLOCK(mp, s); 102 - return (xfs_lsn_t)0; 151 + spin_unlock(&mp->m_ail_lock); 152 + last_pushed_lsn = 0; 153 + goto out; 103 154 } 104 155 105 156 XFS_STATS_INC(xs_push_ail); 106 157 107 158 /* 108 159 * While the item we are looking at is below the given threshold 109 - * try to flush it out. Make sure to limit the number of times 110 - * we allow xfs_trans_next_ail() to restart scanning from the 111 - * beginning of the list. We'd like not to stop until we've at least 160 + * try to flush it out. We'd like not to stop until we've at least 112 161 * tried to push on everything in the AIL with an LSN less than 113 - * the given threshold. However, we may give up before that if 114 - * we realize that we've been holding the AIL_LOCK for 'too long', 115 - * blocking interrupts. Currently, too long is < 500us roughly. 162 + * the given threshold. 163 + * 164 + * However, we will stop after a certain number of pushes and wait 165 + * for a reduced timeout to fire before pushing further. This 166 + * prevents use from spinning when we can't do anything or there is 167 + * lots of contention on the AIL lists. 116 168 */ 117 - flush_log = 0; 118 - restarts = 0; 119 - while (((restarts < XFS_TRANS_PUSH_AIL_RESTARTS) && 120 - (XFS_LSN_CMP(lip->li_lsn, threshold_lsn) < 0))) { 169 + tout = 10; 170 + lsn = lip->li_lsn; 171 + flush_log = stuck = count = restarts = 0; 172 + while ((XFS_LSN_CMP(lip->li_lsn, target) < 0)) { 173 + int lock_result; 121 174 /* 122 - * If we can lock the item without sleeping, unlock 123 - * the AIL lock and flush the item. Then re-grab the 124 - * AIL lock so we can look for the next item on the 125 - * AIL. Since we unlock the AIL while we flush the 126 - * item, the next routine may start over again at the 127 - * the beginning of the list if anything has changed. 128 - * That is what the generation count is for. 175 + * If we can lock the item without sleeping, unlock the AIL 176 + * lock and flush the item. Then re-grab the AIL lock so we 177 + * can look for the next item on the AIL. List changes are 178 + * handled by the AIL lookup functions internally 129 179 * 130 - * If we can't lock the item, either its holder will flush 131 - * it or it is already being flushed or it is being relogged. 132 - * In any of these case it is being taken care of and we 133 - * can just skip to the next item in the list. 
180 + * If we can't lock the item, either its holder will flush it 181 + * or it is already being flushed or it is being relogged. In 182 + * any of these cases it is being taken care of and we can just 183 + * skip to the next item in the list. 134 184 */ 135 185 lock_result = IOP_TRYLOCK(lip); 186 + spin_unlock(&mp->m_ail_lock); 136 187 switch (lock_result) { 137 - case XFS_ITEM_SUCCESS: 138 - AIL_UNLOCK(mp, s); 188 + case XFS_ITEM_SUCCESS: 139 189 XFS_STATS_INC(xs_push_ail_success); 140 190 IOP_PUSH(lip); 141 - AIL_LOCK(mp,s); 191 + last_pushed_lsn = lsn; 142 192 break; 143 193 144 - case XFS_ITEM_PUSHBUF: 145 - AIL_UNLOCK(mp, s); 194 + case XFS_ITEM_PUSHBUF: 146 195 XFS_STATS_INC(xs_push_ail_pushbuf); 147 - #ifdef XFSRACEDEBUG 148 - delay_for_intr(); 149 - delay(300); 150 - #endif 151 - ASSERT(lip->li_ops->iop_pushbuf); 152 - ASSERT(lip); 153 196 IOP_PUSHBUF(lip); 154 - AIL_LOCK(mp,s); 197 + last_pushed_lsn = lsn; 155 198 break; 156 199 157 - case XFS_ITEM_PINNED: 200 + case XFS_ITEM_PINNED: 158 201 XFS_STATS_INC(xs_push_ail_pinned); 202 + stuck++; 159 203 flush_log = 1; 160 204 break; 161 205 162 - case XFS_ITEM_LOCKED: 206 + case XFS_ITEM_LOCKED: 163 207 XFS_STATS_INC(xs_push_ail_locked); 208 + last_pushed_lsn = lsn; 209 + stuck++; 164 210 break; 165 211 166 - case XFS_ITEM_FLUSHING: 212 + case XFS_ITEM_FLUSHING: 167 213 XFS_STATS_INC(xs_push_ail_flushing); 214 + last_pushed_lsn = lsn; 215 + stuck++; 168 216 break; 169 217 170 - default: 218 + default: 171 219 ASSERT(0); 172 220 break; 173 221 } 174 222 175 - lip = xfs_trans_next_ail(mp, lip, &gen, &restarts); 176 - if (lip == NULL) { 223 + spin_lock(&mp->m_ail_lock); 224 + /* should we bother continuing? */ 225 + if (XFS_FORCED_SHUTDOWN(mp)) 177 226 break; 178 - } 179 - if (XFS_FORCED_SHUTDOWN(mp)) { 180 - /* 181 - * Just return if we shut down during the last try. 182 - */ 183 - AIL_UNLOCK(mp, s); 184 - return (xfs_lsn_t)0; 185 - } 227 + ASSERT(mp->m_log); 186 228 229 + count++; 230 + 231 + /* 232 + * Are there too many items we can't do anything with? 233 + * If we are skipping too many items because we can't flush 234 + * them or they are already being flushed, we back off and 235 + * give them time to complete whatever operation is being 236 + * done. i.e. remove pressure from the AIL while we can't make 237 + * progress so traversals don't slow down further inserts and 238 + * removals to/from the AIL. 239 + * 240 + * The value of 100 is an arbitrary magic number based on 241 + * observation. 242 + */ 243 + if (stuck > 100) 244 + break; 245 + 246 + lip = xfs_trans_next_ail(mp, lip, &gen, &restarts); 247 + if (lip == NULL) 248 + break; 249 + if (restarts > XFS_TRANS_PUSH_AIL_RESTARTS) 250 + break; 251 + lsn = lip->li_lsn; 187 252 } 253 + spin_unlock(&mp->m_ail_lock); 188 254 189 255 if (flush_log) { 190 256 /* ··· 257 193 * push out the log so it will become unpinned and 258 194 * move forward in the AIL. 259 195 */ 260 - AIL_UNLOCK(mp, s); 261 196 XFS_STATS_INC(xs_push_ail_flush); 262 197 xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); 263 - AIL_LOCK(mp, s); 264 198 } 265 199 266 - lip = xfs_ail_min(&(mp->m_ail)); 267 - if (lip == NULL) { 268 - lsn = (xfs_lsn_t)0; 269 - } else { 270 - lsn = lip->li_lsn; 200 + /* 201 + * We reached the target so wait a bit longer for I/O to complete and 202 + * remove pushed items from the AIL before we start the next scan from 203 + * the start of the AIL.
204 + */ 205 + if ((XFS_LSN_CMP(lsn, target) >= 0)) { 206 + tout += 20; 207 + last_pushed_lsn = 0; 208 + } else if ((restarts > XFS_TRANS_PUSH_AIL_RESTARTS) || 209 + (count && ((stuck * 100) / count > 90))) { 210 + /* 211 + * Either there is a lot of contention on the AIL or we 212 + * are stuck due to operations in progress. "Stuck" in this 213 + * case is defined as >90% of the items we tried to push 214 + * were stuck. 215 + * 216 + * Backoff a bit more to allow some I/O to complete before 217 + * continuing from where we were. 218 + */ 219 + tout += 10; 271 220 } 272 - 273 - AIL_UNLOCK(mp, s); 274 - return lsn; 275 - } /* xfs_trans_push_ail */ 221 + out: 222 + *last_lsn = last_pushed_lsn; 223 + return tout; 224 + } /* xfsaild_push */ 276 225 277 226 278 227 /* ··· 326 249 * the call to xfs_log_move_tail() doesn't do anything if there's 327 250 * not enough free space to wake people up so we're safe calling it. 328 251 */ 329 - min_lip = xfs_ail_min(&mp->m_ail); 252 + min_lip = xfs_ail_min(&mp->m_ail.xa_ail); 330 253 331 254 if (min_lip == lip) 332 255 xfs_log_move_tail(mp, 1); ··· 346 269 * has changed. 347 270 * 348 271 * This function must be called with the AIL lock held. The lock 349 - * is dropped before returning, so the caller must pass in the 350 - * cookie returned by AIL_LOCK. 272 + * is dropped before returning. 351 273 */ 352 274 void 353 275 xfs_trans_update_ail( 354 276 xfs_mount_t *mp, 355 277 xfs_log_item_t *lip, 356 - xfs_lsn_t lsn, 357 - unsigned long s) __releases(mp->m_ail_lock) 278 + xfs_lsn_t lsn) __releases(mp->m_ail_lock) 358 279 { 359 280 xfs_ail_entry_t *ailp; 360 281 xfs_log_item_t *dlip=NULL; 361 282 xfs_log_item_t *mlip; /* ptr to minimum lip */ 362 283 363 - ailp = &(mp->m_ail); 284 + ailp = &(mp->m_ail.xa_ail); 364 285 mlip = xfs_ail_min(ailp); 365 286 366 287 if (lip->li_flags & XFS_LI_IN_AIL) { ··· 371 296 lip->li_lsn = lsn; 372 297 373 298 xfs_ail_insert(ailp, lip); 374 - mp->m_ail_gen++; 299 + mp->m_ail.xa_gen++; 375 300 376 301 if (mlip == dlip) { 377 - mlip = xfs_ail_min(&(mp->m_ail)); 378 - AIL_UNLOCK(mp, s); 302 + mlip = xfs_ail_min(&(mp->m_ail.xa_ail)); 303 + spin_unlock(&mp->m_ail_lock); 379 304 xfs_log_move_tail(mp, mlip->li_lsn); 380 305 } else { 381 - AIL_UNLOCK(mp, s); 306 + spin_unlock(&mp->m_ail_lock); 382 307 } 383 308 384 309 ··· 397 322 * has changed. 398 323 * 399 324 * This function must be called with the AIL lock held. The lock 400 - * is dropped before returning, so the caller must pass in the 401 - * cookie returned by AIL_LOCK. 325 + * is dropped before returning. 402 326 */ 403 327 void 404 328 xfs_trans_delete_ail( 405 329 xfs_mount_t *mp, 406 - xfs_log_item_t *lip, 407 - unsigned long s) __releases(mp->m_ail_lock) 330 + xfs_log_item_t *lip) __releases(mp->m_ail_lock) 408 331 { 409 332 xfs_ail_entry_t *ailp; 410 333 xfs_log_item_t *dlip; 411 334 xfs_log_item_t *mlip; 412 335 413 336 if (lip->li_flags & XFS_LI_IN_AIL) { 414 - ailp = &(mp->m_ail); 337 + ailp = &(mp->m_ail.xa_ail); 415 338 mlip = xfs_ail_min(ailp); 416 339 dlip = xfs_ail_delete(ailp, lip); 417 340 ASSERT(dlip == lip); ··· 417 344 418 345 lip->li_flags &= ~XFS_LI_IN_AIL; 419 346 lip->li_lsn = 0; 420 - mp->m_ail_gen++; 347 + mp->m_ail.xa_gen++; 421 348 422 349 if (mlip == dlip) { 423 - mlip = xfs_ail_min(&(mp->m_ail)); 424 - AIL_UNLOCK(mp, s); 350 + mlip = xfs_ail_min(&(mp->m_ail.xa_ail)); 351 + spin_unlock(&mp->m_ail_lock); 425 352 xfs_log_move_tail(mp, (mlip ? 
mlip->li_lsn : 0)); 426 353 } else { 427 - AIL_UNLOCK(mp, s); 354 + spin_unlock(&mp->m_ail_lock); 428 355 } 429 356 } 430 357 else { ··· 433 360 * serious trouble if we get to this stage. 434 361 */ 435 362 if (XFS_FORCED_SHUTDOWN(mp)) 436 - AIL_UNLOCK(mp, s); 363 + spin_unlock(&mp->m_ail_lock); 437 364 else { 438 365 xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp, 439 366 "%s: attempting to delete a log item that is not in the AIL", 440 367 __FUNCTION__); 441 - AIL_UNLOCK(mp, s); 368 + spin_unlock(&mp->m_ail_lock); 442 369 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 443 370 } 444 371 } ··· 458 385 { 459 386 xfs_log_item_t *lip; 460 387 461 - lip = xfs_ail_min(&(mp->m_ail)); 462 - *gen = (int)mp->m_ail_gen; 388 + lip = xfs_ail_min(&(mp->m_ail.xa_ail)); 389 + *gen = (int)mp->m_ail.xa_gen; 463 390 464 - return (lip); 391 + return lip; 465 392 } 466 393 467 394 /* ··· 481 408 xfs_log_item_t *nlip; 482 409 483 410 ASSERT(mp && lip && gen); 484 - if (mp->m_ail_gen == *gen) { 485 - nlip = xfs_ail_next(&(mp->m_ail), lip); 411 + if (mp->m_ail.xa_gen == *gen) { 412 + nlip = xfs_ail_next(&(mp->m_ail.xa_ail), lip); 486 413 } else { 487 - nlip = xfs_ail_min(&(mp->m_ail)); 488 - *gen = (int)mp->m_ail_gen; 414 + nlip = xfs_ail_min(&(mp->m_ail).xa_ail); 415 + *gen = (int)mp->m_ail.xa_gen; 489 416 if (restarts != NULL) { 490 417 XFS_STATS_INC(xs_push_ail_restarts); 491 418 (*restarts)++; ··· 510 437 /* 511 438 * Initialize the doubly linked list to point only to itself. 512 439 */ 513 - void 440 + int 514 441 xfs_trans_ail_init( 515 442 xfs_mount_t *mp) 516 443 { 517 - mp->m_ail.ail_forw = (xfs_log_item_t*)&(mp->m_ail); 518 - mp->m_ail.ail_back = (xfs_log_item_t*)&(mp->m_ail); 444 + mp->m_ail.xa_ail.ail_forw = (xfs_log_item_t*)&mp->m_ail.xa_ail; 445 + mp->m_ail.xa_ail.ail_back = (xfs_log_item_t*)&mp->m_ail.xa_ail; 446 + return xfsaild_start(mp); 447 + } 448 + 449 + void 450 + xfs_trans_ail_destroy( 451 + xfs_mount_t *mp) 452 + { 453 + xfsaild_stop(mp); 519 454 } 520 455 521 456 /* ··· 563 482 next_lip->li_ail.ail_forw = lip; 564 483 lip->li_ail.ail_forw->li_ail.ail_back = lip; 565 484 566 - xfs_ail_check(base); 485 + xfs_ail_check(base, lip); 567 486 return; 568 487 } 569 488 ··· 577 496 xfs_log_item_t *lip) 578 497 /* ARGSUSED */ 579 498 { 499 + xfs_ail_check(base, lip); 580 500 lip->li_ail.ail_forw->li_ail.ail_back = lip->li_ail.ail_back; 581 501 lip->li_ail.ail_back->li_ail.ail_forw = lip->li_ail.ail_forw; 582 502 lip->li_ail.ail_forw = NULL; 583 503 lip->li_ail.ail_back = NULL; 584 504 585 - xfs_ail_check(base); 586 505 return lip; 587 506 } 588 507 ··· 626 545 */ 627 546 STATIC void 628 547 xfs_ail_check( 629 - xfs_ail_entry_t *base) 548 + xfs_ail_entry_t *base, 549 + xfs_log_item_t *lip) 630 550 { 631 - xfs_log_item_t *lip; 632 551 xfs_log_item_t *prev_lip; 633 552 634 - lip = base->ail_forw; 635 - if (lip == (xfs_log_item_t*)base) { 553 + prev_lip = base->ail_forw; 554 + if (prev_lip == (xfs_log_item_t*)base) { 636 555 /* 637 556 * Make sure the pointers are correct when the list 638 557 * is empty. ··· 642 561 } 643 562 644 563 /* 564 + * Check the next and previous entries are valid. 
565 + */ 566 + ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0); 567 + prev_lip = lip->li_ail.ail_back; 568 + if (prev_lip != (xfs_log_item_t*)base) { 569 + ASSERT(prev_lip->li_ail.ail_forw == lip); 570 + ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0); 571 + } 572 + prev_lip = lip->li_ail.ail_forw; 573 + if (prev_lip != (xfs_log_item_t*)base) { 574 + ASSERT(prev_lip->li_ail.ail_back == lip); 575 + ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0); 576 + } 577 + 578 + 579 + #ifdef XFS_TRANS_DEBUG 580 + /* 645 581 * Walk the list checking forward and backward pointers, 646 582 * lsn ordering, and that every entry has the XFS_LI_IN_AIL 647 - * flag set. 583 + * flag set. This is really expensive, so only do it when 584 + * specifically debugging the transaction subsystem. 648 585 */ 649 586 prev_lip = (xfs_log_item_t*)base; 650 587 while (lip != (xfs_log_item_t*)base) { ··· 677 578 } 678 579 ASSERT(lip == (xfs_log_item_t*)base); 679 580 ASSERT(base->ail_back == prev_lip); 581 + #endif /* XFS_TRANS_DEBUG */ 680 582 } 681 583 #endif /* DEBUG */
+1
fs/xfs/xfs_trans_item.c
··· 21 21 #include "xfs_log.h" 22 22 #include "xfs_inum.h" 23 23 #include "xfs_trans.h" 24 + #include "xfs_trans_priv.h" 24 25 25 26 STATIC int xfs_trans_unlock_chunk(xfs_log_item_chunk_t *, 26 27 int, int, xfs_lsn_t);
+10 -3
fs/xfs/xfs_trans_priv.h
··· 47 47 * From xfs_trans_ail.c 48 48 */ 49 49 void xfs_trans_update_ail(struct xfs_mount *mp, 50 - struct xfs_log_item *lip, xfs_lsn_t lsn, 51 - unsigned long s) 50 + struct xfs_log_item *lip, xfs_lsn_t lsn) 52 51 __releases(mp->m_ail_lock); 53 52 void xfs_trans_delete_ail(struct xfs_mount *mp, 54 - struct xfs_log_item *lip, unsigned long s) 53 + struct xfs_log_item *lip) 55 54 __releases(mp->m_ail_lock); 56 55 struct xfs_log_item *xfs_trans_first_ail(struct xfs_mount *, int *); 57 56 struct xfs_log_item *xfs_trans_next_ail(struct xfs_mount *, 58 57 struct xfs_log_item *, int *, int *); 59 58 59 + 60 + /* 61 + * AIL push thread support 62 + */ 63 + long xfsaild_push(struct xfs_mount *, xfs_lsn_t *); 64 + void xfsaild_wakeup(struct xfs_mount *, xfs_lsn_t); 65 + int xfsaild_start(struct xfs_mount *); 66 + void xfsaild_stop(struct xfs_mount *); 60 67 61 68 #endif /* __XFS_TRANS_PRIV_H__ */
+6 -5
fs/xfs/xfs_utils.c
··· 73 73 { 74 74 int error; 75 75 76 - vn_trace_entry(dp, __FUNCTION__, (inst_t *)__return_address); 76 + xfs_itrace_entry(dp); 77 77 78 78 error = xfs_dir_lookup(NULL, dp, VNAME(dentry), VNAMELEN(dentry), inum); 79 79 if (!error) { ··· 302 302 303 303 ASSERT (ip->i_d.di_nlink > 0); 304 304 ip->i_d.di_nlink--; 305 + drop_nlink(ip->i_vnode); 305 306 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 306 307 307 308 error = 0; ··· 331 330 xfs_inode_t *ip) 332 331 { 333 332 xfs_mount_t *mp; 334 - unsigned long s; 335 333 336 334 ASSERT(ismrlocked (&ip->i_lock, MR_UPDATE)); 337 335 ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1); ··· 340 340 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad)); 341 341 mp = tp->t_mountp; 342 342 if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) { 343 - s = XFS_SB_LOCK(mp); 343 + spin_lock(&mp->m_sb_lock); 344 344 if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) { 345 345 XFS_SB_VERSION_ADDNLINK(&mp->m_sb); 346 - XFS_SB_UNLOCK(mp, s); 346 + spin_unlock(&mp->m_sb_lock); 347 347 xfs_mod_sb(tp, XFS_SB_VERSIONNUM); 348 348 } else { 349 - XFS_SB_UNLOCK(mp, s); 349 + spin_unlock(&mp->m_sb_lock); 350 350 } 351 351 } 352 352 /* Caller must log the inode */ ··· 366 366 367 367 ASSERT(ip->i_d.di_nlink > 0); 368 368 ip->i_d.di_nlink++; 369 + inc_nlink(ip->i_vnode); 369 370 if ((ip->i_d.di_version == XFS_DINODE_VERSION_1) && 370 371 (ip->i_d.di_nlink > XFS_MAXLINK_1)) { 371 372 /*
-2
fs/xfs/xfs_utils.h
··· 20 20 21 21 #define IRELE(ip) VN_RELE(XFS_ITOV(ip)) 22 22 #define IHOLD(ip) VN_HOLD(XFS_ITOV(ip)) 23 - #define ITRACE(ip) vn_trace_ref(ip, __FILE__, __LINE__, \ 24 - (inst_t *)__return_address) 25 23 26 24 extern int xfs_get_dir_entry (bhv_vname_t *, xfs_inode_t **); 27 25 extern int xfs_dir_lookup_int (xfs_inode_t *, uint, bhv_vname_t *, xfs_ino_t *,
+29 -764
fs/xfs/xfs_vfsops.c
··· 58 58 #include "xfs_vfsops.h" 59 59 60 60 61 - int 61 + int __init 62 62 xfs_init(void) 63 63 { 64 - extern kmem_zone_t *xfs_bmap_free_item_zone; 65 - extern kmem_zone_t *xfs_btree_cur_zone; 66 - extern kmem_zone_t *xfs_trans_zone; 67 - extern kmem_zone_t *xfs_buf_item_zone; 68 - extern kmem_zone_t *xfs_dabuf_zone; 69 64 #ifdef XFS_DABUF_DEBUG 70 - extern lock_t xfs_dabuf_global_lock; 71 - spinlock_init(&xfs_dabuf_global_lock, "xfsda"); 65 + extern spinlock_t xfs_dabuf_global_lock; 66 + spin_lock_init(&xfs_dabuf_global_lock); 72 67 #endif 73 68 74 69 /* ··· 147 152 return 0; 148 153 } 149 154 150 - void 155 + void __exit 151 156 xfs_cleanup(void) 152 157 { 153 - extern kmem_zone_t *xfs_bmap_free_item_zone; 154 - extern kmem_zone_t *xfs_btree_cur_zone; 155 158 extern kmem_zone_t *xfs_inode_zone; 156 - extern kmem_zone_t *xfs_trans_zone; 157 - extern kmem_zone_t *xfs_da_state_zone; 158 - extern kmem_zone_t *xfs_dabuf_zone; 159 159 extern kmem_zone_t *xfs_efd_zone; 160 160 extern kmem_zone_t *xfs_efi_zone; 161 - extern kmem_zone_t *xfs_buf_item_zone; 162 161 extern kmem_zone_t *xfs_icluster_zone; 163 162 164 163 xfs_cleanup_procfs(); ··· 438 449 if (error) 439 450 return error; 440 451 441 - mp->m_io_ops = xfs_iocore_xfs; 442 - 443 452 if (args->flags & XFSMNT_QUIET) 444 453 flags |= XFS_MFSI_QUIET; 445 454 ··· 531 544 if ((error = xfs_filestream_mount(mp))) 532 545 goto error2; 533 546 534 - error = XFS_IOINIT(mp, args, flags); 547 + error = xfs_mountfs(mp, flags); 535 548 if (error) 536 549 goto error2; 537 550 ··· 681 694 * care of the metadata. New transactions are already blocked, so we need to 682 695 * wait for any remaining transactions to drain out before proceding. 683 696 */ 684 - STATIC void 697 + void 685 698 xfs_attr_quiesce( 686 699 xfs_mount_t *mp) 687 700 { ··· 808 821 } 809 822 810 823 /* 811 - * xfs_root extracts the root vnode from a vfs. 812 - * 813 - * vfsp -- the vfs struct for the desired file system 814 - * vpp -- address of the caller's vnode pointer which should be 815 - * set to the desired fs root vnode 816 - */ 817 - int 818 - xfs_root( 819 - xfs_mount_t *mp, 820 - bhv_vnode_t **vpp) 821 - { 822 - bhv_vnode_t *vp; 823 - 824 - vp = XFS_ITOV(mp->m_rootip); 825 - VN_HOLD(vp); 826 - *vpp = vp; 827 - return 0; 828 - } 829 - 830 - /* 831 - * xfs_statvfs 832 - * 833 - * Fill in the statvfs structure for the given file system. We use 834 - * the superblock lock in the mount structure to ensure a consistent 835 - * snapshot of the counters returned. 836 - */ 837 - int 838 - xfs_statvfs( 839 - xfs_mount_t *mp, 840 - bhv_statvfs_t *statp, 841 - bhv_vnode_t *vp) 842 - { 843 - __uint64_t fakeinos; 844 - xfs_extlen_t lsize; 845 - xfs_sb_t *sbp; 846 - unsigned long s; 847 - 848 - sbp = &(mp->m_sb); 849 - 850 - statp->f_type = XFS_SB_MAGIC; 851 - 852 - xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT); 853 - s = XFS_SB_LOCK(mp); 854 - statp->f_bsize = sbp->sb_blocksize; 855 - lsize = sbp->sb_logstart ? 
sbp->sb_logblocks : 0; 856 - statp->f_blocks = sbp->sb_dblocks - lsize; 857 - statp->f_bfree = statp->f_bavail = 858 - sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); 859 - fakeinos = statp->f_bfree << sbp->sb_inopblog; 860 - #if XFS_BIG_INUMS 861 - fakeinos += mp->m_inoadd; 862 - #endif 863 - statp->f_files = 864 - MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER); 865 - if (mp->m_maxicount) 866 - #if XFS_BIG_INUMS 867 - if (!mp->m_inoadd) 868 - #endif 869 - statp->f_files = min_t(typeof(statp->f_files), 870 - statp->f_files, 871 - mp->m_maxicount); 872 - statp->f_ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree); 873 - XFS_SB_UNLOCK(mp, s); 874 - 875 - xfs_statvfs_fsid(statp, mp); 876 - statp->f_namelen = MAXNAMELEN - 1; 877 - 878 - if (vp) 879 - XFS_QM_DQSTATVFS(xfs_vtoi(vp), statp); 880 - return 0; 881 - } 882 - 883 - 884 - /* 885 824 * xfs_sync flushes any pending I/O to file system vfsp. 886 825 * 887 826 * This routine is called by vfs_sync() to make sure that things make it ··· 894 981 int *bypassed) 895 982 { 896 983 xfs_inode_t *ip = NULL; 897 - xfs_inode_t *ip_next; 898 - xfs_buf_t *bp; 899 984 bhv_vnode_t *vp = NULL; 900 985 int error; 901 986 int last_error; ··· 903 992 boolean_t mount_locked; 904 993 boolean_t vnode_refed; 905 994 int preempt; 906 - xfs_dinode_t *dip; 907 995 xfs_iptr_t *ipointer; 908 996 #ifdef DEBUG 909 997 boolean_t ipointer_in = B_FALSE; ··· 955 1045 956 1046 #define XFS_PREEMPT_MASK 0x7f 957 1047 1048 + ASSERT(!(flags & SYNC_BDFLUSH)); 1049 + 958 1050 if (bypassed) 959 1051 *bypassed = 0; 960 1052 if (mp->m_flags & XFS_MOUNT_RDONLY) ··· 969 1057 ipointer = (xfs_iptr_t *)kmem_zalloc(sizeof(xfs_iptr_t), KM_SLEEP); 970 1058 971 1059 fflag = XFS_B_ASYNC; /* default is don't wait */ 972 - if (flags & (SYNC_BDFLUSH | SYNC_DELWRI)) 1060 + if (flags & SYNC_DELWRI) 973 1061 fflag = XFS_B_DELWRI; 974 1062 if (flags & SYNC_WAIT) 975 1063 fflag = 0; /* synchronous overrides all */ ··· 1059 1147 } 1060 1148 1061 1149 /* 1062 - * If this is just vfs_sync() or pflushd() calling 1063 - * then we can skip inodes for which it looks like 1064 - * there is nothing to do. Since we don't have the 1065 - * inode locked this is racy, but these are periodic 1066 - * calls so it doesn't matter. For the others we want 1067 - * to know for sure, so we at least try to lock them. 1068 - */ 1069 - if (flags & SYNC_BDFLUSH) { 1070 - if (((ip->i_itemp == NULL) || 1071 - !(ip->i_itemp->ili_format.ilf_fields & 1072 - XFS_ILOG_ALL)) && 1073 - (ip->i_update_core == 0)) { 1074 - ip = ip->i_mnext; 1075 - continue; 1076 - } 1077 - } 1078 - 1079 - /* 1080 1150 * Try to lock without sleeping. We're out of order with 1081 1151 * the inode list lock here, so if we fail we need to drop 1082 1152 * the mount lock and try again. If we're called from ··· 1075 1181 * it. 1076 1182 */ 1077 1183 if (xfs_ilock_nowait(ip, lock_flags) == 0) { 1078 - if ((flags & SYNC_BDFLUSH) || (vp == NULL)) { 1184 + if (vp == NULL) { 1079 1185 ip = ip->i_mnext; 1080 1186 continue; 1081 1187 } ··· 1136 1242 xfs_ilock(ip, XFS_ILOCK_SHARED); 1137 1243 } 1138 1244 1139 - if (flags & SYNC_BDFLUSH) { 1140 - if ((flags & SYNC_ATTR) && 1141 - ((ip->i_update_core) || 1142 - ((ip->i_itemp != NULL) && 1143 - (ip->i_itemp->ili_format.ilf_fields != 0)))) { 1245 + if ((flags & SYNC_ATTR) && 1246 + (ip->i_update_core || 1247 + (ip->i_itemp && ip->i_itemp->ili_format.ilf_fields))) { 1248 + if (mount_locked) 1249 + IPOINTER_INSERT(ip, mp); 1144 1250 1145 - /* Insert marker and drop lock if not already 1146 - * done. 
1147 - */ 1148 - if (mount_locked) { 1149 - IPOINTER_INSERT(ip, mp); 1150 - } 1251 + if (flags & SYNC_WAIT) { 1252 + xfs_iflock(ip); 1253 + error = xfs_iflush(ip, XFS_IFLUSH_SYNC); 1151 1254 1152 - /* 1153 - * We don't want the periodic flushing of the 1154 - * inodes by vfs_sync() to interfere with 1155 - * I/O to the file, especially read I/O 1156 - * where it is only the access time stamp 1157 - * that is being flushed out. To prevent 1158 - * long periods where we have both inode 1159 - * locks held shared here while reading the 1160 - * inode's buffer in from disk, we drop the 1161 - * inode lock while reading in the inode 1162 - * buffer. We have to release the buffer 1163 - * and reacquire the inode lock so that they 1164 - * are acquired in the proper order (inode 1165 - * locks first). The buffer will go at the 1166 - * end of the lru chain, though, so we can 1167 - * expect it to still be there when we go 1168 - * for it again in xfs_iflush(). 1169 - */ 1170 - if ((xfs_ipincount(ip) == 0) && 1171 - xfs_iflock_nowait(ip)) { 1172 - 1173 - xfs_ifunlock(ip); 1174 - xfs_iunlock(ip, XFS_ILOCK_SHARED); 1175 - 1176 - error = xfs_itobp(mp, NULL, ip, 1177 - &dip, &bp, 0, 0); 1178 - if (!error) { 1179 - xfs_buf_relse(bp); 1180 - } else { 1181 - /* Bailing out, remove the 1182 - * marker and free it. 1183 - */ 1184 - XFS_MOUNT_ILOCK(mp); 1185 - IPOINTER_REMOVE(ip, mp); 1186 - XFS_MOUNT_IUNLOCK(mp); 1187 - 1188 - ASSERT(!(lock_flags & 1189 - XFS_IOLOCK_SHARED)); 1190 - 1191 - kmem_free(ipointer, 1192 - sizeof(xfs_iptr_t)); 1193 - return (0); 1194 - } 1195 - 1196 - /* 1197 - * Since we dropped the inode lock, 1198 - * the inode may have been reclaimed. 1199 - * Therefore, we reacquire the mount 1200 - * lock and check to see if we were the 1201 - * inode reclaimed. If this happened 1202 - * then the ipointer marker will no 1203 - * longer point back at us. In this 1204 - * case, move ip along to the inode 1205 - * after the marker, remove the marker 1206 - * and continue. 1207 - */ 1208 - XFS_MOUNT_ILOCK(mp); 1209 - mount_locked = B_TRUE; 1210 - 1211 - if (ip != ipointer->ip_mprev) { 1212 - IPOINTER_REMOVE(ip, mp); 1213 - 1214 - ASSERT(!vnode_refed); 1215 - ASSERT(!(lock_flags & 1216 - XFS_IOLOCK_SHARED)); 1217 - continue; 1218 - } 1219 - 1220 - ASSERT(ip->i_mount == mp); 1221 - 1222 - if (xfs_ilock_nowait(ip, 1223 - XFS_ILOCK_SHARED) == 0) { 1224 - ASSERT(ip->i_mount == mp); 1225 - /* 1226 - * We failed to reacquire 1227 - * the inode lock without 1228 - * sleeping, so just skip 1229 - * the inode for now. We 1230 - * clear the ILOCK bit from 1231 - * the lock_flags so that we 1232 - * won't try to drop a lock 1233 - * we don't hold below. 1234 - */ 1235 - lock_flags &= ~XFS_ILOCK_SHARED; 1236 - IPOINTER_REMOVE(ip_next, mp); 1237 - } else if ((xfs_ipincount(ip) == 0) && 1238 - xfs_iflock_nowait(ip)) { 1239 - ASSERT(ip->i_mount == mp); 1240 - /* 1241 - * Since this is vfs_sync() 1242 - * calling we only flush the 1243 - * inode out if we can lock 1244 - * it without sleeping and 1245 - * it is not pinned. Drop 1246 - * the mount lock here so 1247 - * that we don't hold it for 1248 - * too long. We already have 1249 - * a marker in the list here. 
1250 - */ 1251 - XFS_MOUNT_IUNLOCK(mp); 1252 - mount_locked = B_FALSE; 1253 - error = xfs_iflush(ip, 1254 - XFS_IFLUSH_DELWRI); 1255 - } else { 1256 - ASSERT(ip->i_mount == mp); 1257 - IPOINTER_REMOVE(ip_next, mp); 1258 - } 1259 - } 1260 - 1261 - } 1262 - 1263 - } else { 1264 - if ((flags & SYNC_ATTR) && 1265 - ((ip->i_update_core) || 1266 - ((ip->i_itemp != NULL) && 1267 - (ip->i_itemp->ili_format.ilf_fields != 0)))) { 1268 - if (mount_locked) { 1269 - IPOINTER_INSERT(ip, mp); 1270 - } 1271 - 1272 - if (flags & SYNC_WAIT) { 1273 - xfs_iflock(ip); 1274 - error = xfs_iflush(ip, 1275 - XFS_IFLUSH_SYNC); 1276 - } else { 1277 - /* 1278 - * If we can't acquire the flush 1279 - * lock, then the inode is already 1280 - * being flushed so don't bother 1281 - * waiting. If we can lock it then 1282 - * do a delwri flush so we can 1283 - * combine multiple inode flushes 1284 - * in each disk write. 1285 - */ 1286 - if (xfs_iflock_nowait(ip)) { 1287 - error = xfs_iflush(ip, 1288 - XFS_IFLUSH_DELWRI); 1289 - } 1290 - else if (bypassed) 1291 - (*bypassed)++; 1292 - } 1255 + /* 1256 + * If we can't acquire the flush lock, then the inode 1257 + * is already being flushed so don't bother waiting. 1258 + * 1259 + * If we can lock it then do a delwri flush so we can 1260 + * combine multiple inode flushes in each disk write. 1261 + */ 1262 + } else if (xfs_iflock_nowait(ip)) { 1263 + error = xfs_iflush(ip, XFS_IFLUSH_DELWRI); 1264 + } else if (bypassed) { 1265 + (*bypassed)++; 1293 1266 } 1294 1267 } 1295 1268 ··· 1387 1626 } 1388 1627 1389 1628 return XFS_ERROR(last_error); 1390 - } 1391 - 1392 - /* 1393 - * xfs_vget - called by DMAPI and NFSD to get vnode from file handle 1394 - */ 1395 - int 1396 - xfs_vget( 1397 - xfs_mount_t *mp, 1398 - bhv_vnode_t **vpp, 1399 - xfs_fid_t *xfid) 1400 - { 1401 - xfs_inode_t *ip; 1402 - int error; 1403 - xfs_ino_t ino; 1404 - unsigned int igen; 1405 - 1406 - /* 1407 - * Invalid. Since handles can be created in user space and passed in 1408 - * via gethandle(), this is not cause for a panic. 1409 - */ 1410 - if (xfid->fid_len != sizeof(*xfid) - sizeof(xfid->fid_len)) 1411 - return XFS_ERROR(EINVAL); 1412 - 1413 - ino = xfid->fid_ino; 1414 - igen = xfid->fid_gen; 1415 - 1416 - /* 1417 - * NFS can sometimes send requests for ino 0. Fail them gracefully. 
1418 - */ 1419 - if (ino == 0) 1420 - return XFS_ERROR(ESTALE); 1421 - 1422 - error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0); 1423 - if (error) { 1424 - *vpp = NULL; 1425 - return error; 1426 - } 1427 - 1428 - if (ip == NULL) { 1429 - *vpp = NULL; 1430 - return XFS_ERROR(EIO); 1431 - } 1432 - 1433 - if (ip->i_d.di_mode == 0 || ip->i_d.di_gen != igen) { 1434 - xfs_iput_new(ip, XFS_ILOCK_SHARED); 1435 - *vpp = NULL; 1436 - return XFS_ERROR(ENOENT); 1437 - } 1438 - 1439 - *vpp = XFS_ITOV(ip); 1440 - xfs_iunlock(ip, XFS_ILOCK_SHARED); 1441 - return 0; 1442 - } 1443 - 1444 - 1445 - #define MNTOPT_LOGBUFS "logbufs" /* number of XFS log buffers */ 1446 - #define MNTOPT_LOGBSIZE "logbsize" /* size of XFS log buffers */ 1447 - #define MNTOPT_LOGDEV "logdev" /* log device */ 1448 - #define MNTOPT_RTDEV "rtdev" /* realtime I/O device */ 1449 - #define MNTOPT_BIOSIZE "biosize" /* log2 of preferred buffered io size */ 1450 - #define MNTOPT_WSYNC "wsync" /* safe-mode nfs compatible mount */ 1451 - #define MNTOPT_INO64 "ino64" /* force inodes into 64-bit range */ 1452 - #define MNTOPT_NOALIGN "noalign" /* turn off stripe alignment */ 1453 - #define MNTOPT_SWALLOC "swalloc" /* turn on stripe width allocation */ 1454 - #define MNTOPT_SUNIT "sunit" /* data volume stripe unit */ 1455 - #define MNTOPT_SWIDTH "swidth" /* data volume stripe width */ 1456 - #define MNTOPT_NOUUID "nouuid" /* ignore filesystem UUID */ 1457 - #define MNTOPT_MTPT "mtpt" /* filesystem mount point */ 1458 - #define MNTOPT_GRPID "grpid" /* group-ID from parent directory */ 1459 - #define MNTOPT_NOGRPID "nogrpid" /* group-ID from current process */ 1460 - #define MNTOPT_BSDGROUPS "bsdgroups" /* group-ID from parent directory */ 1461 - #define MNTOPT_SYSVGROUPS "sysvgroups" /* group-ID from current process */ 1462 - #define MNTOPT_ALLOCSIZE "allocsize" /* preferred allocation size */ 1463 - #define MNTOPT_NORECOVERY "norecovery" /* don't run XFS recovery */ 1464 - #define MNTOPT_BARRIER "barrier" /* use writer barriers for log write and 1465 - * unwritten extent conversion */ 1466 - #define MNTOPT_NOBARRIER "nobarrier" /* .. disable */ 1467 - #define MNTOPT_OSYNCISOSYNC "osyncisosync" /* o_sync is REALLY o_sync */ 1468 - #define MNTOPT_64BITINODE "inode64" /* inodes can be allocated anywhere */ 1469 - #define MNTOPT_IKEEP "ikeep" /* do not free empty inode clusters */ 1470 - #define MNTOPT_NOIKEEP "noikeep" /* free empty inode clusters */ 1471 - #define MNTOPT_LARGEIO "largeio" /* report large I/O sizes in stat() */ 1472 - #define MNTOPT_NOLARGEIO "nolargeio" /* do not report large I/O sizes 1473 - * in stat(). 
*/ 1474 - #define MNTOPT_ATTR2 "attr2" /* do use attr2 attribute format */ 1475 - #define MNTOPT_NOATTR2 "noattr2" /* do not use attr2 attribute format */ 1476 - #define MNTOPT_FILESTREAM "filestreams" /* use filestreams allocator */ 1477 - #define MNTOPT_QUOTA "quota" /* disk quotas (user) */ 1478 - #define MNTOPT_NOQUOTA "noquota" /* no quotas */ 1479 - #define MNTOPT_USRQUOTA "usrquota" /* user quota enabled */ 1480 - #define MNTOPT_GRPQUOTA "grpquota" /* group quota enabled */ 1481 - #define MNTOPT_PRJQUOTA "prjquota" /* project quota enabled */ 1482 - #define MNTOPT_UQUOTA "uquota" /* user quota (IRIX variant) */ 1483 - #define MNTOPT_GQUOTA "gquota" /* group quota (IRIX variant) */ 1484 - #define MNTOPT_PQUOTA "pquota" /* project quota (IRIX variant) */ 1485 - #define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota limit enforcement */ 1486 - #define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */ 1487 - #define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */ 1488 - #define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */ 1489 - #define MNTOPT_DMAPI "dmapi" /* DMI enabled (DMAPI / XDSM) */ 1490 - #define MNTOPT_XDSM "xdsm" /* DMI enabled (DMAPI / XDSM) */ 1491 - #define MNTOPT_DMI "dmi" /* DMI enabled (DMAPI / XDSM) */ 1492 - 1493 - STATIC unsigned long 1494 - suffix_strtoul(char *s, char **endp, unsigned int base) 1495 - { 1496 - int last, shift_left_factor = 0; 1497 - char *value = s; 1498 - 1499 - last = strlen(value) - 1; 1500 - if (value[last] == 'K' || value[last] == 'k') { 1501 - shift_left_factor = 10; 1502 - value[last] = '\0'; 1503 - } 1504 - if (value[last] == 'M' || value[last] == 'm') { 1505 - shift_left_factor = 20; 1506 - value[last] = '\0'; 1507 - } 1508 - if (value[last] == 'G' || value[last] == 'g') { 1509 - shift_left_factor = 30; 1510 - value[last] = '\0'; 1511 - } 1512 - 1513 - return simple_strtoul((const char *)s, endp, base) << shift_left_factor; 1514 - } 1515 - 1516 - int 1517 - xfs_parseargs( 1518 - struct xfs_mount *mp, 1519 - char *options, 1520 - struct xfs_mount_args *args, 1521 - int update) 1522 - { 1523 - char *this_char, *value, *eov; 1524 - int dsunit, dswidth, vol_dsunit, vol_dswidth; 1525 - int iosize; 1526 - int ikeep = 0; 1527 - 1528 - args->flags |= XFSMNT_BARRIER; 1529 - args->flags2 |= XFSMNT2_COMPAT_IOSIZE; 1530 - 1531 - if (!options) 1532 - goto done; 1533 - 1534 - iosize = dsunit = dswidth = vol_dsunit = vol_dswidth = 0; 1535 - 1536 - while ((this_char = strsep(&options, ",")) != NULL) { 1537 - if (!*this_char) 1538 - continue; 1539 - if ((value = strchr(this_char, '=')) != NULL) 1540 - *value++ = 0; 1541 - 1542 - if (!strcmp(this_char, MNTOPT_LOGBUFS)) { 1543 - if (!value || !*value) { 1544 - cmn_err(CE_WARN, 1545 - "XFS: %s option requires an argument", 1546 - this_char); 1547 - return EINVAL; 1548 - } 1549 - args->logbufs = simple_strtoul(value, &eov, 10); 1550 - } else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) { 1551 - if (!value || !*value) { 1552 - cmn_err(CE_WARN, 1553 - "XFS: %s option requires an argument", 1554 - this_char); 1555 - return EINVAL; 1556 - } 1557 - args->logbufsize = suffix_strtoul(value, &eov, 10); 1558 - } else if (!strcmp(this_char, MNTOPT_LOGDEV)) { 1559 - if (!value || !*value) { 1560 - cmn_err(CE_WARN, 1561 - "XFS: %s option requires an argument", 1562 - this_char); 1563 - return EINVAL; 1564 - } 1565 - strncpy(args->logname, value, MAXNAMELEN); 1566 - } else if (!strcmp(this_char, MNTOPT_MTPT)) { 1567 - if (!value || !*value) { 1568 - cmn_err(CE_WARN, 1569 - 
"XFS: %s option requires an argument", 1570 - this_char); 1571 - return EINVAL; 1572 - } 1573 - strncpy(args->mtpt, value, MAXNAMELEN); 1574 - } else if (!strcmp(this_char, MNTOPT_RTDEV)) { 1575 - if (!value || !*value) { 1576 - cmn_err(CE_WARN, 1577 - "XFS: %s option requires an argument", 1578 - this_char); 1579 - return EINVAL; 1580 - } 1581 - strncpy(args->rtname, value, MAXNAMELEN); 1582 - } else if (!strcmp(this_char, MNTOPT_BIOSIZE)) { 1583 - if (!value || !*value) { 1584 - cmn_err(CE_WARN, 1585 - "XFS: %s option requires an argument", 1586 - this_char); 1587 - return EINVAL; 1588 - } 1589 - iosize = simple_strtoul(value, &eov, 10); 1590 - args->flags |= XFSMNT_IOSIZE; 1591 - args->iosizelog = (uint8_t) iosize; 1592 - } else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) { 1593 - if (!value || !*value) { 1594 - cmn_err(CE_WARN, 1595 - "XFS: %s option requires an argument", 1596 - this_char); 1597 - return EINVAL; 1598 - } 1599 - iosize = suffix_strtoul(value, &eov, 10); 1600 - args->flags |= XFSMNT_IOSIZE; 1601 - args->iosizelog = ffs(iosize) - 1; 1602 - } else if (!strcmp(this_char, MNTOPT_GRPID) || 1603 - !strcmp(this_char, MNTOPT_BSDGROUPS)) { 1604 - mp->m_flags |= XFS_MOUNT_GRPID; 1605 - } else if (!strcmp(this_char, MNTOPT_NOGRPID) || 1606 - !strcmp(this_char, MNTOPT_SYSVGROUPS)) { 1607 - mp->m_flags &= ~XFS_MOUNT_GRPID; 1608 - } else if (!strcmp(this_char, MNTOPT_WSYNC)) { 1609 - args->flags |= XFSMNT_WSYNC; 1610 - } else if (!strcmp(this_char, MNTOPT_OSYNCISOSYNC)) { 1611 - args->flags |= XFSMNT_OSYNCISOSYNC; 1612 - } else if (!strcmp(this_char, MNTOPT_NORECOVERY)) { 1613 - args->flags |= XFSMNT_NORECOVERY; 1614 - } else if (!strcmp(this_char, MNTOPT_INO64)) { 1615 - args->flags |= XFSMNT_INO64; 1616 - #if !XFS_BIG_INUMS 1617 - cmn_err(CE_WARN, 1618 - "XFS: %s option not allowed on this system", 1619 - this_char); 1620 - return EINVAL; 1621 - #endif 1622 - } else if (!strcmp(this_char, MNTOPT_NOALIGN)) { 1623 - args->flags |= XFSMNT_NOALIGN; 1624 - } else if (!strcmp(this_char, MNTOPT_SWALLOC)) { 1625 - args->flags |= XFSMNT_SWALLOC; 1626 - } else if (!strcmp(this_char, MNTOPT_SUNIT)) { 1627 - if (!value || !*value) { 1628 - cmn_err(CE_WARN, 1629 - "XFS: %s option requires an argument", 1630 - this_char); 1631 - return EINVAL; 1632 - } 1633 - dsunit = simple_strtoul(value, &eov, 10); 1634 - } else if (!strcmp(this_char, MNTOPT_SWIDTH)) { 1635 - if (!value || !*value) { 1636 - cmn_err(CE_WARN, 1637 - "XFS: %s option requires an argument", 1638 - this_char); 1639 - return EINVAL; 1640 - } 1641 - dswidth = simple_strtoul(value, &eov, 10); 1642 - } else if (!strcmp(this_char, MNTOPT_64BITINODE)) { 1643 - args->flags &= ~XFSMNT_32BITINODES; 1644 - #if !XFS_BIG_INUMS 1645 - cmn_err(CE_WARN, 1646 - "XFS: %s option not allowed on this system", 1647 - this_char); 1648 - return EINVAL; 1649 - #endif 1650 - } else if (!strcmp(this_char, MNTOPT_NOUUID)) { 1651 - args->flags |= XFSMNT_NOUUID; 1652 - } else if (!strcmp(this_char, MNTOPT_BARRIER)) { 1653 - args->flags |= XFSMNT_BARRIER; 1654 - } else if (!strcmp(this_char, MNTOPT_NOBARRIER)) { 1655 - args->flags &= ~XFSMNT_BARRIER; 1656 - } else if (!strcmp(this_char, MNTOPT_IKEEP)) { 1657 - ikeep = 1; 1658 - args->flags &= ~XFSMNT_IDELETE; 1659 - } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) { 1660 - args->flags |= XFSMNT_IDELETE; 1661 - } else if (!strcmp(this_char, MNTOPT_LARGEIO)) { 1662 - args->flags2 &= ~XFSMNT2_COMPAT_IOSIZE; 1663 - } else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) { 1664 - args->flags2 |= XFSMNT2_COMPAT_IOSIZE; 1665 - } 
else if (!strcmp(this_char, MNTOPT_ATTR2)) { 1666 - args->flags |= XFSMNT_ATTR2; 1667 - } else if (!strcmp(this_char, MNTOPT_NOATTR2)) { 1668 - args->flags &= ~XFSMNT_ATTR2; 1669 - } else if (!strcmp(this_char, MNTOPT_FILESTREAM)) { 1670 - args->flags2 |= XFSMNT2_FILESTREAMS; 1671 - } else if (!strcmp(this_char, MNTOPT_NOQUOTA)) { 1672 - args->flags &= ~(XFSMNT_UQUOTAENF|XFSMNT_UQUOTA); 1673 - args->flags &= ~(XFSMNT_GQUOTAENF|XFSMNT_GQUOTA); 1674 - } else if (!strcmp(this_char, MNTOPT_QUOTA) || 1675 - !strcmp(this_char, MNTOPT_UQUOTA) || 1676 - !strcmp(this_char, MNTOPT_USRQUOTA)) { 1677 - args->flags |= XFSMNT_UQUOTA | XFSMNT_UQUOTAENF; 1678 - } else if (!strcmp(this_char, MNTOPT_QUOTANOENF) || 1679 - !strcmp(this_char, MNTOPT_UQUOTANOENF)) { 1680 - args->flags |= XFSMNT_UQUOTA; 1681 - args->flags &= ~XFSMNT_UQUOTAENF; 1682 - } else if (!strcmp(this_char, MNTOPT_PQUOTA) || 1683 - !strcmp(this_char, MNTOPT_PRJQUOTA)) { 1684 - args->flags |= XFSMNT_PQUOTA | XFSMNT_PQUOTAENF; 1685 - } else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) { 1686 - args->flags |= XFSMNT_PQUOTA; 1687 - args->flags &= ~XFSMNT_PQUOTAENF; 1688 - } else if (!strcmp(this_char, MNTOPT_GQUOTA) || 1689 - !strcmp(this_char, MNTOPT_GRPQUOTA)) { 1690 - args->flags |= XFSMNT_GQUOTA | XFSMNT_GQUOTAENF; 1691 - } else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) { 1692 - args->flags |= XFSMNT_GQUOTA; 1693 - args->flags &= ~XFSMNT_GQUOTAENF; 1694 - } else if (!strcmp(this_char, MNTOPT_DMAPI)) { 1695 - args->flags |= XFSMNT_DMAPI; 1696 - } else if (!strcmp(this_char, MNTOPT_XDSM)) { 1697 - args->flags |= XFSMNT_DMAPI; 1698 - } else if (!strcmp(this_char, MNTOPT_DMI)) { 1699 - args->flags |= XFSMNT_DMAPI; 1700 - } else if (!strcmp(this_char, "ihashsize")) { 1701 - cmn_err(CE_WARN, 1702 - "XFS: ihashsize no longer used, option is deprecated."); 1703 - } else if (!strcmp(this_char, "osyncisdsync")) { 1704 - /* no-op, this is now the default */ 1705 - cmn_err(CE_WARN, 1706 - "XFS: osyncisdsync is now the default, option is deprecated."); 1707 - } else if (!strcmp(this_char, "irixsgid")) { 1708 - cmn_err(CE_WARN, 1709 - "XFS: irixsgid is now a sysctl(2) variable, option is deprecated."); 1710 - } else { 1711 - cmn_err(CE_WARN, 1712 - "XFS: unknown mount option [%s].", this_char); 1713 - return EINVAL; 1714 - } 1715 - } 1716 - 1717 - if (args->flags & XFSMNT_NORECOVERY) { 1718 - if ((mp->m_flags & XFS_MOUNT_RDONLY) == 0) { 1719 - cmn_err(CE_WARN, 1720 - "XFS: no-recovery mounts must be read-only."); 1721 - return EINVAL; 1722 - } 1723 - } 1724 - 1725 - if ((args->flags & XFSMNT_NOALIGN) && (dsunit || dswidth)) { 1726 - cmn_err(CE_WARN, 1727 - "XFS: sunit and swidth options incompatible with the noalign option"); 1728 - return EINVAL; 1729 - } 1730 - 1731 - if ((args->flags & XFSMNT_GQUOTA) && (args->flags & XFSMNT_PQUOTA)) { 1732 - cmn_err(CE_WARN, 1733 - "XFS: cannot mount with both project and group quota"); 1734 - return EINVAL; 1735 - } 1736 - 1737 - if ((args->flags & XFSMNT_DMAPI) && *args->mtpt == '\0') { 1738 - printk("XFS: %s option needs the mount point option as well\n", 1739 - MNTOPT_DMAPI); 1740 - return EINVAL; 1741 - } 1742 - 1743 - if ((dsunit && !dswidth) || (!dsunit && dswidth)) { 1744 - cmn_err(CE_WARN, 1745 - "XFS: sunit and swidth must be specified together"); 1746 - return EINVAL; 1747 - } 1748 - 1749 - if (dsunit && (dswidth % dsunit != 0)) { 1750 - cmn_err(CE_WARN, 1751 - "XFS: stripe width (%d) must be a multiple of the stripe unit (%d)", 1752 - dswidth, dsunit); 1753 - return EINVAL; 1754 - } 1755 - 1756 - /* 
1757 - * Applications using DMI filesystems often expect the 1758 - * inode generation number to be monotonically increasing. 1759 - * If we delete inode chunks we break this assumption, so 1760 - * keep unused inode chunks on disk for DMI filesystems 1761 - * until we come up with a better solution. 1762 - * Note that if "ikeep" or "noikeep" mount options are 1763 - * supplied, then they are honored. 1764 - */ 1765 - if (!(args->flags & XFSMNT_DMAPI) && !ikeep) 1766 - args->flags |= XFSMNT_IDELETE; 1767 - 1768 - if ((args->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) { 1769 - if (dsunit) { 1770 - args->sunit = dsunit; 1771 - args->flags |= XFSMNT_RETERR; 1772 - } else { 1773 - args->sunit = vol_dsunit; 1774 - } 1775 - dswidth ? (args->swidth = dswidth) : 1776 - (args->swidth = vol_dswidth); 1777 - } else { 1778 - args->sunit = args->swidth = 0; 1779 - } 1780 - 1781 - done: 1782 - if (args->flags & XFSMNT_32BITINODES) 1783 - mp->m_flags |= XFS_MOUNT_SMALL_INUMS; 1784 - if (args->flags2) 1785 - args->flags |= XFSMNT_FLAGS2; 1786 - return 0; 1787 - } 1788 - 1789 - int 1790 - xfs_showargs( 1791 - struct xfs_mount *mp, 1792 - struct seq_file *m) 1793 - { 1794 - static struct proc_xfs_info { 1795 - int flag; 1796 - char *str; 1797 - } xfs_info[] = { 1798 - /* the few simple ones we can get from the mount struct */ 1799 - { XFS_MOUNT_WSYNC, "," MNTOPT_WSYNC }, 1800 - { XFS_MOUNT_INO64, "," MNTOPT_INO64 }, 1801 - { XFS_MOUNT_NOALIGN, "," MNTOPT_NOALIGN }, 1802 - { XFS_MOUNT_SWALLOC, "," MNTOPT_SWALLOC }, 1803 - { XFS_MOUNT_NOUUID, "," MNTOPT_NOUUID }, 1804 - { XFS_MOUNT_NORECOVERY, "," MNTOPT_NORECOVERY }, 1805 - { XFS_MOUNT_OSYNCISOSYNC, "," MNTOPT_OSYNCISOSYNC }, 1806 - { 0, NULL } 1807 - }; 1808 - struct proc_xfs_info *xfs_infop; 1809 - 1810 - for (xfs_infop = xfs_info; xfs_infop->flag; xfs_infop++) { 1811 - if (mp->m_flags & xfs_infop->flag) 1812 - seq_puts(m, xfs_infop->str); 1813 - } 1814 - 1815 - if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) 1816 - seq_printf(m, "," MNTOPT_ALLOCSIZE "=%dk", 1817 - (int)(1 << mp->m_writeio_log) >> 10); 1818 - 1819 - if (mp->m_logbufs > 0) 1820 - seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs); 1821 - if (mp->m_logbsize > 0) 1822 - seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10); 1823 - 1824 - if (mp->m_logname) 1825 - seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname); 1826 - if (mp->m_rtname) 1827 - seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname); 1828 - 1829 - if (mp->m_dalign > 0) 1830 - seq_printf(m, "," MNTOPT_SUNIT "=%d", 1831 - (int)XFS_FSB_TO_BB(mp, mp->m_dalign)); 1832 - if (mp->m_swidth > 0) 1833 - seq_printf(m, "," MNTOPT_SWIDTH "=%d", 1834 - (int)XFS_FSB_TO_BB(mp, mp->m_swidth)); 1835 - 1836 - if (!(mp->m_flags & XFS_MOUNT_IDELETE)) 1837 - seq_printf(m, "," MNTOPT_IKEEP); 1838 - if (!(mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE)) 1839 - seq_printf(m, "," MNTOPT_LARGEIO); 1840 - 1841 - if (!(mp->m_flags & XFS_MOUNT_SMALL_INUMS)) 1842 - seq_printf(m, "," MNTOPT_64BITINODE); 1843 - if (mp->m_flags & XFS_MOUNT_GRPID) 1844 - seq_printf(m, "," MNTOPT_GRPID); 1845 - 1846 - if (mp->m_qflags & XFS_UQUOTA_ACCT) { 1847 - if (mp->m_qflags & XFS_UQUOTA_ENFD) 1848 - seq_puts(m, "," MNTOPT_USRQUOTA); 1849 - else 1850 - seq_puts(m, "," MNTOPT_UQUOTANOENF); 1851 - } 1852 - 1853 - if (mp->m_qflags & XFS_PQUOTA_ACCT) { 1854 - if (mp->m_qflags & XFS_OQUOTA_ENFD) 1855 - seq_puts(m, "," MNTOPT_PRJQUOTA); 1856 - else 1857 - seq_puts(m, "," MNTOPT_PQUOTANOENF); 1858 - } 1859 - 1860 - if (mp->m_qflags & XFS_GQUOTA_ACCT) { 1861 - if (mp->m_qflags & 
XFS_OQUOTA_ENFD) 1862 - seq_puts(m, "," MNTOPT_GRPQUOTA); 1863 - else 1864 - seq_puts(m, "," MNTOPT_GQUOTANOENF); 1865 - } 1866 - 1867 - if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT)) 1868 - seq_puts(m, "," MNTOPT_NOQUOTA); 1869 - 1870 - if (mp->m_flags & XFS_MOUNT_DMAPI) 1871 - seq_puts(m, "," MNTOPT_DMAPI); 1872 - return 0; 1873 - } 1874 - 1875 - /* 1876 - * Second stage of a freeze. The data is already frozen so we only 1877 - * need to take care of themetadata. Once that's done write a dummy 1878 - * record to dirty the log in case of a crash while frozen. 1879 - */ 1880 - void 1881 - xfs_freeze( 1882 - xfs_mount_t *mp) 1883 - { 1884 - xfs_attr_quiesce(mp); 1885 - xfs_fs_log_dummy(mp); 1886 1629 }
+1 -8
fs/xfs/xfs_vfsops.h
··· 13 13 int xfs_unmount(struct xfs_mount *mp, int flags, struct cred *credp); 14 14 int xfs_mntupdate(struct xfs_mount *mp, int *flags, 15 15 struct xfs_mount_args *args); 16 - int xfs_root(struct xfs_mount *mp, bhv_vnode_t **vpp); 17 - int xfs_statvfs(struct xfs_mount *mp, struct kstatfs *statp, 18 - bhv_vnode_t *vp); 19 16 int xfs_sync(struct xfs_mount *mp, int flags); 20 - int xfs_vget(struct xfs_mount *mp, bhv_vnode_t **vpp, struct xfs_fid *xfid); 21 - int xfs_parseargs(struct xfs_mount *mp, char *options, 22 - struct xfs_mount_args *args, int update); 23 - int xfs_showargs(struct xfs_mount *mp, struct seq_file *m); 24 - void xfs_freeze(struct xfs_mount *mp); 25 17 void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname, 26 18 int lnnum); 19 + void xfs_attr_quiesce(struct xfs_mount *mp); 27 20 28 21 #endif /* _XFS_VFSOPS_H */
+47 -118
fs/xfs/xfs_vnodeops.c
··· 88 88 bhv_vnode_t *vp = XFS_ITOV(ip); 89 89 xfs_mount_t *mp = ip->i_mount; 90 90 91 - vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); 91 + xfs_itrace_entry(ip); 92 92 93 93 if (XFS_FORCED_SHUTDOWN(mp)) 94 94 return XFS_ERROR(EIO); ··· 136 136 default: 137 137 vap->va_rdev = 0; 138 138 139 - if (!(ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) { 139 + if (!(XFS_IS_REALTIME_INODE(ip))) { 140 140 vap->va_blocksize = xfs_preferred_iosize(mp); 141 141 } else { 142 142 ··· 228 228 int file_owner; 229 229 int need_iolock = 1; 230 230 231 - vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); 231 + xfs_itrace_entry(ip); 232 232 233 233 if (mp->m_flags & XFS_MOUNT_RDONLY) 234 234 return XFS_ERROR(EROFS); ··· 508 508 */ 509 509 if ((ip->i_d.di_nextents || ip->i_delayed_blks) && 510 510 (mask & XFS_AT_XFLAGS) && 511 - (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 511 + (XFS_IS_REALTIME_INODE(ip)) != 512 512 (vap->va_xflags & XFS_XFLAG_REALTIME)) { 513 513 code = XFS_ERROR(EINVAL); /* EFBIG? */ 514 514 goto error_return; ··· 520 520 if ((mask & XFS_AT_EXTSIZE) && vap->va_extsize != 0) { 521 521 xfs_extlen_t size; 522 522 523 - if ((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) || 523 + if (XFS_IS_REALTIME_INODE(ip) || 524 524 ((mask & XFS_AT_XFLAGS) && 525 525 (vap->va_xflags & XFS_XFLAG_REALTIME))) { 526 526 size = mp->m_sb.sb_rextsize << ··· 804 804 if (vap->va_xflags & XFS_XFLAG_EXTSZINHERIT) 805 805 di_flags |= XFS_DIFLAG_EXTSZINHERIT; 806 806 } else if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) { 807 - if (vap->va_xflags & XFS_XFLAG_REALTIME) { 807 + if (vap->va_xflags & XFS_XFLAG_REALTIME) 808 808 di_flags |= XFS_DIFLAG_REALTIME; 809 - ip->i_iocore.io_flags |= XFS_IOCORE_RT; 810 - } else { 811 - ip->i_iocore.io_flags &= ~XFS_IOCORE_RT; 812 - } 813 809 if (vap->va_xflags & XFS_XFLAG_EXTSIZE) 814 810 di_flags |= XFS_DIFLAG_EXTSIZE; 815 811 } ··· 898 902 return code; 899 903 } 900 904 901 - 902 - /* 903 - * xfs_access 904 - * Null conversion from vnode mode bits to inode mode bits, as in efs. 905 - */ 906 - int 907 - xfs_access( 908 - xfs_inode_t *ip, 909 - int mode, 910 - cred_t *credp) 911 - { 912 - int error; 913 - 914 - vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); 915 - 916 - xfs_ilock(ip, XFS_ILOCK_SHARED); 917 - error = xfs_iaccess(ip, mode, credp); 918 - xfs_iunlock(ip, XFS_ILOCK_SHARED); 919 - return error; 920 - } 921 - 922 - 923 905 /* 924 906 * The maximum pathlen is 1024 bytes. Since the minimum file system 925 907 * blocksize is 512 bytes, we can get a max of 2 extents back from ··· 961 987 int pathlen; 962 988 int error = 0; 963 989 964 - vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); 990 + xfs_itrace_entry(ip); 965 991 966 992 if (XFS_FORCED_SHUTDOWN(mp)) 967 993 return XFS_ERROR(EIO); ··· 1007 1033 int error; 1008 1034 int log_flushed = 0, changed = 1; 1009 1035 1010 - vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); 1036 + xfs_itrace_entry(ip); 1011 1037 1012 1038 ASSERT(start >= 0 && stop >= -1); 1013 1039 ··· 1123 1149 * If this inode is on the RT dev we need to flush that 1124 1150 * cache as well. 
1125 1151 */ 1126 - if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) 1152 + if (XFS_IS_REALTIME_INODE(ip)) 1127 1153 xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp); 1128 1154 } 1129 1155 ··· 1162 1188 1163 1189 nimaps = 1; 1164 1190 xfs_ilock(ip, XFS_ILOCK_SHARED); 1165 - error = XFS_BMAPI(mp, NULL, &ip->i_iocore, end_fsb, map_len, 0, 1191 + error = xfs_bmapi(NULL, ip, end_fsb, map_len, 0, 1166 1192 NULL, 0, &imap, &nimaps, NULL, NULL); 1167 1193 xfs_iunlock(ip, XFS_ILOCK_SHARED); 1168 1194 ··· 1536 1562 error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK); 1537 1563 if (error) 1538 1564 return error; 1539 - /* Update linux inode block count after free above */ 1540 - vn_to_inode(vp)->i_blocks = XFS_FSB_TO_BB(mp, 1541 - ip->i_d.di_nblocks + ip->i_delayed_blks); 1542 1565 } 1543 1566 } 1544 1567 ··· 1563 1592 int error; 1564 1593 int truncate; 1565 1594 1566 - vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); 1595 + xfs_itrace_entry(ip); 1567 1596 1568 1597 /* 1569 1598 * If the inode is already free, then there can be nothing ··· 1609 1638 error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK); 1610 1639 if (error) 1611 1640 return VN_INACTIVE_CACHE; 1612 - /* Update linux inode block count after free above */ 1613 - vn_to_inode(vp)->i_blocks = XFS_FSB_TO_BB(mp, 1614 - ip->i_d.di_nblocks + ip->i_delayed_blks); 1615 1641 } 1616 1642 goto out; 1617 1643 } ··· 1773 1805 int error; 1774 1806 uint lock_mode; 1775 1807 1776 - vn_trace_entry(dp, __FUNCTION__, (inst_t *)__return_address); 1808 + xfs_itrace_entry(dp); 1777 1809 1778 1810 if (XFS_FORCED_SHUTDOWN(dp->i_mount)) 1779 1811 return XFS_ERROR(EIO); ··· 1782 1814 error = xfs_dir_lookup_int(dp, lock_mode, dentry, &e_inum, &ip); 1783 1815 if (!error) { 1784 1816 *vpp = XFS_ITOV(ip); 1785 - ITRACE(ip); 1817 + xfs_itrace_ref(ip); 1786 1818 } 1787 1819 xfs_iunlock_map_shared(dp, lock_mode); 1788 1820 return error; ··· 1816 1848 int namelen; 1817 1849 1818 1850 ASSERT(!*vpp); 1819 - vn_trace_entry(dp, __FUNCTION__, (inst_t *)__return_address); 1851 + xfs_itrace_entry(dp); 1820 1852 1821 1853 namelen = VNAMELEN(dentry); 1822 1854 ··· 1898 1930 goto error_return; 1899 1931 goto abort_return; 1900 1932 } 1901 - ITRACE(ip); 1933 + xfs_itrace_ref(ip); 1902 1934 1903 1935 /* 1904 1936 * At this point, we've gotten a newly allocated inode. ··· 2066 2098 2067 2099 e_inum = ip->i_ino; 2068 2100 2069 - ITRACE(ip); 2101 + xfs_itrace_ref(ip); 2070 2102 2071 2103 /* 2072 2104 * We want to lock in increasing inum. 
Since we've already ··· 2289 2321 uint resblks; 2290 2322 int namelen; 2291 2323 2292 - vn_trace_entry(dp, __FUNCTION__, (inst_t *)__return_address); 2324 + xfs_itrace_entry(dp); 2293 2325 2294 2326 if (XFS_FORCED_SHUTDOWN(mp)) 2295 2327 return XFS_ERROR(EIO); ··· 2332 2364 2333 2365 dm_di_mode = ip->i_d.di_mode; 2334 2366 2335 - vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); 2336 - 2337 - ITRACE(ip); 2367 + xfs_itrace_entry(ip); 2368 + xfs_itrace_ref(ip); 2338 2369 2339 2370 error = XFS_QM_DQATTACH(mp, dp, 0); 2340 2371 if (!error && dp != ip) ··· 2465 2498 if (link_zero && xfs_inode_is_filestream(ip)) 2466 2499 xfs_filestream_deassociate(ip); 2467 2500 2468 - vn_trace_exit(ip, __FUNCTION__, (inst_t *)__return_address); 2469 - 2501 + xfs_itrace_exit(ip); 2470 2502 IRELE(ip); 2471 2503 2472 2504 /* Fall through to std_return with error = 0 */ ··· 2528 2562 char *target_name = VNAME(dentry); 2529 2563 int target_namelen; 2530 2564 2531 - vn_trace_entry(tdp, __FUNCTION__, (inst_t *)__return_address); 2532 - vn_trace_entry(xfs_vtoi(src_vp), __FUNCTION__, (inst_t *)__return_address); 2565 + xfs_itrace_entry(tdp); 2566 + xfs_itrace_entry(xfs_vtoi(src_vp)); 2533 2567 2534 2568 target_namelen = VNAMELEN(dentry); 2535 2569 ASSERT(!VN_ISDIR(src_vp)); ··· 2710 2744 2711 2745 /* Return through std_return after this point. */ 2712 2746 2713 - vn_trace_entry(dp, __FUNCTION__, (inst_t *)__return_address); 2747 + xfs_itrace_entry(dp); 2714 2748 2715 2749 mp = dp->i_mount; 2716 2750 udqp = gdqp = NULL; ··· 2776 2810 goto error_return; 2777 2811 goto abort_return; 2778 2812 } 2779 - ITRACE(cdp); 2813 + xfs_itrace_ref(cdp); 2780 2814 2781 2815 /* 2782 2816 * Now we add the directory inode to the transaction. ··· 2902 2936 int last_cdp_link; 2903 2937 uint resblks; 2904 2938 2905 - vn_trace_entry(dp, __FUNCTION__, (inst_t *)__return_address); 2939 + xfs_itrace_entry(dp); 2906 2940 2907 2941 if (XFS_FORCED_SHUTDOWN(mp)) 2908 2942 return XFS_ERROR(EIO); ··· 3007 3041 VN_HOLD(dir_vp); 3008 3042 } 3009 3043 3010 - ITRACE(cdp); 3044 + xfs_itrace_ref(cdp); 3011 3045 xfs_trans_ijoin(tp, cdp, XFS_ILOCK_EXCL); 3012 3046 3013 3047 ASSERT(cdp->i_d.di_nlink >= 2); ··· 3155 3189 ip = NULL; 3156 3190 tp = NULL; 3157 3191 3158 - vn_trace_entry(dp, __FUNCTION__, (inst_t *)__return_address); 3159 - 3192 + xfs_itrace_entry(dp); 3160 3193 3161 3194 if (XFS_FORCED_SHUTDOWN(mp)) 3162 3195 return XFS_ERROR(EIO); ··· 3282 3317 goto error_return; 3283 3318 goto error1; 3284 3319 } 3285 - ITRACE(ip); 3320 + xfs_itrace_ref(ip); 3286 3321 3287 3322 /* 3288 3323 * An error after we've joined dp to the transaction will result in the ··· 3430 3465 goto std_return; 3431 3466 } 3432 3467 3433 - 3434 - int 3435 - xfs_fid2( 3436 - xfs_inode_t *ip, 3437 - xfs_fid_t *xfid) 3438 - { 3439 - vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); 3440 - 3441 - xfid->fid_len = sizeof(xfs_fid_t) - sizeof(xfid->fid_len); 3442 - xfid->fid_pad = 0; 3443 - /* 3444 - * use memcpy because the inode is a long long and there's no 3445 - * assurance that xfid->fid_ino is properly aligned. 
3446 - */ 3447 - memcpy(&xfid->fid_ino, &ip->i_ino, sizeof(xfid->fid_ino)); 3448 - xfid->fid_gen = ip->i_d.di_gen; 3449 - 3450 - return 0; 3451 - } 3452 - 3453 - 3454 3468 int 3455 3469 xfs_rwlock( 3456 3470 xfs_inode_t *ip, ··· 3502 3558 if (iip && iip->ili_last_lsn) { 3503 3559 xlog_t *log = mp->m_log; 3504 3560 xfs_lsn_t sync_lsn; 3505 - int s, log_flags = XFS_LOG_FORCE; 3561 + int log_flags = XFS_LOG_FORCE; 3506 3562 3507 - s = GRANT_LOCK(log); 3563 + spin_lock(&log->l_grant_lock); 3508 3564 sync_lsn = log->l_last_sync_lsn; 3509 - GRANT_UNLOCK(log, s); 3565 + spin_unlock(&log->l_grant_lock); 3510 3566 3511 3567 if ((XFS_LSN_CMP(iip->ili_last_lsn, sync_lsn) > 0)) { 3512 3568 if (flags & FLUSH_SYNC) ··· 3581 3637 xfs_ilock(ip, XFS_ILOCK_EXCL); 3582 3638 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 3583 3639 3584 - ip->i_iocore.io_dmevmask = ip->i_d.di_dmevmask = evmask; 3585 - ip->i_iocore.io_dmstate = ip->i_d.di_dmstate = state; 3640 + ip->i_d.di_dmevmask = evmask; 3641 + ip->i_d.di_dmstate = state; 3586 3642 3587 3643 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 3588 3644 IHOLD(ip); ··· 3597 3653 { 3598 3654 bhv_vnode_t *vp = XFS_ITOV(ip); 3599 3655 3600 - vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); 3656 + xfs_itrace_entry(ip); 3601 3657 3602 3658 ASSERT(!VN_MAPPED(vp)); 3603 3659 ··· 3815 3871 int committed; 3816 3872 int error; 3817 3873 3818 - vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); 3874 + xfs_itrace_entry(ip); 3819 3875 3820 3876 if (XFS_FORCED_SHUTDOWN(mp)) 3821 3877 return XFS_ERROR(EIO); ··· 3920 3976 * Issue the xfs_bmapi() call to allocate the blocks 3921 3977 */ 3922 3978 XFS_BMAP_INIT(&free_list, &firstfsb); 3923 - error = XFS_BMAPI(mp, tp, &ip->i_iocore, startoffset_fsb, 3979 + error = xfs_bmapi(tp, ip, startoffset_fsb, 3924 3980 allocatesize_fsb, bmapi_flag, 3925 3981 &firstfsb, 0, imapp, &nimaps, 3926 3982 &free_list, NULL); ··· 3996 4052 int error = 0; 3997 4053 3998 4054 bp = xfs_buf_get_noaddr(mp->m_sb.sb_blocksize, 3999 - ip->i_d.di_flags & XFS_DIFLAG_REALTIME ? 4055 + XFS_IS_REALTIME_INODE(ip) ? 
4000 4056 mp->m_rtdev_targp : mp->m_ddev_targp); 4001 4057 4002 4058 for (offset = startoff; offset <= endoff; offset = lastoffset + 1) { 4003 4059 offset_fsb = XFS_B_TO_FSBT(mp, offset); 4004 4060 nimap = 1; 4005 - error = XFS_BMAPI(mp, NULL, &ip->i_iocore, offset_fsb, 1, 0, 4061 + error = xfs_bmapi(NULL, ip, offset_fsb, 1, 0, 4006 4062 NULL, 0, &imap, &nimap, NULL, NULL); 4007 4063 if (error || nimap < 1) 4008 4064 break; ··· 4085 4141 vp = XFS_ITOV(ip); 4086 4142 mp = ip->i_mount; 4087 4143 4088 - vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); 4144 + xfs_itrace_entry(ip); 4089 4145 4090 4146 if ((error = XFS_QM_DQATTACH(mp, ip, 0))) 4091 4147 return error; ··· 4093 4149 error = 0; 4094 4150 if (len <= 0) /* if nothing being freed */ 4095 4151 return error; 4096 - rt = (ip->i_d.di_flags & XFS_DIFLAG_REALTIME); 4152 + rt = XFS_IS_REALTIME_INODE(ip); 4097 4153 startoffset_fsb = XFS_B_TO_FSB(mp, offset); 4098 4154 end_dmi_offset = offset + len; 4099 4155 endoffset_fsb = XFS_B_TO_FSBT(mp, end_dmi_offset); ··· 4116 4172 vn_iowait(ip); /* wait for the completion of any pending DIOs */ 4117 4173 } 4118 4174 4119 - rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, NBPP); 4175 + rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE); 4120 4176 ioffset = offset & ~(rounding - 1); 4121 4177 4122 4178 if (VN_CACHED(vp) != 0) { 4123 - xfs_inval_cached_trace(&ip->i_iocore, ioffset, -1, 4124 - ctooff(offtoct(ioffset)), -1); 4125 - error = xfs_flushinval_pages(ip, 4126 - ctooff(offtoct(ioffset)), 4127 - -1, FI_REMAPF_LOCKED); 4179 + xfs_inval_cached_trace(ip, ioffset, -1, ioffset, -1); 4180 + error = xfs_flushinval_pages(ip, ioffset, -1, FI_REMAPF_LOCKED); 4128 4181 if (error) 4129 4182 goto out_unlock_iolock; 4130 4183 } ··· 4134 4193 */ 4135 4194 if (rt && !XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) { 4136 4195 nimap = 1; 4137 - error = XFS_BMAPI(mp, NULL, &ip->i_iocore, startoffset_fsb, 4196 + error = xfs_bmapi(NULL, ip, startoffset_fsb, 4138 4197 1, 0, NULL, 0, &imap, &nimap, NULL, NULL); 4139 4198 if (error) 4140 4199 goto out_unlock_iolock; ··· 4149 4208 startoffset_fsb += mp->m_sb.sb_rextsize - mod; 4150 4209 } 4151 4210 nimap = 1; 4152 - error = XFS_BMAPI(mp, NULL, &ip->i_iocore, endoffset_fsb - 1, 4211 + error = xfs_bmapi(NULL, ip, endoffset_fsb - 1, 4153 4212 1, 0, NULL, 0, &imap, &nimap, NULL, NULL); 4154 4213 if (error) 4155 4214 goto out_unlock_iolock; ··· 4225 4284 * issue the bunmapi() call to free the blocks 4226 4285 */ 4227 4286 XFS_BMAP_INIT(&free_list, &firstfsb); 4228 - error = XFS_BUNMAPI(mp, tp, &ip->i_iocore, startoffset_fsb, 4287 + error = xfs_bunmapi(tp, ip, startoffset_fsb, 4229 4288 endoffset_fsb - startoffset_fsb, 4230 4289 0, 2, &firstfsb, &free_list, NULL, &done); 4231 4290 if (error) { ··· 4288 4347 xfs_trans_t *tp; 4289 4348 bhv_vattr_t va; 4290 4349 4291 - vn_trace_entry(ip, __FUNCTION__, (inst_t *)__return_address); 4350 + xfs_itrace_entry(ip); 4292 4351 4293 - /* 4294 - * must be a regular file and have write permission 4295 - */ 4296 4352 if (!S_ISREG(ip->i_d.di_mode)) 4297 4353 return XFS_ERROR(EINVAL); 4298 - 4299 - xfs_ilock(ip, XFS_ILOCK_SHARED); 4300 - 4301 - if ((error = xfs_iaccess(ip, S_IWUSR, credp))) { 4302 - xfs_iunlock(ip, XFS_ILOCK_SHARED); 4303 - return error; 4304 - } 4305 - 4306 - xfs_iunlock(ip, XFS_ILOCK_SHARED); 4307 4354 4308 4355 switch (bf->l_whence) { 4309 4356 case 0: /*SEEK_SET*/
-2
fs/xfs/xfs_vnodeops.h
··· 18 18 int xfs_getattr(struct xfs_inode *ip, struct bhv_vattr *vap, int flags); 19 19 int xfs_setattr(struct xfs_inode *ip, struct bhv_vattr *vap, int flags, 20 20 struct cred *credp); 21 - int xfs_access(struct xfs_inode *ip, int mode, struct cred *credp); 22 21 int xfs_readlink(struct xfs_inode *ip, char *link); 23 22 int xfs_fsync(struct xfs_inode *ip, int flag, xfs_off_t start, 24 23 xfs_off_t stop); ··· 38 39 int xfs_symlink(struct xfs_inode *dp, bhv_vname_t *dentry, 39 40 char *target_path, mode_t mode, bhv_vnode_t **vpp, 40 41 struct cred *credp); 41 - int xfs_fid2(struct xfs_inode *ip, struct xfs_fid *xfid); 42 42 int xfs_rwlock(struct xfs_inode *ip, bhv_vrwlock_t locktype); 43 43 void xfs_rwunlock(struct xfs_inode *ip, bhv_vrwlock_t locktype); 44 44 int xfs_inode_flush(struct xfs_inode *ip, int flags);