Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs

* 'for-linus' of git://oss.sgi.com/xfs/xfs:
  XFS: Free buffer pages array unconditionally
  xfs: kill xfs_bmbt_rec_32/64 types
  xfs: improve metadata I/O merging in the elevator
  xfs: check for not fully initialized inodes in xfs_ireclaim

7 files changed, 25 insertions(+), 21 deletions(-)
fs/xfs/linux-2.6/xfs_buf.c | +7 -3
@@ -292,6 +292,7 @@
 {
     if (bp->b_pages != bp->b_page_array) {
         kmem_free(bp->b_pages);
+        bp->b_pages = NULL;
     }
 }
 
@@ -324,9 +323,8 @@
             ASSERT(!PagePrivate(page));
             page_cache_release(page);
         }
-        _xfs_buf_free_pages(bp);
     }
-
+    _xfs_buf_free_pages(bp);
     xfs_buf_deallocate(bp);
 }
 
@@ -1149,10 +1149,14 @@
     if (bp->b_flags & XBF_ORDERED) {
         ASSERT(!(bp->b_flags & XBF_READ));
         rw = WRITE_BARRIER;
-    } else if (bp->b_flags & _XBF_RUN_QUEUES) {
+    } else if (bp->b_flags & XBF_LOG_BUFFER) {
         ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
         bp->b_flags &= ~_XBF_RUN_QUEUES;
         rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
+    } else if (bp->b_flags & _XBF_RUN_QUEUES) {
+        ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
+        bp->b_flags &= ~_XBF_RUN_QUEUES;
+        rw = (bp->b_flags & XBF_WRITE) ? WRITE_META : READ_META;
     } else {
         rw = (bp->b_flags & XBF_WRITE) ? WRITE :
              (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
fs/xfs/linux-2.6/xfs_buf.h | +1
@@ -55,6 +55,7 @@
     XBF_FS_MANAGED = (1 << 8),  /* filesystem controls freeing memory */
     XBF_ORDERED = (1 << 11),    /* use ordered writes */
     XBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead */
+    XBF_LOG_BUFFER = (1 << 13), /* this is a buffer used for the log */
 
     /* flags used only as arguments to access routines */
     XBF_LOCK = (1 << 14),       /* lock requested */
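Taken together, the xfs_buf.c and xfs_buf.h hunks above split the old _XBF_RUN_QUEUES case in two: buffers tagged with the new XBF_LOG_BUFFER flag keep the WRITE_SYNC/READ_SYNC behaviour, while the remaining metadata buffers are submitted as WRITE_META/READ_META so the elevator can recognise and merge them. The standalone C sketch below only restates that decision ladder for readability; the flag bit values, the result enum and the classify() helper are illustrative and do not exist in the kernel.

/*
 * Illustrative sketch of the request-type ladder that _xfs_buf_ioapply()
 * ends up with after this merge.  Names mirror the diffs above, but the
 * bit values and classify() itself are made up for this example.
 */
enum buf_flags {
    XBF_WRITE       = 1 << 0,
    XBF_READ_AHEAD  = 1 << 1,
    XBF_ORDERED     = 1 << 2,
    XBF_LOG_BUFFER  = 1 << 3,   /* new in this merge */
    _XBF_RUN_QUEUES = 1 << 4,
};

enum req_type { BARRIER_WRITE, SYNC_RW, META_RW, PLAIN_WRITE, READAHEAD, PLAIN_READ };

static enum req_type classify(unsigned int flags)
{
    if (flags & XBF_ORDERED)
        return BARRIER_WRITE;   /* ordered log write -> WRITE_BARRIER */
    if (flags & XBF_LOG_BUFFER)
        return SYNC_RW;         /* log I/O keeps WRITE_SYNC / READ_SYNC */
    if (flags & _XBF_RUN_QUEUES)
        return META_RW;         /* other metadata -> WRITE_META / READ_META,
                                 * now visible to the elevator for merging */
    if (flags & XBF_WRITE)
        return PLAIN_WRITE;     /* WRITE */
    if (flags & XBF_READ_AHEAD)
        return READAHEAD;       /* READA */
    return PLAIN_READ;          /* READ */
}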
fs/xfs/xfs_bmap_btree.h | +3 -11
@@ -46,20 +46,12 @@
 #define BMBT_STARTBLOCK_BITLEN  52
 #define BMBT_BLOCKCOUNT_BITLEN  21
 
-
-#define BMBT_USE_64 1
-
-typedef struct xfs_bmbt_rec_32
-{
-    __uint32_t      l0, l1, l2, l3;
-} xfs_bmbt_rec_32_t;
-typedef struct xfs_bmbt_rec_64
-{
+typedef struct xfs_bmbt_rec {
     __be64          l0, l1;
-} xfs_bmbt_rec_64_t;
+} xfs_bmbt_rec_t;
 
 typedef __uint64_t  xfs_bmbt_rec_base_t;    /* use this for casts */
-typedef xfs_bmbt_rec_64_t xfs_bmbt_rec_t, xfs_bmdr_rec_t;
+typedef xfs_bmbt_rec_t xfs_bmdr_rec_t;
 
 typedef struct xfs_bmbt_rec_host {
     __uint64_t      l0, l1;
fs/xfs/xfs_iget.c | +8 -4
@@ -478,17 +478,21 @@
 {
     struct xfs_mount    *mp = ip->i_mount;
     struct xfs_perag    *pag;
+    xfs_agino_t         agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
 
     XFS_STATS_INC(xs_ig_reclaims);
 
     /*
-     * Remove the inode from the per-AG radix tree. It doesn't matter
-     * if it was never added to it because radix_tree_delete can deal
-     * with that case just fine.
+     * Remove the inode from the per-AG radix tree.
+     *
+     * Because radix_tree_delete won't complain even if the item was never
+     * added to the tree assert that it's been there before to catch
+     * problems with the inode life time early on.
      */
     pag = xfs_get_perag(mp, ip->i_ino);
     write_lock(&pag->pag_ici_lock);
-    radix_tree_delete(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino));
+    if (!radix_tree_delete(&pag->pag_ici_root, agino))
+        ASSERT(0);
     write_unlock(&pag->pag_ici_lock);
     xfs_put_perag(mp, pag);
 
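For context on the hunk above: radix_tree_delete() returns the item that was stored at the given index, or NULL if the slot was empty, which is what lets xfs_ireclaim() turn a silently ignored miss into an assertion and catch not fully initialized inodes early. A minimal sketch of that pattern follows, assuming only the stock radix-tree API; remove_or_warn() is a hypothetical helper, not kernel code.

#include <linux/kernel.h>
#include <linux/radix-tree.h>

/*
 * Hypothetical helper showing the pattern used in xfs_ireclaim() above:
 * radix_tree_delete() returns the deleted item, or NULL if nothing was
 * indexed at @index, so an unexpectedly empty slot can be reported as a
 * lifetime bug instead of being ignored.
 */
static void remove_or_warn(struct radix_tree_root *root, unsigned long index)
{
    void *item = radix_tree_delete(root, index);

    WARN_ON(!item);     /* the caller expected @index to be populated */
}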
fs/xfs/xfs_inode_item.h | +3 -3
@@ -127,7 +127,7 @@
 #ifdef __KERNEL__
 
 struct xfs_buf;
-struct xfs_bmbt_rec_64;
+struct xfs_bmbt_rec;
 struct xfs_inode;
 struct xfs_mount;
 
@@ -140,9 +140,9 @@
     unsigned short          ili_flags;         /* misc flags */
     unsigned short          ili_logged;        /* flushed logged data */
     unsigned int            ili_last_fields;   /* fields when flushed */
-    struct xfs_bmbt_rec_64  *ili_extents_buf;  /* array of logged
+    struct xfs_bmbt_rec     *ili_extents_buf;  /* array of logged
                                                   data exts */
-    struct xfs_bmbt_rec_64  *ili_aextents_buf; /* array of logged
+    struct xfs_bmbt_rec     *ili_aextents_buf; /* array of logged
                                                   attr exts */
     unsigned int            ili_pushbuf_flag;  /* one bit used in push_ail */
 
fs/xfs/xfs_log.c | +2
@@ -1441,6 +1441,7 @@
     XFS_BUF_ZEROFLAGS(bp);
     XFS_BUF_BUSY(bp);
     XFS_BUF_ASYNC(bp);
+    bp->b_flags |= XBF_LOG_BUFFER;
     /*
      * Do an ordered write for the log block.
      * Its unnecessary to flush the first split block in the log wrap case.
@@ -1479,6 +1478,7 @@
     XFS_BUF_ZEROFLAGS(bp);
     XFS_BUF_BUSY(bp);
     XFS_BUF_ASYNC(bp);
+    bp->b_flags |= XBF_LOG_BUFFER;
     if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
         XFS_BUF_ORDERED(bp);
     dptr = XFS_BUF_PTR(bp);
include/linux/fs.h | +1
@@ -152,6 +152,7 @@
 #define WRITE_SYNC_PLUG     (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
 #define WRITE_SYNC          (WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
 #define WRITE_ODIRECT_PLUG  (WRITE | (1 << BIO_RW_SYNCIO))
+#define WRITE_META          (WRITE | (1 << BIO_RW_META))
 #define SWRITE_SYNC_PLUG    \
     (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
 #define SWRITE_SYNC         (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))