Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs

* 'for-linus' of git://oss.sgi.com/xfs/xfs:
  xfs: only issue a cache flush on unmount if barriers are enabled
  xfs: prevent lockdep false positive in xfs_iget_cache_miss
  xfs: prevent kernel crash due to corrupted inode log format

 fs/xfs/linux-2.6/xfs_buf.c   |   12 ++++++++++--
 fs/xfs/linux-2.6/xfs_buf.h   |    2 +-
 fs/xfs/linux-2.6/xfs_super.c |   10 +++++-----
 fs/xfs/xfs_iget.c            |   15 ++++++++++-----
 fs/xfs/xfs_log_recover.c     |   17 +++++++++++++----
 5 files changed, 39 insertions(+), 17 deletions(-)

diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -34,6 +34,12 @@
 #include <linux/backing-dev.h>
 #include <linux/freezer.h>
 
+#include "xfs_sb.h"
+#include "xfs_inum.h"
+#include "xfs_ag.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+
 static kmem_zone_t *xfs_buf_zone;
 STATIC int xfsbufd(void *);
 STATIC int xfsbufd_wakeup(int, gfp_t);
@@ -1435,10 +1441,12 @@
 
 void
 xfs_free_buftarg(
-	xfs_buftarg_t		*btp)
+	struct xfs_mount	*mp,
+	struct xfs_buftarg	*btp)
 {
 	xfs_flush_buftarg(btp, 1);
-	xfs_blkdev_issue_flush(btp);
+	if (mp->m_flags & XFS_MOUNT_BARRIER)
+		xfs_blkdev_issue_flush(btp);
 	xfs_free_bufhash(btp);
 	iput(btp->bt_mapping->host);
 
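The change above makes the unmount-time device cache flush conditional on the
filesystem having been mounted with barriers; without barriers XFS does not
drain the drive cache anywhere else either, so the flush bought nothing. A
minimal userspace sketch of the same gating pattern, with hypothetical names
(struct mount, MNT_BARRIER, issue_device_flush) standing in for the XFS types:

	#include <stdio.h>

	#define MNT_BARRIER	(1 << 0)	/* hypothetical per-mount barrier flag */

	struct mount {
		unsigned int	flags;
	};

	static void issue_device_flush(void)
	{
		/* stand-in for xfs_blkdev_issue_flush(): an expensive cache flush */
		puts("flushing device write cache");
	}

	static void free_target(struct mount *mp)
	{
		/* only pay for the flush when barriers were actually in use */
		if (mp->flags & MNT_BARRIER)
			issue_device_flush();
		/* ... tear down the rest of the target state here ... */
	}

	int main(void)
	{
		struct mount barriers = { .flags = MNT_BARRIER };
		struct mount nobarriers = { .flags = 0 };

		free_target(&barriers);		/* issues the flush */
		free_target(&nobarriers);	/* skips it */
		return 0;
	}
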
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -413,7 +413,7 @@
  * Handling of buftargs.
  */
 extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int);
-extern void xfs_free_buftarg(xfs_buftarg_t *);
+extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
 extern void xfs_wait_buftarg(xfs_buftarg_t *);
 extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
 extern int xfs_flush_buftarg(xfs_buftarg_t *, int);
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -734,15 +734,15 @@
 {
 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
 		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
-		xfs_free_buftarg(mp->m_logdev_targp);
+		xfs_free_buftarg(mp, mp->m_logdev_targp);
 		xfs_blkdev_put(logdev);
 	}
 	if (mp->m_rtdev_targp) {
 		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
-		xfs_free_buftarg(mp->m_rtdev_targp);
+		xfs_free_buftarg(mp, mp->m_rtdev_targp);
 		xfs_blkdev_put(rtdev);
 	}
-	xfs_free_buftarg(mp->m_ddev_targp);
+	xfs_free_buftarg(mp, mp->m_ddev_targp);
 }
 
 /*
@@ -811,9 +811,9 @@
 
 out_free_rtdev_targ:
 	if (mp->m_rtdev_targp)
-		xfs_free_buftarg(mp->m_rtdev_targp);
+		xfs_free_buftarg(mp, mp->m_rtdev_targp);
 out_free_ddev_targ:
-	xfs_free_buftarg(mp->m_ddev_targp);
+	xfs_free_buftarg(mp, mp->m_ddev_targp);
 out_close_rtdev:
 	if (rtdev)
 		xfs_blkdev_put(rtdev);
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -246,9 +246,6 @@
 		goto out_destroy;
 	}
 
-	if (lock_flags)
-		xfs_ilock(ip, lock_flags);
-
 	/*
 	 * Preload the radix tree so we can insert safely under the
 	 * write spinlock. Note that we cannot sleep inside the preload
@@ -256,7 +253,16 @@
 	 */
 	if (radix_tree_preload(GFP_KERNEL)) {
 		error = EAGAIN;
-		goto out_unlock;
+		goto out_destroy;
+	}
+
+	/*
+	 * Because the inode hasn't been added to the radix-tree yet it can't
+	 * be found by another thread, so we can do the non-sleeping lock here.
+	 */
+	if (lock_flags) {
+		if (!xfs_ilock_nowait(ip, lock_flags))
+			BUG();
 	}
 
 	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
@@ -290,7 +296,6 @@
 out_preload_end:
 	write_unlock(&pag->pag_ici_lock);
 	radix_tree_preload_end();
-out_unlock:
 	if (lock_flags)
 		xfs_iunlock(ip, lock_flags);
 out_destroy:
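The comment added in the hunk carries the key invariant: the inode is not in
the radix tree yet, so no other thread can find it, and the non-blocking lock
can never fail. Acquiring it via xfs_ilock_nowait() instead of xfs_ilock() is
what silences the lockdep false positive named in the commit subject. A
minimal pthreads sketch of the same trylock-before-publish pattern (the names
inode_stub and alloc_locked_inode are hypothetical):

	#include <pthread.h>
	#include <stdlib.h>

	struct inode_stub {
		pthread_mutex_t	lock;
		int		data;
	};

	static struct inode_stub *alloc_locked_inode(void)
	{
		struct inode_stub *ip = malloc(sizeof(*ip));

		if (!ip)
			return NULL;
		pthread_mutex_init(&ip->lock, NULL);

		/*
		 * The object is not reachable by any other thread yet, so the
		 * trylock cannot contend; failure would mean a logic bug,
		 * which is why the kernel patch BUG()s at the same point.
		 */
		if (pthread_mutex_trylock(&ip->lock) != 0)
			abort();

		ip->data = 42;
		/* ... only now would the object be published to a shared index ... */
		return ip;
	}

	int main(void)
	{
		struct inode_stub *ip = alloc_locked_inode();

		if (ip) {
			pthread_mutex_unlock(&ip->lock);
			pthread_mutex_destroy(&ip->lock);
			free(ip);
		}
		return 0;
	}
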
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1455,10 +1455,19 @@
 		item = item->ri_prev;
 
 	if (item->ri_total == 0) {	/* first region to be added */
-		item->ri_total = in_f->ilf_size;
-		ASSERT(item->ri_total <= XLOG_MAX_REGIONS_IN_ITEM);
-		item->ri_buf = kmem_zalloc((item->ri_total *
-				sizeof(xfs_log_iovec_t)), KM_SLEEP);
+		if (in_f->ilf_size == 0 ||
+		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
+			xlog_warn(
+	"XFS: bad number of regions (%d) in inode log format",
+				in_f->ilf_size);
+			ASSERT(0);
+			return XFS_ERROR(EIO);
+		}
+
+		item->ri_total = in_f->ilf_size;
+		item->ri_buf =
+			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
+				    KM_SLEEP);
 	}
 	ASSERT(item->ri_total > item->ri_cnt);
 	/* Description region is ri_buf[0] */
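ilf_size comes straight from the on-disk log, so a corrupted record could
previously feed kmem_zalloc() an absurd size; the old ASSERT only caught that
on debug builds, while the new check turns it into a graceful EIO on every
build. A minimal sketch of the same validate-before-allocate pattern for
untrusted on-disk counts (MAX_REGIONS, struct region and alloc_regions are
hypothetical):

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define MAX_REGIONS	256	/* hypothetical bound, cf. XLOG_MAX_REGIONS_IN_ITEM */

	struct region {
		void	*base;
		size_t	len;
	};

	static int alloc_regions(unsigned int disk_count, struct region **out)
	{
		/* reject impossible values instead of trusting corrupt metadata */
		if (disk_count == 0 || disk_count > MAX_REGIONS) {
			fprintf(stderr, "bad region count %u in log record\n",
				disk_count);
			return -EIO;
		}

		*out = calloc(disk_count, sizeof(struct region));
		return *out ? 0 : -ENOMEM;
	}

	int main(void)
	{
		struct region *regions;

		if (alloc_regions(0xffffffffu, &regions) == -EIO)
			puts("corrupt count rejected before allocation");
		if (alloc_regions(8, &regions) == 0) {
			puts("sane count accepted");
			free(regions);
		}
		return 0;
	}
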