Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs

* 'for-linus' of git://oss.sgi.com/xfs/xfs:
  xfs: only issues a cache flush on unmount if barriers are enabled
  xfs: prevent lockdep false positive in xfs_iget_cache_miss
  xfs: prevent kernel crash due to corrupted inode log format

5 files changed, 39 insertions(+), 17 deletions(-)

fs/xfs/linux-2.6/xfs_buf.c (+10 -2)
@@ -34,6 +34,12 @@
 #include <linux/backing-dev.h>
 #include <linux/freezer.h>
 
+#include "xfs_sb.h"
+#include "xfs_inum.h"
+#include "xfs_ag.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+
 static kmem_zone_t *xfs_buf_zone;
 STATIC int xfsbufd(void *);
 STATIC int xfsbufd_wakeup(int, gfp_t);
@@ -1435,10 +1441,12 @@
 
 void
 xfs_free_buftarg(
-        xfs_buftarg_t           *btp)
+        struct xfs_mount        *mp,
+        struct xfs_buftarg      *btp)
 {
         xfs_flush_buftarg(btp, 1);
-        xfs_blkdev_issue_flush(btp);
+        if (mp->m_flags & XFS_MOUNT_BARRIER)
+                xfs_blkdev_issue_flush(btp);
         xfs_free_bufhash(btp);
         iput(btp->bt_mapping->host);
 
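
The xfs_buf.c change threads the xfs_mount into xfs_free_buftarg() so the unmount path only pays for a block-device cache flush when the filesystem is actually using barriers. As a rough illustration of that "flush only when the flag says so" teardown pattern, here is a minimal userspace C sketch; it is not XFS code, and the mount/buftarg/free_buftarg names and MOUNT_BARRIER flag below are invented for the example.

/*
 * Illustrative sketch only: flush a backing file on teardown only when a
 * "barrier" flag is set, mirroring the XFS_MOUNT_BARRIER check in the hunk
 * above.  All names here are hypothetical, not XFS APIs.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define MOUNT_BARRIER   0x1     /* stand-in for XFS_MOUNT_BARRIER */

struct mount {
        unsigned int    flags;
};

struct buftarg {
        int             fd;     /* backing device/file descriptor */
};

static void free_buftarg(struct mount *mp, struct buftarg *btp)
{
        /* Only issue the (expensive) cache flush if barriers are in use. */
        if (mp->flags & MOUNT_BARRIER)
                fsync(btp->fd);
        close(btp->fd);
}

int main(void)
{
        struct mount mp = { .flags = MOUNT_BARRIER };
        struct buftarg btp;

        btp.fd = open("/tmp/buftarg-demo", O_CREAT | O_WRONLY, 0600);
        if (btp.fd < 0) {
                perror("open");
                return 1;
        }
        if (write(btp.fd, "data\n", 5) != 5)
                perror("write");
        free_buftarg(&mp, &btp);
        return 0;
}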

fs/xfs/linux-2.6/xfs_buf.h (+1 -1)
@@ -413,7 +413,7 @@
  * Handling of buftargs.
  */
 extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int);
-extern void xfs_free_buftarg(xfs_buftarg_t *);
+extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
 extern void xfs_wait_buftarg(xfs_buftarg_t *);
 extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
 extern int xfs_flush_buftarg(xfs_buftarg_t *, int);

fs/xfs/linux-2.6/xfs_super.c (+5 -5)
@@ -734,15 +734,15 @@
 {
         if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
                 struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
-                xfs_free_buftarg(mp->m_logdev_targp);
+                xfs_free_buftarg(mp, mp->m_logdev_targp);
                 xfs_blkdev_put(logdev);
         }
         if (mp->m_rtdev_targp) {
                 struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
-                xfs_free_buftarg(mp->m_rtdev_targp);
+                xfs_free_buftarg(mp, mp->m_rtdev_targp);
                 xfs_blkdev_put(rtdev);
         }
-        xfs_free_buftarg(mp->m_ddev_targp);
+        xfs_free_buftarg(mp, mp->m_ddev_targp);
 }
 
 /*
@@ -811,9 +811,9 @@
 
 out_free_rtdev_targ:
         if (mp->m_rtdev_targp)
-                xfs_free_buftarg(mp->m_rtdev_targp);
+                xfs_free_buftarg(mp, mp->m_rtdev_targp);
 out_free_ddev_targ:
-        xfs_free_buftarg(mp->m_ddev_targp);
+        xfs_free_buftarg(mp, mp->m_ddev_targp);
 out_close_rtdev:
         if (rtdev)
                 xfs_blkdev_put(rtdev);

fs/xfs/xfs_iget.c (+10 -5)
@@ -246,9 +246,6 @@
                 goto out_destroy;
         }
 
-        if (lock_flags)
-                xfs_ilock(ip, lock_flags);
-
         /*
          * Preload the radix tree so we can insert safely under the
          * write spinlock. Note that we cannot sleep inside the preload
@@ -256,7 +253,16 @@
          */
         if (radix_tree_preload(GFP_KERNEL)) {
                 error = EAGAIN;
-                goto out_unlock;
+                goto out_destroy;
+        }
+
+        /*
+         * Because the inode hasn't been added to the radix-tree yet it can't
+         * be found by another thread, so we can do the non-sleeping lock here.
+         */
+        if (lock_flags) {
+                if (!xfs_ilock_nowait(ip, lock_flags))
+                        BUG();
         }
 
         mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
@@ -284,7 +290,6 @@
 out_preload_end:
         write_unlock(&pag->pag_ici_lock);
         radix_tree_preload_end();
-out_unlock:
         if (lock_flags)
                 xfs_iunlock(ip, lock_flags);
 out_destroy:
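
The xfs_iget.c fix moves the inode lock acquisition to after radix_tree_preload() and takes it with xfs_ilock_nowait(): as the new comment in the hunk says, the inode has not been published in the radix tree yet, so no other thread can contend for the lock, the non-sleeping trylock cannot fail, and the blocking lock acquisition that lockdep was flagging goes away. Below is a minimal pthread sketch of the same "lock before publish, trylock cannot fail" idea; every name in it is invented for illustration, and it is not the XFS locking code.

/*
 * Illustrative sketch only: lock a newly allocated object with a trylock
 * before it becomes visible to other threads.  Since nothing else can reach
 * the object yet, the trylock must succeed -- the same reasoning the hunk
 * above applies with xfs_ilock_nowait().  All names are hypothetical.
 */
#include <pthread.h>
#include <stdlib.h>

struct inode_demo {
        pthread_mutex_t         lock;
        struct inode_demo       *next;
};

static struct inode_demo *cache;        /* shared stand-in for the radix tree */
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

static struct inode_demo *new_inode_locked(void)
{
        struct inode_demo *ip = calloc(1, sizeof(*ip));

        if (!ip)
                return NULL;
        pthread_mutex_init(&ip->lock, NULL);

        /* Not yet visible to any other thread: the trylock cannot fail. */
        if (pthread_mutex_trylock(&ip->lock) != 0)
                abort();        /* analogous to BUG() in the hunk above */

        /* Only now publish the already-locked object to the shared cache. */
        pthread_mutex_lock(&cache_lock);
        ip->next = cache;
        cache = ip;
        pthread_mutex_unlock(&cache_lock);

        return ip;              /* returned with ip->lock held */
}

int main(void)
{
        struct inode_demo *ip = new_inode_locked();

        if (ip)
                pthread_mutex_unlock(&ip->lock);
        return 0;
}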

fs/xfs/xfs_log_recover.c (+13 -4)
@@ -1455,10 +1455,19 @@
                 item = item->ri_prev;
 
         if (item->ri_total == 0) {     /* first region to be added */
-                item->ri_total = in_f->ilf_size;
-                ASSERT(item->ri_total <= XLOG_MAX_REGIONS_IN_ITEM);
-                item->ri_buf = kmem_zalloc((item->ri_total *
-                                sizeof(xfs_log_iovec_t)), KM_SLEEP);
+                if (in_f->ilf_size == 0 ||
+                    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
+                        xlog_warn(
+        "XFS: bad number of regions (%d) in inode log format",
+                                  in_f->ilf_size);
+                        ASSERT(0);
+                        return XFS_ERROR(EIO);
+                }
+
+                item->ri_total = in_f->ilf_size;
+                item->ri_buf =
+                        kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
+                                    KM_SLEEP);
         }
         ASSERT(item->ri_total > item->ri_cnt);
         /* Description region is ri_buf[0] */
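
The log-recovery change stops trusting in_f->ilf_size read from the log: a region count of zero, or greater than XLOG_MAX_REGIONS_IN_ITEM, is now reported and turned into EIO instead of being used to size the kmem_zalloc(). The same defensive pattern in a standalone C sketch; MAX_REGIONS, log_vec and alloc_regions are made-up names for illustration, not XFS APIs.

/*
 * Illustrative sketch only: bounds-check a count read from possibly
 * corrupted on-disk metadata before using it to size an allocation,
 * returning an error instead of crashing.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_REGIONS     256     /* stand-in for XLOG_MAX_REGIONS_IN_ITEM */

struct log_vec {
        void    *ptr;
        size_t  len;
};

/* Returns 0 on success, -EIO if the on-disk size field is corrupt. */
static int alloc_regions(unsigned int disk_size, struct log_vec **bufp)
{
        if (disk_size == 0 || disk_size > MAX_REGIONS) {
                fprintf(stderr,
                        "bad number of regions (%u) in inode log format\n",
                        disk_size);
                return -EIO;
        }

        *bufp = calloc(disk_size, sizeof(struct log_vec));
        if (!*bufp)
                return -ENOMEM;
        return 0;
}

int main(void)
{
        struct log_vec *buf;

        /* A corrupted value such as 0 is rejected instead of crashing. */
        if (alloc_regions(0, &buf) == -EIO)
                fprintf(stderr, "rejected corrupt region count\n");

        if (alloc_regions(4, &buf) == 0)
                free(buf);
        return 0;
}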