Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'xfs-for-linus-3.17-rc1' of git://oss.sgi.com/xfs/xfs

Pull xfs update from Dave Chinner:
"This update contains:
- conversion of the XFS core to pass negative error numbers
- restructuring of core XFS code that is shared with userspace to
fs/xfs/libxfs
- introduction of sysfs interface for XFS
- bulkstat refactoring
- demand driven speculative preallocation removal
- XFS now always requires 64 bit sectors to be configured
- metadata verifier changes to ensure CRCs are calculated during log
recovery
- various minor code cleanups
- miscellaneous bug fixes

The diffstat is kind of noisy because of the restructuring of the code
to make kernel/userspace code sharing simpler, along with the XFS wide
change to use the standard negative error return convention (at last!)"

* tag 'xfs-for-linus-3.17-rc1' of git://oss.sgi.com/xfs/xfs: (45 commits)
xfs: fix coccinelle warnings
xfs: flush both inodes in xfs_swap_extents
xfs: fix swapext ilock deadlock
xfs: kill xfs_vnode.h
xfs: kill VN_MAPPED
xfs: kill VN_CACHED
xfs: kill VN_DIRTY()
xfs: dquot recovery needs verifiers
xfs: quotacheck leaves dquot buffers without verifiers
xfs: ensure verifiers are attached to recovered buffers
xfs: catch buffers written without verifiers attached
xfs: avoid false quotacheck after unclean shutdown
xfs: fix rounding error of fiemap length parameter
xfs: introduce xfs_bulkstat_ag_ichunk
xfs: require 64-bit sector_t
xfs: fix uflags detection at xfs_fs_rm_xquota
xfs: remove XFS_IS_OQUOTA_ON macros
xfs: tidy up xfs_set_inode32
xfs: allow inode allocations in post-growfs disk space
xfs: mark xfs_qm_quotacheck as static
...

+2249 -2041
+39
Documentation/ABI/testing/sysfs-fs-xfs
··· 1 + What: /sys/fs/xfs/<disk>/log/log_head_lsn 2 + Date: July 2014 3 + KernelVersion: 3.17 4 + Contact: xfs@oss.sgi.com 5 + Description: 6 + The log sequence number (LSN) of the current head of the 7 + log. The LSN is exported in "cycle:basic block" format. 8 + Users: xfstests 9 + 10 + What: /sys/fs/xfs/<disk>/log/log_tail_lsn 11 + Date: July 2014 12 + KernelVersion: 3.17 13 + Contact: xfs@oss.sgi.com 14 + Description: 15 + The log sequence number (LSN) of the current tail of the 16 + log. The LSN is exported in "cycle:basic block" format. 17 + 18 + What: /sys/fs/xfs/<disk>/log/reserve_grant_head 19 + Date: July 2014 20 + KernelVersion: 3.17 21 + Contact: xfs@oss.sgi.com 22 + Description: 23 + The current state of the log reserve grant head. It 24 + represents the total log reservation of all currently 25 + outstanding transactions. The grant head is exported in 26 + "cycle:bytes" format. 27 + Users: xfstests 28 + 29 + What: /sys/fs/xfs/<disk>/log/write_grant_head 30 + Date: July 2014 31 + KernelVersion: 3.17 32 + Contact: xfs@oss.sgi.com 33 + Description: 34 + The current state of the log write grant head. It 35 + represents the total log reservation of all currently 36 + outstanding transactions, including regrants due to 37 + rolling transactions. The grant head is exported in 38 + "cycle:bytes" format. 39 + Users: xfstests
+1
fs/xfs/Kconfig
··· 1 1 config XFS_FS 2 2 tristate "XFS filesystem support" 3 3 depends on BLOCK 4 + depends on (64BIT || LBDAF) 4 5 select EXPORTFS 5 6 select LIBCRC32C 6 7 help
+39 -32
fs/xfs/Makefile
··· 17 17 # 18 18 19 19 ccflags-y += -I$(src) # needed for trace events 20 + ccflags-y += -I$(src)/libxfs 20 21 21 22 ccflags-$(CONFIG_XFS_DEBUG) += -g 22 23 ··· 25 24 26 25 # this one should be compiled first, as the tracing macros can easily blow up 27 26 xfs-y += xfs_trace.o 27 + 28 + # build the libxfs code first 29 + xfs-y += $(addprefix libxfs/, \ 30 + xfs_alloc.o \ 31 + xfs_alloc_btree.o \ 32 + xfs_attr.o \ 33 + xfs_attr_leaf.o \ 34 + xfs_attr_remote.o \ 35 + xfs_bmap.o \ 36 + xfs_bmap_btree.o \ 37 + xfs_btree.o \ 38 + xfs_da_btree.o \ 39 + xfs_da_format.o \ 40 + xfs_dir2.o \ 41 + xfs_dir2_block.o \ 42 + xfs_dir2_data.o \ 43 + xfs_dir2_leaf.o \ 44 + xfs_dir2_node.o \ 45 + xfs_dir2_sf.o \ 46 + xfs_dquot_buf.o \ 47 + xfs_ialloc.o \ 48 + xfs_ialloc_btree.o \ 49 + xfs_inode_fork.o \ 50 + xfs_inode_buf.o \ 51 + xfs_log_rlimit.o \ 52 + xfs_sb.o \ 53 + xfs_symlink_remote.o \ 54 + xfs_trans_resv.o \ 55 + ) 56 + # xfs_rtbitmap is shared with libxfs 57 + xfs-$(CONFIG_XFS_RT) += $(addprefix libxfs/, \ 58 + xfs_rtbitmap.o \ 59 + ) 28 60 29 61 # highlevel code 30 62 xfs-y += xfs_aops.o \ ··· 79 45 xfs_ioctl.o \ 80 46 xfs_iomap.o \ 81 47 xfs_iops.o \ 48 + xfs_inode.o \ 82 49 xfs_itable.o \ 83 50 xfs_message.o \ 84 51 xfs_mount.o \ 85 52 xfs_mru_cache.o \ 86 53 xfs_super.o \ 87 54 xfs_symlink.o \ 55 + xfs_sysfs.o \ 88 56 xfs_trans.o \ 89 57 xfs_xattr.o \ 90 58 kmem.o \ 91 59 uuid.o 92 - 93 - # code shared with libxfs 94 - xfs-y += xfs_alloc.o \ 95 - xfs_alloc_btree.o \ 96 - xfs_attr.o \ 97 - xfs_attr_leaf.o \ 98 - xfs_attr_remote.o \ 99 - xfs_bmap.o \ 100 - xfs_bmap_btree.o \ 101 - xfs_btree.o \ 102 - xfs_da_btree.o \ 103 - xfs_da_format.o \ 104 - xfs_dir2.o \ 105 - xfs_dir2_block.o \ 106 - xfs_dir2_data.o \ 107 - xfs_dir2_leaf.o \ 108 - xfs_dir2_node.o \ 109 - xfs_dir2_sf.o \ 110 - xfs_dquot_buf.o \ 111 - xfs_ialloc.o \ 112 - xfs_ialloc_btree.o \ 113 - xfs_icreate_item.o \ 114 - xfs_inode.o \ 115 - xfs_inode_fork.o \ 116 - xfs_inode_buf.o \ 117 - xfs_log_recover.o \ 118 - 
xfs_log_rlimit.o \ 119 - xfs_sb.o \ 120 - xfs_symlink_remote.o \ 121 - xfs_trans_resv.o 122 60 123 61 # low-level transaction/log code 124 62 xfs-y += xfs_log.o \ 125 63 xfs_log_cil.o \ 126 64 xfs_buf_item.o \ 127 65 xfs_extfree_item.o \ 66 + xfs_icreate_item.o \ 128 67 xfs_inode_item.o \ 68 + xfs_log_recover.o \ 129 69 xfs_trans_ail.o \ 130 70 xfs_trans_buf.o \ 131 71 xfs_trans_extfree.o \ ··· 115 107 xfs_quotaops.o 116 108 117 109 # xfs_rtbitmap is shared with libxfs 118 - xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o \ 119 - xfs_rtbitmap.o 110 + xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o 120 111 121 112 xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o 122 113 xfs-$(CONFIG_PROC_FS) += xfs_stats.o
+4 -4
fs/xfs/xfs_acl.c
··· 152 152 if (!xfs_acl) 153 153 return ERR_PTR(-ENOMEM); 154 154 155 - error = -xfs_attr_get(ip, ea_name, (unsigned char *)xfs_acl, 155 + error = xfs_attr_get(ip, ea_name, (unsigned char *)xfs_acl, 156 156 &len, ATTR_ROOT); 157 157 if (error) { 158 158 /* ··· 210 210 len -= sizeof(struct xfs_acl_entry) * 211 211 (XFS_ACL_MAX_ENTRIES(ip->i_mount) - acl->a_count); 212 212 213 - error = -xfs_attr_set(ip, ea_name, (unsigned char *)xfs_acl, 213 + error = xfs_attr_set(ip, ea_name, (unsigned char *)xfs_acl, 214 214 len, ATTR_ROOT); 215 215 216 216 kmem_free(xfs_acl); ··· 218 218 /* 219 219 * A NULL ACL argument means we want to remove the ACL. 220 220 */ 221 - error = -xfs_attr_remove(ip, ea_name, ATTR_ROOT); 221 + error = xfs_attr_remove(ip, ea_name, ATTR_ROOT); 222 222 223 223 /* 224 224 * If the attribute didn't exist to start with that's fine. ··· 244 244 iattr.ia_mode = mode; 245 245 iattr.ia_ctime = current_fs_time(inode->i_sb); 246 246 247 - error = -xfs_setattr_nonsize(XFS_I(inode), &iattr, XFS_ATTR_NOACL); 247 + error = xfs_setattr_nonsize(XFS_I(inode), &iattr, XFS_ATTR_NOACL); 248 248 } 249 249 250 250 return error;
fs/xfs/xfs_ag.h fs/xfs/libxfs/xfs_ag.h
+10 -10
fs/xfs/xfs_alloc.c fs/xfs/libxfs/xfs_alloc.c
··· 483 483 return; 484 484 485 485 if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF)) 486 - xfs_buf_ioerror(bp, EFSBADCRC); 486 + xfs_buf_ioerror(bp, -EFSBADCRC); 487 487 else if (!xfs_agfl_verify(bp)) 488 - xfs_buf_ioerror(bp, EFSCORRUPTED); 488 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 489 489 490 490 if (bp->b_error) 491 491 xfs_verifier_error(bp); ··· 503 503 return; 504 504 505 505 if (!xfs_agfl_verify(bp)) { 506 - xfs_buf_ioerror(bp, EFSCORRUPTED); 506 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 507 507 xfs_verifier_error(bp); 508 508 return; 509 509 } ··· 559 559 xfs_trans_agblocks_delta(tp, len); 560 560 if (unlikely(be32_to_cpu(agf->agf_freeblks) > 561 561 be32_to_cpu(agf->agf_length))) 562 - return EFSCORRUPTED; 562 + return -EFSCORRUPTED; 563 563 564 564 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS); 565 565 return 0; ··· 2234 2234 2235 2235 if (xfs_sb_version_hascrc(&mp->m_sb) && 2236 2236 !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF)) 2237 - xfs_buf_ioerror(bp, EFSBADCRC); 2237 + xfs_buf_ioerror(bp, -EFSBADCRC); 2238 2238 else if (XFS_TEST_ERROR(!xfs_agf_verify(mp, bp), mp, 2239 2239 XFS_ERRTAG_ALLOC_READ_AGF, 2240 2240 XFS_RANDOM_ALLOC_READ_AGF)) 2241 - xfs_buf_ioerror(bp, EFSCORRUPTED); 2241 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 2242 2242 2243 2243 if (bp->b_error) 2244 2244 xfs_verifier_error(bp); ··· 2252 2252 struct xfs_buf_log_item *bip = bp->b_fspriv; 2253 2253 2254 2254 if (!xfs_agf_verify(mp, bp)) { 2255 - xfs_buf_ioerror(bp, EFSCORRUPTED); 2255 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 2256 2256 xfs_verifier_error(bp); 2257 2257 return; 2258 2258 } ··· 2601 2601 */ 2602 2602 args.agno = XFS_FSB_TO_AGNO(args.mp, bno); 2603 2603 if (args.agno >= args.mp->m_sb.sb_agcount) 2604 - return EFSCORRUPTED; 2604 + return -EFSCORRUPTED; 2605 2605 2606 2606 args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno); 2607 2607 if (args.agbno >= args.mp->m_sb.sb_agblocks) 2608 - return EFSCORRUPTED; 2608 + return -EFSCORRUPTED; 2609 2609 2610 2610 args.pag = xfs_perag_get(args.mp, 
args.agno); 2611 2611 ASSERT(args.pag); ··· 2617 2617 /* validate the extent size is legal now we have the agf locked */ 2618 2618 if (args.agbno + len > 2619 2619 be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)) { 2620 - error = EFSCORRUPTED; 2620 + error = -EFSCORRUPTED; 2621 2621 goto error0; 2622 2622 } 2623 2623
fs/xfs/xfs_alloc.h fs/xfs/libxfs/xfs_alloc.h
+3 -3
fs/xfs/xfs_alloc_btree.c fs/xfs/libxfs/xfs_alloc_btree.c
··· 355 355 struct xfs_buf *bp) 356 356 { 357 357 if (!xfs_btree_sblock_verify_crc(bp)) 358 - xfs_buf_ioerror(bp, EFSBADCRC); 358 + xfs_buf_ioerror(bp, -EFSBADCRC); 359 359 else if (!xfs_allocbt_verify(bp)) 360 - xfs_buf_ioerror(bp, EFSCORRUPTED); 360 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 361 361 362 362 if (bp->b_error) { 363 363 trace_xfs_btree_corrupt(bp, _RET_IP_); ··· 371 371 { 372 372 if (!xfs_allocbt_verify(bp)) { 373 373 trace_xfs_btree_corrupt(bp, _RET_IP_); 374 - xfs_buf_ioerror(bp, EFSCORRUPTED); 374 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 375 375 xfs_verifier_error(bp); 376 376 return; 377 377 }
fs/xfs/xfs_alloc_btree.h fs/xfs/libxfs/xfs_alloc_btree.h
+9 -9
fs/xfs/xfs_aops.c
··· 240 240 241 241 done: 242 242 if (error) 243 - ioend->io_error = -error; 243 + ioend->io_error = error; 244 244 xfs_destroy_ioend(ioend); 245 245 } 246 246 ··· 308 308 int nimaps = 1; 309 309 310 310 if (XFS_FORCED_SHUTDOWN(mp)) 311 - return -XFS_ERROR(EIO); 311 + return -EIO; 312 312 313 313 if (type == XFS_IO_UNWRITTEN) 314 314 bmapi_flags |= XFS_BMAPI_IGSTATE; 315 315 316 316 if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) { 317 317 if (nonblocking) 318 - return -XFS_ERROR(EAGAIN); 318 + return -EAGAIN; 319 319 xfs_ilock(ip, XFS_ILOCK_SHARED); 320 320 } 321 321 ··· 332 332 xfs_iunlock(ip, XFS_ILOCK_SHARED); 333 333 334 334 if (error) 335 - return -XFS_ERROR(error); 335 + return error; 336 336 337 337 if (type == XFS_IO_DELALLOC && 338 338 (!nimaps || isnullstartblock(imap->br_startblock))) { 339 339 error = xfs_iomap_write_allocate(ip, offset, imap); 340 340 if (!error) 341 341 trace_xfs_map_blocks_alloc(ip, offset, count, type, imap); 342 - return -XFS_ERROR(error); 342 + return error; 343 343 } 344 344 345 345 #ifdef DEBUG ··· 502 502 * time. 503 503 */ 504 504 if (fail) { 505 - ioend->io_error = -fail; 505 + ioend->io_error = fail; 506 506 xfs_finish_ioend(ioend); 507 507 continue; 508 508 } ··· 1253 1253 int new = 0; 1254 1254 1255 1255 if (XFS_FORCED_SHUTDOWN(mp)) 1256 - return -XFS_ERROR(EIO); 1256 + return -EIO; 1257 1257 1258 1258 offset = (xfs_off_t)iblock << inode->i_blkbits; 1259 1259 ASSERT(bh_result->b_size >= (1 << inode->i_blkbits)); ··· 1302 1302 error = xfs_iomap_write_direct(ip, offset, size, 1303 1303 &imap, nimaps); 1304 1304 if (error) 1305 - return -error; 1305 + return error; 1306 1306 new = 1; 1307 1307 } else { 1308 1308 /* ··· 1415 1415 1416 1416 out_unlock: 1417 1417 xfs_iunlock(ip, lockmode); 1418 - return -error; 1418 + return error; 1419 1419 } 1420 1420 1421 1421 int
+46 -46
fs/xfs/xfs_attr.c fs/xfs/libxfs/xfs_attr.c
··· 85 85 { 86 86 87 87 if (!name) 88 - return EINVAL; 88 + return -EINVAL; 89 89 90 90 memset(args, 0, sizeof(*args)); 91 91 args->geo = dp->i_mount->m_attr_geo; ··· 95 95 args->name = name; 96 96 args->namelen = strlen((const char *)name); 97 97 if (args->namelen >= MAXNAMELEN) 98 - return EFAULT; /* match IRIX behaviour */ 98 + return -EFAULT; /* match IRIX behaviour */ 99 99 100 100 args->hashval = xfs_da_hashname(args->name, args->namelen); 101 101 return 0; ··· 131 131 XFS_STATS_INC(xs_attr_get); 132 132 133 133 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) 134 - return EIO; 134 + return -EIO; 135 135 136 136 if (!xfs_inode_hasattr(ip)) 137 - return ENOATTR; 137 + return -ENOATTR; 138 138 139 139 error = xfs_attr_args_init(&args, ip, name, flags); 140 140 if (error) ··· 145 145 146 146 lock_mode = xfs_ilock_attr_map_shared(ip); 147 147 if (!xfs_inode_hasattr(ip)) 148 - error = ENOATTR; 148 + error = -ENOATTR; 149 149 else if (ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) 150 150 error = xfs_attr_shortform_getvalue(&args); 151 151 else if (xfs_bmap_one_block(ip, XFS_ATTR_FORK)) ··· 155 155 xfs_iunlock(ip, lock_mode); 156 156 157 157 *valuelenp = args.valuelen; 158 - return error == EEXIST ? 0 : error; 158 + return error == -EEXIST ? 0 : error; 159 159 } 160 160 161 161 /* ··· 213 213 XFS_STATS_INC(xs_attr_set); 214 214 215 215 if (XFS_FORCED_SHUTDOWN(dp->i_mount)) 216 - return EIO; 216 + return -EIO; 217 217 218 218 error = xfs_attr_args_init(&args, dp, name, flags); 219 219 if (error) ··· 304 304 * the inode. 305 305 */ 306 306 error = xfs_attr_shortform_addname(&args); 307 - if (error != ENOSPC) { 307 + if (error != -ENOSPC) { 308 308 /* 309 309 * Commit the shortform mods, and we're done. 310 310 * NOTE: this is also the error path (EEXIST, etc). 
··· 419 419 XFS_STATS_INC(xs_attr_remove); 420 420 421 421 if (XFS_FORCED_SHUTDOWN(dp->i_mount)) 422 - return EIO; 422 + return -EIO; 423 423 424 424 if (!xfs_inode_hasattr(dp)) 425 - return ENOATTR; 425 + return -ENOATTR; 426 426 427 427 error = xfs_attr_args_init(&args, dp, name, flags); 428 428 if (error) ··· 477 477 xfs_trans_ijoin(args.trans, dp, 0); 478 478 479 479 if (!xfs_inode_hasattr(dp)) { 480 - error = XFS_ERROR(ENOATTR); 480 + error = -ENOATTR; 481 481 } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { 482 482 ASSERT(dp->i_afp->if_flags & XFS_IFINLINE); 483 483 error = xfs_attr_shortform_remove(&args); ··· 534 534 trace_xfs_attr_sf_addname(args); 535 535 536 536 retval = xfs_attr_shortform_lookup(args); 537 - if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) { 538 - return(retval); 539 - } else if (retval == EEXIST) { 537 + if ((args->flags & ATTR_REPLACE) && (retval == -ENOATTR)) { 538 + return retval; 539 + } else if (retval == -EEXIST) { 540 540 if (args->flags & ATTR_CREATE) 541 - return(retval); 541 + return retval; 542 542 retval = xfs_attr_shortform_remove(args); 543 543 ASSERT(retval == 0); 544 544 } 545 545 546 546 if (args->namelen >= XFS_ATTR_SF_ENTSIZE_MAX || 547 547 args->valuelen >= XFS_ATTR_SF_ENTSIZE_MAX) 548 - return(XFS_ERROR(ENOSPC)); 548 + return -ENOSPC; 549 549 550 550 newsize = XFS_ATTR_SF_TOTSIZE(args->dp); 551 551 newsize += XFS_ATTR_SF_ENTSIZE_BYNAME(args->namelen, args->valuelen); 552 552 553 553 forkoff = xfs_attr_shortform_bytesfit(args->dp, newsize); 554 554 if (!forkoff) 555 - return(XFS_ERROR(ENOSPC)); 555 + return -ENOSPC; 556 556 557 557 xfs_attr_shortform_add(args, forkoff); 558 - return(0); 558 + return 0; 559 559 } 560 560 561 561 ··· 592 592 * the given flags produce an error or call for an atomic rename. 
593 593 */ 594 594 retval = xfs_attr3_leaf_lookup_int(bp, args); 595 - if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) { 595 + if ((args->flags & ATTR_REPLACE) && (retval == -ENOATTR)) { 596 596 xfs_trans_brelse(args->trans, bp); 597 597 return retval; 598 - } else if (retval == EEXIST) { 598 + } else if (retval == -EEXIST) { 599 599 if (args->flags & ATTR_CREATE) { /* pure create op */ 600 600 xfs_trans_brelse(args->trans, bp); 601 601 return retval; ··· 626 626 * if required. 627 627 */ 628 628 retval = xfs_attr3_leaf_add(bp, args); 629 - if (retval == ENOSPC) { 629 + if (retval == -ENOSPC) { 630 630 /* 631 631 * Promote the attribute list to the Btree format, then 632 632 * Commit that transaction so that the node_addname() call ··· 642 642 ASSERT(committed); 643 643 args->trans = NULL; 644 644 xfs_bmap_cancel(args->flist); 645 - return(error); 645 + return error; 646 646 } 647 647 648 648 /* ··· 658 658 */ 659 659 error = xfs_trans_roll(&args->trans, dp); 660 660 if (error) 661 - return (error); 661 + return error; 662 662 663 663 /* 664 664 * Fob the whole rest of the problem off on the Btree code. 
665 665 */ 666 666 error = xfs_attr_node_addname(args); 667 - return(error); 667 + return error; 668 668 } 669 669 670 670 /* ··· 673 673 */ 674 674 error = xfs_trans_roll(&args->trans, dp); 675 675 if (error) 676 - return (error); 676 + return error; 677 677 678 678 /* 679 679 * If there was an out-of-line value, allocate the blocks we ··· 684 684 if (args->rmtblkno > 0) { 685 685 error = xfs_attr_rmtval_set(args); 686 686 if (error) 687 - return(error); 687 + return error; 688 688 } 689 689 690 690 /* ··· 700 700 */ 701 701 error = xfs_attr3_leaf_flipflags(args); 702 702 if (error) 703 - return(error); 703 + return error; 704 704 705 705 /* 706 706 * Dismantle the "old" attribute/value pair by removing ··· 714 714 if (args->rmtblkno) { 715 715 error = xfs_attr_rmtval_remove(args); 716 716 if (error) 717 - return(error); 717 + return error; 718 718 } 719 719 720 720 /* ··· 744 744 ASSERT(committed); 745 745 args->trans = NULL; 746 746 xfs_bmap_cancel(args->flist); 747 - return(error); 747 + return error; 748 748 } 749 749 750 750 /* ··· 795 795 return error; 796 796 797 797 error = xfs_attr3_leaf_lookup_int(bp, args); 798 - if (error == ENOATTR) { 798 + if (error == -ENOATTR) { 799 799 xfs_trans_brelse(args->trans, bp); 800 800 return error; 801 801 } ··· 850 850 return error; 851 851 852 852 error = xfs_attr3_leaf_lookup_int(bp, args); 853 - if (error != EEXIST) { 853 + if (error != -EEXIST) { 854 854 xfs_trans_brelse(args->trans, bp); 855 855 return error; 856 856 } ··· 906 906 goto out; 907 907 blk = &state->path.blk[ state->path.active-1 ]; 908 908 ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); 909 - if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) { 909 + if ((args->flags & ATTR_REPLACE) && (retval == -ENOATTR)) { 910 910 goto out; 911 - } else if (retval == EEXIST) { 911 + } else if (retval == -EEXIST) { 912 912 if (args->flags & ATTR_CREATE) 913 913 goto out; 914 914 ··· 933 933 } 934 934 935 935 retval = xfs_attr3_leaf_add(blk->bp, state->args); 936 - 
if (retval == ENOSPC) { 936 + if (retval == -ENOSPC) { 937 937 if (state->path.active == 1) { 938 938 /* 939 939 * Its really a single leaf node, but it had ··· 1031 1031 if (args->rmtblkno > 0) { 1032 1032 error = xfs_attr_rmtval_set(args); 1033 1033 if (error) 1034 - return(error); 1034 + return error; 1035 1035 } 1036 1036 1037 1037 /* ··· 1061 1061 if (args->rmtblkno) { 1062 1062 error = xfs_attr_rmtval_remove(args); 1063 1063 if (error) 1064 - return(error); 1064 + return error; 1065 1065 } 1066 1066 1067 1067 /* ··· 1134 1134 if (state) 1135 1135 xfs_da_state_free(state); 1136 1136 if (error) 1137 - return(error); 1138 - return(retval); 1137 + return error; 1138 + return retval; 1139 1139 } 1140 1140 1141 1141 /* ··· 1168 1168 * Search to see if name exists, and get back a pointer to it. 1169 1169 */ 1170 1170 error = xfs_da3_node_lookup_int(state, &retval); 1171 - if (error || (retval != EEXIST)) { 1171 + if (error || (retval != -EEXIST)) { 1172 1172 if (error == 0) 1173 1173 error = retval; 1174 1174 goto out; ··· 1297 1297 1298 1298 out: 1299 1299 xfs_da_state_free(state); 1300 - return(error); 1300 + return error; 1301 1301 } 1302 1302 1303 1303 /* ··· 1345 1345 } 1346 1346 } 1347 1347 1348 - return(0); 1348 + return 0; 1349 1349 } 1350 1350 1351 1351 /* ··· 1376 1376 blk->blkno, blk->disk_blkno, 1377 1377 &blk->bp, XFS_ATTR_FORK); 1378 1378 if (error) 1379 - return(error); 1379 + return error; 1380 1380 } else { 1381 1381 blk->bp = NULL; 1382 1382 } ··· 1395 1395 blk->blkno, blk->disk_blkno, 1396 1396 &blk->bp, XFS_ATTR_FORK); 1397 1397 if (error) 1398 - return(error); 1398 + return error; 1399 1399 } else { 1400 1400 blk->bp = NULL; 1401 1401 } 1402 1402 } 1403 1403 1404 - return(0); 1404 + return 0; 1405 1405 } 1406 1406 1407 1407 /* ··· 1431 1431 error = xfs_da3_node_lookup_int(state, &retval); 1432 1432 if (error) { 1433 1433 retval = error; 1434 - } else if (retval == EEXIST) { 1434 + } else if (retval == -EEXIST) { 1435 1435 blk = &state->path.blk[ 
state->path.active-1 ]; 1436 1436 ASSERT(blk->bp != NULL); 1437 1437 ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); ··· 1455 1455 } 1456 1456 1457 1457 xfs_da_state_free(state); 1458 - return(retval); 1458 + return retval; 1459 1459 }
+11 -11
fs/xfs/xfs_attr_inactive.c
··· 76 76 error = xfs_bmapi_read(dp, (xfs_fileoff_t)tblkno, tblkcnt, 77 77 &map, &nmap, XFS_BMAPI_ATTRFORK); 78 78 if (error) { 79 - return(error); 79 + return error; 80 80 } 81 81 ASSERT(nmap == 1); 82 82 ASSERT(map.br_startblock != DELAYSTARTBLOCK); ··· 95 95 dp->i_mount->m_ddev_targp, 96 96 dblkno, dblkcnt, 0); 97 97 if (!bp) 98 - return ENOMEM; 98 + return -ENOMEM; 99 99 xfs_trans_binval(*trans, bp); 100 100 /* 101 101 * Roll to next transaction. 102 102 */ 103 103 error = xfs_trans_roll(trans, dp); 104 104 if (error) 105 - return (error); 105 + return error; 106 106 } 107 107 108 108 tblkno += map.br_blockcount; 109 109 tblkcnt -= map.br_blockcount; 110 110 } 111 111 112 - return(0); 112 + return 0; 113 113 } 114 114 115 115 /* ··· 227 227 */ 228 228 if (level > XFS_DA_NODE_MAXDEPTH) { 229 229 xfs_trans_brelse(*trans, bp); /* no locks for later trans */ 230 - return XFS_ERROR(EIO); 230 + return -EIO; 231 231 } 232 232 233 233 node = bp->b_addr; ··· 256 256 error = xfs_da3_node_read(*trans, dp, child_fsb, -2, &child_bp, 257 257 XFS_ATTR_FORK); 258 258 if (error) 259 - return(error); 259 + return error; 260 260 if (child_bp) { 261 261 /* save for re-read later */ 262 262 child_blkno = XFS_BUF_ADDR(child_bp); ··· 277 277 child_bp); 278 278 break; 279 279 default: 280 - error = XFS_ERROR(EIO); 280 + error = -EIO; 281 281 xfs_trans_brelse(*trans, child_bp); 282 282 break; 283 283 } ··· 360 360 error = xfs_attr3_leaf_inactive(trans, dp, bp); 361 361 break; 362 362 default: 363 - error = XFS_ERROR(EIO); 363 + error = -EIO; 364 364 xfs_trans_brelse(*trans, bp); 365 365 break; 366 366 } ··· 414 414 error = xfs_trans_reserve(trans, &M_RES(mp)->tr_attrinval, 0, 0); 415 415 if (error) { 416 416 xfs_trans_cancel(trans, 0); 417 - return(error); 417 + return error; 418 418 } 419 419 xfs_ilock(dp, XFS_ILOCK_EXCL); 420 420 ··· 443 443 error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES); 444 444 xfs_iunlock(dp, XFS_ILOCK_EXCL); 445 445 446 - return(error); 446 + return 
error; 447 447 448 448 out: 449 449 xfs_trans_cancel(trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT); 450 450 xfs_iunlock(dp, XFS_ILOCK_EXCL); 451 - return(error); 451 + return error; 452 452 }
+39 -39
fs/xfs/xfs_attr_leaf.c fs/xfs/libxfs/xfs_attr_leaf.c
··· 214 214 struct xfs_attr3_leaf_hdr *hdr3 = bp->b_addr; 215 215 216 216 if (!xfs_attr3_leaf_verify(bp)) { 217 - xfs_buf_ioerror(bp, EFSCORRUPTED); 217 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 218 218 xfs_verifier_error(bp); 219 219 return; 220 220 } ··· 242 242 243 243 if (xfs_sb_version_hascrc(&mp->m_sb) && 244 244 !xfs_buf_verify_cksum(bp, XFS_ATTR3_LEAF_CRC_OFF)) 245 - xfs_buf_ioerror(bp, EFSBADCRC); 245 + xfs_buf_ioerror(bp, -EFSBADCRC); 246 246 else if (!xfs_attr3_leaf_verify(bp)) 247 - xfs_buf_ioerror(bp, EFSCORRUPTED); 247 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 248 248 249 249 if (bp->b_error) 250 250 xfs_verifier_error(bp); ··· 547 547 break; 548 548 } 549 549 if (i == end) 550 - return(XFS_ERROR(ENOATTR)); 550 + return -ENOATTR; 551 551 552 552 /* 553 553 * Fix up the attribute fork data, covering the hole ··· 582 582 583 583 xfs_sbversion_add_attr2(mp, args->trans); 584 584 585 - return(0); 585 + return 0; 586 586 } 587 587 588 588 /* ··· 611 611 continue; 612 612 if (!xfs_attr_namesp_match(args->flags, sfe->flags)) 613 613 continue; 614 - return(XFS_ERROR(EEXIST)); 614 + return -EEXIST; 615 615 } 616 - return(XFS_ERROR(ENOATTR)); 616 + return -ENOATTR; 617 617 } 618 618 619 619 /* ··· 640 640 continue; 641 641 if (args->flags & ATTR_KERNOVAL) { 642 642 args->valuelen = sfe->valuelen; 643 - return(XFS_ERROR(EEXIST)); 643 + return -EEXIST; 644 644 } 645 645 if (args->valuelen < sfe->valuelen) { 646 646 args->valuelen = sfe->valuelen; 647 - return(XFS_ERROR(ERANGE)); 647 + return -ERANGE; 648 648 } 649 649 args->valuelen = sfe->valuelen; 650 650 memcpy(args->value, &sfe->nameval[args->namelen], 651 651 args->valuelen); 652 - return(XFS_ERROR(EEXIST)); 652 + return -EEXIST; 653 653 } 654 - return(XFS_ERROR(ENOATTR)); 654 + return -ENOATTR; 655 655 } 656 656 657 657 /* ··· 691 691 * If we hit an IO error middle of the transaction inside 692 692 * grow_inode(), we may have inconsistent data. Bail out. 
693 693 */ 694 - if (error == EIO) 694 + if (error == -EIO) 695 695 goto out; 696 696 xfs_idata_realloc(dp, size, XFS_ATTR_FORK); /* try to put */ 697 697 memcpy(ifp->if_u1.if_data, tmpbuffer, size); /* it back */ ··· 730 730 sfe->namelen); 731 731 nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(sfe->flags); 732 732 error = xfs_attr3_leaf_lookup_int(bp, &nargs); /* set a->index */ 733 - ASSERT(error == ENOATTR); 733 + ASSERT(error == -ENOATTR); 734 734 error = xfs_attr3_leaf_add(bp, &nargs); 735 - ASSERT(error != ENOSPC); 735 + ASSERT(error != -ENOSPC); 736 736 if (error) 737 737 goto out; 738 738 sfe = XFS_ATTR_SF_NEXTENTRY(sfe); ··· 741 741 742 742 out: 743 743 kmem_free(tmpbuffer); 744 - return(error); 744 + return error; 745 745 } 746 746 747 747 /* ··· 769 769 if (entry->flags & XFS_ATTR_INCOMPLETE) 770 770 continue; /* don't copy partial entries */ 771 771 if (!(entry->flags & XFS_ATTR_LOCAL)) 772 - return(0); 772 + return 0; 773 773 name_loc = xfs_attr3_leaf_name_local(leaf, i); 774 774 if (name_loc->namelen >= XFS_ATTR_SF_ENTSIZE_MAX) 775 - return(0); 775 + return 0; 776 776 if (be16_to_cpu(name_loc->valuelen) >= XFS_ATTR_SF_ENTSIZE_MAX) 777 - return(0); 777 + return 0; 778 778 bytes += sizeof(struct xfs_attr_sf_entry) - 1 779 779 + name_loc->namelen 780 780 + be16_to_cpu(name_loc->valuelen); ··· 809 809 810 810 tmpbuffer = kmem_alloc(args->geo->blksize, KM_SLEEP); 811 811 if (!tmpbuffer) 812 - return ENOMEM; 812 + return -ENOMEM; 813 813 814 814 memcpy(tmpbuffer, bp->b_addr, args->geo->blksize); 815 815 ··· 1017 1017 ASSERT(oldblk->magic == XFS_ATTR_LEAF_MAGIC); 1018 1018 error = xfs_da_grow_inode(state->args, &blkno); 1019 1019 if (error) 1020 - return(error); 1020 + return error; 1021 1021 error = xfs_attr3_leaf_create(state->args, blkno, &newblk->bp); 1022 1022 if (error) 1023 - return(error); 1023 + return error; 1024 1024 newblk->blkno = blkno; 1025 1025 newblk->magic = XFS_ATTR_LEAF_MAGIC; 1026 1026 ··· 1031 1031 xfs_attr3_leaf_rebalance(state, oldblk, 
newblk); 1032 1032 error = xfs_da3_blk_link(state, oldblk, newblk); 1033 1033 if (error) 1034 - return(error); 1034 + return error; 1035 1035 1036 1036 /* 1037 1037 * Save info on "old" attribute for "atomic rename" ops, leaf_add() ··· 1053 1053 */ 1054 1054 oldblk->hashval = xfs_attr_leaf_lasthash(oldblk->bp, NULL); 1055 1055 newblk->hashval = xfs_attr_leaf_lasthash(newblk->bp, NULL); 1056 - return(error); 1056 + return error; 1057 1057 } 1058 1058 1059 1059 /* ··· 1108 1108 * no good and we should just give up. 1109 1109 */ 1110 1110 if (!ichdr.holes && sum < entsize) 1111 - return XFS_ERROR(ENOSPC); 1111 + return -ENOSPC; 1112 1112 1113 1113 /* 1114 1114 * Compact the entries to coalesce free space. ··· 1121 1121 * free region, in freemap[0]. If it is not big enough, give up. 1122 1122 */ 1123 1123 if (ichdr.freemap[0].size < (entsize + sizeof(xfs_attr_leaf_entry_t))) { 1124 - tmp = ENOSPC; 1124 + tmp = -ENOSPC; 1125 1125 goto out_log_hdr; 1126 1126 } 1127 1127 ··· 1692 1692 ichdr.usedbytes; 1693 1693 if (bytes > (state->args->geo->blksize >> 1)) { 1694 1694 *action = 0; /* blk over 50%, don't try to join */ 1695 - return(0); 1695 + return 0; 1696 1696 } 1697 1697 1698 1698 /* ··· 1711 1711 error = xfs_da3_path_shift(state, &state->altpath, forward, 1712 1712 0, &retval); 1713 1713 if (error) 1714 - return(error); 1714 + return error; 1715 1715 if (retval) { 1716 1716 *action = 0; 1717 1717 } else { ··· 1740 1740 error = xfs_attr3_leaf_read(state->args->trans, state->args->dp, 1741 1741 blkno, -1, &bp); 1742 1742 if (error) 1743 - return(error); 1743 + return error; 1744 1744 1745 1745 xfs_attr3_leaf_hdr_from_disk(&ichdr2, bp->b_addr); 1746 1746 ··· 1757 1757 } 1758 1758 if (i >= 2) { 1759 1759 *action = 0; 1760 - return(0); 1760 + return 0; 1761 1761 } 1762 1762 1763 1763 /* ··· 1773 1773 0, &retval); 1774 1774 } 1775 1775 if (error) 1776 - return(error); 1776 + return error; 1777 1777 if (retval) { 1778 1778 *action = 0; 1779 1779 } else { 1780 1780 *action = 
1; 1781 1781 } 1782 - return(0); 1782 + return 0; 1783 1783 } 1784 1784 1785 1785 /* ··· 2123 2123 } 2124 2124 if (probe == ichdr.count || be32_to_cpu(entry->hashval) != hashval) { 2125 2125 args->index = probe; 2126 - return XFS_ERROR(ENOATTR); 2126 + return -ENOATTR; 2127 2127 } 2128 2128 2129 2129 /* ··· 2152 2152 if (!xfs_attr_namesp_match(args->flags, entry->flags)) 2153 2153 continue; 2154 2154 args->index = probe; 2155 - return XFS_ERROR(EEXIST); 2155 + return -EEXIST; 2156 2156 } else { 2157 2157 name_rmt = xfs_attr3_leaf_name_remote(leaf, probe); 2158 2158 if (name_rmt->namelen != args->namelen) ··· 2168 2168 args->rmtblkcnt = xfs_attr3_rmt_blocks( 2169 2169 args->dp->i_mount, 2170 2170 args->rmtvaluelen); 2171 - return XFS_ERROR(EEXIST); 2171 + return -EEXIST; 2172 2172 } 2173 2173 } 2174 2174 args->index = probe; 2175 - return XFS_ERROR(ENOATTR); 2175 + return -ENOATTR; 2176 2176 } 2177 2177 2178 2178 /* ··· 2208 2208 } 2209 2209 if (args->valuelen < valuelen) { 2210 2210 args->valuelen = valuelen; 2211 - return XFS_ERROR(ERANGE); 2211 + return -ERANGE; 2212 2212 } 2213 2213 args->valuelen = valuelen; 2214 2214 memcpy(args->value, &name_loc->nameval[args->namelen], valuelen); ··· 2226 2226 } 2227 2227 if (args->valuelen < args->rmtvaluelen) { 2228 2228 args->valuelen = args->rmtvaluelen; 2229 - return XFS_ERROR(ERANGE); 2229 + return -ERANGE; 2230 2230 } 2231 2231 args->valuelen = args->rmtvaluelen; 2232 2232 } ··· 2481 2481 */ 2482 2482 error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp); 2483 2483 if (error) 2484 - return(error); 2484 + return error; 2485 2485 2486 2486 leaf = bp->b_addr; 2487 2487 entry = &xfs_attr3_leaf_entryp(leaf)[args->index]; ··· 2548 2548 */ 2549 2549 error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp); 2550 2550 if (error) 2551 - return(error); 2551 + return error; 2552 2552 2553 2553 leaf = bp->b_addr; 2554 2554 #ifdef DEBUG
fs/xfs/xfs_attr_leaf.h fs/xfs/libxfs/xfs_attr_leaf.h
+19 -19
fs/xfs/xfs_attr_list.c
··· 50 50 sa = (xfs_attr_sf_sort_t *)a; 51 51 sb = (xfs_attr_sf_sort_t *)b; 52 52 if (sa->hash < sb->hash) { 53 - return(-1); 53 + return -1; 54 54 } else if (sa->hash > sb->hash) { 55 - return(1); 55 + return 1; 56 56 } else { 57 - return(sa->entno - sb->entno); 57 + return sa->entno - sb->entno; 58 58 } 59 59 } 60 60 ··· 86 86 sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data; 87 87 ASSERT(sf != NULL); 88 88 if (!sf->hdr.count) 89 - return(0); 89 + return 0; 90 90 cursor = context->cursor; 91 91 ASSERT(cursor != NULL); 92 92 ··· 124 124 sfe = XFS_ATTR_SF_NEXTENTRY(sfe); 125 125 } 126 126 trace_xfs_attr_list_sf_all(context); 127 - return(0); 127 + return 0; 128 128 } 129 129 130 130 /* do no more for a search callback */ ··· 150 150 XFS_ERRLEVEL_LOW, 151 151 context->dp->i_mount, sfe); 152 152 kmem_free(sbuf); 153 - return XFS_ERROR(EFSCORRUPTED); 153 + return -EFSCORRUPTED; 154 154 } 155 155 156 156 sbp->entno = i; ··· 188 188 } 189 189 if (i == nsbuf) { 190 190 kmem_free(sbuf); 191 - return(0); 191 + return 0; 192 192 } 193 193 194 194 /* ··· 213 213 } 214 214 215 215 kmem_free(sbuf); 216 - return(0); 216 + return 0; 217 217 } 218 218 219 219 STATIC int ··· 243 243 if (cursor->blkno > 0) { 244 244 error = xfs_da3_node_read(NULL, dp, cursor->blkno, -1, 245 245 &bp, XFS_ATTR_FORK); 246 - if ((error != 0) && (error != EFSCORRUPTED)) 247 - return(error); 246 + if ((error != 0) && (error != -EFSCORRUPTED)) 247 + return error; 248 248 if (bp) { 249 249 struct xfs_attr_leaf_entry *entries; 250 250 ··· 295 295 cursor->blkno, -1, &bp, 296 296 XFS_ATTR_FORK); 297 297 if (error) 298 - return(error); 298 + return error; 299 299 node = bp->b_addr; 300 300 magic = be16_to_cpu(node->hdr.info.magic); 301 301 if (magic == XFS_ATTR_LEAF_MAGIC || ··· 308 308 context->dp->i_mount, 309 309 node); 310 310 xfs_trans_brelse(NULL, bp); 311 - return XFS_ERROR(EFSCORRUPTED); 311 + return -EFSCORRUPTED; 312 312 } 313 313 314 314 dp->d_ops->node_hdr_from_disk(&nodehdr, node); ··· 496 
496 context->cursor->blkno = 0; 497 497 error = xfs_attr3_leaf_read(NULL, context->dp, 0, -1, &bp); 498 498 if (error) 499 - return XFS_ERROR(error); 499 + return error; 500 500 501 501 error = xfs_attr3_leaf_list_int(bp, context); 502 502 xfs_trans_brelse(NULL, bp); 503 - return XFS_ERROR(error); 503 + return error; 504 504 } 505 505 506 506 int ··· 514 514 XFS_STATS_INC(xs_attr_list); 515 515 516 516 if (XFS_FORCED_SHUTDOWN(dp->i_mount)) 517 - return EIO; 517 + return -EIO; 518 518 519 519 /* 520 520 * Decide on what work routines to call based on the inode size. ··· 616 616 * Validate the cursor. 617 617 */ 618 618 if (cursor->pad1 || cursor->pad2) 619 - return(XFS_ERROR(EINVAL)); 619 + return -EINVAL; 620 620 if ((cursor->initted == 0) && 621 621 (cursor->hashval || cursor->blkno || cursor->offset)) 622 - return XFS_ERROR(EINVAL); 622 + return -EINVAL; 623 623 624 624 /* 625 625 * Check for a properly aligned buffer. 626 626 */ 627 627 if (((long)buffer) & (sizeof(int)-1)) 628 - return XFS_ERROR(EFAULT); 628 + return -EFAULT; 629 629 if (flags & ATTR_KERNOVAL) 630 630 bufsize = 0; 631 631 ··· 648 648 alist->al_offset[0] = context.bufsize; 649 649 650 650 error = xfs_attr_list_int(&context); 651 - ASSERT(error >= 0); 651 + ASSERT(error <= 0); 652 652 return error; 653 653 }
+11 -11
fs/xfs/xfs_attr_remote.c fs/xfs/libxfs/xfs_attr_remote.c
··· 138 138 139 139 while (len > 0) { 140 140 if (!xfs_verify_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF)) { 141 - xfs_buf_ioerror(bp, EFSBADCRC); 141 + xfs_buf_ioerror(bp, -EFSBADCRC); 142 142 break; 143 143 } 144 144 if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) { 145 - xfs_buf_ioerror(bp, EFSCORRUPTED); 145 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 146 146 break; 147 147 } 148 148 len -= blksize; ··· 178 178 179 179 while (len > 0) { 180 180 if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) { 181 - xfs_buf_ioerror(bp, EFSCORRUPTED); 181 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 182 182 xfs_verifier_error(bp); 183 183 return; 184 184 } ··· 257 257 xfs_alert(mp, 258 258 "remote attribute header mismatch bno/off/len/owner (0x%llx/0x%x/Ox%x/0x%llx)", 259 259 bno, *offset, byte_cnt, ino); 260 - return EFSCORRUPTED; 260 + return -EFSCORRUPTED; 261 261 } 262 262 hdr_size = sizeof(struct xfs_attr3_rmt_hdr); 263 263 } ··· 452 452 ASSERT(committed); 453 453 args->trans = NULL; 454 454 xfs_bmap_cancel(args->flist); 455 - return(error); 455 + return error; 456 456 } 457 457 458 458 /* ··· 473 473 */ 474 474 error = xfs_trans_roll(&args->trans, dp); 475 475 if (error) 476 - return (error); 476 + return error; 477 477 } 478 478 479 479 /* ··· 498 498 blkcnt, &map, &nmap, 499 499 XFS_BMAPI_ATTRFORK); 500 500 if (error) 501 - return(error); 501 + return error; 502 502 ASSERT(nmap == 1); 503 503 ASSERT((map.br_startblock != DELAYSTARTBLOCK) && 504 504 (map.br_startblock != HOLESTARTBLOCK)); ··· 508 508 509 509 bp = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt, 0); 510 510 if (!bp) 511 - return ENOMEM; 511 + return -ENOMEM; 512 512 bp->b_ops = &xfs_attr3_rmt_buf_ops; 513 513 514 514 xfs_attr_rmtval_copyin(mp, bp, args->dp->i_ino, &offset, ··· 563 563 error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno, 564 564 blkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK); 565 565 if (error) 566 - return(error); 566 + return error; 567 567 ASSERT(nmap == 1); 568 568 ASSERT((map.br_startblock != 
DELAYSTARTBLOCK) && 569 569 (map.br_startblock != HOLESTARTBLOCK)); ··· 622 622 */ 623 623 error = xfs_trans_roll(&args->trans, args->dp); 624 624 if (error) 625 - return (error); 625 + return error; 626 626 } 627 - return(0); 627 + return 0; 628 628 }
fs/xfs/xfs_attr_remote.h fs/xfs/libxfs/xfs_attr_remote.h
fs/xfs/xfs_attr_sf.h fs/xfs/libxfs/xfs_attr_sf.h
fs/xfs/xfs_bit.h fs/xfs/libxfs/xfs_bit.h
+28 -32
fs/xfs/xfs_bmap.c fs/xfs/libxfs/xfs_bmap.c
··· 392 392 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); 393 393 bno = be64_to_cpu(*pp); 394 394 395 - ASSERT(bno != NULLDFSBNO); 395 + ASSERT(bno != NULLFSBLOCK); 396 396 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount); 397 397 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks); 398 398 ··· 1033 1033 goto error0; 1034 1034 if (stat == 0) { 1035 1035 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 1036 - return XFS_ERROR(ENOSPC); 1036 + return -ENOSPC; 1037 1037 } 1038 1038 *firstblock = cur->bc_private.b.firstblock; 1039 1039 cur->bc_private.b.allocated = 0; ··· 1115 1115 1116 1116 /* should only be called for types that support local format data */ 1117 1117 ASSERT(0); 1118 - return EFSCORRUPTED; 1118 + return -EFSCORRUPTED; 1119 1119 } 1120 1120 1121 1121 /* ··· 1192 1192 break; 1193 1193 default: 1194 1194 ASSERT(0); 1195 - error = XFS_ERROR(EINVAL); 1195 + error = -EINVAL; 1196 1196 goto trans_cancel; 1197 1197 } 1198 1198 ··· 1299 1299 ASSERT(level > 0); 1300 1300 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); 1301 1301 bno = be64_to_cpu(*pp); 1302 - ASSERT(bno != NULLDFSBNO); 1302 + ASSERT(bno != NULLFSBLOCK); 1303 1303 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount); 1304 1304 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks); 1305 1305 /* ··· 1399 1399 return 0; 1400 1400 error0: 1401 1401 xfs_trans_brelse(tp, bp); 1402 - return XFS_ERROR(EFSCORRUPTED); 1402 + return -EFSCORRUPTED; 1403 1403 } 1404 1404 1405 1405 ··· 1429 1429 gotp->br_startoff = 0xffa5a5a5a5a5a5a5LL; 1430 1430 gotp->br_blockcount = 0xa55a5a5a5a5a5a5aLL; 1431 1431 gotp->br_state = XFS_EXT_INVALID; 1432 - #if XFS_BIG_BLKNOS 1433 1432 gotp->br_startblock = 0xffffa5a5a5a5a5a5LL; 1434 - #else 1435 - gotp->br_startblock = 0xffffa5a5; 1436 - #endif 1437 1433 prevp->br_startoff = NULLFILEOFF; 1438 1434 1439 1435 ep = xfs_iext_bno_to_ext(ifp, bno, &lastx); ··· 1572 1576 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE && 
1573 1577 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS && 1574 1578 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL) 1575 - return XFS_ERROR(EIO); 1579 + return -EIO; 1576 1580 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) { 1577 1581 *last_block = 0; 1578 1582 return 0; ··· 1686 1690 1687 1691 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE && 1688 1692 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS) 1689 - return XFS_ERROR(EIO); 1693 + return -EIO; 1690 1694 1691 1695 error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty); 1692 1696 if (error || is_empty) ··· 3319 3323 if (orig_off < align_off || 3320 3324 orig_end > align_off + align_alen || 3321 3325 align_alen - temp < orig_alen) 3322 - return XFS_ERROR(EINVAL); 3326 + return -EINVAL; 3323 3327 /* 3324 3328 * Try to fix it by moving the start up. 3325 3329 */ ··· 3344 3348 * Result doesn't cover the request, fail it. 3345 3349 */ 3346 3350 if (orig_off < align_off || orig_end > align_off + align_alen) 3347 - return XFS_ERROR(EINVAL); 3351 + return -EINVAL; 3348 3352 } else { 3349 3353 ASSERT(orig_off >= align_off); 3350 3354 ASSERT(orig_end <= align_off + align_alen); ··· 4047 4051 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 4048 4052 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { 4049 4053 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp); 4050 - return XFS_ERROR(EFSCORRUPTED); 4054 + return -EFSCORRUPTED; 4051 4055 } 4052 4056 4053 4057 if (XFS_FORCED_SHUTDOWN(mp)) 4054 - return XFS_ERROR(EIO); 4058 + return -EIO; 4055 4059 4056 4060 XFS_STATS_INC(xs_blk_mapr); 4057 4061 ··· 4242 4246 XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE), 4243 4247 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { 4244 4248 XFS_ERROR_REPORT("xfs_bmapi_delay", XFS_ERRLEVEL_LOW, mp); 4245 - return XFS_ERROR(EFSCORRUPTED); 4249 + return -EFSCORRUPTED; 4246 4250 } 4247 4251 4248 4252 if (XFS_FORCED_SHUTDOWN(mp)) 4249 
- return XFS_ERROR(EIO); 4253 + return -EIO; 4250 4254 4251 4255 XFS_STATS_INC(xs_blk_mapw); 4252 4256 ··· 4465 4469 * so generate another request. 4466 4470 */ 4467 4471 if (mval->br_blockcount < len) 4468 - return EAGAIN; 4472 + return -EAGAIN; 4469 4473 return 0; 4470 4474 } 4471 4475 ··· 4536 4540 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE), 4537 4541 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { 4538 4542 XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp); 4539 - return XFS_ERROR(EFSCORRUPTED); 4543 + return -EFSCORRUPTED; 4540 4544 } 4541 4545 4542 4546 if (XFS_FORCED_SHUTDOWN(mp)) 4543 - return XFS_ERROR(EIO); 4547 + return -EIO; 4544 4548 4545 4549 ifp = XFS_IFORK_PTR(ip, whichfork); 4546 4550 ··· 4616 4620 4617 4621 /* Execute unwritten extent conversion if necessary */ 4618 4622 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags); 4619 - if (error == EAGAIN) 4623 + if (error == -EAGAIN) 4620 4624 continue; 4621 4625 if (error) 4622 4626 goto error0; ··· 4918 4922 goto done; 4919 4923 cur->bc_rec.b = new; 4920 4924 error = xfs_btree_insert(cur, &i); 4921 - if (error && error != ENOSPC) 4925 + if (error && error != -ENOSPC) 4922 4926 goto done; 4923 4927 /* 4924 4928 * If get no-space back from btree insert, ··· 4926 4930 * block reservation. 4927 4931 * Fix up our state and return the error. 4928 4932 */ 4929 - if (error == ENOSPC) { 4933 + if (error == -ENOSPC) { 4930 4934 /* 4931 4935 * Reset the cursor, don't trust 4932 4936 * it after any insert operation. 
··· 4954 4958 xfs_bmbt_set_blockcount(ep, 4955 4959 got.br_blockcount); 4956 4960 flags = 0; 4957 - error = XFS_ERROR(ENOSPC); 4961 + error = -ENOSPC; 4958 4962 goto done; 4959 4963 } 4960 4964 XFS_WANT_CORRUPTED_GOTO(i == 1, done); ··· 5072 5076 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { 5073 5077 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW, 5074 5078 ip->i_mount); 5075 - return XFS_ERROR(EFSCORRUPTED); 5079 + return -EFSCORRUPTED; 5076 5080 } 5077 5081 mp = ip->i_mount; 5078 5082 if (XFS_FORCED_SHUTDOWN(mp)) 5079 - return XFS_ERROR(EIO); 5083 + return -EIO; 5080 5084 5081 5085 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 5082 5086 ASSERT(len > 0); ··· 5321 5325 del.br_startoff > got.br_startoff && 5322 5326 del.br_startoff + del.br_blockcount < 5323 5327 got.br_startoff + got.br_blockcount) { 5324 - error = XFS_ERROR(ENOSPC); 5328 + error = -ENOSPC; 5325 5329 goto error0; 5326 5330 } 5327 5331 error = xfs_bmap_del_extent(ip, tp, &lastx, flist, cur, &del, ··· 5445 5449 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) { 5446 5450 XFS_ERROR_REPORT("xfs_bmap_shift_extents", 5447 5451 XFS_ERRLEVEL_LOW, mp); 5448 - return XFS_ERROR(EFSCORRUPTED); 5452 + return -EFSCORRUPTED; 5449 5453 } 5450 5454 5451 5455 if (XFS_FORCED_SHUTDOWN(mp)) 5452 - return XFS_ERROR(EIO); 5456 + return -EIO; 5453 5457 5454 5458 ASSERT(current_ext != NULL); 5455 5459 ··· 5512 5516 *current_ext - 1), &left); 5513 5517 5514 5518 if (startoff < left.br_startoff + left.br_blockcount) 5515 - error = XFS_ERROR(EINVAL); 5519 + error = -EINVAL; 5516 5520 } else if (offset_shift_fsb > got.br_startoff) { 5517 5521 /* 5518 5522 * When first extent is shifted, offset_shift_fsb 5519 5523 * should be less than the stating offset of 5520 5524 * the first extent. 5521 5525 */ 5522 - error = XFS_ERROR(EINVAL); 5526 + error = -EINVAL; 5523 5527 } 5524 5528 5525 5529 if (error)
fs/xfs/xfs_bmap.h fs/xfs/libxfs/xfs_bmap.h
+9 -90
fs/xfs/xfs_bmap_btree.c fs/xfs/libxfs/xfs_bmap_btree.c
··· 111 111 ext_flag = (int)(l0 >> (64 - BMBT_EXNTFLAG_BITLEN)); 112 112 s->br_startoff = ((xfs_fileoff_t)l0 & 113 113 xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9; 114 - #if XFS_BIG_BLKNOS 115 114 s->br_startblock = (((xfs_fsblock_t)l0 & xfs_mask64lo(9)) << 43) | 116 115 (((xfs_fsblock_t)l1) >> 21); 117 - #else 118 - #ifdef DEBUG 119 - { 120 - xfs_dfsbno_t b; 121 - 122 - b = (((xfs_dfsbno_t)l0 & xfs_mask64lo(9)) << 43) | 123 - (((xfs_dfsbno_t)l1) >> 21); 124 - ASSERT((b >> 32) == 0 || isnulldstartblock(b)); 125 - s->br_startblock = (xfs_fsblock_t)b; 126 - } 127 - #else /* !DEBUG */ 128 - s->br_startblock = (xfs_fsblock_t)(((xfs_dfsbno_t)l1) >> 21); 129 - #endif /* DEBUG */ 130 - #endif /* XFS_BIG_BLKNOS */ 131 116 s->br_blockcount = (xfs_filblks_t)(l1 & xfs_mask64lo(21)); 132 117 /* This is xfs_extent_state() in-line */ 133 118 if (ext_flag) { ··· 148 163 xfs_bmbt_get_startblock( 149 164 xfs_bmbt_rec_host_t *r) 150 165 { 151 - #if XFS_BIG_BLKNOS 152 166 return (((xfs_fsblock_t)r->l0 & xfs_mask64lo(9)) << 43) | 153 167 (((xfs_fsblock_t)r->l1) >> 21); 154 - #else 155 - #ifdef DEBUG 156 - xfs_dfsbno_t b; 157 - 158 - b = (((xfs_dfsbno_t)r->l0 & xfs_mask64lo(9)) << 43) | 159 - (((xfs_dfsbno_t)r->l1) >> 21); 160 - ASSERT((b >> 32) == 0 || isnulldstartblock(b)); 161 - return (xfs_fsblock_t)b; 162 - #else /* !DEBUG */ 163 - return (xfs_fsblock_t)(((xfs_dfsbno_t)r->l1) >> 21); 164 - #endif /* DEBUG */ 165 - #endif /* XFS_BIG_BLKNOS */ 166 168 } 167 169 168 170 /* ··· 213 241 ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0); 214 242 ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0); 215 243 216 - #if XFS_BIG_BLKNOS 217 244 ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0); 218 245 219 246 r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | ··· 221 250 r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) | 222 251 ((xfs_bmbt_rec_base_t)blockcount & 223 252 (xfs_bmbt_rec_base_t)xfs_mask64lo(21)); 224 - #else /* !XFS_BIG_BLKNOS */ 
225 - if (isnullstartblock(startblock)) { 226 - r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | 227 - ((xfs_bmbt_rec_base_t)startoff << 9) | 228 - (xfs_bmbt_rec_base_t)xfs_mask64lo(9); 229 - r->l1 = xfs_mask64hi(11) | 230 - ((xfs_bmbt_rec_base_t)startblock << 21) | 231 - ((xfs_bmbt_rec_base_t)blockcount & 232 - (xfs_bmbt_rec_base_t)xfs_mask64lo(21)); 233 - } else { 234 - r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) | 235 - ((xfs_bmbt_rec_base_t)startoff << 9); 236 - r->l1 = ((xfs_bmbt_rec_base_t)startblock << 21) | 237 - ((xfs_bmbt_rec_base_t)blockcount & 238 - (xfs_bmbt_rec_base_t)xfs_mask64lo(21)); 239 - } 240 - #endif /* XFS_BIG_BLKNOS */ 241 253 } 242 254 243 255 /* ··· 252 298 ASSERT(state == XFS_EXT_NORM || state == XFS_EXT_UNWRITTEN); 253 299 ASSERT((startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)) == 0); 254 300 ASSERT((blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)) == 0); 255 - 256 - #if XFS_BIG_BLKNOS 257 301 ASSERT((startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)) == 0); 258 302 259 303 r->l0 = cpu_to_be64( ··· 262 310 ((xfs_bmbt_rec_base_t)startblock << 21) | 263 311 ((xfs_bmbt_rec_base_t)blockcount & 264 312 (xfs_bmbt_rec_base_t)xfs_mask64lo(21))); 265 - #else /* !XFS_BIG_BLKNOS */ 266 - if (isnullstartblock(startblock)) { 267 - r->l0 = cpu_to_be64( 268 - ((xfs_bmbt_rec_base_t)extent_flag << 63) | 269 - ((xfs_bmbt_rec_base_t)startoff << 9) | 270 - (xfs_bmbt_rec_base_t)xfs_mask64lo(9)); 271 - r->l1 = cpu_to_be64(xfs_mask64hi(11) | 272 - ((xfs_bmbt_rec_base_t)startblock << 21) | 273 - ((xfs_bmbt_rec_base_t)blockcount & 274 - (xfs_bmbt_rec_base_t)xfs_mask64lo(21))); 275 - } else { 276 - r->l0 = cpu_to_be64( 277 - ((xfs_bmbt_rec_base_t)extent_flag << 63) | 278 - ((xfs_bmbt_rec_base_t)startoff << 9)); 279 - r->l1 = cpu_to_be64( 280 - ((xfs_bmbt_rec_base_t)startblock << 21) | 281 - ((xfs_bmbt_rec_base_t)blockcount & 282 - (xfs_bmbt_rec_base_t)xfs_mask64lo(21))); 283 - } 284 - #endif /* XFS_BIG_BLKNOS */ 285 313 } 286 314 287 315 /* ··· 
297 365 xfs_bmbt_rec_host_t *r, 298 366 xfs_fsblock_t v) 299 367 { 300 - #if XFS_BIG_BLKNOS 301 368 ASSERT((v & xfs_mask64hi(12)) == 0); 302 369 r->l0 = (r->l0 & (xfs_bmbt_rec_base_t)xfs_mask64hi(55)) | 303 370 (xfs_bmbt_rec_base_t)(v >> 43); 304 371 r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)) | 305 372 (xfs_bmbt_rec_base_t)(v << 21); 306 - #else /* !XFS_BIG_BLKNOS */ 307 - if (isnullstartblock(v)) { 308 - r->l0 |= (xfs_bmbt_rec_base_t)xfs_mask64lo(9); 309 - r->l1 = (xfs_bmbt_rec_base_t)xfs_mask64hi(11) | 310 - ((xfs_bmbt_rec_base_t)v << 21) | 311 - (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)); 312 - } else { 313 - r->l0 &= ~(xfs_bmbt_rec_base_t)xfs_mask64lo(9); 314 - r->l1 = ((xfs_bmbt_rec_base_t)v << 21) | 315 - (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)); 316 - } 317 - #endif /* XFS_BIG_BLKNOS */ 318 373 } 319 374 320 375 /* ··· 357 438 cpu_to_be64(XFS_BUF_DADDR_NULL)); 358 439 } else 359 440 ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_MAGIC)); 360 - ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO)); 361 - ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO)); 441 + ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK)); 442 + ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK)); 362 443 ASSERT(rblock->bb_level != 0); 363 444 dblock->bb_level = rblock->bb_level; 364 445 dblock->bb_numrecs = rblock->bb_numrecs; ··· 473 554 args.minlen = args.maxlen = args.prod = 1; 474 555 args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL; 475 556 if (!args.wasdel && xfs_trans_get_block_res(args.tp) == 0) { 476 - error = XFS_ERROR(ENOSPC); 557 + error = -ENOSPC; 477 558 goto error0; 478 559 } 479 560 error = xfs_alloc_vextent(&args); ··· 682 763 683 764 /* sibling pointer verification */ 684 765 if (!block->bb_u.l.bb_leftsib || 685 - (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLDFSBNO) && 766 + (block->bb_u.l.bb_leftsib != cpu_to_be64(NULLFSBLOCK) && 686 767 !XFS_FSB_SANITY_CHECK(mp, 
be64_to_cpu(block->bb_u.l.bb_leftsib)))) 687 768 return false; 688 769 if (!block->bb_u.l.bb_rightsib || 689 - (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLDFSBNO) && 770 + (block->bb_u.l.bb_rightsib != cpu_to_be64(NULLFSBLOCK) && 690 771 !XFS_FSB_SANITY_CHECK(mp, be64_to_cpu(block->bb_u.l.bb_rightsib)))) 691 772 return false; 692 773 ··· 698 779 struct xfs_buf *bp) 699 780 { 700 781 if (!xfs_btree_lblock_verify_crc(bp)) 701 - xfs_buf_ioerror(bp, EFSBADCRC); 782 + xfs_buf_ioerror(bp, -EFSBADCRC); 702 783 else if (!xfs_bmbt_verify(bp)) 703 - xfs_buf_ioerror(bp, EFSCORRUPTED); 784 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 704 785 705 786 if (bp->b_error) { 706 787 trace_xfs_btree_corrupt(bp, _RET_IP_); ··· 714 795 { 715 796 if (!xfs_bmbt_verify(bp)) { 716 797 trace_xfs_btree_corrupt(bp, _RET_IP_); 717 - xfs_buf_ioerror(bp, EFSCORRUPTED); 798 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 718 799 xfs_verifier_error(bp); 719 800 return; 720 801 } ··· 878 959 879 960 cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork); 880 961 if (!cur) 881 - return ENOMEM; 962 + return -ENOMEM; 882 963 883 964 error = xfs_btree_change_owner(cur, new_owner, buffer_list); 884 965 xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
fs/xfs/xfs_bmap_btree.h fs/xfs/libxfs/xfs_bmap_btree.h
+85 -89
fs/xfs/xfs_bmap_util.c
··· 133 133 mp = ntp->t_mountp; 134 134 if (!XFS_FORCED_SHUTDOWN(mp)) 135 135 xfs_force_shutdown(mp, 136 - (error == EFSCORRUPTED) ? 136 + (error == -EFSCORRUPTED) ? 137 137 SHUTDOWN_CORRUPT_INCORE : 138 138 SHUTDOWN_META_IO_ERROR); 139 139 return error; ··· 365 365 xfs_trans_brelse(tp, bp); 366 366 XFS_ERROR_REPORT("xfs_bmap_count_tree(1)", 367 367 XFS_ERRLEVEL_LOW, mp); 368 - return XFS_ERROR(EFSCORRUPTED); 368 + return -EFSCORRUPTED; 369 369 } 370 370 xfs_trans_brelse(tp, bp); 371 371 } else { ··· 425 425 ASSERT(level > 0); 426 426 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); 427 427 bno = be64_to_cpu(*pp); 428 - ASSERT(bno != NULLDFSBNO); 428 + ASSERT(bno != NULLFSBLOCK); 429 429 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount); 430 430 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks); 431 431 432 432 if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) { 433 433 XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW, 434 434 mp); 435 - return XFS_ERROR(EFSCORRUPTED); 435 + return -EFSCORRUPTED; 436 436 } 437 437 438 438 return 0; ··· 524 524 if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS && 525 525 ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE && 526 526 ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) 527 - return XFS_ERROR(EINVAL); 527 + return -EINVAL; 528 528 } else if (unlikely( 529 529 ip->i_d.di_aformat != 0 && 530 530 ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) { 531 531 XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW, 532 532 ip->i_mount); 533 - return XFS_ERROR(EFSCORRUPTED); 533 + return -EFSCORRUPTED; 534 534 } 535 535 536 536 prealloced = 0; ··· 539 539 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS && 540 540 ip->i_d.di_format != XFS_DINODE_FMT_BTREE && 541 541 ip->i_d.di_format != XFS_DINODE_FMT_LOCAL) 542 - return XFS_ERROR(EINVAL); 542 + return -EINVAL; 543 543 544 544 if (xfs_get_extsz_hint(ip) || 545 545 ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){ ··· 559 559 
bmv->bmv_entries = 0; 560 560 return 0; 561 561 } else if (bmv->bmv_length < 0) { 562 - return XFS_ERROR(EINVAL); 562 + return -EINVAL; 563 563 } 564 564 565 565 nex = bmv->bmv_count - 1; 566 566 if (nex <= 0) 567 - return XFS_ERROR(EINVAL); 567 + return -EINVAL; 568 568 bmvend = bmv->bmv_offset + bmv->bmv_length; 569 569 570 570 571 571 if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx)) 572 - return XFS_ERROR(ENOMEM); 572 + return -ENOMEM; 573 573 out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0); 574 574 if (!out) 575 - return XFS_ERROR(ENOMEM); 575 + return -ENOMEM; 576 576 577 577 xfs_ilock(ip, XFS_IOLOCK_SHARED); 578 578 if (whichfork == XFS_DATA_FORK) { 579 579 if (!(iflags & BMV_IF_DELALLOC) && 580 580 (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) { 581 - error = -filemap_write_and_wait(VFS_I(ip)->i_mapping); 581 + error = filemap_write_and_wait(VFS_I(ip)->i_mapping); 582 582 if (error) 583 583 goto out_unlock_iolock; 584 584 ··· 611 611 /* 612 612 * Allocate enough space to handle "subnex" maps at a time. 613 613 */ 614 - error = ENOMEM; 614 + error = -ENOMEM; 615 615 subnex = 16; 616 616 map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS); 617 617 if (!map) ··· 809 809 * have speculative prealloc/delalloc blocks to remove. 
810 810 */ 811 811 if (VFS_I(ip)->i_size == 0 && 812 - VN_CACHED(VFS_I(ip)) == 0 && 812 + VFS_I(ip)->i_mapping->nrpages == 0 && 813 813 ip->i_delayed_blks == 0) 814 814 return false; 815 815 ··· 882 882 if (need_iolock) { 883 883 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { 884 884 xfs_trans_cancel(tp, 0); 885 - return EAGAIN; 885 + return -EAGAIN; 886 886 } 887 887 } 888 888 ··· 955 955 trace_xfs_alloc_file_space(ip); 956 956 957 957 if (XFS_FORCED_SHUTDOWN(mp)) 958 - return XFS_ERROR(EIO); 958 + return -EIO; 959 959 960 960 error = xfs_qm_dqattach(ip, 0); 961 961 if (error) 962 962 return error; 963 963 964 964 if (len <= 0) 965 - return XFS_ERROR(EINVAL); 965 + return -EINVAL; 966 966 967 967 rt = XFS_IS_REALTIME_INODE(ip); 968 968 extsz = xfs_get_extsz_hint(ip); ··· 1028 1028 /* 1029 1029 * Free the transaction structure. 1030 1030 */ 1031 - ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp)); 1031 + ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp)); 1032 1032 xfs_trans_cancel(tp, 0); 1033 1033 break; 1034 1034 } ··· 1065 1065 allocated_fsb = imapp->br_blockcount; 1066 1066 1067 1067 if (nimaps == 0) { 1068 - error = XFS_ERROR(ENOSPC); 1068 + error = -ENOSPC; 1069 1069 break; 1070 1070 } 1071 1071 ··· 1126 1126 mp->m_rtdev_targp : mp->m_ddev_targp, 1127 1127 BTOBB(mp->m_sb.sb_blocksize), 0); 1128 1128 if (!bp) 1129 - return XFS_ERROR(ENOMEM); 1129 + return -ENOMEM; 1130 1130 1131 1131 xfs_buf_unlock(bp); 1132 1132 ··· 1158 1158 XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock)); 1159 1159 1160 1160 if (XFS_FORCED_SHUTDOWN(mp)) { 1161 - error = XFS_ERROR(EIO); 1161 + error = -EIO; 1162 1162 break; 1163 1163 } 1164 1164 xfs_buf_iorequest(bp); ··· 1176 1176 XFS_BUF_WRITE(bp); 1177 1177 1178 1178 if (XFS_FORCED_SHUTDOWN(mp)) { 1179 - error = XFS_ERROR(EIO); 1179 + error = -EIO; 1180 1180 break; 1181 1181 } 1182 1182 xfs_buf_iorequest(bp); ··· 1234 1234 1235 1235 rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE); 1236 1236 ioffset = 
offset & ~(rounding - 1); 1237 - error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping, 1237 + error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, 1238 1238 ioffset, -1); 1239 1239 if (error) 1240 1240 goto out; ··· 1315 1315 /* 1316 1316 * Free the transaction structure. 1317 1317 */ 1318 - ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp)); 1318 + ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp)); 1319 1319 xfs_trans_cancel(tp, 0); 1320 1320 break; 1321 1321 } ··· 1557 1557 /* Should never get a local format */ 1558 1558 if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL || 1559 1559 tip->i_d.di_format == XFS_DINODE_FMT_LOCAL) 1560 - return EINVAL; 1560 + return -EINVAL; 1561 1561 1562 1562 /* 1563 1563 * if the target inode has less extents that then temporary inode then 1564 1564 * why did userspace call us? 1565 1565 */ 1566 1566 if (ip->i_d.di_nextents < tip->i_d.di_nextents) 1567 - return EINVAL; 1567 + return -EINVAL; 1568 1568 1569 1569 /* 1570 1570 * if the target inode is in extent form and the temp inode is in btree ··· 1573 1573 */ 1574 1574 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && 1575 1575 tip->i_d.di_format == XFS_DINODE_FMT_BTREE) 1576 - return EINVAL; 1576 + return -EINVAL; 1577 1577 1578 1578 /* Check temp in extent form to max in target */ 1579 1579 if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && 1580 1580 XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) > 1581 1581 XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)) 1582 - return EINVAL; 1582 + return -EINVAL; 1583 1583 1584 1584 /* Check target in extent form to max in temp */ 1585 1585 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS && 1586 1586 XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) > 1587 1587 XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK)) 1588 - return EINVAL; 1588 + return -EINVAL; 1589 1589 1590 1590 /* 1591 1591 * If we are in a btree format, check that the temp root block will fit ··· 1599 1599 if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) { 1600 1600 if (XFS_IFORK_BOFF(ip) && 1601 1601 
XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip)) 1602 - return EINVAL; 1602 + return -EINVAL; 1603 1603 if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <= 1604 1604 XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)) 1605 - return EINVAL; 1605 + return -EINVAL; 1606 1606 } 1607 1607 1608 1608 /* Reciprocal target->temp btree format checks */ 1609 1609 if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) { 1610 1610 if (XFS_IFORK_BOFF(tip) && 1611 1611 XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip)) 1612 - return EINVAL; 1612 + return -EINVAL; 1613 1613 if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <= 1614 1614 XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK)) 1615 - return EINVAL; 1615 + return -EINVAL; 1616 1616 } 1617 1617 1618 + return 0; 1619 + } 1620 + 1621 + int 1622 + xfs_swap_extent_flush( 1623 + struct xfs_inode *ip) 1624 + { 1625 + int error; 1626 + 1627 + error = filemap_write_and_wait(VFS_I(ip)->i_mapping); 1628 + if (error) 1629 + return error; 1630 + truncate_pagecache_range(VFS_I(ip), 0, -1); 1631 + 1632 + /* Verify O_DIRECT for ftmp */ 1633 + if (VFS_I(ip)->i_mapping->nrpages) 1634 + return -EINVAL; 1635 + 1636 + /* 1637 + * Don't try to swap extents on mmap()d files because we can't lock 1638 + * out races against page faults safely. 1639 + */ 1640 + if (mapping_mapped(VFS_I(ip)->i_mapping)) 1641 + return -EBUSY; 1618 1642 return 0; 1619 1643 } 1620 1644 ··· 1657 1633 int aforkblks = 0; 1658 1634 int taforkblks = 0; 1659 1635 __uint64_t tmp; 1636 + int lock_flags; 1660 1637 1661 1638 tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL); 1662 1639 if (!tempifp) { 1663 - error = XFS_ERROR(ENOMEM); 1640 + error = -ENOMEM; 1664 1641 goto out; 1665 1642 } 1666 1643 1667 1644 /* 1668 - * we have to do two separate lock calls here to keep lockdep 1669 - * happy. If we try to get all the locks in one call, lock will 1670 - * report false positives when we drop the ILOCK and regain them 1671 - * below. 
1645 + * Lock up the inodes against other IO and truncate to begin with. 1646 + * Then we can ensure the inodes are flushed and have no page cache 1647 + * safely. Once we have done this we can take the ilocks and do the rest 1648 + * of the checks. 1672 1649 */ 1650 + lock_flags = XFS_IOLOCK_EXCL; 1673 1651 xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL); 1674 - xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL); 1675 1652 1676 1653 /* Verify that both files have the same format */ 1677 1654 if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) { 1678 - error = XFS_ERROR(EINVAL); 1655 + error = -EINVAL; 1679 1656 goto out_unlock; 1680 1657 } 1681 1658 1682 1659 /* Verify both files are either real-time or non-realtime */ 1683 1660 if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) { 1684 - error = XFS_ERROR(EINVAL); 1661 + error = -EINVAL; 1685 1662 goto out_unlock; 1686 1663 } 1687 1664 1688 - error = -filemap_write_and_wait(VFS_I(tip)->i_mapping); 1665 + error = xfs_swap_extent_flush(ip); 1689 1666 if (error) 1690 1667 goto out_unlock; 1691 - truncate_pagecache_range(VFS_I(tip), 0, -1); 1668 + error = xfs_swap_extent_flush(tip); 1669 + if (error) 1670 + goto out_unlock; 1692 1671 1693 - /* Verify O_DIRECT for ftmp */ 1694 - if (VN_CACHED(VFS_I(tip)) != 0) { 1695 - error = XFS_ERROR(EINVAL); 1672 + tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT); 1673 + error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0); 1674 + if (error) { 1675 + xfs_trans_cancel(tp, 0); 1696 1676 goto out_unlock; 1697 1677 } 1678 + xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL); 1679 + lock_flags |= XFS_ILOCK_EXCL; 1698 1680 1699 1681 /* Verify all data are being swapped */ 1700 1682 if (sxp->sx_offset != 0 || 1701 1683 sxp->sx_length != ip->i_d.di_size || 1702 1684 sxp->sx_length != tip->i_d.di_size) { 1703 - error = XFS_ERROR(EFAULT); 1704 - goto out_unlock; 1685 + error = -EFAULT; 1686 + goto out_trans_cancel; 1705 1687 } 1706 1688 1707 1689 trace_xfs_swap_extent_before(ip, 0); 
··· 1719 1689 xfs_notice(mp, 1720 1690 "%s: inode 0x%llx format is incompatible for exchanging.", 1721 1691 __func__, ip->i_ino); 1722 - goto out_unlock; 1692 + goto out_trans_cancel; 1723 1693 } 1724 1694 1725 1695 /* ··· 1733 1703 (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) || 1734 1704 (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) || 1735 1705 (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) { 1736 - error = XFS_ERROR(EBUSY); 1737 - goto out_unlock; 1706 + error = -EBUSY; 1707 + goto out_trans_cancel; 1738 1708 } 1739 - 1740 - /* We need to fail if the file is memory mapped. Once we have tossed 1741 - * all existing pages, the page fault will have no option 1742 - * but to go to the filesystem for pages. By making the page fault call 1743 - * vop_read (or write in the case of autogrow) they block on the iolock 1744 - * until we have switched the extents. 1745 - */ 1746 - if (VN_MAPPED(VFS_I(ip))) { 1747 - error = XFS_ERROR(EBUSY); 1748 - goto out_unlock; 1749 - } 1750 - 1751 - xfs_iunlock(ip, XFS_ILOCK_EXCL); 1752 - xfs_iunlock(tip, XFS_ILOCK_EXCL); 1753 - 1754 - /* 1755 - * There is a race condition here since we gave up the 1756 - * ilock. However, the data fork will not change since 1757 - * we have the iolock (locked for truncation too) so we 1758 - * are safe. We don't really care if non-io related 1759 - * fields change. 
1760 - */ 1761 - truncate_pagecache_range(VFS_I(ip), 0, -1); 1762 - 1763 - tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT); 1764 - error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0); 1765 - if (error) { 1766 - xfs_iunlock(ip, XFS_IOLOCK_EXCL); 1767 - xfs_iunlock(tip, XFS_IOLOCK_EXCL); 1768 - xfs_trans_cancel(tp, 0); 1769 - goto out; 1770 - } 1771 - xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL); 1772 - 1773 1709 /* 1774 1710 * Count the number of extended attribute blocks 1775 1711 */ ··· 1753 1757 goto out_trans_cancel; 1754 1758 } 1755 1759 1756 - xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); 1757 - xfs_trans_ijoin(tp, tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); 1760 + xfs_trans_ijoin(tp, ip, lock_flags); 1761 + xfs_trans_ijoin(tp, tip, lock_flags); 1758 1762 1759 1763 /* 1760 1764 * Before we've swapped the forks, lets set the owners of the forks ··· 1883 1887 return error; 1884 1888 1885 1889 out_unlock: 1886 - xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); 1887 - xfs_iunlock(tip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); 1890 + xfs_iunlock(ip, lock_flags); 1891 + xfs_iunlock(tip, lock_flags); 1888 1892 goto out; 1889 1893 1890 1894 out_trans_cancel:
+23 -23
fs/xfs/xfs_btree.c fs/xfs/libxfs/xfs_btree.c
··· 78 78 be16_to_cpu(block->bb_numrecs) <= 79 79 cur->bc_ops->get_maxrecs(cur, level) && 80 80 block->bb_u.l.bb_leftsib && 81 - (block->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO) || 81 + (block->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK) || 82 82 XFS_FSB_SANITY_CHECK(mp, 83 83 be64_to_cpu(block->bb_u.l.bb_leftsib))) && 84 84 block->bb_u.l.bb_rightsib && 85 - (block->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO) || 85 + (block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK) || 86 86 XFS_FSB_SANITY_CHECK(mp, 87 87 be64_to_cpu(block->bb_u.l.bb_rightsib))); 88 88 ··· 92 92 if (bp) 93 93 trace_xfs_btree_corrupt(bp, _RET_IP_); 94 94 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp); 95 - return XFS_ERROR(EFSCORRUPTED); 95 + return -EFSCORRUPTED; 96 96 } 97 97 return 0; 98 98 } ··· 140 140 if (bp) 141 141 trace_xfs_btree_corrupt(bp, _RET_IP_); 142 142 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp); 143 - return XFS_ERROR(EFSCORRUPTED); 143 + return -EFSCORRUPTED; 144 144 } 145 145 return 0; 146 146 } ··· 167 167 int /* error (0 or EFSCORRUPTED) */ 168 168 xfs_btree_check_lptr( 169 169 struct xfs_btree_cur *cur, /* btree cursor */ 170 - xfs_dfsbno_t bno, /* btree block disk address */ 170 + xfs_fsblock_t bno, /* btree block disk address */ 171 171 int level) /* btree block level */ 172 172 { 173 173 XFS_WANT_CORRUPTED_RETURN( 174 174 level > 0 && 175 - bno != NULLDFSBNO && 175 + bno != NULLFSBLOCK && 176 176 XFS_FSB_SANITY_CHECK(cur->bc_mp, bno)); 177 177 return 0; 178 178 } ··· 595 595 block = xfs_btree_get_block(cur, level, &bp); 596 596 xfs_btree_check_block(cur, block, level, bp); 597 597 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) 598 - return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO); 598 + return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK); 599 599 else 600 600 return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK); 601 601 } ··· 771 771 struct xfs_btree_block *block) 772 772 { 773 773 int rval = 0; 774 - xfs_dfsbno_t left = 
be64_to_cpu(block->bb_u.l.bb_leftsib); 775 - xfs_dfsbno_t right = be64_to_cpu(block->bb_u.l.bb_rightsib); 774 + xfs_fsblock_t left = be64_to_cpu(block->bb_u.l.bb_leftsib); 775 + xfs_fsblock_t right = be64_to_cpu(block->bb_u.l.bb_rightsib); 776 776 777 - if ((lr & XFS_BTCUR_LEFTRA) && left != NULLDFSBNO) { 777 + if ((lr & XFS_BTCUR_LEFTRA) && left != NULLFSBLOCK) { 778 778 xfs_btree_reada_bufl(cur->bc_mp, left, 1, 779 779 cur->bc_ops->buf_ops); 780 780 rval++; 781 781 } 782 782 783 - if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLDFSBNO) { 783 + if ((lr & XFS_BTCUR_RIGHTRA) && right != NULLFSBLOCK) { 784 784 xfs_btree_reada_bufl(cur->bc_mp, right, 1, 785 785 cur->bc_ops->buf_ops); 786 786 rval++; ··· 852 852 union xfs_btree_ptr *ptr) 853 853 { 854 854 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { 855 - ASSERT(ptr->l != cpu_to_be64(NULLDFSBNO)); 855 + ASSERT(ptr->l != cpu_to_be64(NULLFSBLOCK)); 856 856 857 857 return XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l)); 858 858 } else { ··· 900 900 901 901 b = XFS_BUF_TO_BLOCK(bp); 902 902 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) { 903 - if (b->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO)) 903 + if (b->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK)) 904 904 cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA; 905 - if (b->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO)) 905 + if (b->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK)) 906 906 cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA; 907 907 } else { 908 908 if (b->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK)) ··· 918 918 union xfs_btree_ptr *ptr) 919 919 { 920 920 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) 921 - return ptr->l == cpu_to_be64(NULLDFSBNO); 921 + return ptr->l == cpu_to_be64(NULLFSBLOCK); 922 922 else 923 923 return ptr->s == cpu_to_be32(NULLAGBLOCK); 924 924 } ··· 929 929 union xfs_btree_ptr *ptr) 930 930 { 931 931 if (cur->bc_flags & XFS_BTREE_LONG_PTRS) 932 - ptr->l = cpu_to_be64(NULLDFSBNO); 932 + ptr->l = cpu_to_be64(NULLFSBLOCK); 933 933 else 934 934 ptr->s = 
cpu_to_be32(NULLAGBLOCK); 935 935 } ··· 997 997 buf->bb_numrecs = cpu_to_be16(numrecs); 998 998 999 999 if (flags & XFS_BTREE_LONG_PTRS) { 1000 - buf->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO); 1001 - buf->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO); 1000 + buf->bb_u.l.bb_leftsib = cpu_to_be64(NULLFSBLOCK); 1001 + buf->bb_u.l.bb_rightsib = cpu_to_be64(NULLFSBLOCK); 1002 1002 if (flags & XFS_BTREE_CRC_BLOCKS) { 1003 1003 buf->bb_u.l.bb_blkno = cpu_to_be64(blkno); 1004 1004 buf->bb_u.l.bb_owner = cpu_to_be64(owner); ··· 1140 1140 mp->m_bsize, flags); 1141 1141 1142 1142 if (!*bpp) 1143 - return ENOMEM; 1143 + return -ENOMEM; 1144 1144 1145 1145 (*bpp)->b_ops = cur->bc_ops->buf_ops; 1146 1146 *block = XFS_BUF_TO_BLOCK(*bpp); ··· 1498 1498 if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) 1499 1499 goto out0; 1500 1500 ASSERT(0); 1501 - error = EFSCORRUPTED; 1501 + error = -EFSCORRUPTED; 1502 1502 goto error0; 1503 1503 } 1504 1504 ASSERT(lev < cur->bc_nlevels); ··· 1597 1597 if (cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) 1598 1598 goto out0; 1599 1599 ASSERT(0); 1600 - error = EFSCORRUPTED; 1600 + error = -EFSCORRUPTED; 1601 1601 goto error0; 1602 1602 } 1603 1603 ASSERT(lev < cur->bc_nlevels); ··· 4018 4018 /* now read rh sibling block for next iteration */ 4019 4019 xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB); 4020 4020 if (xfs_btree_ptr_is_null(cur, &rptr)) 4021 - return ENOENT; 4021 + return -ENOENT; 4022 4022 4023 4023 return xfs_btree_lookup_get_block(cur, level, &rptr, &block); 4024 4024 } ··· 4061 4061 buffer_list); 4062 4062 } while (!error); 4063 4063 4064 - if (error != ENOENT) 4064 + if (error != -ENOENT) 4065 4065 return error; 4066 4066 } 4067 4067
+1 -1
fs/xfs/xfs_btree.h fs/xfs/libxfs/xfs_btree.h
··· 258 258 int /* error (0 or EFSCORRUPTED) */ 259 259 xfs_btree_check_lptr( 260 260 struct xfs_btree_cur *cur, /* btree cursor */ 261 - xfs_dfsbno_t ptr, /* btree block disk address */ 261 + xfs_fsblock_t ptr, /* btree block disk address */ 262 262 int level); /* btree block level */ 263 263 264 264 /*
+27 -13
fs/xfs/xfs_buf.c
··· 130 130 bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map), 131 131 KM_NOFS); 132 132 if (!bp->b_maps) 133 - return ENOMEM; 133 + return -ENOMEM; 134 134 return 0; 135 135 } 136 136 ··· 344 344 if (unlikely(page == NULL)) { 345 345 if (flags & XBF_READ_AHEAD) { 346 346 bp->b_page_count = i; 347 - error = ENOMEM; 347 + error = -ENOMEM; 348 348 goto out_free_pages; 349 349 } 350 350 ··· 465 465 eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks); 466 466 if (blkno >= eofs) { 467 467 /* 468 - * XXX (dgc): we should really be returning EFSCORRUPTED here, 468 + * XXX (dgc): we should really be returning -EFSCORRUPTED here, 469 469 * but none of the higher level infrastructure supports 470 470 * returning a specific error on buffer lookup failures. 471 471 */ ··· 1052 1052 xfs_buf_t *bp, 1053 1053 int error) 1054 1054 { 1055 - ASSERT(error >= 0 && error <= 0xffff); 1056 - bp->b_error = (unsigned short)error; 1055 + ASSERT(error <= 0 && error >= -1000); 1056 + bp->b_error = error; 1057 1057 trace_xfs_buf_ioerror(bp, error, _RET_IP_); 1058 1058 } 1059 1059 ··· 1064 1064 { 1065 1065 xfs_alert(bp->b_target->bt_mount, 1066 1066 "metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d", 1067 - (__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length); 1067 + (__uint64_t)XFS_BUF_ADDR(bp), func, -bp->b_error, bp->b_length); 1068 1068 } 1069 1069 1070 1070 /* ··· 1083 1083 /* 1084 1084 * No need to wait until the buffer is unpinned, we aren't flushing it. 1085 1085 */ 1086 - xfs_buf_ioerror(bp, EIO); 1086 + xfs_buf_ioerror(bp, -EIO); 1087 1087 1088 1088 /* 1089 1089 * We're calling xfs_buf_ioend, so delete XBF_DONE flag. ··· 1094 1094 1095 1095 xfs_buf_ioend(bp, 0); 1096 1096 1097 - return EIO; 1097 + return -EIO; 1098 1098 } 1099 1099 1100 1100 /* ··· 1127 1127 * There's no reason to mark error for 1128 1128 * ASYNC buffers. 
1129 1129 */ 1130 - xfs_buf_ioerror(bp, EIO); 1130 + xfs_buf_ioerror(bp, -EIO); 1131 1131 complete(&bp->b_iowait); 1132 1132 } else { 1133 1133 xfs_buf_relse(bp); 1134 1134 } 1135 1135 1136 - return EIO; 1136 + return -EIO; 1137 1137 } 1138 1138 1139 1139 STATIC int ··· 1199 1199 * buffers that require multiple bios to complete. 1200 1200 */ 1201 1201 if (!bp->b_error) 1202 - xfs_buf_ioerror(bp, -error); 1202 + xfs_buf_ioerror(bp, error); 1203 1203 1204 1204 if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) 1205 1205 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); ··· 1286 1286 * because the caller (xfs_buf_iorequest) holds a count itself. 1287 1287 */ 1288 1288 atomic_dec(&bp->b_io_remaining); 1289 - xfs_buf_ioerror(bp, EIO); 1289 + xfs_buf_ioerror(bp, -EIO); 1290 1290 bio_put(bio); 1291 1291 } 1292 1292 ··· 1329 1329 xfs_force_shutdown(bp->b_target->bt_mount, 1330 1330 SHUTDOWN_CORRUPT_INCORE); 1331 1331 return; 1332 + } 1333 + } else if (bp->b_bn != XFS_BUF_DADDR_NULL) { 1334 + struct xfs_mount *mp = bp->b_target->bt_mount; 1335 + 1336 + /* 1337 + * non-crc filesystems don't attach verifiers during 1338 + * log recovery, so don't warn for such filesystems. 1339 + */ 1340 + if (xfs_sb_version_hascrc(&mp->m_sb)) { 1341 + xfs_warn(mp, 1342 + "%s: no ops on block 0x%llx/0x%x", 1343 + __func__, bp->b_bn, bp->b_length); 1344 + xfs_hex_dump(bp->b_addr, 64); 1345 + dump_stack(); 1332 1346 } 1333 1347 } 1334 1348 } else if (bp->b_flags & XBF_READ_AHEAD) { ··· 1642 1628 xfs_warn(btp->bt_mount, 1643 1629 "Cannot set_blocksize to %u on device %s", 1644 1630 sectorsize, name); 1645 - return EINVAL; 1631 + return -EINVAL; 1646 1632 } 1647 1633 1648 1634 /* Set up device logical sector size mask */
+1 -1
fs/xfs/xfs_buf.h
··· 178 178 atomic_t b_io_remaining; /* #outstanding I/O requests */ 179 179 unsigned int b_page_count; /* size of page array */ 180 180 unsigned int b_offset; /* page offset in first page */ 181 - unsigned short b_error; /* error code on I/O */ 181 + int b_error; /* error code on I/O */ 182 182 const struct xfs_buf_ops *b_ops; 183 183 184 184 #ifdef XFS_BUF_LOCK_TRACKING
+2 -2
fs/xfs/xfs_buf_item.c
··· 488 488 xfs_buf_lock(bp); 489 489 xfs_buf_hold(bp); 490 490 bp->b_flags |= XBF_ASYNC; 491 - xfs_buf_ioerror(bp, EIO); 491 + xfs_buf_ioerror(bp, -EIO); 492 492 XFS_BUF_UNDONE(bp); 493 493 xfs_buf_stale(bp); 494 494 xfs_buf_ioend(bp, 0); ··· 725 725 bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format), 726 726 KM_SLEEP); 727 727 if (!bip->bli_formats) 728 - return ENOMEM; 728 + return -ENOMEM; 729 729 return 0; 730 730 } 731 731
fs/xfs/xfs_cksum.h fs/xfs/libxfs/xfs_cksum.h
+56 -56
fs/xfs/xfs_da_btree.c fs/xfs/libxfs/xfs_da_btree.c
··· 185 185 struct xfs_da3_node_hdr *hdr3 = bp->b_addr; 186 186 187 187 if (!xfs_da3_node_verify(bp)) { 188 - xfs_buf_ioerror(bp, EFSCORRUPTED); 188 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 189 189 xfs_verifier_error(bp); 190 190 return; 191 191 } ··· 214 214 switch (be16_to_cpu(info->magic)) { 215 215 case XFS_DA3_NODE_MAGIC: 216 216 if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) { 217 - xfs_buf_ioerror(bp, EFSBADCRC); 217 + xfs_buf_ioerror(bp, -EFSBADCRC); 218 218 break; 219 219 } 220 220 /* fall through */ 221 221 case XFS_DA_NODE_MAGIC: 222 222 if (!xfs_da3_node_verify(bp)) { 223 - xfs_buf_ioerror(bp, EFSCORRUPTED); 223 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 224 224 break; 225 225 } 226 226 return; ··· 315 315 316 316 error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork); 317 317 if (error) 318 - return(error); 318 + return error; 319 319 bp->b_ops = &xfs_da3_node_buf_ops; 320 320 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF); 321 321 node = bp->b_addr; ··· 337 337 XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size)); 338 338 339 339 *bpp = bp; 340 - return(0); 340 + return 0; 341 341 } 342 342 343 343 /* ··· 385 385 switch (oldblk->magic) { 386 386 case XFS_ATTR_LEAF_MAGIC: 387 387 error = xfs_attr3_leaf_split(state, oldblk, newblk); 388 - if ((error != 0) && (error != ENOSPC)) { 389 - return(error); /* GROT: attr is inconsistent */ 388 + if ((error != 0) && (error != -ENOSPC)) { 389 + return error; /* GROT: attr is inconsistent */ 390 390 } 391 391 if (!error) { 392 392 addblk = newblk; ··· 408 408 &state->extrablk); 409 409 } 410 410 if (error) 411 - return(error); /* GROT: attr inconsistent */ 411 + return error; /* GROT: attr inconsistent */ 412 412 addblk = newblk; 413 413 break; 414 414 case XFS_DIR2_LEAFN_MAGIC: ··· 422 422 max - i, &action); 423 423 addblk->bp = NULL; 424 424 if (error) 425 - return(error); /* GROT: dir is inconsistent */ 425 + return error; /* GROT: dir is inconsistent */ 426 426 /* 427 427 * Record the newly 
split block for the next time thru? 428 428 */ ··· 439 439 xfs_da3_fixhashpath(state, &state->path); 440 440 } 441 441 if (!addblk) 442 - return(0); 442 + return 0; 443 443 444 444 /* 445 445 * Split the root node. ··· 449 449 error = xfs_da3_root_split(state, oldblk, addblk); 450 450 if (error) { 451 451 addblk->bp = NULL; 452 - return(error); /* GROT: dir is inconsistent */ 452 + return error; /* GROT: dir is inconsistent */ 453 453 } 454 454 455 455 /* ··· 492 492 sizeof(node->hdr.info))); 493 493 } 494 494 addblk->bp = NULL; 495 - return(0); 495 + return 0; 496 496 } 497 497 498 498 /* ··· 670 670 */ 671 671 error = xfs_da_grow_inode(state->args, &blkno); 672 672 if (error) 673 - return(error); /* GROT: dir is inconsistent */ 673 + return error; /* GROT: dir is inconsistent */ 674 674 675 675 error = xfs_da3_node_create(state->args, blkno, treelevel, 676 676 &newblk->bp, state->args->whichfork); 677 677 if (error) 678 - return(error); /* GROT: dir is inconsistent */ 678 + return error; /* GROT: dir is inconsistent */ 679 679 newblk->blkno = blkno; 680 680 newblk->magic = XFS_DA_NODE_MAGIC; 681 681 xfs_da3_node_rebalance(state, oldblk, newblk); 682 682 error = xfs_da3_blk_link(state, oldblk, newblk); 683 683 if (error) 684 - return(error); 684 + return error; 685 685 *result = 1; 686 686 } else { 687 687 *result = 0; ··· 721 721 } 722 722 } 723 723 724 - return(0); 724 + return 0; 725 725 } 726 726 727 727 /* ··· 963 963 case XFS_ATTR_LEAF_MAGIC: 964 964 error = xfs_attr3_leaf_toosmall(state, &action); 965 965 if (error) 966 - return(error); 966 + return error; 967 967 if (action == 0) 968 - return(0); 968 + return 0; 969 969 xfs_attr3_leaf_unbalance(state, drop_blk, save_blk); 970 970 break; 971 971 case XFS_DIR2_LEAFN_MAGIC: ··· 985 985 xfs_da3_fixhashpath(state, &state->path); 986 986 error = xfs_da3_node_toosmall(state, &action); 987 987 if (error) 988 - return(error); 988 + return error; 989 989 if (action == 0) 990 990 return 0; 991 991 
xfs_da3_node_unbalance(state, drop_blk, save_blk); ··· 995 995 error = xfs_da3_blk_unlink(state, drop_blk, save_blk); 996 996 xfs_da_state_kill_altpath(state); 997 997 if (error) 998 - return(error); 998 + return error; 999 999 error = xfs_da_shrink_inode(state->args, drop_blk->blkno, 1000 1000 drop_blk->bp); 1001 1001 drop_blk->bp = NULL; 1002 1002 if (error) 1003 - return(error); 1003 + return error; 1004 1004 } 1005 1005 /* 1006 1006 * We joined all the way to the top. If it turns out that ··· 1010 1010 xfs_da3_node_remove(state, drop_blk); 1011 1011 xfs_da3_fixhashpath(state, &state->path); 1012 1012 error = xfs_da3_root_join(state, &state->path.blk[0]); 1013 - return(error); 1013 + return error; 1014 1014 } 1015 1015 1016 1016 #ifdef DEBUG ··· 1099 1099 xfs_trans_log_buf(args->trans, root_blk->bp, 0, 1100 1100 args->geo->blksize - 1); 1101 1101 error = xfs_da_shrink_inode(args, child, bp); 1102 - return(error); 1102 + return error; 1103 1103 } 1104 1104 1105 1105 /* ··· 1142 1142 dp->d_ops->node_hdr_from_disk(&nodehdr, node); 1143 1143 if (nodehdr.count > (state->args->geo->node_ents >> 1)) { 1144 1144 *action = 0; /* blk over 50%, don't try to join */ 1145 - return(0); /* blk over 50%, don't try to join */ 1145 + return 0; /* blk over 50%, don't try to join */ 1146 1146 } 1147 1147 1148 1148 /* ··· 1161 1161 error = xfs_da3_path_shift(state, &state->altpath, forward, 1162 1162 0, &retval); 1163 1163 if (error) 1164 - return(error); 1164 + return error; 1165 1165 if (retval) { 1166 1166 *action = 0; 1167 1167 } else { 1168 1168 *action = 2; 1169 1169 } 1170 - return(0); 1170 + return 0; 1171 1171 } 1172 1172 1173 1173 /* ··· 1194 1194 error = xfs_da3_node_read(state->args->trans, dp, 1195 1195 blkno, -1, &bp, state->args->whichfork); 1196 1196 if (error) 1197 - return(error); 1197 + return error; 1198 1198 1199 1199 node = bp->b_addr; 1200 1200 dp->d_ops->node_hdr_from_disk(&thdr, node); ··· 1486 1486 if (error) { 1487 1487 blk->blkno = 0; 1488 1488 
state->path.active--; 1489 - return(error); 1489 + return error; 1490 1490 } 1491 1491 curr = blk->bp->b_addr; 1492 1492 blk->magic = be16_to_cpu(curr->magic); ··· 1579 1579 args->blkno = blk->blkno; 1580 1580 } else { 1581 1581 ASSERT(0); 1582 - return XFS_ERROR(EFSCORRUPTED); 1582 + return -EFSCORRUPTED; 1583 1583 } 1584 - if (((retval == ENOENT) || (retval == ENOATTR)) && 1584 + if (((retval == -ENOENT) || (retval == -ENOATTR)) && 1585 1585 (blk->hashval == args->hashval)) { 1586 1586 error = xfs_da3_path_shift(state, &state->path, 1, 1, 1587 1587 &retval); 1588 1588 if (error) 1589 - return(error); 1589 + return error; 1590 1590 if (retval == 0) { 1591 1591 continue; 1592 1592 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) { 1593 1593 /* path_shift() gives ENOENT */ 1594 - retval = XFS_ERROR(ENOATTR); 1594 + retval = -ENOATTR; 1595 1595 } 1596 1596 } 1597 1597 break; 1598 1598 } 1599 1599 *result = retval; 1600 - return(0); 1600 + return 0; 1601 1601 } 1602 1602 1603 1603 /*======================================================================== ··· 1692 1692 be32_to_cpu(old_info->back), 1693 1693 -1, &bp, args->whichfork); 1694 1694 if (error) 1695 - return(error); 1695 + return error; 1696 1696 ASSERT(bp != NULL); 1697 1697 tmp_info = bp->b_addr; 1698 1698 ASSERT(tmp_info->magic == old_info->magic); ··· 1713 1713 be32_to_cpu(old_info->forw), 1714 1714 -1, &bp, args->whichfork); 1715 1715 if (error) 1716 - return(error); 1716 + return error; 1717 1717 ASSERT(bp != NULL); 1718 1718 tmp_info = bp->b_addr; 1719 1719 ASSERT(tmp_info->magic == old_info->magic); ··· 1726 1726 1727 1727 xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1); 1728 1728 xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1); 1729 - return(0); 1729 + return 0; 1730 1730 } 1731 1731 1732 1732 /* ··· 1772 1772 be32_to_cpu(drop_info->back), 1773 1773 -1, &bp, args->whichfork); 1774 1774 if (error) 1775 - return(error); 1775 + return error; 1776 1776 ASSERT(bp 
!= NULL); 1777 1777 tmp_info = bp->b_addr; 1778 1778 ASSERT(tmp_info->magic == save_info->magic); ··· 1789 1789 be32_to_cpu(drop_info->forw), 1790 1790 -1, &bp, args->whichfork); 1791 1791 if (error) 1792 - return(error); 1792 + return error; 1793 1793 ASSERT(bp != NULL); 1794 1794 tmp_info = bp->b_addr; 1795 1795 ASSERT(tmp_info->magic == save_info->magic); ··· 1801 1801 } 1802 1802 1803 1803 xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1); 1804 - return(0); 1804 + return 0; 1805 1805 } 1806 1806 1807 1807 /* ··· 1859 1859 } 1860 1860 } 1861 1861 if (level < 0) { 1862 - *result = XFS_ERROR(ENOENT); /* we're out of our tree */ 1862 + *result = -ENOENT; /* we're out of our tree */ 1863 1863 ASSERT(args->op_flags & XFS_DA_OP_OKNOENT); 1864 - return(0); 1864 + return 0; 1865 1865 } 1866 1866 1867 1867 /* ··· 1883 1883 error = xfs_da3_node_read(args->trans, dp, blkno, -1, 1884 1884 &blk->bp, args->whichfork); 1885 1885 if (error) 1886 - return(error); 1886 + return error; 1887 1887 info = blk->bp->b_addr; 1888 1888 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) || 1889 1889 info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) || ··· 2004 2004 struct xfs_trans *tp = args->trans; 2005 2005 struct xfs_inode *dp = args->dp; 2006 2006 int w = args->whichfork; 2007 - xfs_drfsbno_t nblks = dp->i_d.di_nblocks; 2007 + xfs_rfsblock_t nblks = dp->i_d.di_nblocks; 2008 2008 struct xfs_bmbt_irec map, *mapp; 2009 2009 int nmap, error, got, i, mapi; 2010 2010 ··· 2068 2068 if (got != count || mapp[0].br_startoff != *bno || 2069 2069 mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount != 2070 2070 *bno + count) { 2071 - error = XFS_ERROR(ENOSPC); 2071 + error = -ENOSPC; 2072 2072 goto out_free_map; 2073 2073 } 2074 2074 ··· 2158 2158 if (unlikely(lastoff == 0)) { 2159 2159 XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW, 2160 2160 mp); 2161 - return XFS_ERROR(EFSCORRUPTED); 2161 + return -EFSCORRUPTED; 2162 2162 } 2163 2163 /* 2164 2164 * 
Read the last block in the btree space. ··· 2209 2209 sib_info->magic != dead_info->magic)) { 2210 2210 XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)", 2211 2211 XFS_ERRLEVEL_LOW, mp); 2212 - error = XFS_ERROR(EFSCORRUPTED); 2212 + error = -EFSCORRUPTED; 2213 2213 goto done; 2214 2214 } 2215 2215 sib_info->forw = cpu_to_be32(dead_blkno); ··· 2231 2231 sib_info->magic != dead_info->magic)) { 2232 2232 XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)", 2233 2233 XFS_ERRLEVEL_LOW, mp); 2234 - error = XFS_ERROR(EFSCORRUPTED); 2234 + error = -EFSCORRUPTED; 2235 2235 goto done; 2236 2236 } 2237 2237 sib_info->back = cpu_to_be32(dead_blkno); ··· 2254 2254 if (level >= 0 && level != par_hdr.level + 1) { 2255 2255 XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)", 2256 2256 XFS_ERRLEVEL_LOW, mp); 2257 - error = XFS_ERROR(EFSCORRUPTED); 2257 + error = -EFSCORRUPTED; 2258 2258 goto done; 2259 2259 } 2260 2260 level = par_hdr.level; ··· 2267 2267 if (entno == par_hdr.count) { 2268 2268 XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)", 2269 2269 XFS_ERRLEVEL_LOW, mp); 2270 - error = XFS_ERROR(EFSCORRUPTED); 2270 + error = -EFSCORRUPTED; 2271 2271 goto done; 2272 2272 } 2273 2273 par_blkno = be32_to_cpu(btree[entno].before); ··· 2294 2294 if (unlikely(par_blkno == 0)) { 2295 2295 XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)", 2296 2296 XFS_ERRLEVEL_LOW, mp); 2297 - error = XFS_ERROR(EFSCORRUPTED); 2297 + error = -EFSCORRUPTED; 2298 2298 goto done; 2299 2299 } 2300 2300 error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w); ··· 2305 2305 if (par_hdr.level != level) { 2306 2306 XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)", 2307 2307 XFS_ERRLEVEL_LOW, mp); 2308 - error = XFS_ERROR(EFSCORRUPTED); 2308 + error = -EFSCORRUPTED; 2309 2309 goto done; 2310 2310 } 2311 2311 btree = dp->d_ops->node_tree_p(par_node); ··· 2359 2359 error = xfs_bunmapi(tp, dp, dead_blkno, count, 2360 2360 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA, 2361 2361 0, args->firstblock, args->flist, &done); 2362 - if (error == ENOSPC) { 
2362 + if (error == -ENOSPC) { 2363 2363 if (w != XFS_DATA_FORK) 2364 2364 break; 2365 2365 error = xfs_da3_swap_lastblock(args, &dead_blkno, ··· 2427 2427 map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), 2428 2428 KM_SLEEP | KM_NOFS); 2429 2429 if (!map) 2430 - return ENOMEM; 2430 + return -ENOMEM; 2431 2431 *mapp = map; 2432 2432 } 2433 2433 ··· 2500 2500 } 2501 2501 2502 2502 if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) { 2503 - error = mappedbno == -2 ? -1 : XFS_ERROR(EFSCORRUPTED); 2504 - if (unlikely(error == EFSCORRUPTED)) { 2503 + error = mappedbno == -2 ? -1 : -EFSCORRUPTED; 2504 + if (unlikely(error == -EFSCORRUPTED)) { 2505 2505 if (xfs_error_level >= XFS_ERRLEVEL_LOW) { 2506 2506 int i; 2507 2507 xfs_alert(mp, "%s: bno %lld dir: inode %lld", ··· 2561 2561 2562 2562 bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp, 2563 2563 mapp, nmap, 0); 2564 - error = bp ? bp->b_error : XFS_ERROR(EIO); 2564 + error = bp ? bp->b_error : -EIO; 2565 2565 if (error) { 2566 2566 xfs_trans_brelse(trans, bp); 2567 2567 goto out_free;
fs/xfs/xfs_da_btree.h fs/xfs/libxfs/xfs_da_btree.h
fs/xfs/xfs_da_format.c fs/xfs/libxfs/xfs_da_format.c
fs/xfs/xfs_da_format.h fs/xfs/libxfs/xfs_da_format.h
fs/xfs/xfs_dinode.h fs/xfs/libxfs/xfs_dinode.h
+12 -12
fs/xfs/xfs_dir2.c fs/xfs/libxfs/xfs_dir2.c
··· 108 108 if (!mp->m_dir_geo || !mp->m_attr_geo) { 109 109 kmem_free(mp->m_dir_geo); 110 110 kmem_free(mp->m_attr_geo); 111 - return ENOMEM; 111 + return -ENOMEM; 112 112 } 113 113 114 114 /* set up directory geometry */ ··· 202 202 xfs_warn(mp, "Invalid inode number 0x%Lx", 203 203 (unsigned long long) ino); 204 204 XFS_ERROR_REPORT("xfs_dir_ino_validate", XFS_ERRLEVEL_LOW, mp); 205 - return XFS_ERROR(EFSCORRUPTED); 205 + return -EFSCORRUPTED; 206 206 } 207 207 return 0; 208 208 } ··· 226 226 227 227 args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS); 228 228 if (!args) 229 - return ENOMEM; 229 + return -ENOMEM; 230 230 231 231 args->geo = dp->i_mount->m_dir_geo; 232 232 args->dp = dp; ··· 261 261 262 262 args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS); 263 263 if (!args) 264 - return ENOMEM; 264 + return -ENOMEM; 265 265 266 266 args->geo = dp->i_mount->m_dir_geo; 267 267 args->name = name->name; ··· 314 314 int len) 315 315 { 316 316 if (args->cmpresult == XFS_CMP_DIFFERENT) 317 - return ENOENT; 317 + return -ENOENT; 318 318 if (args->cmpresult != XFS_CMP_CASE || 319 319 !(args->op_flags & XFS_DA_OP_CILOOKUP)) 320 - return EEXIST; 320 + return -EEXIST; 321 321 322 322 args->value = kmem_alloc(len, KM_NOFS | KM_MAYFAIL); 323 323 if (!args->value) 324 - return ENOMEM; 324 + return -ENOMEM; 325 325 326 326 memcpy(args->value, name, len); 327 327 args->valuelen = len; 328 - return EEXIST; 328 + return -EEXIST; 329 329 } 330 330 331 331 /* ··· 392 392 rval = xfs_dir2_node_lookup(args); 393 393 394 394 out_check_rval: 395 - if (rval == EEXIST) 395 + if (rval == -EEXIST) 396 396 rval = 0; 397 397 if (!rval) { 398 398 *inum = args->inumber; ··· 428 428 429 429 args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS); 430 430 if (!args) 431 - return ENOMEM; 431 + return -ENOMEM; 432 432 433 433 args->geo = dp->i_mount->m_dir_geo; 434 434 args->name = name->name; ··· 493 493 494 494 args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS); 495 495 if (!args) 496 - 
return ENOMEM; 496 + return -ENOMEM; 497 497 498 498 args->geo = dp->i_mount->m_dir_geo; 499 499 args->name = name->name; ··· 555 555 556 556 args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS); 557 557 if (!args) 558 - return ENOMEM; 558 + return -ENOMEM; 559 559 560 560 args->geo = dp->i_mount->m_dir_geo; 561 561 args->name = name->name;
fs/xfs/xfs_dir2.h fs/xfs/libxfs/xfs_dir2.h
+9 -9
fs/xfs/xfs_dir2_block.c fs/xfs/libxfs/xfs_dir2_block.c
··· 91 91 92 92 if (xfs_sb_version_hascrc(&mp->m_sb) && 93 93 !xfs_buf_verify_cksum(bp, XFS_DIR3_DATA_CRC_OFF)) 94 - xfs_buf_ioerror(bp, EFSBADCRC); 94 + xfs_buf_ioerror(bp, -EFSBADCRC); 95 95 else if (!xfs_dir3_block_verify(bp)) 96 - xfs_buf_ioerror(bp, EFSCORRUPTED); 96 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 97 97 98 98 if (bp->b_error) 99 99 xfs_verifier_error(bp); ··· 108 108 struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; 109 109 110 110 if (!xfs_dir3_block_verify(bp)) { 111 - xfs_buf_ioerror(bp, EFSCORRUPTED); 111 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 112 112 xfs_verifier_error(bp); 113 113 return; 114 114 } ··· 392 392 if (args->op_flags & XFS_DA_OP_JUSTCHECK) { 393 393 xfs_trans_brelse(tp, bp); 394 394 if (!dup) 395 - return XFS_ERROR(ENOSPC); 395 + return -ENOSPC; 396 396 return 0; 397 397 } 398 398 ··· 402 402 if (!dup) { 403 403 /* Don't have a space reservation: return no-space. */ 404 404 if (args->total == 0) 405 - return XFS_ERROR(ENOSPC); 405 + return -ENOSPC; 406 406 /* 407 407 * Convert to the next larger format. 408 408 * Then add the new entry in that format. ··· 647 647 args->filetype = dp->d_ops->data_get_ftype(dep); 648 648 error = xfs_dir_cilookup_result(args, dep->name, dep->namelen); 649 649 xfs_trans_brelse(args->trans, bp); 650 - return XFS_ERROR(error); 650 + return error; 651 651 } 652 652 653 653 /* ··· 703 703 if (low > high) { 704 704 ASSERT(args->op_flags & XFS_DA_OP_OKNOENT); 705 705 xfs_trans_brelse(tp, bp); 706 - return XFS_ERROR(ENOENT); 706 + return -ENOENT; 707 707 } 708 708 } 709 709 /* ··· 751 751 * No match, release the buffer and return ENOENT. 752 752 */ 753 753 xfs_trans_brelse(tp, bp); 754 - return XFS_ERROR(ENOENT); 754 + return -ENOENT; 755 755 } 756 756 757 757 /* ··· 1091 1091 */ 1092 1092 if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { 1093 1093 ASSERT(XFS_FORCED_SHUTDOWN(mp)); 1094 - return XFS_ERROR(EIO); 1094 + return -EIO; 1095 1095 } 1096 1096 1097 1097 oldsfp = (xfs_dir2_sf_hdr_t *)ifp->if_u1.if_data;
+5 -5
fs/xfs/xfs_dir2_data.c fs/xfs/libxfs/xfs_dir2_data.c
··· 100 100 break; 101 101 default: 102 102 XFS_ERROR_REPORT("Bad Magic", XFS_ERRLEVEL_LOW, mp); 103 - return EFSCORRUPTED; 103 + return -EFSCORRUPTED; 104 104 } 105 105 106 106 /* ··· 256 256 xfs_dir3_data_verify(bp); 257 257 return; 258 258 default: 259 - xfs_buf_ioerror(bp, EFSCORRUPTED); 259 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 260 260 xfs_verifier_error(bp); 261 261 break; 262 262 } ··· 270 270 271 271 if (xfs_sb_version_hascrc(&mp->m_sb) && 272 272 !xfs_buf_verify_cksum(bp, XFS_DIR3_DATA_CRC_OFF)) 273 - xfs_buf_ioerror(bp, EFSBADCRC); 273 + xfs_buf_ioerror(bp, -EFSBADCRC); 274 274 else if (!xfs_dir3_data_verify(bp)) 275 - xfs_buf_ioerror(bp, EFSCORRUPTED); 275 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 276 276 277 277 if (bp->b_error) 278 278 xfs_verifier_error(bp); ··· 287 287 struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; 288 288 289 289 if (!xfs_dir3_data_verify(bp)) { 290 - xfs_buf_ioerror(bp, EFSCORRUPTED); 290 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 291 291 xfs_verifier_error(bp); 292 292 return; 293 293 }
+12 -12
fs/xfs/xfs_dir2_leaf.c fs/xfs/libxfs/xfs_dir2_leaf.c
··· 183 183 184 184 if (xfs_sb_version_hascrc(&mp->m_sb) && 185 185 !xfs_buf_verify_cksum(bp, XFS_DIR3_LEAF_CRC_OFF)) 186 - xfs_buf_ioerror(bp, EFSBADCRC); 186 + xfs_buf_ioerror(bp, -EFSBADCRC); 187 187 else if (!xfs_dir3_leaf_verify(bp, magic)) 188 - xfs_buf_ioerror(bp, EFSCORRUPTED); 188 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 189 189 190 190 if (bp->b_error) 191 191 xfs_verifier_error(bp); ··· 201 201 struct xfs_dir3_leaf_hdr *hdr3 = bp->b_addr; 202 202 203 203 if (!xfs_dir3_leaf_verify(bp, magic)) { 204 - xfs_buf_ioerror(bp, EFSCORRUPTED); 204 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 205 205 xfs_verifier_error(bp); 206 206 return; 207 207 } ··· 731 731 if ((args->op_flags & XFS_DA_OP_JUSTCHECK) || 732 732 args->total == 0) { 733 733 xfs_trans_brelse(tp, lbp); 734 - return XFS_ERROR(ENOSPC); 734 + return -ENOSPC; 735 735 } 736 736 /* 737 737 * Convert to node form. ··· 755 755 */ 756 756 if (args->op_flags & XFS_DA_OP_JUSTCHECK) { 757 757 xfs_trans_brelse(tp, lbp); 758 - return use_block == -1 ? XFS_ERROR(ENOSPC) : 0; 758 + return use_block == -1 ? -ENOSPC : 0; 759 759 } 760 760 /* 761 761 * If no allocations are allowed, return now before we've ··· 763 763 */ 764 764 if (args->total == 0 && use_block == -1) { 765 765 xfs_trans_brelse(tp, lbp); 766 - return XFS_ERROR(ENOSPC); 766 + return -ENOSPC; 767 767 } 768 768 /* 769 769 * Need to compact the leaf entries, removing stale ones. ··· 1198 1198 error = xfs_dir_cilookup_result(args, dep->name, dep->namelen); 1199 1199 xfs_trans_brelse(tp, dbp); 1200 1200 xfs_trans_brelse(tp, lbp); 1201 - return XFS_ERROR(error); 1201 + return error; 1202 1202 } 1203 1203 1204 1204 /* ··· 1327 1327 return 0; 1328 1328 } 1329 1329 /* 1330 - * No match found, return ENOENT. 1330 + * No match found, return -ENOENT. 
1331 1331 */ 1332 1332 ASSERT(cidb == -1); 1333 1333 if (dbp) 1334 1334 xfs_trans_brelse(tp, dbp); 1335 1335 xfs_trans_brelse(tp, lbp); 1336 - return XFS_ERROR(ENOENT); 1336 + return -ENOENT; 1337 1337 } 1338 1338 1339 1339 /* ··· 1440 1440 * Just go on, returning success, leaving the 1441 1441 * empty block in place. 1442 1442 */ 1443 - if (error == ENOSPC && args->total == 0) 1443 + if (error == -ENOSPC && args->total == 0) 1444 1444 error = 0; 1445 1445 xfs_dir3_leaf_check(dp, lbp); 1446 1446 return error; ··· 1641 1641 * Get rid of the data block. 1642 1642 */ 1643 1643 if ((error = xfs_dir2_shrink_inode(args, db, dbp))) { 1644 - ASSERT(error != ENOSPC); 1644 + ASSERT(error != -ENOSPC); 1645 1645 xfs_trans_brelse(tp, dbp); 1646 1646 return error; 1647 1647 } ··· 1815 1815 * punching out the middle of an extent, and this is an 1816 1816 * isolated block. 1817 1817 */ 1818 - ASSERT(error != ENOSPC); 1818 + ASSERT(error != -ENOSPC); 1819 1819 return error; 1820 1820 } 1821 1821 fbp = NULL;
+20 -20
fs/xfs/xfs_dir2_node.c fs/xfs/libxfs/xfs_dir2_node.c
··· 117 117 118 118 if (xfs_sb_version_hascrc(&mp->m_sb) && 119 119 !xfs_buf_verify_cksum(bp, XFS_DIR3_FREE_CRC_OFF)) 120 - xfs_buf_ioerror(bp, EFSBADCRC); 120 + xfs_buf_ioerror(bp, -EFSBADCRC); 121 121 else if (!xfs_dir3_free_verify(bp)) 122 - xfs_buf_ioerror(bp, EFSCORRUPTED); 122 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 123 123 124 124 if (bp->b_error) 125 125 xfs_verifier_error(bp); ··· 134 134 struct xfs_dir3_blk_hdr *hdr3 = bp->b_addr; 135 135 136 136 if (!xfs_dir3_free_verify(bp)) { 137 - xfs_buf_ioerror(bp, EFSCORRUPTED); 137 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 138 138 xfs_verifier_error(bp); 139 139 return; 140 140 } ··· 406 406 * into other peoples memory 407 407 */ 408 408 if (index < 0) 409 - return XFS_ERROR(EFSCORRUPTED); 409 + return -EFSCORRUPTED; 410 410 411 411 /* 412 412 * If there are already the maximum number of leaf entries in ··· 417 417 418 418 if (leafhdr.count == dp->d_ops->leaf_max_ents(args->geo)) { 419 419 if (!leafhdr.stale) 420 - return XFS_ERROR(ENOSPC); 420 + return -ENOSPC; 421 421 compact = leafhdr.stale > 1; 422 422 } else 423 423 compact = 0; ··· 629 629 XFS_ERRLEVEL_LOW, mp); 630 630 if (curfdb != newfdb) 631 631 xfs_trans_brelse(tp, curbp); 632 - return XFS_ERROR(EFSCORRUPTED); 632 + return -EFSCORRUPTED; 633 633 } 634 634 curfdb = newfdb; 635 635 if (be16_to_cpu(bests[fi]) >= length) ··· 660 660 * Return the index, that will be the insertion point. 
661 661 */ 662 662 *indexp = index; 663 - return XFS_ERROR(ENOENT); 663 + return -ENOENT; 664 664 } 665 665 666 666 /* ··· 789 789 curbp->b_ops = &xfs_dir3_data_buf_ops; 790 790 xfs_trans_buf_set_type(tp, curbp, XFS_BLFT_DIR_DATA_BUF); 791 791 if (cmp == XFS_CMP_EXACT) 792 - return XFS_ERROR(EEXIST); 792 + return -EEXIST; 793 793 } 794 794 } 795 795 ASSERT(index == leafhdr.count || (args->op_flags & XFS_DA_OP_OKNOENT)); ··· 812 812 state->extravalid = 0; 813 813 } 814 814 *indexp = index; 815 - return XFS_ERROR(ENOENT); 815 + return -ENOENT; 816 816 } 817 817 818 818 /* ··· 1133 1133 if (error == 0) { 1134 1134 fbp = NULL; 1135 1135 logfree = 0; 1136 - } else if (error != ENOSPC || args->total != 0) 1136 + } else if (error != -ENOSPC || args->total != 0) 1137 1137 return error; 1138 1138 /* 1139 1139 * It's possible to get ENOSPC if there is no ··· 1287 1287 * In this case just drop the buffer and some one else 1288 1288 * will eventually get rid of the empty block. 1289 1289 */ 1290 - else if (!(error == ENOSPC && args->total == 0)) 1290 + else if (!(error == -ENOSPC && args->total == 0)) 1291 1291 return error; 1292 1292 } 1293 1293 /* ··· 1599 1599 error = xfs_da3_node_lookup_int(state, &rval); 1600 1600 if (error) 1601 1601 rval = error; 1602 - if (rval != ENOENT) { 1602 + if (rval != -ENOENT) { 1603 1603 goto done; 1604 1604 } 1605 1605 /* ··· 1628 1628 * It didn't work, we need to split the leaf block. 1629 1629 */ 1630 1630 if (args->total == 0) { 1631 - ASSERT(rval == ENOSPC); 1631 + ASSERT(rval == -ENOSPC); 1632 1632 goto done; 1633 1633 } 1634 1634 /* ··· 1815 1815 * Not allowed to allocate, return failure. 1816 1816 */ 1817 1817 if ((args->op_flags & XFS_DA_OP_JUSTCHECK) || args->total == 0) 1818 - return XFS_ERROR(ENOSPC); 1818 + return -ENOSPC; 1819 1819 1820 1820 /* 1821 1821 * Allocate and initialize the new data block. 
··· 1876 1876 } 1877 1877 XFS_ERROR_REPORT("xfs_dir2_node_addname_int", 1878 1878 XFS_ERRLEVEL_LOW, mp); 1879 - return XFS_ERROR(EFSCORRUPTED); 1879 + return -EFSCORRUPTED; 1880 1880 } 1881 1881 1882 1882 /* ··· 2042 2042 error = xfs_da3_node_lookup_int(state, &rval); 2043 2043 if (error) 2044 2044 rval = error; 2045 - else if (rval == ENOENT && args->cmpresult == XFS_CMP_CASE) { 2046 - /* If a CI match, dup the actual name and return EEXIST */ 2045 + else if (rval == -ENOENT && args->cmpresult == XFS_CMP_CASE) { 2046 + /* If a CI match, dup the actual name and return -EEXIST */ 2047 2047 xfs_dir2_data_entry_t *dep; 2048 2048 2049 2049 dep = (xfs_dir2_data_entry_t *) ··· 2096 2096 goto out_free; 2097 2097 2098 2098 /* Didn't find it, upper layer screwed up. */ 2099 - if (rval != EEXIST) { 2099 + if (rval != -EEXIST) { 2100 2100 error = rval; 2101 2101 goto out_free; 2102 2102 } ··· 2169 2169 * It should be found, since the vnodeops layer has looked it up 2170 2170 * and locked it. But paranoia is good. 2171 2171 */ 2172 - if (rval == EEXIST) { 2172 + if (rval == -EEXIST) { 2173 2173 struct xfs_dir2_leaf_entry *ents; 2174 2174 /* 2175 2175 * Find the leaf entry. ··· 2272 2272 * space reservation, when breaking up an extent into two 2273 2273 * pieces. This is the last block of an extent. 2274 2274 */ 2275 - ASSERT(error != ENOSPC); 2275 + ASSERT(error != -ENOSPC); 2276 2276 xfs_trans_brelse(tp, bp); 2277 2277 return error; 2278 2278 }
fs/xfs/xfs_dir2_priv.h fs/xfs/libxfs/xfs_dir2_priv.h
+2 -2
fs/xfs/xfs_dir2_readdir.c
··· 95 95 */ 96 96 if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { 97 97 ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); 98 - return XFS_ERROR(EIO); 98 + return -EIO; 99 99 } 100 100 101 101 ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); ··· 677 677 trace_xfs_readdir(dp); 678 678 679 679 if (XFS_FORCED_SHUTDOWN(dp->i_mount)) 680 - return XFS_ERROR(EIO); 680 + return -EIO; 681 681 682 682 ASSERT(S_ISDIR(dp->i_d.di_mode)); 683 683 XFS_STATS_INC(xs_dir_getdents);
+23 -52
fs/xfs/xfs_dir2_sf.c fs/xfs/libxfs/xfs_dir2_sf.c
··· 51 51 #else 52 52 #define xfs_dir2_sf_check(args) 53 53 #endif /* DEBUG */ 54 - #if XFS_BIG_INUMS 54 + 55 55 static void xfs_dir2_sf_toino4(xfs_da_args_t *args); 56 56 static void xfs_dir2_sf_toino8(xfs_da_args_t *args); 57 - #endif /* XFS_BIG_INUMS */ 58 57 59 58 /* 60 59 * Given a block directory (dp/block), calculate its size as a shortform (sf) ··· 116 117 isdotdot = 117 118 dep->namelen == 2 && 118 119 dep->name[0] == '.' && dep->name[1] == '.'; 119 - #if XFS_BIG_INUMS 120 + 120 121 if (!isdot) 121 122 i8count += be64_to_cpu(dep->inumber) > XFS_DIR2_MAX_SHORT_INUM; 122 - #endif 123 + 123 124 /* take into account the file type field */ 124 125 if (!isdot && !isdotdot) { 125 126 count++; ··· 250 251 logflags = XFS_ILOG_CORE; 251 252 error = xfs_dir2_shrink_inode(args, args->geo->datablk, bp); 252 253 if (error) { 253 - ASSERT(error != ENOSPC); 254 + ASSERT(error != -ENOSPC); 254 255 goto out; 255 256 } 256 257 ··· 298 299 299 300 trace_xfs_dir2_sf_addname(args); 300 301 301 - ASSERT(xfs_dir2_sf_lookup(args) == ENOENT); 302 + ASSERT(xfs_dir2_sf_lookup(args) == -ENOENT); 302 303 dp = args->dp; 303 304 ASSERT(dp->i_df.if_flags & XFS_IFINLINE); 304 305 /* ··· 306 307 */ 307 308 if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { 308 309 ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); 309 - return XFS_ERROR(EIO); 310 + return -EIO; 310 311 } 311 312 ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); 312 313 ASSERT(dp->i_df.if_u1.if_data != NULL); ··· 317 318 */ 318 319 incr_isize = dp->d_ops->sf_entsize(sfp, args->namelen); 319 320 objchange = 0; 320 - #if XFS_BIG_INUMS 321 + 321 322 /* 322 323 * Do we have to change to 8 byte inodes? 323 324 */ ··· 331 332 (uint)sizeof(xfs_dir2_ino4_t)); 332 333 objchange = 1; 333 334 } 334 - #endif 335 + 335 336 new_isize = (int)dp->i_d.di_size + incr_isize; 336 337 /* 337 338 * Won't fit as shortform any more (due to size), ··· 344 345 * Just checking or no space reservation, it doesn't fit. 
345 346 */ 346 347 if ((args->op_flags & XFS_DA_OP_JUSTCHECK) || args->total == 0) 347 - return XFS_ERROR(ENOSPC); 348 + return -ENOSPC; 348 349 /* 349 350 * Convert to block form then add the name. 350 351 */ ··· 369 370 */ 370 371 else { 371 372 ASSERT(pick == 2); 372 - #if XFS_BIG_INUMS 373 373 if (objchange) 374 374 xfs_dir2_sf_toino8(args); 375 - #endif 376 375 xfs_dir2_sf_addname_hard(args, objchange, new_isize); 377 376 } 378 377 xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); ··· 422 425 * Update the header and inode. 423 426 */ 424 427 sfp->count++; 425 - #if XFS_BIG_INUMS 426 428 if (args->inumber > XFS_DIR2_MAX_SHORT_INUM) 427 429 sfp->i8count++; 428 - #endif 429 430 dp->i_d.di_size = new_isize; 430 431 xfs_dir2_sf_check(args); 431 432 } ··· 511 516 dp->d_ops->sf_put_ino(sfp, sfep, args->inumber); 512 517 dp->d_ops->sf_put_ftype(sfep, args->filetype); 513 518 sfp->count++; 514 - #if XFS_BIG_INUMS 515 519 if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && !objchange) 516 520 sfp->i8count++; 517 - #endif 518 521 /* 519 522 * If there's more left to copy, do that. 520 523 */ ··· 586 593 /* 587 594 * If changing the inode number size, do it the hard way. 588 595 */ 589 - #if XFS_BIG_INUMS 590 - if (objchange) { 596 + if (objchange) 591 597 return 2; 592 - } 593 - #else 594 - ASSERT(objchange == 0); 595 - #endif 596 598 /* 597 599 * If it won't fit at the end then do it the hard way (use the hole). 
598 600 */ ··· 638 650 ASSERT(dp->d_ops->sf_get_ftype(sfep) < XFS_DIR3_FT_MAX); 639 651 } 640 652 ASSERT(i8count == sfp->i8count); 641 - ASSERT(XFS_BIG_INUMS || i8count == 0); 642 653 ASSERT((char *)sfep - (char *)sfp == dp->i_d.di_size); 643 654 ASSERT(offset + 644 655 (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) + ··· 725 738 */ 726 739 if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { 727 740 ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); 728 - return XFS_ERROR(EIO); 741 + return -EIO; 729 742 } 730 743 ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); 731 744 ASSERT(dp->i_df.if_u1.if_data != NULL); ··· 738 751 args->inumber = dp->i_ino; 739 752 args->cmpresult = XFS_CMP_EXACT; 740 753 args->filetype = XFS_DIR3_FT_DIR; 741 - return XFS_ERROR(EEXIST); 754 + return -EEXIST; 742 755 } 743 756 /* 744 757 * Special case for .. ··· 748 761 args->inumber = dp->d_ops->sf_get_parent_ino(sfp); 749 762 args->cmpresult = XFS_CMP_EXACT; 750 763 args->filetype = XFS_DIR3_FT_DIR; 751 - return XFS_ERROR(EEXIST); 764 + return -EEXIST; 752 765 } 753 766 /* 754 767 * Loop over all the entries trying to match ours. ··· 768 781 args->inumber = dp->d_ops->sf_get_ino(sfp, sfep); 769 782 args->filetype = dp->d_ops->sf_get_ftype(sfep); 770 783 if (cmp == XFS_CMP_EXACT) 771 - return XFS_ERROR(EEXIST); 784 + return -EEXIST; 772 785 ci_sfep = sfep; 773 786 } 774 787 } 775 788 ASSERT(args->op_flags & XFS_DA_OP_OKNOENT); 776 789 /* 777 790 * Here, we can only be doing a lookup (not a rename or replace). 778 - * If a case-insensitive match was not found, return ENOENT. 791 + * If a case-insensitive match was not found, return -ENOENT. 
779 792 */ 780 793 if (!ci_sfep) 781 - return XFS_ERROR(ENOENT); 794 + return -ENOENT; 782 795 /* otherwise process the CI match as required by the caller */ 783 796 error = xfs_dir_cilookup_result(args, ci_sfep->name, ci_sfep->namelen); 784 - return XFS_ERROR(error); 797 + return error; 785 798 } 786 799 787 800 /* ··· 811 824 */ 812 825 if (oldsize < offsetof(xfs_dir2_sf_hdr_t, parent)) { 813 826 ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); 814 - return XFS_ERROR(EIO); 827 + return -EIO; 815 828 } 816 829 ASSERT(dp->i_df.if_bytes == oldsize); 817 830 ASSERT(dp->i_df.if_u1.if_data != NULL); ··· 834 847 * Didn't find it. 835 848 */ 836 849 if (i == sfp->count) 837 - return XFS_ERROR(ENOENT); 850 + return -ENOENT; 838 851 /* 839 852 * Calculate sizes. 840 853 */ ··· 857 870 */ 858 871 xfs_idata_realloc(dp, newsize - oldsize, XFS_DATA_FORK); 859 872 sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; 860 - #if XFS_BIG_INUMS 861 873 /* 862 874 * Are we changing inode number size? 863 875 */ ··· 866 880 else 867 881 sfp->i8count--; 868 882 } 869 - #endif 870 883 xfs_dir2_sf_check(args); 871 884 xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); 872 885 return 0; ··· 880 895 { 881 896 xfs_inode_t *dp; /* incore directory inode */ 882 897 int i; /* entry index */ 883 - #if XFS_BIG_INUMS || defined(DEBUG) 884 898 xfs_ino_t ino=0; /* entry old inode number */ 885 - #endif 886 - #if XFS_BIG_INUMS 887 899 int i8elevated; /* sf_toino8 set i8count=1 */ 888 - #endif 889 900 xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */ 890 901 xfs_dir2_sf_hdr_t *sfp; /* shortform structure */ 891 902 ··· 895 914 */ 896 915 if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { 897 916 ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); 898 - return XFS_ERROR(EIO); 917 + return -EIO; 899 918 } 900 919 ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); 901 920 ASSERT(dp->i_df.if_u1.if_data != NULL); 902 921 sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; 903 922 
ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->i8count)); 904 - #if XFS_BIG_INUMS 923 + 905 924 /* 906 925 * New inode number is large, and need to convert to 8-byte inodes. 907 926 */ ··· 932 951 sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; 933 952 } else 934 953 i8elevated = 0; 935 - #endif 954 + 936 955 ASSERT(args->namelen != 1 || args->name[0] != '.'); 937 956 /* 938 957 * Replace ..'s entry. 939 958 */ 940 959 if (args->namelen == 2 && 941 960 args->name[0] == '.' && args->name[1] == '.') { 942 - #if XFS_BIG_INUMS || defined(DEBUG) 943 961 ino = dp->d_ops->sf_get_parent_ino(sfp); 944 962 ASSERT(args->inumber != ino); 945 - #endif 946 963 dp->d_ops->sf_put_parent_ino(sfp, args->inumber); 947 964 } 948 965 /* ··· 951 972 i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep)) { 952 973 if (xfs_da_compname(args, sfep->name, sfep->namelen) == 953 974 XFS_CMP_EXACT) { 954 - #if XFS_BIG_INUMS || defined(DEBUG) 955 975 ino = dp->d_ops->sf_get_ino(sfp, sfep); 956 976 ASSERT(args->inumber != ino); 957 - #endif 958 977 dp->d_ops->sf_put_ino(sfp, sfep, args->inumber); 959 978 dp->d_ops->sf_put_ftype(sfep, args->filetype); 960 979 break; ··· 963 986 */ 964 987 if (i == sfp->count) { 965 988 ASSERT(args->op_flags & XFS_DA_OP_OKNOENT); 966 - #if XFS_BIG_INUMS 967 989 if (i8elevated) 968 990 xfs_dir2_sf_toino4(args); 969 - #endif 970 - return XFS_ERROR(ENOENT); 991 + return -ENOENT; 971 992 } 972 993 } 973 - #if XFS_BIG_INUMS 974 994 /* 975 995 * See if the old number was large, the new number is small. 976 996 */ ··· 994 1020 if (!i8elevated) 995 1021 sfp->i8count++; 996 1022 } 997 - #endif 998 1023 xfs_dir2_sf_check(args); 999 1024 xfs_trans_log_inode(args->trans, dp, XFS_ILOG_DDATA); 1000 1025 return 0; 1001 1026 } 1002 1027 1003 - #if XFS_BIG_INUMS 1004 1028 /* 1005 1029 * Convert from 8-byte inode numbers to 4-byte inode numbers. 1006 1030 * The last 8-byte inode number is gone, but the count is still 1. 
··· 1153 1181 dp->i_d.di_size = newsize; 1154 1182 xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA); 1155 1183 } 1156 - #endif /* XFS_BIG_INUMS */
+9 -9
fs/xfs/xfs_discard.c
··· 124 124 } 125 125 126 126 trace_xfs_discard_extent(mp, agno, fbno, flen); 127 - error = -blkdev_issue_discard(bdev, dbno, dlen, GFP_NOFS, 0); 127 + error = blkdev_issue_discard(bdev, dbno, dlen, GFP_NOFS, 0); 128 128 if (error) 129 129 goto out_del_cursor; 130 130 *blocks_trimmed += flen; ··· 166 166 int error, last_error = 0; 167 167 168 168 if (!capable(CAP_SYS_ADMIN)) 169 - return -XFS_ERROR(EPERM); 169 + return -EPERM; 170 170 if (!blk_queue_discard(q)) 171 - return -XFS_ERROR(EOPNOTSUPP); 171 + return -EOPNOTSUPP; 172 172 if (copy_from_user(&range, urange, sizeof(range))) 173 - return -XFS_ERROR(EFAULT); 173 + return -EFAULT; 174 174 175 175 /* 176 176 * Truncating down the len isn't actually quite correct, but using ··· 182 182 if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) || 183 183 range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)) || 184 184 range.len < mp->m_sb.sb_blocksize) 185 - return -XFS_ERROR(EINVAL); 185 + return -EINVAL; 186 186 187 187 start = BTOBB(range.start); 188 188 end = start + BTOBBT(range.len) - 1; ··· 195 195 end_agno = xfs_daddr_to_agno(mp, end); 196 196 197 197 for (agno = start_agno; agno <= end_agno; agno++) { 198 - error = -xfs_trim_extents(mp, agno, start, end, minlen, 198 + error = xfs_trim_extents(mp, agno, start, end, minlen, 199 199 &blocks_trimmed); 200 200 if (error) 201 201 last_error = error; ··· 206 206 207 207 range.len = XFS_FSB_TO_B(mp, blocks_trimmed); 208 208 if (copy_to_user(urange, &range, sizeof(range))) 209 - return -XFS_ERROR(EFAULT); 209 + return -EFAULT; 210 210 return 0; 211 211 } 212 212 ··· 222 222 trace_xfs_discard_extent(mp, busyp->agno, busyp->bno, 223 223 busyp->length); 224 224 225 - error = -blkdev_issue_discard(mp->m_ddev_targp->bt_bdev, 225 + error = blkdev_issue_discard(mp->m_ddev_targp->bt_bdev, 226 226 XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno), 227 227 XFS_FSB_TO_BB(mp, busyp->length), 228 228 GFP_NOFS, 0); 229 - if (error && error != EOPNOTSUPP) { 229 + if (error && 
error != -EOPNOTSUPP) { 230 230 xfs_info(mp, 231 231 "discard failed for extent [0x%llu,%u], error %d", 232 232 (unsigned long long)busyp->bno,
+21 -20
fs/xfs/xfs_dquot.c
··· 327 327 */ 328 328 if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) { 329 329 xfs_iunlock(quotip, XFS_ILOCK_EXCL); 330 - return (ESRCH); 330 + return -ESRCH; 331 331 } 332 332 333 333 xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL); ··· 354 354 mp->m_quotainfo->qi_dqchunklen, 355 355 0); 356 356 if (!bp) { 357 - error = ENOMEM; 357 + error = -ENOMEM; 358 358 goto error1; 359 359 } 360 360 bp->b_ops = &xfs_dquot_buf_ops; ··· 400 400 error0: 401 401 xfs_iunlock(quotip, XFS_ILOCK_EXCL); 402 402 403 - return (error); 403 + return error; 404 404 } 405 405 406 406 STATIC int ··· 426 426 427 427 if (error) { 428 428 ASSERT(*bpp == NULL); 429 - return XFS_ERROR(error); 429 + return error; 430 430 } 431 431 (*bpp)->b_ops = &xfs_dquot_buf_ops; 432 432 ··· 442 442 if (error) { 443 443 /* repair failed, we're screwed */ 444 444 xfs_trans_brelse(tp, *bpp); 445 - return XFS_ERROR(EIO); 445 + return -EIO; 446 446 } 447 447 } 448 448 ··· 480 480 * didn't have the quota inode lock. 481 481 */ 482 482 xfs_iunlock(quotip, lock_mode); 483 - return ESRCH; 483 + return -ESRCH; 484 484 } 485 485 486 486 /* ··· 508 508 * We don't allocate unless we're asked to 509 509 */ 510 510 if (!(flags & XFS_QMOPT_DQALLOC)) 511 - return ENOENT; 511 + return -ENOENT; 512 512 513 513 ASSERT(tp); 514 514 error = xfs_qm_dqalloc(tpp, mp, dqp, quotip, ··· 530 530 mp->m_quotainfo->qi_dqchunklen, 531 531 0, &bp, &xfs_dquot_buf_ops); 532 532 533 - if (error == EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) { 533 + if (error == -EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) { 534 534 xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff * 535 535 mp->m_quotainfo->qi_dqperchunk; 536 536 ASSERT(bp == NULL); ··· 539 539 540 540 if (error) { 541 541 ASSERT(bp == NULL); 542 - return XFS_ERROR(error); 542 + return error; 543 543 } 544 544 } 545 545 ··· 547 547 *O_bpp = bp; 548 548 *O_ddpp = bp->b_addr + dqp->q_bufoffset; 549 549 550 - return (0); 550 + return 0; 551 551 } 552 552 553 553 ··· 715 715 if ((! 
XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) || 716 716 (! XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) || 717 717 (! XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) { 718 - return (ESRCH); 718 + return -ESRCH; 719 719 } 720 720 721 721 #ifdef DEBUG ··· 723 723 if ((xfs_dqerror_target == mp->m_ddev_targp) && 724 724 (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) { 725 725 xfs_debug(mp, "Returning error in dqget"); 726 - return (EIO); 726 + return -EIO; 727 727 } 728 728 } 729 729 ··· 796 796 } else { 797 797 /* inode stays locked on return */ 798 798 xfs_qm_dqdestroy(dqp); 799 - return XFS_ERROR(ESRCH); 799 + return -ESRCH; 800 800 } 801 801 } 802 802 803 803 mutex_lock(&qi->qi_tree_lock); 804 - error = -radix_tree_insert(tree, id, dqp); 804 + error = radix_tree_insert(tree, id, dqp); 805 805 if (unlikely(error)) { 806 - WARN_ON(error != EEXIST); 806 + WARN_ON(error != -EEXIST); 807 807 808 808 /* 809 809 * Duplicate found. Just throw away the new dquot and start ··· 829 829 ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL)); 830 830 trace_xfs_dqget_miss(dqp); 831 831 *O_dqpp = dqp; 832 - return (0); 832 + return 0; 833 833 } 834 834 835 835 /* ··· 966 966 SHUTDOWN_CORRUPT_INCORE); 967 967 else 968 968 spin_unlock(&mp->m_ail->xa_lock); 969 - error = XFS_ERROR(EIO); 969 + error = -EIO; 970 970 goto out_unlock; 971 971 } 972 972 ··· 974 974 * Get the buffer containing the on-disk dquot 975 975 */ 976 976 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno, 977 - mp->m_quotainfo->qi_dqchunklen, 0, &bp, NULL); 977 + mp->m_quotainfo->qi_dqchunklen, 0, &bp, 978 + &xfs_dquot_buf_ops); 978 979 if (error) 979 980 goto out_unlock; 980 981 ··· 993 992 xfs_buf_relse(bp); 994 993 xfs_dqfunlock(dqp); 995 994 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 996 - return XFS_ERROR(EIO); 995 + return -EIO; 997 996 } 998 997 999 998 /* This is the only portion of data that needs to persist */ ··· 1046 1045 1047 1046 out_unlock: 1048 1047 xfs_dqfunlock(dqp); 1049 - 
return XFS_ERROR(EIO); 1048 + return -EIO; 1050 1049 } 1051 1050 1052 1051 /*
+15
fs/xfs/xfs_dquot.h
··· 139 139 } 140 140 } 141 141 142 + /* 143 + * Check whether a dquot is under low free space conditions. We assume the quota 144 + * is enabled and enforced. 145 + */ 146 + static inline bool xfs_dquot_lowsp(struct xfs_dquot *dqp) 147 + { 148 + int64_t freesp; 149 + 150 + freesp = be64_to_cpu(dqp->q_core.d_blk_hardlimit) - dqp->q_res_bcount; 151 + if (freesp < dqp->q_low_space[XFS_QLOWSP_1_PCNT]) 152 + return true; 153 + 154 + return false; 155 + } 156 + 142 157 #define XFS_DQ_IS_LOCKED(dqp) (mutex_is_locked(&((dqp)->q_qlock))) 143 158 #define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY) 144 159 #define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER)
+3 -3
fs/xfs/xfs_dquot_buf.c fs/xfs/libxfs/xfs_dquot_buf.c
··· 257 257 struct xfs_mount *mp = bp->b_target->bt_mount; 258 258 259 259 if (!xfs_dquot_buf_verify_crc(mp, bp)) 260 - xfs_buf_ioerror(bp, EFSBADCRC); 260 + xfs_buf_ioerror(bp, -EFSBADCRC); 261 261 else if (!xfs_dquot_buf_verify(mp, bp)) 262 - xfs_buf_ioerror(bp, EFSCORRUPTED); 262 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 263 263 264 264 if (bp->b_error) 265 265 xfs_verifier_error(bp); ··· 277 277 struct xfs_mount *mp = bp->b_target->bt_mount; 278 278 279 279 if (!xfs_dquot_buf_verify(mp, bp)) { 280 - xfs_buf_ioerror(bp, EFSCORRUPTED); 280 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 281 281 xfs_verifier_error(bp); 282 282 return; 283 283 }
+1 -24
fs/xfs/xfs_error.c
··· 27 27 28 28 #ifdef DEBUG 29 29 30 - int xfs_etrap[XFS_ERROR_NTRAP] = { 31 - 0, 32 - }; 33 - 34 - int 35 - xfs_error_trap(int e) 36 - { 37 - int i; 38 - 39 - if (!e) 40 - return 0; 41 - for (i = 0; i < XFS_ERROR_NTRAP; i++) { 42 - if (xfs_etrap[i] == 0) 43 - break; 44 - if (e != xfs_etrap[i]) 45 - continue; 46 - xfs_notice(NULL, "%s: error %d", __func__, e); 47 - BUG(); 48 - break; 49 - } 50 - return e; 51 - } 52 - 53 30 int xfs_etest[XFS_NUM_INJECT_ERROR]; 54 31 int64_t xfs_etest_fsid[XFS_NUM_INJECT_ERROR]; 55 32 char * xfs_etest_fsname[XFS_NUM_INJECT_ERROR]; ··· 167 190 struct xfs_mount *mp = bp->b_target->bt_mount; 168 191 169 192 xfs_alert(mp, "Metadata %s detected at %pF, block 0x%llx", 170 - bp->b_error == EFSBADCRC ? "CRC error" : "corruption", 193 + bp->b_error == -EFSBADCRC ? "CRC error" : "corruption", 171 194 __return_address, bp->b_bn); 172 195 173 196 xfs_alert(mp, "Unmount and run xfs_repair");
+2 -11
fs/xfs/xfs_error.h
··· 18 18 #ifndef __XFS_ERROR_H__ 19 19 #define __XFS_ERROR_H__ 20 20 21 - #ifdef DEBUG 22 - #define XFS_ERROR_NTRAP 10 23 - extern int xfs_etrap[XFS_ERROR_NTRAP]; 24 - extern int xfs_error_trap(int); 25 - #define XFS_ERROR(e) xfs_error_trap(e) 26 - #else 27 - #define XFS_ERROR(e) (e) 28 - #endif 29 - 30 21 struct xfs_mount; 31 22 32 23 extern void xfs_error_report(const char *tag, int level, struct xfs_mount *mp, ··· 47 56 if (unlikely(!fs_is_ok)) { \ 48 57 XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_GOTO", \ 49 58 XFS_ERRLEVEL_LOW, NULL); \ 50 - error = XFS_ERROR(EFSCORRUPTED); \ 59 + error = -EFSCORRUPTED; \ 51 60 goto l; \ 52 61 } \ 53 62 } ··· 59 68 if (unlikely(!fs_is_ok)) { \ 60 69 XFS_ERROR_REPORT("XFS_WANT_CORRUPTED_RETURN", \ 61 70 XFS_ERRLEVEL_LOW, NULL); \ 62 - return XFS_ERROR(EFSCORRUPTED); \ 71 + return -EFSCORRUPTED; \ 63 72 } \ 64 73 } 65 74
+5 -5
fs/xfs/xfs_export.c
··· 147 147 * We don't use ESTALE directly down the chain to not 148 148 * confuse applications using bulkstat that expect EINVAL. 149 149 */ 150 - if (error == EINVAL || error == ENOENT) 151 - error = ESTALE; 152 - return ERR_PTR(-error); 150 + if (error == -EINVAL || error == -ENOENT) 151 + error = -ESTALE; 152 + return ERR_PTR(error); 153 153 } 154 154 155 155 if (ip->i_d.di_gen != generation) { ··· 217 217 218 218 error = xfs_lookup(XFS_I(child->d_inode), &xfs_name_dotdot, &cip, NULL); 219 219 if (unlikely(error)) 220 - return ERR_PTR(-error); 220 + return ERR_PTR(error); 221 221 222 222 return d_obtain_alias(VFS_I(cip)); 223 223 } ··· 237 237 238 238 if (!lsn) 239 239 return 0; 240 - return -_xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL); 240 + return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL); 241 241 } 242 242 243 243 const struct export_operations xfs_export_operations = {
+1 -1
fs/xfs/xfs_extfree_item.c
··· 298 298 } 299 299 return 0; 300 300 } 301 - return EFSCORRUPTED; 301 + return -EFSCORRUPTED; 302 302 } 303 303 304 304 /*
+45 -30
fs/xfs/xfs_file.c
··· 38 38 #include "xfs_trace.h" 39 39 #include "xfs_log.h" 40 40 #include "xfs_dinode.h" 41 + #include "xfs_icache.h" 41 42 42 43 #include <linux/aio.h> 43 44 #include <linux/dcache.h> ··· 156 155 157 156 if (!lsn) 158 157 return 0; 159 - return -_xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL); 158 + return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL); 160 159 } 161 160 162 161 STATIC int ··· 180 179 return error; 181 180 182 181 if (XFS_FORCED_SHUTDOWN(mp)) 183 - return -XFS_ERROR(EIO); 182 + return -EIO; 184 183 185 184 xfs_iflags_clear(ip, XFS_ITRUNCATED); 186 185 ··· 226 225 !log_flushed) 227 226 xfs_blkdev_issue_flush(mp->m_ddev_targp); 228 227 229 - return -error; 228 + return error; 230 229 } 231 230 232 231 STATIC ssize_t ··· 247 246 XFS_STATS_INC(xs_read_calls); 248 247 249 248 if (unlikely(file->f_flags & O_DIRECT)) 250 - ioflags |= IO_ISDIRECT; 249 + ioflags |= XFS_IO_ISDIRECT; 251 250 if (file->f_mode & FMODE_NOCMTIME) 252 - ioflags |= IO_INVIS; 251 + ioflags |= XFS_IO_INVIS; 253 252 254 - if (unlikely(ioflags & IO_ISDIRECT)) { 253 + if (unlikely(ioflags & XFS_IO_ISDIRECT)) { 255 254 xfs_buftarg_t *target = 256 255 XFS_IS_REALTIME_INODE(ip) ? 257 256 mp->m_rtdev_targp : mp->m_ddev_targp; ··· 259 258 if ((pos | size) & target->bt_logical_sectormask) { 260 259 if (pos == i_size_read(inode)) 261 260 return 0; 262 - return -XFS_ERROR(EINVAL); 261 + return -EINVAL; 263 262 } 264 263 } 265 264 ··· 284 283 * proceeed concurrently without serialisation. 
285 284 */ 286 285 xfs_rw_ilock(ip, XFS_IOLOCK_SHARED); 287 - if ((ioflags & IO_ISDIRECT) && inode->i_mapping->nrpages) { 286 + if ((ioflags & XFS_IO_ISDIRECT) && inode->i_mapping->nrpages) { 288 287 xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED); 289 288 xfs_rw_ilock(ip, XFS_IOLOCK_EXCL); 290 289 ··· 326 325 XFS_STATS_INC(xs_read_calls); 327 326 328 327 if (infilp->f_mode & FMODE_NOCMTIME) 329 - ioflags |= IO_INVIS; 328 + ioflags |= XFS_IO_INVIS; 330 329 331 330 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) 332 331 return -EIO; ··· 525 524 xfs_rw_ilock(ip, *iolock); 526 525 goto restart; 527 526 } 528 - error = -xfs_zero_eof(ip, *pos, i_size_read(inode)); 527 + error = xfs_zero_eof(ip, *pos, i_size_read(inode)); 529 528 if (error) 530 529 return error; 531 530 } ··· 595 594 596 595 /* DIO must be aligned to device logical sector size */ 597 596 if ((pos | count) & target->bt_logical_sectormask) 598 - return -XFS_ERROR(EINVAL); 597 + return -EINVAL; 599 598 600 599 /* "unaligned" here means not aligned to a filesystem block */ 601 600 if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask)) ··· 690 689 ret = generic_perform_write(file, from, pos); 691 690 if (likely(ret >= 0)) 692 691 iocb->ki_pos = pos + ret; 692 + 693 693 /* 694 - * If we just got an ENOSPC, try to write back all dirty inodes to 695 - * convert delalloc space to free up some of the excess reserved 696 - * metadata space. 694 + * If we hit a space limit, try to free up some lingering preallocated 695 + * space before returning an error. In the case of ENOSPC, first try to 696 + * write back all dirty inodes to free up some of the excess reserved 697 + * metadata space. This reduces the chances that the eofblocks scan 698 + * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this 699 + * also behaves as a filter to prevent too many eofblocks scans from 700 + * running at the same time. 
697 701 */ 698 - if (ret == -ENOSPC && !enospc) { 702 + if (ret == -EDQUOT && !enospc) { 703 + enospc = xfs_inode_free_quota_eofblocks(ip); 704 + if (enospc) 705 + goto write_retry; 706 + } else if (ret == -ENOSPC && !enospc) { 707 + struct xfs_eofblocks eofb = {0}; 708 + 699 709 enospc = 1; 700 710 xfs_flush_inodes(ip->i_mount); 711 + eofb.eof_scan_owner = ip->i_ino; /* for locking */ 712 + eofb.eof_flags = XFS_EOF_FLAGS_SYNC; 713 + xfs_icache_free_eofblocks(ip->i_mount, &eofb); 701 714 goto write_retry; 702 715 } 703 716 ··· 787 772 unsigned blksize_mask = (1 << inode->i_blkbits) - 1; 788 773 789 774 if (offset & blksize_mask || len & blksize_mask) { 790 - error = EINVAL; 775 + error = -EINVAL; 791 776 goto out_unlock; 792 777 } 793 778 ··· 796 781 * in which case it is effectively a truncate operation 797 782 */ 798 783 if (offset + len >= i_size_read(inode)) { 799 - error = EINVAL; 784 + error = -EINVAL; 800 785 goto out_unlock; 801 786 } 802 787 ··· 809 794 if (!(mode & FALLOC_FL_KEEP_SIZE) && 810 795 offset + len > i_size_read(inode)) { 811 796 new_size = offset + len; 812 - error = -inode_newsize_ok(inode, new_size); 797 + error = inode_newsize_ok(inode, new_size); 813 798 if (error) 814 799 goto out_unlock; 815 800 } ··· 859 844 860 845 out_unlock: 861 846 xfs_iunlock(ip, XFS_IOLOCK_EXCL); 862 - return -error; 847 + return error; 863 848 } 864 849 865 850 ··· 904 889 struct inode *inode, 905 890 struct file *filp) 906 891 { 907 - return -xfs_release(XFS_I(inode)); 892 + return xfs_release(XFS_I(inode)); 908 893 } 909 894 910 895 STATIC int ··· 933 918 934 919 error = xfs_readdir(ip, ctx, bufsize); 935 920 if (error) 936 - return -error; 921 + return error; 937 922 return 0; 938 923 } 939 924 ··· 1199 1184 1200 1185 isize = i_size_read(inode); 1201 1186 if (start >= isize) { 1202 - error = ENXIO; 1187 + error = -ENXIO; 1203 1188 goto out_unlock; 1204 1189 } 1205 1190 ··· 1221 1206 1222 1207 /* No extents at given offset, must be beyond EOF */ 1223 1208 if 
(nmap == 0) { 1224 - error = ENXIO; 1209 + error = -ENXIO; 1225 1210 goto out_unlock; 1226 1211 } 1227 1212 ··· 1252 1237 * we are reading after EOF if nothing in map[1]. 1253 1238 */ 1254 1239 if (nmap == 1) { 1255 - error = ENXIO; 1240 + error = -ENXIO; 1256 1241 goto out_unlock; 1257 1242 } 1258 1243 ··· 1265 1250 fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount; 1266 1251 start = XFS_FSB_TO_B(mp, fsbno); 1267 1252 if (start >= isize) { 1268 - error = ENXIO; 1253 + error = -ENXIO; 1269 1254 goto out_unlock; 1270 1255 } 1271 1256 } ··· 1277 1262 xfs_iunlock(ip, lock); 1278 1263 1279 1264 if (error) 1280 - return -error; 1265 + return error; 1281 1266 return offset; 1282 1267 } 1283 1268 ··· 1297 1282 int error; 1298 1283 1299 1284 if (XFS_FORCED_SHUTDOWN(mp)) 1300 - return -XFS_ERROR(EIO); 1285 + return -EIO; 1301 1286 1302 1287 lock = xfs_ilock_data_map_shared(ip); 1303 1288 1304 1289 isize = i_size_read(inode); 1305 1290 if (start >= isize) { 1306 - error = ENXIO; 1291 + error = -ENXIO; 1307 1292 goto out_unlock; 1308 1293 } 1309 1294 ··· 1322 1307 1323 1308 /* No extents at given offset, must be beyond EOF */ 1324 1309 if (nmap == 0) { 1325 - error = ENXIO; 1310 + error = -ENXIO; 1326 1311 goto out_unlock; 1327 1312 } 1328 1313 ··· 1385 1370 xfs_iunlock(ip, lock); 1386 1371 1387 1372 if (error) 1388 - return -error; 1373 + return error; 1389 1374 return offset; 1390 1375 } 1391 1376
+2 -2
fs/xfs/xfs_filestream.c
··· 258 258 if (*agp == NULLAGNUMBER) 259 259 return 0; 260 260 261 - err = ENOMEM; 261 + err = -ENOMEM; 262 262 item = kmem_alloc(sizeof(*item), KM_MAYFAIL); 263 263 if (!item) 264 264 goto out_put_ag; ··· 268 268 269 269 err = xfs_mru_cache_insert(mp->m_filestream, ip->i_ino, &item->mru); 270 270 if (err) { 271 - if (err == EEXIST) 271 + if (err == -EEXIST) 272 272 err = 0; 273 273 goto out_free_item; 274 274 }
+1 -13
fs/xfs/xfs_format.h fs/xfs/libxfs/xfs_format.h
··· 68 68 #define XFS_RTLOBIT(w) xfs_lowbit32(w) 69 69 #define XFS_RTHIBIT(w) xfs_highbit32(w) 70 70 71 - #if XFS_BIG_BLKNOS 72 71 #define XFS_RTBLOCKLOG(b) xfs_highbit64(b) 73 - #else 74 - #define XFS_RTBLOCKLOG(b) xfs_highbit32(b) 75 - #endif 76 72 77 73 /* 78 74 * Dquot and dquot block format definitions ··· 300 304 * Values and macros for delayed-allocation startblock fields. 301 305 */ 302 306 #define STARTBLOCKVALBITS 17 303 - #define STARTBLOCKMASKBITS (15 + XFS_BIG_BLKNOS * 20) 304 - #define DSTARTBLOCKMASKBITS (15 + 20) 307 + #define STARTBLOCKMASKBITS (15 + 20) 305 308 #define STARTBLOCKMASK \ 306 309 (((((xfs_fsblock_t)1) << STARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS) 307 - #define DSTARTBLOCKMASK \ 308 - (((((xfs_dfsbno_t)1) << DSTARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS) 309 310 310 311 static inline int isnullstartblock(xfs_fsblock_t x) 311 312 { 312 313 return ((x) & STARTBLOCKMASK) == STARTBLOCKMASK; 313 - } 314 - 315 - static inline int isnulldstartblock(xfs_dfsbno_t x) 316 - { 317 - return ((x) & DSTARTBLOCKMASK) == DSTARTBLOCKMASK; 318 314 } 319 315 320 316 static inline xfs_fsblock_t nullstartblock(int k)
+5 -2
fs/xfs/xfs_fs.h
··· 255 255 ((2 * 1024 * 1024 * 1024ULL) - XFS_MIN_LOG_BYTES) 256 256 257 257 /* Used for sanity checks on superblock */ 258 - #define XFS_MAX_DBLOCKS(s) ((xfs_drfsbno_t)(s)->sb_agcount * (s)->sb_agblocks) 259 - #define XFS_MIN_DBLOCKS(s) ((xfs_drfsbno_t)((s)->sb_agcount - 1) * \ 258 + #define XFS_MAX_DBLOCKS(s) ((xfs_rfsblock_t)(s)->sb_agcount * (s)->sb_agblocks) 259 + #define XFS_MIN_DBLOCKS(s) ((xfs_rfsblock_t)((s)->sb_agcount - 1) * \ 260 260 (s)->sb_agblocks + XFS_MIN_AG_BLOCKS) 261 261 262 262 /* ··· 375 375 #define XFS_EOF_FLAGS_GID (1 << 2) /* filter by gid */ 376 376 #define XFS_EOF_FLAGS_PRID (1 << 3) /* filter by project id */ 377 377 #define XFS_EOF_FLAGS_MINFILESIZE (1 << 4) /* filter by min file size */ 378 + #define XFS_EOF_FLAGS_UNION (1 << 5) /* union filter algorithm; 379 + * kernel only, not included in 380 + * valid mask */ 378 381 #define XFS_EOF_FLAGS_VALID \ 379 382 (XFS_EOF_FLAGS_SYNC | \ 380 383 XFS_EOF_FLAGS_UID | \
+21 -21
fs/xfs/xfs_fsops.c
··· 168 168 nb = in->newblocks; 169 169 pct = in->imaxpct; 170 170 if (nb < mp->m_sb.sb_dblocks || pct < 0 || pct > 100) 171 - return XFS_ERROR(EINVAL); 171 + return -EINVAL; 172 172 if ((error = xfs_sb_validate_fsb_count(&mp->m_sb, nb))) 173 173 return error; 174 174 dpct = pct - mp->m_sb.sb_imax_pct; ··· 176 176 XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1), 177 177 XFS_FSS_TO_BB(mp, 1), 0, NULL); 178 178 if (!bp) 179 - return EIO; 179 + return -EIO; 180 180 if (bp->b_error) { 181 181 error = bp->b_error; 182 182 xfs_buf_relse(bp); ··· 191 191 nagcount--; 192 192 nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks; 193 193 if (nb < mp->m_sb.sb_dblocks) 194 - return XFS_ERROR(EINVAL); 194 + return -EINVAL; 195 195 } 196 196 new = nb - mp->m_sb.sb_dblocks; 197 197 oagcount = mp->m_sb.sb_agcount; ··· 229 229 XFS_FSS_TO_BB(mp, 1), 0, 230 230 &xfs_agf_buf_ops); 231 231 if (!bp) { 232 - error = ENOMEM; 232 + error = -ENOMEM; 233 233 goto error0; 234 234 } 235 235 ··· 270 270 XFS_FSS_TO_BB(mp, 1), 0, 271 271 &xfs_agfl_buf_ops); 272 272 if (!bp) { 273 - error = ENOMEM; 273 + error = -ENOMEM; 274 274 goto error0; 275 275 } 276 276 ··· 298 298 XFS_FSS_TO_BB(mp, 1), 0, 299 299 &xfs_agi_buf_ops); 300 300 if (!bp) { 301 - error = ENOMEM; 301 + error = -ENOMEM; 302 302 goto error0; 303 303 } 304 304 ··· 336 336 &xfs_allocbt_buf_ops); 337 337 338 338 if (!bp) { 339 - error = ENOMEM; 339 + error = -ENOMEM; 340 340 goto error0; 341 341 } 342 342 ··· 365 365 BTOBB(mp->m_sb.sb_blocksize), 0, 366 366 &xfs_allocbt_buf_ops); 367 367 if (!bp) { 368 - error = ENOMEM; 368 + error = -ENOMEM; 369 369 goto error0; 370 370 } 371 371 ··· 395 395 BTOBB(mp->m_sb.sb_blocksize), 0, 396 396 &xfs_inobt_buf_ops); 397 397 if (!bp) { 398 - error = ENOMEM; 398 + error = -ENOMEM; 399 399 goto error0; 400 400 } 401 401 ··· 420 420 BTOBB(mp->m_sb.sb_blocksize), 0, 421 421 &xfs_inobt_buf_ops); 422 422 if (!bp) { 423 - error = ENOMEM; 423 + error = -ENOMEM; 424 424 goto error0; 425 425 } 426 426 ··· 531 531 
bp->b_ops = &xfs_sb_buf_ops; 532 532 xfs_buf_zero(bp, 0, BBTOB(bp->b_length)); 533 533 } else 534 - error = ENOMEM; 534 + error = -ENOMEM; 535 535 } 536 536 537 537 /* ··· 576 576 577 577 nb = in->newblocks; 578 578 if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES)) 579 - return XFS_ERROR(EINVAL); 579 + return -EINVAL; 580 580 if (nb == mp->m_sb.sb_logblocks && 581 581 in->isint == (mp->m_sb.sb_logstart != 0)) 582 - return XFS_ERROR(EINVAL); 582 + return -EINVAL; 583 583 /* 584 584 * Moving the log is hard, need new interfaces to sync 585 585 * the log first, hold off all activity while moving it. 586 586 * Can have shorter or longer log in the same space, 587 587 * or transform internal to external log or vice versa. 588 588 */ 589 - return XFS_ERROR(ENOSYS); 589 + return -ENOSYS; 590 590 } 591 591 592 592 /* ··· 604 604 int error; 605 605 606 606 if (!capable(CAP_SYS_ADMIN)) 607 - return XFS_ERROR(EPERM); 607 + return -EPERM; 608 608 if (!mutex_trylock(&mp->m_growlock)) 609 - return XFS_ERROR(EWOULDBLOCK); 609 + return -EWOULDBLOCK; 610 610 error = xfs_growfs_data_private(mp, in); 611 611 mutex_unlock(&mp->m_growlock); 612 612 return error; ··· 620 620 int error; 621 621 622 622 if (!capable(CAP_SYS_ADMIN)) 623 - return XFS_ERROR(EPERM); 623 + return -EPERM; 624 624 if (!mutex_trylock(&mp->m_growlock)) 625 - return XFS_ERROR(EWOULDBLOCK); 625 + return -EWOULDBLOCK; 626 626 error = xfs_growfs_log_private(mp, in); 627 627 mutex_unlock(&mp->m_growlock); 628 628 return error; ··· 674 674 /* If inval is null, report current values and return */ 675 675 if (inval == (__uint64_t *)NULL) { 676 676 if (!outval) 677 - return EINVAL; 677 + return -EINVAL; 678 678 outval->resblks = mp->m_resblks; 679 679 outval->resblks_avail = mp->m_resblks_avail; 680 680 return 0; ··· 757 757 int error; 758 758 error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, 759 759 fdblks_delta, 0); 760 - if (error == ENOSPC) 760 + if (error == -ENOSPC) 761 761 goto retry; 762 
762 } 763 763 return 0; ··· 818 818 SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR); 819 819 break; 820 820 default: 821 - return XFS_ERROR(EINVAL); 821 + return -EINVAL; 822 822 } 823 823 824 824 return 0;
+17 -17
fs/xfs/xfs_ialloc.c fs/xfs/libxfs/xfs_ialloc.c
··· 292 292 mp->m_bsize * blks_per_cluster, 293 293 XBF_UNMAPPED); 294 294 if (!fbuf) 295 - return ENOMEM; 295 + return -ENOMEM; 296 296 297 297 /* Initialize the inode buffers and log them appropriately. */ 298 298 fbuf->b_ops = &xfs_inode_buf_ops; ··· 380 380 newlen = args.mp->m_ialloc_inos; 381 381 if (args.mp->m_maxicount && 382 382 args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount) 383 - return XFS_ERROR(ENOSPC); 383 + return -ENOSPC; 384 384 args.minlen = args.maxlen = args.mp->m_ialloc_blks; 385 385 /* 386 386 * First try to allocate inodes contiguous with the last-allocated ··· 1385 1385 if (error) { 1386 1386 xfs_trans_brelse(tp, agbp); 1387 1387 1388 - if (error != ENOSPC) 1388 + if (error != -ENOSPC) 1389 1389 goto out_error; 1390 1390 1391 1391 xfs_perag_put(pag); ··· 1416 1416 agno = 0; 1417 1417 if (agno == start_agno) { 1418 1418 *inop = NULLFSINO; 1419 - return noroom ? ENOSPC : 0; 1419 + return noroom ? -ENOSPC : 0; 1420 1420 } 1421 1421 } 1422 1422 ··· 1425 1425 return xfs_dialloc_ag(tp, agbp, parent, inop); 1426 1426 out_error: 1427 1427 xfs_perag_put(pag); 1428 - return XFS_ERROR(error); 1428 + return error; 1429 1429 } 1430 1430 1431 1431 STATIC int ··· 1682 1682 xfs_warn(mp, "%s: agno >= mp->m_sb.sb_agcount (%d >= %d).", 1683 1683 __func__, agno, mp->m_sb.sb_agcount); 1684 1684 ASSERT(0); 1685 - return XFS_ERROR(EINVAL); 1685 + return -EINVAL; 1686 1686 } 1687 1687 agino = XFS_INO_TO_AGINO(mp, inode); 1688 1688 if (inode != XFS_AGINO_TO_INO(mp, agno, agino)) { ··· 1690 1690 __func__, (unsigned long long)inode, 1691 1691 (unsigned long long)XFS_AGINO_TO_INO(mp, agno, agino)); 1692 1692 ASSERT(0); 1693 - return XFS_ERROR(EINVAL); 1693 + return -EINVAL; 1694 1694 } 1695 1695 agbno = XFS_AGINO_TO_AGBNO(mp, agino); 1696 1696 if (agbno >= mp->m_sb.sb_agblocks) { 1697 1697 xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).", 1698 1698 __func__, agbno, mp->m_sb.sb_agblocks); 1699 1699 ASSERT(0); 1700 - return XFS_ERROR(EINVAL); 1700 + 
return -EINVAL; 1701 1701 } 1702 1702 /* 1703 1703 * Get the allocation group header. ··· 1769 1769 if (i) 1770 1770 error = xfs_inobt_get_rec(cur, &rec, &i); 1771 1771 if (!error && i == 0) 1772 - error = EINVAL; 1772 + error = -EINVAL; 1773 1773 } 1774 1774 1775 1775 xfs_trans_brelse(tp, agbp); ··· 1780 1780 /* check that the returned record contains the required inode */ 1781 1781 if (rec.ir_startino > agino || 1782 1782 rec.ir_startino + mp->m_ialloc_inos <= agino) 1783 - return EINVAL; 1783 + return -EINVAL; 1784 1784 1785 1785 /* for untrusted inodes check it is allocated first */ 1786 1786 if ((flags & XFS_IGET_UNTRUSTED) && 1787 1787 (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino))) 1788 - return EINVAL; 1788 + return -EINVAL; 1789 1789 1790 1790 *chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino); 1791 1791 *offset_agbno = agbno - *chunk_agbno; ··· 1829 1829 * as they can be invalid without implying corruption. 1830 1830 */ 1831 1831 if (flags & XFS_IGET_UNTRUSTED) 1832 - return XFS_ERROR(EINVAL); 1832 + return -EINVAL; 1833 1833 if (agno >= mp->m_sb.sb_agcount) { 1834 1834 xfs_alert(mp, 1835 1835 "%s: agno (%d) >= mp->m_sb.sb_agcount (%d)", ··· 1849 1849 } 1850 1850 xfs_stack_trace(); 1851 1851 #endif /* DEBUG */ 1852 - return XFS_ERROR(EINVAL); 1852 + return -EINVAL; 1853 1853 } 1854 1854 1855 1855 blks_per_cluster = xfs_icluster_size_fsb(mp); ··· 1922 1922 __func__, (unsigned long long) imap->im_blkno, 1923 1923 (unsigned long long) imap->im_len, 1924 1924 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)); 1925 - return XFS_ERROR(EINVAL); 1925 + return -EINVAL; 1926 1926 } 1927 1927 return 0; 1928 1928 } ··· 2072 2072 2073 2073 if (xfs_sb_version_hascrc(&mp->m_sb) && 2074 2074 !xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF)) 2075 - xfs_buf_ioerror(bp, EFSBADCRC); 2075 + xfs_buf_ioerror(bp, -EFSBADCRC); 2076 2076 else if (XFS_TEST_ERROR(!xfs_agi_verify(bp), mp, 2077 2077 XFS_ERRTAG_IALLOC_READ_AGI, 2078 2078 XFS_RANDOM_IALLOC_READ_AGI)) 2079 - 
xfs_buf_ioerror(bp, EFSCORRUPTED); 2079 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 2080 2080 2081 2081 if (bp->b_error) 2082 2082 xfs_verifier_error(bp); ··· 2090 2090 struct xfs_buf_log_item *bip = bp->b_fspriv; 2091 2091 2092 2092 if (!xfs_agi_verify(bp)) { 2093 - xfs_buf_ioerror(bp, EFSCORRUPTED); 2093 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 2094 2094 xfs_verifier_error(bp); 2095 2095 return; 2096 2096 }
fs/xfs/xfs_ialloc.h fs/xfs/libxfs/xfs_ialloc.h
+3 -3
fs/xfs/xfs_ialloc_btree.c fs/xfs/libxfs/xfs_ialloc_btree.c
··· 272 272 struct xfs_buf *bp) 273 273 { 274 274 if (!xfs_btree_sblock_verify_crc(bp)) 275 - xfs_buf_ioerror(bp, EFSBADCRC); 275 + xfs_buf_ioerror(bp, -EFSBADCRC); 276 276 else if (!xfs_inobt_verify(bp)) 277 - xfs_buf_ioerror(bp, EFSCORRUPTED); 277 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 278 278 279 279 if (bp->b_error) { 280 280 trace_xfs_btree_corrupt(bp, _RET_IP_); ··· 288 288 { 289 289 if (!xfs_inobt_verify(bp)) { 290 290 trace_xfs_btree_corrupt(bp, _RET_IP_); 291 - xfs_buf_ioerror(bp, EFSCORRUPTED); 291 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 292 292 xfs_verifier_error(bp); 293 293 return; 294 294 }
fs/xfs/xfs_ialloc_btree.h fs/xfs/libxfs/xfs_ialloc_btree.h
+120 -28
fs/xfs/xfs_icache.c
··· 33 33 #include "xfs_trace.h" 34 34 #include "xfs_icache.h" 35 35 #include "xfs_bmap_util.h" 36 + #include "xfs_quota.h" 37 + #include "xfs_dquot_item.h" 38 + #include "xfs_dquot.h" 36 39 37 40 #include <linux/kthread.h> 38 41 #include <linux/freezer.h> ··· 161 158 if (ip->i_ino != ino) { 162 159 trace_xfs_iget_skip(ip); 163 160 XFS_STATS_INC(xs_ig_frecycle); 164 - error = EAGAIN; 161 + error = -EAGAIN; 165 162 goto out_error; 166 163 } 167 164 ··· 179 176 if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) { 180 177 trace_xfs_iget_skip(ip); 181 178 XFS_STATS_INC(xs_ig_frecycle); 182 - error = EAGAIN; 179 + error = -EAGAIN; 183 180 goto out_error; 184 181 } 185 182 ··· 187 184 * If lookup is racing with unlink return an error immediately. 188 185 */ 189 186 if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) { 190 - error = ENOENT; 187 + error = -ENOENT; 191 188 goto out_error; 192 189 } 193 190 ··· 209 206 spin_unlock(&ip->i_flags_lock); 210 207 rcu_read_unlock(); 211 208 212 - error = -inode_init_always(mp->m_super, inode); 209 + error = inode_init_always(mp->m_super, inode); 213 210 if (error) { 214 211 /* 215 212 * Re-initializing the inode failed, and we are in deep ··· 246 243 /* If the VFS inode is being torn down, pause and try again. */ 247 244 if (!igrab(inode)) { 248 245 trace_xfs_iget_skip(ip); 249 - error = EAGAIN; 246 + error = -EAGAIN; 250 247 goto out_error; 251 248 } 252 249 ··· 288 285 289 286 ip = xfs_inode_alloc(mp, ino); 290 287 if (!ip) 291 - return ENOMEM; 288 + return -ENOMEM; 292 289 293 290 error = xfs_iread(mp, tp, ip, flags); 294 291 if (error) ··· 297 294 trace_xfs_iget_miss(ip); 298 295 299 296 if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) { 300 - error = ENOENT; 297 + error = -ENOENT; 301 298 goto out_destroy; 302 299 } 303 300 ··· 308 305 * recurse into the file system. 
309 306 */ 310 307 if (radix_tree_preload(GFP_NOFS)) { 311 - error = EAGAIN; 308 + error = -EAGAIN; 312 309 goto out_destroy; 313 310 } 314 311 ··· 344 341 if (unlikely(error)) { 345 342 WARN_ON(error != -EEXIST); 346 343 XFS_STATS_INC(xs_ig_dup); 347 - error = EAGAIN; 344 + error = -EAGAIN; 348 345 goto out_preload_end; 349 346 } 350 347 spin_unlock(&pag->pag_ici_lock); ··· 411 408 412 409 /* reject inode numbers outside existing AGs */ 413 410 if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount) 414 - return EINVAL; 411 + return -EINVAL; 415 412 416 413 /* get the perag structure and ensure that it's inode capable */ 417 414 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino)); ··· 448 445 return 0; 449 446 450 447 out_error_or_again: 451 - if (error == EAGAIN) { 448 + if (error == -EAGAIN) { 452 449 delay(1); 453 450 goto again; 454 451 } ··· 492 489 493 490 /* nothing to sync during shutdown */ 494 491 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) 495 - return EFSCORRUPTED; 492 + return -EFSCORRUPTED; 496 493 497 494 /* If we can't grab the inode, it must on it's way to reclaim. */ 498 495 if (!igrab(inode)) 499 - return ENOENT; 496 + return -ENOENT; 500 497 501 498 /* inode is valid */ 502 499 return 0; 503 500 504 501 out_unlock_noent: 505 502 spin_unlock(&ip->i_flags_lock); 506 - return ENOENT; 503 + return -ENOENT; 507 504 } 508 505 509 506 STATIC int ··· 586 583 continue; 587 584 error = execute(batch[i], flags, args); 588 585 IRELE(batch[i]); 589 - if (error == EAGAIN) { 586 + if (error == -EAGAIN) { 590 587 skipped++; 591 588 continue; 592 589 } 593 - if (error && last_error != EFSCORRUPTED) 590 + if (error && last_error != -EFSCORRUPTED) 594 591 last_error = error; 595 592 } 596 593 597 594 /* bail out if the filesystem is corrupted. 
*/ 598 - if (error == EFSCORRUPTED) 595 + if (error == -EFSCORRUPTED) 599 596 break; 600 597 601 598 cond_resched(); ··· 655 652 xfs_perag_put(pag); 656 653 if (error) { 657 654 last_error = error; 658 - if (error == EFSCORRUPTED) 655 + if (error == -EFSCORRUPTED) 659 656 break; 660 657 } 661 658 } 662 - return XFS_ERROR(last_error); 659 + return last_error; 663 660 } 664 661 665 662 int ··· 683 680 xfs_perag_put(pag); 684 681 if (error) { 685 682 last_error = error; 686 - if (error == EFSCORRUPTED) 683 + if (error == -EFSCORRUPTED) 687 684 break; 688 685 } 689 686 } 690 - return XFS_ERROR(last_error); 687 + return last_error; 691 688 } 692 689 693 690 /* ··· 947 944 * see the stale flag set on the inode. 948 945 */ 949 946 error = xfs_iflush(ip, &bp); 950 - if (error == EAGAIN) { 947 + if (error == -EAGAIN) { 951 948 xfs_iunlock(ip, XFS_ILOCK_EXCL); 952 949 /* backoff longer than in xfs_ifree_cluster */ 953 950 delay(2); ··· 1000 997 xfs_iflags_clear(ip, XFS_IRECLAIM); 1001 998 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1002 999 /* 1003 - * We could return EAGAIN here to make reclaim rescan the inode tree in 1000 + * We could return -EAGAIN here to make reclaim rescan the inode tree in 1004 1001 * a short while. However, this just burns CPU time scanning the tree 1005 1002 * waiting for IO to complete and the reclaim work never goes back to 1006 1003 * the idle state. Instead, return 0 to let the next scheduled ··· 1103 1100 if (!batch[i]) 1104 1101 continue; 1105 1102 error = xfs_reclaim_inode(batch[i], pag, flags); 1106 - if (error && last_error != EFSCORRUPTED) 1103 + if (error && last_error != -EFSCORRUPTED) 1107 1104 last_error = error; 1108 1105 } 1109 1106 ··· 1132 1129 trylock = 0; 1133 1130 goto restart; 1134 1131 } 1135 - return XFS_ERROR(last_error); 1132 + return last_error; 1136 1133 } 1137 1134 1138 1135 int ··· 1206 1203 return 1; 1207 1204 } 1208 1205 1206 + /* 1207 + * A union-based inode filtering algorithm. 
Process the inode if any of the 1208 + * criteria match. This is for global/internal scans only. 1209 + */ 1210 + STATIC int 1211 + xfs_inode_match_id_union( 1212 + struct xfs_inode *ip, 1213 + struct xfs_eofblocks *eofb) 1214 + { 1215 + if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) && 1216 + uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid)) 1217 + return 1; 1218 + 1219 + if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) && 1220 + gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid)) 1221 + return 1; 1222 + 1223 + if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) && 1224 + xfs_get_projid(ip) == eofb->eof_prid) 1225 + return 1; 1226 + 1227 + return 0; 1228 + } 1229 + 1209 1230 STATIC int 1210 1231 xfs_inode_free_eofblocks( 1211 1232 struct xfs_inode *ip, ··· 1238 1211 { 1239 1212 int ret; 1240 1213 struct xfs_eofblocks *eofb = args; 1214 + bool need_iolock = true; 1215 + int match; 1216 + 1217 + ASSERT(!eofb || (eofb && eofb->eof_scan_owner != 0)); 1241 1218 1242 1219 if (!xfs_can_free_eofblocks(ip, false)) { 1243 1220 /* inode could be preallocated or append-only */ ··· 1259 1228 return 0; 1260 1229 1261 1230 if (eofb) { 1262 - if (!xfs_inode_match_id(ip, eofb)) 1231 + if (eofb->eof_flags & XFS_EOF_FLAGS_UNION) 1232 + match = xfs_inode_match_id_union(ip, eofb); 1233 + else 1234 + match = xfs_inode_match_id(ip, eofb); 1235 + if (!match) 1263 1236 return 0; 1264 1237 1265 1238 /* skip the inode if the file size is too small */ 1266 1239 if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE && 1267 1240 XFS_ISIZE(ip) < eofb->eof_min_file_size) 1268 1241 return 0; 1242 + 1243 + /* 1244 + * A scan owner implies we already hold the iolock. Skip it in 1245 + * xfs_free_eofblocks() to avoid deadlock. This also eliminates 1246 + * the possibility of EAGAIN being returned. 
1247 + */ 1248 + if (eofb->eof_scan_owner == ip->i_ino) 1249 + need_iolock = false; 1269 1250 } 1270 1251 1271 - ret = xfs_free_eofblocks(ip->i_mount, ip, true); 1252 + ret = xfs_free_eofblocks(ip->i_mount, ip, need_iolock); 1272 1253 1273 1254 /* don't revisit the inode if we're not waiting */ 1274 - if (ret == EAGAIN && !(flags & SYNC_WAIT)) 1255 + if (ret == -EAGAIN && !(flags & SYNC_WAIT)) 1275 1256 ret = 0; 1276 1257 1277 1258 return ret; ··· 1301 1258 1302 1259 return xfs_inode_ag_iterator_tag(mp, xfs_inode_free_eofblocks, flags, 1303 1260 eofb, XFS_ICI_EOFBLOCKS_TAG); 1261 + } 1262 + 1263 + /* 1264 + * Run eofblocks scans on the quotas applicable to the inode. For inodes with 1265 + * multiple quotas, we don't know exactly which quota caused an allocation 1266 + * failure. We make a best effort by including each quota under low free space 1267 + * conditions (less than 1% free space) in the scan. 1268 + */ 1269 + int 1270 + xfs_inode_free_quota_eofblocks( 1271 + struct xfs_inode *ip) 1272 + { 1273 + int scan = 0; 1274 + struct xfs_eofblocks eofb = {0}; 1275 + struct xfs_dquot *dq; 1276 + 1277 + ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 1278 + 1279 + /* 1280 + * Set the scan owner to avoid a potential livelock. Otherwise, the scan 1281 + * can repeatedly trylock on the inode we're currently processing. We 1282 + * run a sync scan to increase effectiveness and use the union filter to 1283 + * cover all applicable quotas in a single scan. 
1284 + */ 1285 + eofb.eof_scan_owner = ip->i_ino; 1286 + eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC; 1287 + 1288 + if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) { 1289 + dq = xfs_inode_dquot(ip, XFS_DQ_USER); 1290 + if (dq && xfs_dquot_lowsp(dq)) { 1291 + eofb.eof_uid = VFS_I(ip)->i_uid; 1292 + eofb.eof_flags |= XFS_EOF_FLAGS_UID; 1293 + scan = 1; 1294 + } 1295 + } 1296 + 1297 + if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) { 1298 + dq = xfs_inode_dquot(ip, XFS_DQ_GROUP); 1299 + if (dq && xfs_dquot_lowsp(dq)) { 1300 + eofb.eof_gid = VFS_I(ip)->i_gid; 1301 + eofb.eof_flags |= XFS_EOF_FLAGS_GID; 1302 + scan = 1; 1303 + } 1304 + } 1305 + 1306 + if (scan) 1307 + xfs_icache_free_eofblocks(ip->i_mount, &eofb); 1308 + 1309 + return scan; 1304 1310 } 1305 1311 1306 1312 void
+8 -5
fs/xfs/xfs_icache.h
··· 27 27 kgid_t eof_gid; 28 28 prid_t eof_prid; 29 29 __u64 eof_min_file_size; 30 + xfs_ino_t eof_scan_owner; 30 31 }; 31 32 32 33 #define SYNC_WAIT 0x0001 /* wait for i/o to complete */ ··· 58 57 void xfs_inode_set_eofblocks_tag(struct xfs_inode *ip); 59 58 void xfs_inode_clear_eofblocks_tag(struct xfs_inode *ip); 60 59 int xfs_icache_free_eofblocks(struct xfs_mount *, struct xfs_eofblocks *); 60 + int xfs_inode_free_quota_eofblocks(struct xfs_inode *ip); 61 61 void xfs_eofblocks_worker(struct work_struct *); 62 62 63 63 int xfs_inode_ag_iterator(struct xfs_mount *mp, ··· 74 72 struct xfs_eofblocks *dst) 75 73 { 76 74 if (src->eof_version != XFS_EOFBLOCKS_VERSION) 77 - return EINVAL; 75 + return -EINVAL; 78 76 79 77 if (src->eof_flags & ~XFS_EOF_FLAGS_VALID) 80 - return EINVAL; 78 + return -EINVAL; 81 79 82 80 if (memchr_inv(&src->pad32, 0, sizeof(src->pad32)) || 83 81 memchr_inv(src->pad64, 0, sizeof(src->pad64))) 84 - return EINVAL; 82 + return -EINVAL; 85 83 86 84 dst->eof_flags = src->eof_flags; 87 85 dst->eof_prid = src->eof_prid; 88 86 dst->eof_min_file_size = src->eof_min_file_size; 87 + dst->eof_scan_owner = NULLFSINO; 89 88 90 89 dst->eof_uid = INVALID_UID; 91 90 if (src->eof_flags & XFS_EOF_FLAGS_UID) { 92 91 dst->eof_uid = make_kuid(current_user_ns(), src->eof_uid); 93 92 if (!uid_valid(dst->eof_uid)) 94 - return EINVAL; 93 + return -EINVAL; 95 94 } 96 95 97 96 dst->eof_gid = INVALID_GID; 98 97 if (src->eof_flags & XFS_EOF_FLAGS_GID) { 99 98 dst->eof_gid = make_kgid(current_user_ns(), src->eof_gid); 100 99 if (!gid_valid(dst->eof_gid)) 101 - return EINVAL; 100 + return -EINVAL; 102 101 } 103 102 return 0; 104 103 }
+34 -34
fs/xfs/xfs_inode.c
··· 583 583 trace_xfs_lookup(dp, name); 584 584 585 585 if (XFS_FORCED_SHUTDOWN(dp->i_mount)) 586 - return XFS_ERROR(EIO); 586 + return -EIO; 587 587 588 588 lock_mode = xfs_ilock_data_map_shared(dp); 589 589 error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name); ··· 893 893 } 894 894 if (!ialloc_context && !ip) { 895 895 *ipp = NULL; 896 - return XFS_ERROR(ENOSPC); 896 + return -ENOSPC; 897 897 } 898 898 899 899 /* ··· 1088 1088 trace_xfs_create(dp, name); 1089 1089 1090 1090 if (XFS_FORCED_SHUTDOWN(mp)) 1091 - return XFS_ERROR(EIO); 1091 + return -EIO; 1092 1092 1093 1093 prid = xfs_get_initial_prid(dp); 1094 1094 ··· 1125 1125 */ 1126 1126 tres.tr_logflags = XFS_TRANS_PERM_LOG_RES; 1127 1127 error = xfs_trans_reserve(tp, &tres, resblks, 0); 1128 - if (error == ENOSPC) { 1128 + if (error == -ENOSPC) { 1129 1129 /* flush outstanding delalloc blocks and retry */ 1130 1130 xfs_flush_inodes(mp); 1131 1131 error = xfs_trans_reserve(tp, &tres, resblks, 0); 1132 1132 } 1133 - if (error == ENOSPC) { 1133 + if (error == -ENOSPC) { 1134 1134 /* No space at all so try a "no-allocation" reservation */ 1135 1135 resblks = 0; 1136 1136 error = xfs_trans_reserve(tp, &tres, 0, 0); ··· 1165 1165 error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, 1166 1166 prid, resblks > 0, &ip, &committed); 1167 1167 if (error) { 1168 - if (error == ENOSPC) 1168 + if (error == -ENOSPC) 1169 1169 goto out_trans_cancel; 1170 1170 goto out_trans_abort; 1171 1171 } ··· 1184 1184 &first_block, &free_list, resblks ? 
1185 1185 resblks - XFS_IALLOC_SPACE_RES(mp) : 0); 1186 1186 if (error) { 1187 - ASSERT(error != ENOSPC); 1187 + ASSERT(error != -ENOSPC); 1188 1188 goto out_trans_abort; 1189 1189 } 1190 1190 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); ··· 1274 1274 uint resblks; 1275 1275 1276 1276 if (XFS_FORCED_SHUTDOWN(mp)) 1277 - return XFS_ERROR(EIO); 1277 + return -EIO; 1278 1278 1279 1279 prid = xfs_get_initial_prid(dp); 1280 1280 ··· 1293 1293 1294 1294 tres = &M_RES(mp)->tr_create_tmpfile; 1295 1295 error = xfs_trans_reserve(tp, tres, resblks, 0); 1296 - if (error == ENOSPC) { 1296 + if (error == -ENOSPC) { 1297 1297 /* No space at all so try a "no-allocation" reservation */ 1298 1298 resblks = 0; 1299 1299 error = xfs_trans_reserve(tp, tres, 0, 0); ··· 1311 1311 error = xfs_dir_ialloc(&tp, dp, mode, 1, 0, 1312 1312 prid, resblks > 0, &ip, NULL); 1313 1313 if (error) { 1314 - if (error == ENOSPC) 1314 + if (error == -ENOSPC) 1315 1315 goto out_trans_cancel; 1316 1316 goto out_trans_abort; 1317 1317 } ··· 1382 1382 ASSERT(!S_ISDIR(sip->i_d.di_mode)); 1383 1383 1384 1384 if (XFS_FORCED_SHUTDOWN(mp)) 1385 - return XFS_ERROR(EIO); 1385 + return -EIO; 1386 1386 1387 1387 error = xfs_qm_dqattach(sip, 0); 1388 1388 if (error) ··· 1396 1396 cancel_flags = XFS_TRANS_RELEASE_LOG_RES; 1397 1397 resblks = XFS_LINK_SPACE_RES(mp, target_name->len); 1398 1398 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, resblks, 0); 1399 - if (error == ENOSPC) { 1399 + if (error == -ENOSPC) { 1400 1400 resblks = 0; 1401 1401 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, 0, 0); 1402 1402 } ··· 1417 1417 */ 1418 1418 if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && 1419 1419 (xfs_get_projid(tdp) != xfs_get_projid(sip)))) { 1420 - error = XFS_ERROR(EXDEV); 1420 + error = -EXDEV; 1421 1421 goto error_return; 1422 1422 } 1423 1423 ··· 1635 1635 truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED); 1636 1636 if (truncated) { 1637 1637 xfs_iflags_clear(ip, 
XFS_IDIRTY_RELEASE); 1638 - if (VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0) { 1639 - error = -filemap_flush(VFS_I(ip)->i_mapping); 1638 + if (ip->i_delayed_blks > 0) { 1639 + error = filemap_flush(VFS_I(ip)->i_mapping); 1640 1640 if (error) 1641 1641 return error; 1642 1642 } ··· 1673 1673 return 0; 1674 1674 1675 1675 error = xfs_free_eofblocks(mp, ip, true); 1676 - if (error && error != EAGAIN) 1676 + if (error && error != -EAGAIN) 1677 1677 return error; 1678 1678 1679 1679 /* delalloc blocks after truncation means it really is dirty */ ··· 1772 1772 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ifree, 1773 1773 XFS_IFREE_SPACE_RES(mp), 0); 1774 1774 if (error) { 1775 - if (error == ENOSPC) { 1775 + if (error == -ENOSPC) { 1776 1776 xfs_warn_ratelimited(mp, 1777 1777 "Failed to remove inode(s) from unlinked list. " 1778 1778 "Please free space, unmount and run xfs_repair."); ··· 2219 2219 XBF_UNMAPPED); 2220 2220 2221 2221 if (!bp) 2222 - return ENOMEM; 2222 + return -ENOMEM; 2223 2223 2224 2224 /* 2225 2225 * This buffer may not have been correctly initialised as we ··· 2491 2491 trace_xfs_remove(dp, name); 2492 2492 2493 2493 if (XFS_FORCED_SHUTDOWN(mp)) 2494 - return XFS_ERROR(EIO); 2494 + return -EIO; 2495 2495 2496 2496 error = xfs_qm_dqattach(dp, 0); 2497 2497 if (error) ··· 2521 2521 */ 2522 2522 resblks = XFS_REMOVE_SPACE_RES(mp); 2523 2523 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_remove, resblks, 0); 2524 - if (error == ENOSPC) { 2524 + if (error == -ENOSPC) { 2525 2525 resblks = 0; 2526 2526 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_remove, 0, 0); 2527 2527 } 2528 2528 if (error) { 2529 - ASSERT(error != ENOSPC); 2529 + ASSERT(error != -ENOSPC); 2530 2530 cancel_flags = 0; 2531 2531 goto out_trans_cancel; 2532 2532 } ··· 2543 2543 if (is_dir) { 2544 2544 ASSERT(ip->i_d.di_nlink >= 2); 2545 2545 if (ip->i_d.di_nlink != 2) { 2546 - error = XFS_ERROR(ENOTEMPTY); 2546 + error = -ENOTEMPTY; 2547 2547 goto out_trans_cancel; 2548 2548 } 2549 2549 if 
(!xfs_dir_isempty(ip)) { 2550 - error = XFS_ERROR(ENOTEMPTY); 2550 + error = -ENOTEMPTY; 2551 2551 goto out_trans_cancel; 2552 2552 } 2553 2553 ··· 2582 2582 error = xfs_dir_removename(tp, dp, name, ip->i_ino, 2583 2583 &first_block, &free_list, resblks); 2584 2584 if (error) { 2585 - ASSERT(error != ENOENT); 2585 + ASSERT(error != -ENOENT); 2586 2586 goto out_bmap_cancel; 2587 2587 } 2588 2588 ··· 2702 2702 cancel_flags = XFS_TRANS_RELEASE_LOG_RES; 2703 2703 spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len); 2704 2704 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, spaceres, 0); 2705 - if (error == ENOSPC) { 2705 + if (error == -ENOSPC) { 2706 2706 spaceres = 0; 2707 2707 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, 0, 0); 2708 2708 } ··· 2747 2747 */ 2748 2748 if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && 2749 2749 (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) { 2750 - error = XFS_ERROR(EXDEV); 2750 + error = -EXDEV; 2751 2751 goto error_return; 2752 2752 } 2753 2753 ··· 2770 2770 error = xfs_dir_createname(tp, target_dp, target_name, 2771 2771 src_ip->i_ino, &first_block, 2772 2772 &free_list, spaceres); 2773 - if (error == ENOSPC) 2773 + if (error == -ENOSPC) 2774 2774 goto error_return; 2775 2775 if (error) 2776 2776 goto abort_return; ··· 2795 2795 */ 2796 2796 if (!(xfs_dir_isempty(target_ip)) || 2797 2797 (target_ip->i_d.di_nlink > 2)) { 2798 - error = XFS_ERROR(EEXIST); 2798 + error = -EEXIST; 2799 2799 goto error_return; 2800 2800 } 2801 2801 } ··· 2847 2847 error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot, 2848 2848 target_dp->i_ino, 2849 2849 &first_block, &free_list, spaceres); 2850 - ASSERT(error != EEXIST); 2850 + ASSERT(error != -EEXIST); 2851 2851 if (error) 2852 2852 goto abort_return; 2853 2853 } ··· 3055 3055 if (bp->b_iodone) { 3056 3056 XFS_BUF_UNDONE(bp); 3057 3057 xfs_buf_stale(bp); 3058 - xfs_buf_ioerror(bp, EIO); 3058 + xfs_buf_ioerror(bp, -EIO); 3059 3059 xfs_buf_ioend(bp, 0); 3060 3060 
} else { 3061 3061 xfs_buf_stale(bp); ··· 3069 3069 xfs_iflush_abort(iq, false); 3070 3070 kmem_free(ilist); 3071 3071 xfs_perag_put(pag); 3072 - return XFS_ERROR(EFSCORRUPTED); 3072 + return -EFSCORRUPTED; 3073 3073 } 3074 3074 3075 3075 /* ··· 3124 3124 * as we wait for an empty AIL as part of the unmount process. 3125 3125 */ 3126 3126 if (XFS_FORCED_SHUTDOWN(mp)) { 3127 - error = XFS_ERROR(EIO); 3127 + error = -EIO; 3128 3128 goto abort_out; 3129 3129 } 3130 3130 ··· 3167 3167 xfs_buf_relse(bp); 3168 3168 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 3169 3169 cluster_corrupt_out: 3170 - error = XFS_ERROR(EFSCORRUPTED); 3170 + error = -EFSCORRUPTED; 3171 3171 abort_out: 3172 3172 /* 3173 3173 * Unlocks the flush lock ··· 3331 3331 return 0; 3332 3332 3333 3333 corrupt_out: 3334 - return XFS_ERROR(EFSCORRUPTED); 3334 + return -EFSCORRUPTED; 3335 3335 }
+10
fs/xfs/xfs_inode.h
··· 398 398 399 399 extern struct kmem_zone *xfs_inode_zone; 400 400 401 + /* 402 + * Flags for read/write calls 403 + */ 404 + #define XFS_IO_ISDIRECT 0x00001 /* bypass page cache */ 405 + #define XFS_IO_INVIS 0x00002 /* don't update inode timestamps */ 406 + 407 + #define XFS_IO_FLAGS \ 408 + { XFS_IO_ISDIRECT, "DIRECT" }, \ 409 + { XFS_IO_INVIS, "INVIS"} 410 + 401 411 #endif /* __XFS_INODE_H__ */
+5 -5
fs/xfs/xfs_inode_buf.c fs/xfs/libxfs/xfs_inode_buf.c
··· 101 101 return; 102 102 } 103 103 104 - xfs_buf_ioerror(bp, EFSCORRUPTED); 104 + xfs_buf_ioerror(bp, -EFSCORRUPTED); 105 105 xfs_verifier_error(bp); 106 106 #ifdef DEBUG 107 107 xfs_alert(mp, ··· 174 174 (int)imap->im_len, buf_flags, &bp, 175 175 &xfs_inode_buf_ops); 176 176 if (error) { 177 - if (error == EAGAIN) { 177 + if (error == -EAGAIN) { 178 178 ASSERT(buf_flags & XBF_TRYLOCK); 179 179 return error; 180 180 } 181 181 182 - if (error == EFSCORRUPTED && 182 + if (error == -EFSCORRUPTED && 183 183 (iget_flags & XFS_IGET_UNTRUSTED)) 184 - return XFS_ERROR(EINVAL); 184 + return -EINVAL; 185 185 186 186 xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.", 187 187 __func__, error); ··· 390 390 __func__, ip->i_ino); 391 391 392 392 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, dip); 393 - error = XFS_ERROR(EFSCORRUPTED); 393 + error = -EFSCORRUPTED; 394 394 goto out_brelse; 395 395 } 396 396
fs/xfs/xfs_inode_buf.h fs/xfs/libxfs/xfs_inode_buf.h
+18 -18
fs/xfs/xfs_inode_fork.c fs/xfs/libxfs/xfs_inode_fork.c
··· 102 102 be64_to_cpu(dip->di_nblocks)); 103 103 XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW, 104 104 ip->i_mount, dip); 105 - return XFS_ERROR(EFSCORRUPTED); 105 + return -EFSCORRUPTED; 106 106 } 107 107 108 108 if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) { ··· 111 111 dip->di_forkoff); 112 112 XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW, 113 113 ip->i_mount, dip); 114 - return XFS_ERROR(EFSCORRUPTED); 114 + return -EFSCORRUPTED; 115 115 } 116 116 117 117 if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) && ··· 121 121 ip->i_ino); 122 122 XFS_CORRUPTION_ERROR("xfs_iformat(realtime)", 123 123 XFS_ERRLEVEL_LOW, ip->i_mount, dip); 124 - return XFS_ERROR(EFSCORRUPTED); 124 + return -EFSCORRUPTED; 125 125 } 126 126 127 127 switch (ip->i_d.di_mode & S_IFMT) { ··· 132 132 if (unlikely(dip->di_format != XFS_DINODE_FMT_DEV)) { 133 133 XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW, 134 134 ip->i_mount, dip); 135 - return XFS_ERROR(EFSCORRUPTED); 135 + return -EFSCORRUPTED; 136 136 } 137 137 ip->i_d.di_size = 0; 138 138 ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip); ··· 153 153 XFS_CORRUPTION_ERROR("xfs_iformat(4)", 154 154 XFS_ERRLEVEL_LOW, 155 155 ip->i_mount, dip); 156 - return XFS_ERROR(EFSCORRUPTED); 156 + return -EFSCORRUPTED; 157 157 } 158 158 159 159 di_size = be64_to_cpu(dip->di_size); ··· 166 166 XFS_CORRUPTION_ERROR("xfs_iformat(5)", 167 167 XFS_ERRLEVEL_LOW, 168 168 ip->i_mount, dip); 169 - return XFS_ERROR(EFSCORRUPTED); 169 + return -EFSCORRUPTED; 170 170 } 171 171 172 172 size = (int)di_size; ··· 181 181 default: 182 182 XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW, 183 183 ip->i_mount); 184 - return XFS_ERROR(EFSCORRUPTED); 184 + return -EFSCORRUPTED; 185 185 } 186 186 break; 187 187 188 188 default: 189 189 XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount); 190 - return XFS_ERROR(EFSCORRUPTED); 190 + return -EFSCORRUPTED; 191 191 } 192 192 if (error) { 193 193 return 
error; ··· 211 211 XFS_CORRUPTION_ERROR("xfs_iformat(8)", 212 212 XFS_ERRLEVEL_LOW, 213 213 ip->i_mount, dip); 214 - return XFS_ERROR(EFSCORRUPTED); 214 + return -EFSCORRUPTED; 215 215 } 216 216 217 217 error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size); ··· 223 223 error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK); 224 224 break; 225 225 default: 226 - error = XFS_ERROR(EFSCORRUPTED); 226 + error = -EFSCORRUPTED; 227 227 break; 228 228 } 229 229 if (error) { ··· 266 266 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)); 267 267 XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW, 268 268 ip->i_mount, dip); 269 - return XFS_ERROR(EFSCORRUPTED); 269 + return -EFSCORRUPTED; 270 270 } 271 271 ifp = XFS_IFORK_PTR(ip, whichfork); 272 272 real_size = 0; ··· 322 322 (unsigned long long) ip->i_ino, nex); 323 323 XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW, 324 324 ip->i_mount, dip); 325 - return XFS_ERROR(EFSCORRUPTED); 325 + return -EFSCORRUPTED; 326 326 } 327 327 328 328 ifp->if_real_bytes = 0; ··· 350 350 XFS_ERROR_REPORT("xfs_iformat_extents(2)", 351 351 XFS_ERRLEVEL_LOW, 352 352 ip->i_mount); 353 - return XFS_ERROR(EFSCORRUPTED); 353 + return -EFSCORRUPTED; 354 354 } 355 355 } 356 356 ifp->if_flags |= XFS_IFEXTENTS; ··· 399 399 (unsigned long long) ip->i_ino); 400 400 XFS_CORRUPTION_ERROR("xfs_iformat_btree", XFS_ERRLEVEL_LOW, 401 401 mp, dip); 402 - return XFS_ERROR(EFSCORRUPTED); 402 + return -EFSCORRUPTED; 403 403 } 404 404 405 405 ifp->if_broot_bytes = size; ··· 436 436 if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) { 437 437 XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW, 438 438 ip->i_mount); 439 - return XFS_ERROR(EFSCORRUPTED); 439 + return -EFSCORRUPTED; 440 440 } 441 441 nextents = XFS_IFORK_NEXTENTS(ip, whichfork); 442 442 ifp = XFS_IFORK_PTR(ip, whichfork); ··· 528 528 ifp->if_broot_bytes = (int)new_size; 529 529 ASSERT(XFS_BMAP_BMDR_SPACE(ifp->if_broot) <= 530 530 XFS_IFORK_SIZE(ip, whichfork)); 
531 - memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t)); 531 + memmove(np, op, cur_max * (uint)sizeof(xfs_fsblock_t)); 532 532 return; 533 533 } 534 534 ··· 575 575 ifp->if_broot_bytes); 576 576 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1, 577 577 (int)new_size); 578 - memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t)); 578 + memcpy(np, op, new_max * (uint)sizeof(xfs_fsblock_t)); 579 579 } 580 580 kmem_free(ifp->if_broot); 581 581 ifp->if_broot = new_broot; ··· 1692 1692 } 1693 1693 *idxp = page_idx; 1694 1694 *erp_idxp = erp_idx; 1695 - return(erp); 1695 + return erp; 1696 1696 } 1697 1697 1698 1698 /*
fs/xfs/xfs_inode_fork.h fs/xfs/libxfs/xfs_inode_fork.h
+1 -1
fs/xfs/xfs_inode_item.c
··· 788 788 in_f->ilf_boffset = in_f64->ilf_boffset; 789 789 return 0; 790 790 } 791 - return EFSCORRUPTED; 791 + return -EFSCORRUPTED; 792 792 }
-4
fs/xfs/xfs_inum.h fs/xfs/libxfs/xfs_inum.h
··· 54 54 #define XFS_OFFBNO_TO_AGINO(mp,b,o) \ 55 55 ((xfs_agino_t)(((b) << XFS_INO_OFFSET_BITS(mp)) | (o))) 56 56 57 - #if XFS_BIG_INUMS 58 57 #define XFS_MAXINUMBER ((xfs_ino_t)((1ULL << 56) - 1ULL)) 59 - #else 60 - #define XFS_MAXINUMBER ((xfs_ino_t)((1ULL << 32) - 1ULL)) 61 - #endif 62 58 #define XFS_MAXINUMBER_32 ((xfs_ino_t)((1ULL << 32) - 1ULL)) 63 59 64 60 #endif /* __XFS_INUM_H__ */
+130 -136
fs/xfs/xfs_ioctl.c
··· 207 207 struct path path; 208 208 209 209 if (!capable(CAP_SYS_ADMIN)) 210 - return -XFS_ERROR(EPERM); 210 + return -EPERM; 211 211 212 212 dentry = xfs_handlereq_to_dentry(parfilp, hreq); 213 213 if (IS_ERR(dentry)) ··· 216 216 217 217 /* Restrict xfs_open_by_handle to directories & regular files. */ 218 218 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) { 219 - error = -XFS_ERROR(EPERM); 219 + error = -EPERM; 220 220 goto out_dput; 221 221 } 222 222 ··· 228 228 fmode = OPEN_FMODE(permflag); 229 229 if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) && 230 230 (fmode & FMODE_WRITE) && IS_APPEND(inode)) { 231 - error = -XFS_ERROR(EPERM); 231 + error = -EPERM; 232 232 goto out_dput; 233 233 } 234 234 235 235 if ((fmode & FMODE_WRITE) && IS_IMMUTABLE(inode)) { 236 - error = -XFS_ERROR(EACCES); 236 + error = -EACCES; 237 237 goto out_dput; 238 238 } 239 239 240 240 /* Can't write directories. */ 241 241 if (S_ISDIR(inode->i_mode) && (fmode & FMODE_WRITE)) { 242 - error = -XFS_ERROR(EISDIR); 242 + error = -EISDIR; 243 243 goto out_dput; 244 244 } 245 245 ··· 282 282 int error; 283 283 284 284 if (!capable(CAP_SYS_ADMIN)) 285 - return -XFS_ERROR(EPERM); 285 + return -EPERM; 286 286 287 287 dentry = xfs_handlereq_to_dentry(parfilp, hreq); 288 288 if (IS_ERR(dentry)) ··· 290 290 291 291 /* Restrict this handle operation to symlinks only. 
*/ 292 292 if (!S_ISLNK(dentry->d_inode->i_mode)) { 293 - error = -XFS_ERROR(EINVAL); 293 + error = -EINVAL; 294 294 goto out_dput; 295 295 } 296 296 297 297 if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) { 298 - error = -XFS_ERROR(EFAULT); 298 + error = -EFAULT; 299 299 goto out_dput; 300 300 } 301 301 302 302 link = kmalloc(MAXPATHLEN+1, GFP_KERNEL); 303 303 if (!link) { 304 - error = -XFS_ERROR(ENOMEM); 304 + error = -ENOMEM; 305 305 goto out_dput; 306 306 } 307 307 308 - error = -xfs_readlink(XFS_I(dentry->d_inode), link); 308 + error = xfs_readlink(XFS_I(dentry->d_inode), link); 309 309 if (error) 310 310 goto out_kfree; 311 311 error = readlink_copy(hreq->ohandle, olen, link); ··· 330 330 int error; 331 331 332 332 if (!capable(CAP_SYS_ADMIN)) 333 - return XFS_ERROR(EPERM); 333 + return -EPERM; 334 334 335 335 if (XFS_FORCED_SHUTDOWN(mp)) 336 - return XFS_ERROR(EIO); 336 + return -EIO; 337 337 338 338 tp = xfs_trans_alloc(mp, XFS_TRANS_SET_DMATTRS); 339 339 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0); ··· 364 364 struct dentry *dentry; 365 365 366 366 if (!capable(CAP_MKNOD)) 367 - return -XFS_ERROR(EPERM); 367 + return -EPERM; 368 368 if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t))) 369 - return -XFS_ERROR(EFAULT); 369 + return -EFAULT; 370 370 371 371 error = mnt_want_write_file(parfilp); 372 372 if (error) ··· 379 379 } 380 380 381 381 if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) { 382 - error = -XFS_ERROR(EPERM); 382 + error = -EPERM; 383 383 goto out; 384 384 } 385 385 386 386 if (copy_from_user(&fsd, dmhreq.data, sizeof(fsd))) { 387 - error = -XFS_ERROR(EFAULT); 387 + error = -EFAULT; 388 388 goto out; 389 389 } 390 390 391 - error = -xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask, 391 + error = xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask, 392 392 fsd.fsd_dmstate); 393 393 394 394 out: ··· 409 409 char *kbuf; 410 410 411 411 if (!capable(CAP_SYS_ADMIN)) 412 - 
return -XFS_ERROR(EPERM); 412 + return -EPERM; 413 413 if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t))) 414 - return -XFS_ERROR(EFAULT); 414 + return -EFAULT; 415 415 if (al_hreq.buflen < sizeof(struct attrlist) || 416 416 al_hreq.buflen > XATTR_LIST_MAX) 417 - return -XFS_ERROR(EINVAL); 417 + return -EINVAL; 418 418 419 419 /* 420 420 * Reject flags, only allow namespaces. 421 421 */ 422 422 if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE)) 423 - return -XFS_ERROR(EINVAL); 423 + return -EINVAL; 424 424 425 425 dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq); 426 426 if (IS_ERR(dentry)) ··· 431 431 goto out_dput; 432 432 433 433 cursor = (attrlist_cursor_kern_t *)&al_hreq.pos; 434 - error = -xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen, 434 + error = xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen, 435 435 al_hreq.flags, cursor); 436 436 if (error) 437 437 goto out_kfree; ··· 455 455 __uint32_t flags) 456 456 { 457 457 unsigned char *kbuf; 458 - int error = EFAULT; 458 + int error = -EFAULT; 459 459 460 460 if (*len > XATTR_SIZE_MAX) 461 - return EINVAL; 461 + return -EINVAL; 462 462 kbuf = kmem_zalloc_large(*len, KM_SLEEP); 463 463 if (!kbuf) 464 - return ENOMEM; 464 + return -ENOMEM; 465 465 466 466 error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags); 467 467 if (error) 468 468 goto out_kfree; 469 469 470 470 if (copy_to_user(ubuf, kbuf, *len)) 471 - error = EFAULT; 471 + error = -EFAULT; 472 472 473 473 out_kfree: 474 474 kmem_free(kbuf); ··· 484 484 __uint32_t flags) 485 485 { 486 486 unsigned char *kbuf; 487 - int error = EFAULT; 488 487 489 488 if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) 490 - return EPERM; 489 + return -EPERM; 491 490 if (len > XATTR_SIZE_MAX) 492 - return EINVAL; 491 + return -EINVAL; 493 492 494 493 kbuf = memdup_user(ubuf, len); 495 494 if (IS_ERR(kbuf)) 496 495 return PTR_ERR(kbuf); 497 496 498 - error = xfs_attr_set(XFS_I(inode), name, kbuf, len, flags); 499 - 500 - 
return error; 497 + return xfs_attr_set(XFS_I(inode), name, kbuf, len, flags); 501 498 } 502 499 503 500 int ··· 504 507 __uint32_t flags) 505 508 { 506 509 if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) 507 - return EPERM; 510 + return -EPERM; 508 511 return xfs_attr_remove(XFS_I(inode), name, flags); 509 512 } 510 513 ··· 521 524 unsigned char *attr_name; 522 525 523 526 if (!capable(CAP_SYS_ADMIN)) 524 - return -XFS_ERROR(EPERM); 527 + return -EPERM; 525 528 if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t))) 526 - return -XFS_ERROR(EFAULT); 529 + return -EFAULT; 527 530 528 531 /* overflow check */ 529 532 if (am_hreq.opcount >= INT_MAX / sizeof(xfs_attr_multiop_t)) ··· 533 536 if (IS_ERR(dentry)) 534 537 return PTR_ERR(dentry); 535 538 536 - error = E2BIG; 539 + error = -E2BIG; 537 540 size = am_hreq.opcount * sizeof(xfs_attr_multiop_t); 538 541 if (!size || size > 16 * PAGE_SIZE) 539 542 goto out_dput; 540 543 541 544 ops = memdup_user(am_hreq.ops, size); 542 545 if (IS_ERR(ops)) { 543 - error = -PTR_ERR(ops); 546 + error = PTR_ERR(ops); 544 547 goto out_dput; 545 548 } 546 549 547 - error = ENOMEM; 550 + error = -ENOMEM; 548 551 attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL); 549 552 if (!attr_name) 550 553 goto out_kfree_ops; ··· 554 557 ops[i].am_error = strncpy_from_user((char *)attr_name, 555 558 ops[i].am_attrname, MAXNAMELEN); 556 559 if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN) 557 - error = ERANGE; 560 + error = -ERANGE; 558 561 if (ops[i].am_error < 0) 559 562 break; 560 563 ··· 585 588 mnt_drop_write_file(parfilp); 586 589 break; 587 590 default: 588 - ops[i].am_error = EINVAL; 591 + ops[i].am_error = -EINVAL; 589 592 } 590 593 } 591 594 592 595 if (copy_to_user(am_hreq.ops, ops, size)) 593 - error = XFS_ERROR(EFAULT); 596 + error = -EFAULT; 594 597 595 598 kfree(attr_name); 596 599 out_kfree_ops: 597 600 kfree(ops); 598 601 out_dput: 599 602 dput(dentry); 600 - return -error; 603 + return error; 601 604 } 602 605 
603 606 int ··· 622 625 */ 623 626 if (!xfs_sb_version_hasextflgbit(&ip->i_mount->m_sb) && 624 627 !capable(CAP_SYS_ADMIN)) 625 - return -XFS_ERROR(EPERM); 628 + return -EPERM; 626 629 627 630 if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) 628 - return -XFS_ERROR(EPERM); 631 + return -EPERM; 629 632 630 633 if (!(filp->f_mode & FMODE_WRITE)) 631 - return -XFS_ERROR(EBADF); 634 + return -EBADF; 632 635 633 636 if (!S_ISREG(inode->i_mode)) 634 - return -XFS_ERROR(EINVAL); 637 + return -EINVAL; 635 638 636 639 error = mnt_want_write_file(filp); 637 640 if (error) ··· 649 652 bf->l_start += XFS_ISIZE(ip); 650 653 break; 651 654 default: 652 - error = XFS_ERROR(EINVAL); 655 + error = -EINVAL; 653 656 goto out_unlock; 654 657 } 655 658 ··· 666 669 case XFS_IOC_UNRESVSP: 667 670 case XFS_IOC_UNRESVSP64: 668 671 if (bf->l_len <= 0) { 669 - error = XFS_ERROR(EINVAL); 672 + error = -EINVAL; 670 673 goto out_unlock; 671 674 } 672 675 break; ··· 679 682 bf->l_start > mp->m_super->s_maxbytes || 680 683 bf->l_start + bf->l_len < 0 || 681 684 bf->l_start + bf->l_len >= mp->m_super->s_maxbytes) { 682 - error = XFS_ERROR(EINVAL); 685 + error = -EINVAL; 683 686 goto out_unlock; 684 687 } 685 688 ··· 720 723 break; 721 724 default: 722 725 ASSERT(0); 723 - error = XFS_ERROR(EINVAL); 726 + error = -EINVAL; 724 727 } 725 728 726 729 if (error) ··· 736 739 xfs_ilock(ip, XFS_ILOCK_EXCL); 737 740 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 738 741 739 - if (!(ioflags & IO_INVIS)) { 742 + if (!(ioflags & XFS_IO_INVIS)) { 740 743 ip->i_d.di_mode &= ~S_ISUID; 741 744 if (ip->i_d.di_mode & S_IXGRP) 742 745 ip->i_d.di_mode &= ~S_ISGID; ··· 756 759 out_unlock: 757 760 xfs_iunlock(ip, XFS_IOLOCK_EXCL); 758 761 mnt_drop_write_file(filp); 759 - return -error; 762 + return error; 760 763 } 761 764 762 765 STATIC int ··· 778 781 return -EPERM; 779 782 780 783 if (XFS_FORCED_SHUTDOWN(mp)) 781 - return -XFS_ERROR(EIO); 784 + return -EIO; 782 785 783 786 if (copy_from_user(&bulkreq, arg, 
sizeof(xfs_fsop_bulkreq_t))) 784 - return -XFS_ERROR(EFAULT); 787 + return -EFAULT; 785 788 786 789 if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64))) 787 - return -XFS_ERROR(EFAULT); 790 + return -EFAULT; 788 791 789 792 if ((count = bulkreq.icount) <= 0) 790 - return -XFS_ERROR(EINVAL); 793 + return -EINVAL; 791 794 792 795 if (bulkreq.ubuffer == NULL) 793 - return -XFS_ERROR(EINVAL); 796 + return -EINVAL; 794 797 795 798 if (cmd == XFS_IOC_FSINUMBERS) 796 799 error = xfs_inumbers(mp, &inlast, &count, 797 800 bulkreq.ubuffer, xfs_inumbers_fmt); 798 801 else if (cmd == XFS_IOC_FSBULKSTAT_SINGLE) 799 - error = xfs_bulkstat_single(mp, &inlast, 800 - bulkreq.ubuffer, &done); 802 + error = xfs_bulkstat_one(mp, inlast, bulkreq.ubuffer, 803 + sizeof(xfs_bstat_t), NULL, &done); 801 804 else /* XFS_IOC_FSBULKSTAT */ 802 805 error = xfs_bulkstat(mp, &inlast, &count, xfs_bulkstat_one, 803 806 sizeof(xfs_bstat_t), bulkreq.ubuffer, 804 807 &done); 805 808 806 809 if (error) 807 - return -error; 810 + return error; 808 811 809 812 if (bulkreq.ocount != NULL) { 810 813 if (copy_to_user(bulkreq.lastip, &inlast, 811 814 sizeof(xfs_ino_t))) 812 - return -XFS_ERROR(EFAULT); 815 + return -EFAULT; 813 816 814 817 if (copy_to_user(bulkreq.ocount, &count, sizeof(count))) 815 - return -XFS_ERROR(EFAULT); 818 + return -EFAULT; 816 819 } 817 820 818 821 return 0; ··· 828 831 829 832 error = xfs_fs_geometry(mp, &fsgeo, 3); 830 833 if (error) 831 - return -error; 834 + return error; 832 835 833 836 /* 834 837 * Caller should have passed an argument of type ··· 836 839 * xfs_fsop_geom_t that xfs_fs_geometry() fills in. 
837 840 */ 838 841 if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t))) 839 - return -XFS_ERROR(EFAULT); 842 + return -EFAULT; 840 843 return 0; 841 844 } 842 845 ··· 850 853 851 854 error = xfs_fs_geometry(mp, &fsgeo, 4); 852 855 if (error) 853 - return -error; 856 + return error; 854 857 855 858 if (copy_to_user(arg, &fsgeo, sizeof(fsgeo))) 856 - return -XFS_ERROR(EFAULT); 859 + return -EFAULT; 857 860 return 0; 858 861 } 859 862 ··· 1038 1041 trace_xfs_ioctl_setattr(ip); 1039 1042 1040 1043 if (mp->m_flags & XFS_MOUNT_RDONLY) 1041 - return XFS_ERROR(EROFS); 1044 + return -EROFS; 1042 1045 if (XFS_FORCED_SHUTDOWN(mp)) 1043 - return XFS_ERROR(EIO); 1046 + return -EIO; 1044 1047 1045 1048 /* 1046 1049 * Disallow 32bit project ids when projid32bit feature is not enabled. 1047 1050 */ 1048 1051 if ((mask & FSX_PROJID) && (fa->fsx_projid > (__uint16_t)-1) && 1049 1052 !xfs_sb_version_hasprojid32bit(&ip->i_mount->m_sb)) 1050 - return XFS_ERROR(EINVAL); 1053 + return -EINVAL; 1051 1054 1052 1055 /* 1053 1056 * If disk quotas is on, we make sure that the dquots do exist on disk, ··· 1085 1088 * CAP_FSETID capability is applicable. 1086 1089 */ 1087 1090 if (!inode_owner_or_capable(VFS_I(ip))) { 1088 - code = XFS_ERROR(EPERM); 1091 + code = -EPERM; 1089 1092 goto error_return; 1090 1093 } 1091 1094 ··· 1096 1099 */ 1097 1100 if (mask & FSX_PROJID) { 1098 1101 if (current_user_ns() != &init_user_ns) { 1099 - code = XFS_ERROR(EINVAL); 1102 + code = -EINVAL; 1100 1103 goto error_return; 1101 1104 } 1102 1105 ··· 1119 1122 if (ip->i_d.di_nextents && 1120 1123 ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != 1121 1124 fa->fsx_extsize)) { 1122 - code = XFS_ERROR(EINVAL); /* EFBIG? */ 1125 + code = -EINVAL; /* EFBIG? 
*/ 1123 1126 goto error_return; 1124 1127 } 1125 1128 ··· 1138 1141 1139 1142 extsize_fsb = XFS_B_TO_FSB(mp, fa->fsx_extsize); 1140 1143 if (extsize_fsb > MAXEXTLEN) { 1141 - code = XFS_ERROR(EINVAL); 1144 + code = -EINVAL; 1142 1145 goto error_return; 1143 1146 } 1144 1147 ··· 1150 1153 } else { 1151 1154 size = mp->m_sb.sb_blocksize; 1152 1155 if (extsize_fsb > mp->m_sb.sb_agblocks / 2) { 1153 - code = XFS_ERROR(EINVAL); 1156 + code = -EINVAL; 1154 1157 goto error_return; 1155 1158 } 1156 1159 } 1157 1160 1158 1161 if (fa->fsx_extsize % size) { 1159 - code = XFS_ERROR(EINVAL); 1162 + code = -EINVAL; 1160 1163 goto error_return; 1161 1164 } 1162 1165 } ··· 1170 1173 if ((ip->i_d.di_nextents || ip->i_delayed_blks) && 1171 1174 (XFS_IS_REALTIME_INODE(ip)) != 1172 1175 (fa->fsx_xflags & XFS_XFLAG_REALTIME)) { 1173 - code = XFS_ERROR(EINVAL); /* EFBIG? */ 1176 + code = -EINVAL; /* EFBIG? */ 1174 1177 goto error_return; 1175 1178 } 1176 1179 ··· 1181 1184 if ((mp->m_sb.sb_rblocks == 0) || 1182 1185 (mp->m_sb.sb_rextsize == 0) || 1183 1186 (ip->i_d.di_extsize % mp->m_sb.sb_rextsize)) { 1184 - code = XFS_ERROR(EINVAL); 1187 + code = -EINVAL; 1185 1188 goto error_return; 1186 1189 } 1187 1190 } ··· 1195 1198 (fa->fsx_xflags & 1196 1199 (XFS_XFLAG_IMMUTABLE | XFS_XFLAG_APPEND))) && 1197 1200 !capable(CAP_LINUX_IMMUTABLE)) { 1198 - code = XFS_ERROR(EPERM); 1201 + code = -EPERM; 1199 1202 goto error_return; 1200 1203 } 1201 1204 } ··· 1298 1301 return error; 1299 1302 error = xfs_ioctl_setattr(ip, &fa, mask); 1300 1303 mnt_drop_write_file(filp); 1301 - return -error; 1304 + return error; 1302 1305 } 1303 1306 1304 1307 STATIC int ··· 1343 1346 return error; 1344 1347 error = xfs_ioctl_setattr(ip, &fa, mask); 1345 1348 mnt_drop_write_file(filp); 1346 - return -error; 1349 + return error; 1347 1350 } 1348 1351 1349 1352 STATIC int ··· 1353 1356 1354 1357 /* copy only getbmap portion (not getbmapx) */ 1355 1358 if (copy_to_user(base, bmv, sizeof(struct getbmap))) 1356 - return 
XFS_ERROR(EFAULT); 1359 + return -EFAULT; 1357 1360 1358 1361 *ap += sizeof(struct getbmap); 1359 1362 return 0; ··· 1370 1373 int error; 1371 1374 1372 1375 if (copy_from_user(&bmx, arg, sizeof(struct getbmapx))) 1373 - return -XFS_ERROR(EFAULT); 1376 + return -EFAULT; 1374 1377 1375 1378 if (bmx.bmv_count < 2) 1376 - return -XFS_ERROR(EINVAL); 1379 + return -EINVAL; 1377 1380 1378 1381 bmx.bmv_iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0); 1379 - if (ioflags & IO_INVIS) 1382 + if (ioflags & XFS_IO_INVIS) 1380 1383 bmx.bmv_iflags |= BMV_IF_NO_DMAPI_READ; 1381 1384 1382 1385 error = xfs_getbmap(ip, &bmx, xfs_getbmap_format, 1383 1386 (struct getbmap *)arg+1); 1384 1387 if (error) 1385 - return -error; 1388 + return error; 1386 1389 1387 1390 /* copy back header - only size of getbmap */ 1388 1391 if (copy_to_user(arg, &bmx, sizeof(struct getbmap))) 1389 - return -XFS_ERROR(EFAULT); 1392 + return -EFAULT; 1390 1393 return 0; 1391 1394 } 1392 1395 ··· 1396 1399 struct getbmapx __user *base = *ap; 1397 1400 1398 1401 if (copy_to_user(base, bmv, sizeof(struct getbmapx))) 1399 - return XFS_ERROR(EFAULT); 1402 + return -EFAULT; 1400 1403 1401 1404 *ap += sizeof(struct getbmapx); 1402 1405 return 0; ··· 1411 1414 int error; 1412 1415 1413 1416 if (copy_from_user(&bmx, arg, sizeof(bmx))) 1414 - return -XFS_ERROR(EFAULT); 1417 + return -EFAULT; 1415 1418 1416 1419 if (bmx.bmv_count < 2) 1417 - return -XFS_ERROR(EINVAL); 1420 + return -EINVAL; 1418 1421 1419 1422 if (bmx.bmv_iflags & (~BMV_IF_VALID)) 1420 - return -XFS_ERROR(EINVAL); 1423 + return -EINVAL; 1421 1424 1422 1425 error = xfs_getbmap(ip, &bmx, xfs_getbmapx_format, 1423 1426 (struct getbmapx *)arg+1); 1424 1427 if (error) 1425 - return -error; 1428 + return error; 1426 1429 1427 1430 /* copy back header */ 1428 1431 if (copy_to_user(arg, &bmx, sizeof(struct getbmapx))) 1429 - return -XFS_ERROR(EFAULT); 1432 + return -EFAULT; 1430 1433 1431 1434 return 0; 1432 1435 } ··· 1442 1445 /* Pull information for 
the target fd */ 1443 1446 f = fdget((int)sxp->sx_fdtarget); 1444 1447 if (!f.file) { 1445 - error = XFS_ERROR(EINVAL); 1448 + error = -EINVAL; 1446 1449 goto out; 1447 1450 } 1448 1451 1449 1452 if (!(f.file->f_mode & FMODE_WRITE) || 1450 1453 !(f.file->f_mode & FMODE_READ) || 1451 1454 (f.file->f_flags & O_APPEND)) { 1452 - error = XFS_ERROR(EBADF); 1455 + error = -EBADF; 1453 1456 goto out_put_file; 1454 1457 } 1455 1458 1456 1459 tmp = fdget((int)sxp->sx_fdtmp); 1457 1460 if (!tmp.file) { 1458 - error = XFS_ERROR(EINVAL); 1461 + error = -EINVAL; 1459 1462 goto out_put_file; 1460 1463 } 1461 1464 1462 1465 if (!(tmp.file->f_mode & FMODE_WRITE) || 1463 1466 !(tmp.file->f_mode & FMODE_READ) || 1464 1467 (tmp.file->f_flags & O_APPEND)) { 1465 - error = XFS_ERROR(EBADF); 1468 + error = -EBADF; 1466 1469 goto out_put_tmp_file; 1467 1470 } 1468 1471 1469 1472 if (IS_SWAPFILE(file_inode(f.file)) || 1470 1473 IS_SWAPFILE(file_inode(tmp.file))) { 1471 - error = XFS_ERROR(EINVAL); 1474 + error = -EINVAL; 1472 1475 goto out_put_tmp_file; 1473 1476 } 1474 1477 ··· 1476 1479 tip = XFS_I(file_inode(tmp.file)); 1477 1480 1478 1481 if (ip->i_mount != tip->i_mount) { 1479 - error = XFS_ERROR(EINVAL); 1482 + error = -EINVAL; 1480 1483 goto out_put_tmp_file; 1481 1484 } 1482 1485 1483 1486 if (ip->i_ino == tip->i_ino) { 1484 - error = XFS_ERROR(EINVAL); 1487 + error = -EINVAL; 1485 1488 goto out_put_tmp_file; 1486 1489 } 1487 1490 1488 1491 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { 1489 - error = XFS_ERROR(EIO); 1492 + error = -EIO; 1490 1493 goto out_put_tmp_file; 1491 1494 } 1492 1495 ··· 1520 1523 int error; 1521 1524 1522 1525 if (filp->f_mode & FMODE_NOCMTIME) 1523 - ioflags |= IO_INVIS; 1526 + ioflags |= XFS_IO_INVIS; 1524 1527 1525 1528 trace_xfs_file_ioctl(ip); 1526 1529 ··· 1539 1542 xfs_flock64_t bf; 1540 1543 1541 1544 if (copy_from_user(&bf, arg, sizeof(bf))) 1542 - return -XFS_ERROR(EFAULT); 1545 + return -EFAULT; 1543 1546 return xfs_ioc_space(ip, inode, filp, ioflags, 
cmd, &bf); 1544 1547 } 1545 1548 case XFS_IOC_DIOINFO: { ··· 1552 1555 da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1); 1553 1556 1554 1557 if (copy_to_user(arg, &da, sizeof(da))) 1555 - return -XFS_ERROR(EFAULT); 1558 + return -EFAULT; 1556 1559 return 0; 1557 1560 } 1558 1561 ··· 1585 1588 struct fsdmidata dmi; 1586 1589 1587 1590 if (copy_from_user(&dmi, arg, sizeof(dmi))) 1588 - return -XFS_ERROR(EFAULT); 1591 + return -EFAULT; 1589 1592 1590 1593 error = mnt_want_write_file(filp); 1591 1594 if (error) ··· 1594 1597 error = xfs_set_dmattrs(ip, dmi.fsd_dmevmask, 1595 1598 dmi.fsd_dmstate); 1596 1599 mnt_drop_write_file(filp); 1597 - return -error; 1600 + return error; 1598 1601 } 1599 1602 1600 1603 case XFS_IOC_GETBMAP: ··· 1610 1613 xfs_fsop_handlereq_t hreq; 1611 1614 1612 1615 if (copy_from_user(&hreq, arg, sizeof(hreq))) 1613 - return -XFS_ERROR(EFAULT); 1616 + return -EFAULT; 1614 1617 return xfs_find_handle(cmd, &hreq); 1615 1618 } 1616 1619 case XFS_IOC_OPEN_BY_HANDLE: { 1617 1620 xfs_fsop_handlereq_t hreq; 1618 1621 1619 1622 if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) 1620 - return -XFS_ERROR(EFAULT); 1623 + return -EFAULT; 1621 1624 return xfs_open_by_handle(filp, &hreq); 1622 1625 } 1623 1626 case XFS_IOC_FSSETDM_BY_HANDLE: ··· 1627 1630 xfs_fsop_handlereq_t hreq; 1628 1631 1629 1632 if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t))) 1630 - return -XFS_ERROR(EFAULT); 1633 + return -EFAULT; 1631 1634 return xfs_readlink_by_handle(filp, &hreq); 1632 1635 } 1633 1636 case XFS_IOC_ATTRLIST_BY_HANDLE: ··· 1640 1643 struct xfs_swapext sxp; 1641 1644 1642 1645 if (copy_from_user(&sxp, arg, sizeof(xfs_swapext_t))) 1643 - return -XFS_ERROR(EFAULT); 1646 + return -EFAULT; 1644 1647 error = mnt_want_write_file(filp); 1645 1648 if (error) 1646 1649 return error; 1647 1650 error = xfs_ioc_swapext(&sxp); 1648 1651 mnt_drop_write_file(filp); 1649 - return -error; 1652 + return error; 1650 1653 } 1651 1654 1652 1655 case XFS_IOC_FSCOUNTS: { 
··· 1654 1657 1655 1658 error = xfs_fs_counts(mp, &out); 1656 1659 if (error) 1657 - return -error; 1660 + return error; 1658 1661 1659 1662 if (copy_to_user(arg, &out, sizeof(out))) 1660 - return -XFS_ERROR(EFAULT); 1663 + return -EFAULT; 1661 1664 return 0; 1662 1665 } 1663 1666 ··· 1669 1672 return -EPERM; 1670 1673 1671 1674 if (mp->m_flags & XFS_MOUNT_RDONLY) 1672 - return -XFS_ERROR(EROFS); 1675 + return -EROFS; 1673 1676 1674 1677 if (copy_from_user(&inout, arg, sizeof(inout))) 1675 - return -XFS_ERROR(EFAULT); 1678 + return -EFAULT; 1676 1679 1677 1680 error = mnt_want_write_file(filp); 1678 1681 if (error) ··· 1683 1686 error = xfs_reserve_blocks(mp, &in, &inout); 1684 1687 mnt_drop_write_file(filp); 1685 1688 if (error) 1686 - return -error; 1689 + return error; 1687 1690 1688 1691 if (copy_to_user(arg, &inout, sizeof(inout))) 1689 - return -XFS_ERROR(EFAULT); 1692 + return -EFAULT; 1690 1693 return 0; 1691 1694 } 1692 1695 ··· 1698 1701 1699 1702 error = xfs_reserve_blocks(mp, NULL, &out); 1700 1703 if (error) 1701 - return -error; 1704 + return error; 1702 1705 1703 1706 if (copy_to_user(arg, &out, sizeof(out))) 1704 - return -XFS_ERROR(EFAULT); 1707 + return -EFAULT; 1705 1708 1706 1709 return 0; 1707 1710 } ··· 1710 1713 xfs_growfs_data_t in; 1711 1714 1712 1715 if (copy_from_user(&in, arg, sizeof(in))) 1713 - return -XFS_ERROR(EFAULT); 1716 + return -EFAULT; 1714 1717 1715 1718 error = mnt_want_write_file(filp); 1716 1719 if (error) 1717 1720 return error; 1718 1721 error = xfs_growfs_data(mp, &in); 1719 1722 mnt_drop_write_file(filp); 1720 - return -error; 1723 + return error; 1721 1724 } 1722 1725 1723 1726 case XFS_IOC_FSGROWFSLOG: { 1724 1727 xfs_growfs_log_t in; 1725 1728 1726 1729 if (copy_from_user(&in, arg, sizeof(in))) 1727 - return -XFS_ERROR(EFAULT); 1730 + return -EFAULT; 1728 1731 1729 1732 error = mnt_want_write_file(filp); 1730 1733 if (error) 1731 1734 return error; 1732 1735 error = xfs_growfs_log(mp, &in); 1733 1736 
mnt_drop_write_file(filp); 1734 - return -error; 1737 + return error; 1735 1738 } 1736 1739 1737 1740 case XFS_IOC_FSGROWFSRT: { 1738 1741 xfs_growfs_rt_t in; 1739 1742 1740 1743 if (copy_from_user(&in, arg, sizeof(in))) 1741 - return -XFS_ERROR(EFAULT); 1744 + return -EFAULT; 1742 1745 1743 1746 error = mnt_want_write_file(filp); 1744 1747 if (error) 1745 1748 return error; 1746 1749 error = xfs_growfs_rt(mp, &in); 1747 1750 mnt_drop_write_file(filp); 1748 - return -error; 1751 + return error; 1749 1752 } 1750 1753 1751 1754 case XFS_IOC_GOINGDOWN: { ··· 1755 1758 return -EPERM; 1756 1759 1757 1760 if (get_user(in, (__uint32_t __user *)arg)) 1758 - return -XFS_ERROR(EFAULT); 1761 + return -EFAULT; 1759 1762 1760 - error = xfs_fs_goingdown(mp, in); 1761 - return -error; 1763 + return xfs_fs_goingdown(mp, in); 1762 1764 } 1763 1765 1764 1766 case XFS_IOC_ERROR_INJECTION: { ··· 1767 1771 return -EPERM; 1768 1772 1769 1773 if (copy_from_user(&in, arg, sizeof(in))) 1770 - return -XFS_ERROR(EFAULT); 1774 + return -EFAULT; 1771 1775 1772 - error = xfs_errortag_add(in.errtag, mp); 1773 - return -error; 1776 + return xfs_errortag_add(in.errtag, mp); 1774 1777 } 1775 1778 1776 1779 case XFS_IOC_ERROR_CLEARALL: 1777 1780 if (!capable(CAP_SYS_ADMIN)) 1778 1781 return -EPERM; 1779 1782 1780 - error = xfs_errortag_clearall(mp, 1); 1781 - return -error; 1783 + return xfs_errortag_clearall(mp, 1); 1782 1784 1783 1785 case XFS_IOC_FREE_EOFBLOCKS: { 1784 1786 struct xfs_fs_eofblocks eofb; ··· 1786 1792 return -EPERM; 1787 1793 1788 1794 if (mp->m_flags & XFS_MOUNT_RDONLY) 1789 - return -XFS_ERROR(EROFS); 1795 + return -EROFS; 1790 1796 1791 1797 if (copy_from_user(&eofb, arg, sizeof(eofb))) 1792 - return -XFS_ERROR(EFAULT); 1798 + return -EFAULT; 1793 1799 1794 1800 error = xfs_fs_eofblocks_from_user(&eofb, &keofb); 1795 1801 if (error) 1796 - return -error; 1802 + return error; 1797 1803 1798 - return -xfs_icache_free_eofblocks(mp, &keofb); 1804 + return 
xfs_icache_free_eofblocks(mp, &keofb); 1799 1805 } 1800 1806 1801 1807 default:
+55 -56
fs/xfs/xfs_ioctl32.c
··· 28 28 #include "xfs_sb.h" 29 29 #include "xfs_ag.h" 30 30 #include "xfs_mount.h" 31 - #include "xfs_vnode.h" 32 31 #include "xfs_inode.h" 33 32 #include "xfs_itable.h" 34 33 #include "xfs_error.h" ··· 55 56 get_user(bf->l_sysid, &arg32->l_sysid) || 56 57 get_user(bf->l_pid, &arg32->l_pid) || 57 58 copy_from_user(bf->l_pad, &arg32->l_pad, 4*sizeof(u32))) 58 - return -XFS_ERROR(EFAULT); 59 + return -EFAULT; 59 60 return 0; 60 61 } 61 62 ··· 69 70 70 71 error = xfs_fs_geometry(mp, &fsgeo, 3); 71 72 if (error) 72 - return -error; 73 + return error; 73 74 /* The 32-bit variant simply has some padding at the end */ 74 75 if (copy_to_user(arg32, &fsgeo, sizeof(struct compat_xfs_fsop_geom_v1))) 75 - return -XFS_ERROR(EFAULT); 76 + return -EFAULT; 76 77 return 0; 77 78 } 78 79 ··· 83 84 { 84 85 if (get_user(in->newblocks, &arg32->newblocks) || 85 86 get_user(in->imaxpct, &arg32->imaxpct)) 86 - return -XFS_ERROR(EFAULT); 87 + return -EFAULT; 87 88 return 0; 88 89 } 89 90 ··· 94 95 { 95 96 if (get_user(in->newblocks, &arg32->newblocks) || 96 97 get_user(in->extsize, &arg32->extsize)) 97 - return -XFS_ERROR(EFAULT); 98 + return -EFAULT; 98 99 return 0; 99 100 } 100 101 101 102 STATIC int 102 103 xfs_inumbers_fmt_compat( 103 104 void __user *ubuffer, 104 - const xfs_inogrp_t *buffer, 105 + const struct xfs_inogrp *buffer, 105 106 long count, 106 107 long *written) 107 108 { ··· 112 113 if (put_user(buffer[i].xi_startino, &p32[i].xi_startino) || 113 114 put_user(buffer[i].xi_alloccount, &p32[i].xi_alloccount) || 114 115 put_user(buffer[i].xi_allocmask, &p32[i].xi_allocmask)) 115 - return -XFS_ERROR(EFAULT); 116 + return -EFAULT; 116 117 } 117 118 *written = count * sizeof(*p32); 118 119 return 0; ··· 131 132 132 133 if (get_user(sec32, &bstime32->tv_sec) || 133 134 get_user(bstime->tv_nsec, &bstime32->tv_nsec)) 134 - return -XFS_ERROR(EFAULT); 135 + return -EFAULT; 135 136 bstime->tv_sec = sec32; 136 137 return 0; 137 138 } ··· 163 164 get_user(bstat->bs_dmevmask, 
&bstat32->bs_dmevmask) || 164 165 get_user(bstat->bs_dmstate, &bstat32->bs_dmstate) || 165 166 get_user(bstat->bs_aextents, &bstat32->bs_aextents)) 166 - return -XFS_ERROR(EFAULT); 167 + return -EFAULT; 167 168 return 0; 168 169 } 169 170 ··· 179 180 sec32 = p->tv_sec; 180 181 if (put_user(sec32, &p32->tv_sec) || 181 182 put_user(p->tv_nsec, &p32->tv_nsec)) 182 - return -XFS_ERROR(EFAULT); 183 + return -EFAULT; 183 184 return 0; 184 185 } 185 186 ··· 194 195 compat_xfs_bstat_t __user *p32 = ubuffer; 195 196 196 197 if (ubsize < sizeof(*p32)) 197 - return XFS_ERROR(ENOMEM); 198 + return -ENOMEM; 198 199 199 200 if (put_user(buffer->bs_ino, &p32->bs_ino) || 200 201 put_user(buffer->bs_mode, &p32->bs_mode) || ··· 217 218 put_user(buffer->bs_dmevmask, &p32->bs_dmevmask) || 218 219 put_user(buffer->bs_dmstate, &p32->bs_dmstate) || 219 220 put_user(buffer->bs_aextents, &p32->bs_aextents)) 220 - return XFS_ERROR(EFAULT); 221 + return -EFAULT; 221 222 if (ubused) 222 223 *ubused = sizeof(*p32); 223 224 return 0; ··· 255 256 /* should be called again (unused here, but used in dmapi) */ 256 257 257 258 if (!capable(CAP_SYS_ADMIN)) 258 - return -XFS_ERROR(EPERM); 259 + return -EPERM; 259 260 260 261 if (XFS_FORCED_SHUTDOWN(mp)) 261 - return -XFS_ERROR(EIO); 262 + return -EIO; 262 263 263 264 if (get_user(addr, &p32->lastip)) 264 - return -XFS_ERROR(EFAULT); 265 + return -EFAULT; 265 266 bulkreq.lastip = compat_ptr(addr); 266 267 if (get_user(bulkreq.icount, &p32->icount) || 267 268 get_user(addr, &p32->ubuffer)) 268 - return -XFS_ERROR(EFAULT); 269 + return -EFAULT; 269 270 bulkreq.ubuffer = compat_ptr(addr); 270 271 if (get_user(addr, &p32->ocount)) 271 - return -XFS_ERROR(EFAULT); 272 + return -EFAULT; 272 273 bulkreq.ocount = compat_ptr(addr); 273 274 274 275 if (copy_from_user(&inlast, bulkreq.lastip, sizeof(__s64))) 275 - return -XFS_ERROR(EFAULT); 276 + return -EFAULT; 276 277 277 278 if ((count = bulkreq.icount) <= 0) 278 - return -XFS_ERROR(EINVAL); 279 + return 
-EINVAL; 279 280 280 281 if (bulkreq.ubuffer == NULL) 281 - return -XFS_ERROR(EINVAL); 282 + return -EINVAL; 282 283 283 284 if (cmd == XFS_IOC_FSINUMBERS_32) { 284 285 error = xfs_inumbers(mp, &inlast, &count, ··· 293 294 xfs_bulkstat_one_compat, sizeof(compat_xfs_bstat_t), 294 295 bulkreq.ubuffer, &done); 295 296 } else 296 - error = XFS_ERROR(EINVAL); 297 + error = -EINVAL; 297 298 if (error) 298 - return -error; 299 + return error; 299 300 300 301 if (bulkreq.ocount != NULL) { 301 302 if (copy_to_user(bulkreq.lastip, &inlast, 302 303 sizeof(xfs_ino_t))) 303 - return -XFS_ERROR(EFAULT); 304 + return -EFAULT; 304 305 305 306 if (copy_to_user(bulkreq.ocount, &count, sizeof(count))) 306 - return -XFS_ERROR(EFAULT); 307 + return -EFAULT; 307 308 } 308 309 309 310 return 0; ··· 317 318 compat_xfs_fsop_handlereq_t hreq32; 318 319 319 320 if (copy_from_user(&hreq32, arg32, sizeof(compat_xfs_fsop_handlereq_t))) 320 - return -XFS_ERROR(EFAULT); 321 + return -EFAULT; 321 322 322 323 hreq->fd = hreq32.fd; 323 324 hreq->path = compat_ptr(hreq32.path); ··· 351 352 char *kbuf; 352 353 353 354 if (!capable(CAP_SYS_ADMIN)) 354 - return -XFS_ERROR(EPERM); 355 + return -EPERM; 355 356 if (copy_from_user(&al_hreq, arg, 356 357 sizeof(compat_xfs_fsop_attrlist_handlereq_t))) 357 - return -XFS_ERROR(EFAULT); 358 + return -EFAULT; 358 359 if (al_hreq.buflen < sizeof(struct attrlist) || 359 360 al_hreq.buflen > XATTR_LIST_MAX) 360 - return -XFS_ERROR(EINVAL); 361 + return -EINVAL; 361 362 362 363 /* 363 364 * Reject flags, only allow namespaces. 
364 365 */ 365 366 if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE)) 366 - return -XFS_ERROR(EINVAL); 367 + return -EINVAL; 367 368 368 369 dentry = xfs_compat_handlereq_to_dentry(parfilp, &al_hreq.hreq); 369 370 if (IS_ERR(dentry)) ··· 375 376 goto out_dput; 376 377 377 378 cursor = (attrlist_cursor_kern_t *)&al_hreq.pos; 378 - error = -xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen, 379 + error = xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen, 379 380 al_hreq.flags, cursor); 380 381 if (error) 381 382 goto out_kfree; ··· 403 404 unsigned char *attr_name; 404 405 405 406 if (!capable(CAP_SYS_ADMIN)) 406 - return -XFS_ERROR(EPERM); 407 + return -EPERM; 407 408 if (copy_from_user(&am_hreq, arg, 408 409 sizeof(compat_xfs_fsop_attrmulti_handlereq_t))) 409 - return -XFS_ERROR(EFAULT); 410 + return -EFAULT; 410 411 411 412 /* overflow check */ 412 413 if (am_hreq.opcount >= INT_MAX / sizeof(compat_xfs_attr_multiop_t)) ··· 416 417 if (IS_ERR(dentry)) 417 418 return PTR_ERR(dentry); 418 419 419 - error = E2BIG; 420 + error = -E2BIG; 420 421 size = am_hreq.opcount * sizeof(compat_xfs_attr_multiop_t); 421 422 if (!size || size > 16 * PAGE_SIZE) 422 423 goto out_dput; ··· 427 428 goto out_dput; 428 429 } 429 430 430 - error = ENOMEM; 431 + error = -ENOMEM; 431 432 attr_name = kmalloc(MAXNAMELEN, GFP_KERNEL); 432 433 if (!attr_name) 433 434 goto out_kfree_ops; ··· 438 439 compat_ptr(ops[i].am_attrname), 439 440 MAXNAMELEN); 440 441 if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN) 441 - error = ERANGE; 442 + error = -ERANGE; 442 443 if (ops[i].am_error < 0) 443 444 break; 444 445 ··· 469 470 mnt_drop_write_file(parfilp); 470 471 break; 471 472 default: 472 - ops[i].am_error = EINVAL; 473 + ops[i].am_error = -EINVAL; 473 474 } 474 475 } 475 476 476 477 if (copy_to_user(compat_ptr(am_hreq.ops), ops, size)) 477 - error = XFS_ERROR(EFAULT); 478 + error = -EFAULT; 478 479 479 480 kfree(attr_name); 480 481 out_kfree_ops: 481 482 kfree(ops); 482 483 
out_dput: 483 484 dput(dentry); 484 - return -error; 485 + return error; 485 486 } 486 487 487 488 STATIC int ··· 495 496 struct dentry *dentry; 496 497 497 498 if (!capable(CAP_MKNOD)) 498 - return -XFS_ERROR(EPERM); 499 + return -EPERM; 499 500 if (copy_from_user(&dmhreq, arg, 500 501 sizeof(compat_xfs_fsop_setdm_handlereq_t))) 501 - return -XFS_ERROR(EFAULT); 502 + return -EFAULT; 502 503 503 504 dentry = xfs_compat_handlereq_to_dentry(parfilp, &dmhreq.hreq); 504 505 if (IS_ERR(dentry)) 505 506 return PTR_ERR(dentry); 506 507 507 508 if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) { 508 - error = -XFS_ERROR(EPERM); 509 + error = -EPERM; 509 510 goto out; 510 511 } 511 512 512 513 if (copy_from_user(&fsd, compat_ptr(dmhreq.data), sizeof(fsd))) { 513 - error = -XFS_ERROR(EFAULT); 514 + error = -EFAULT; 514 515 goto out; 515 516 } 516 517 517 - error = -xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask, 518 + error = xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask, 518 519 fsd.fsd_dmstate); 519 520 520 521 out: ··· 536 537 int error; 537 538 538 539 if (filp->f_mode & FMODE_NOCMTIME) 539 - ioflags |= IO_INVIS; 540 + ioflags |= XFS_IO_INVIS; 540 541 541 542 trace_xfs_file_compat_ioctl(ip); 542 543 ··· 587 588 struct xfs_flock64 bf; 588 589 589 590 if (xfs_compat_flock64_copyin(&bf, arg)) 590 - return -XFS_ERROR(EFAULT); 591 + return -EFAULT; 591 592 cmd = _NATIVE_IOC(cmd, struct xfs_flock64); 592 593 return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf); 593 594 } ··· 597 598 struct xfs_growfs_data in; 598 599 599 600 if (xfs_compat_growfs_data_copyin(&in, arg)) 600 - return -XFS_ERROR(EFAULT); 601 + return -EFAULT; 601 602 error = mnt_want_write_file(filp); 602 603 if (error) 603 604 return error; 604 605 error = xfs_growfs_data(mp, &in); 605 606 mnt_drop_write_file(filp); 606 - return -error; 607 + return error; 607 608 } 608 609 case XFS_IOC_FSGROWFSRT_32: { 609 610 struct xfs_growfs_rt in; 610 611 611 612 if 
(xfs_compat_growfs_rt_copyin(&in, arg)) 612 - return -XFS_ERROR(EFAULT); 613 + return -EFAULT; 613 614 error = mnt_want_write_file(filp); 614 615 if (error) 615 616 return error; 616 617 error = xfs_growfs_rt(mp, &in); 617 618 mnt_drop_write_file(filp); 618 - return -error; 619 + return error; 619 620 } 620 621 #endif 621 622 /* long changes size, but xfs only copiese out 32 bits */ ··· 632 633 if (copy_from_user(&sxp, sxu, 633 634 offsetof(struct xfs_swapext, sx_stat)) || 634 635 xfs_ioctl32_bstat_copyin(&sxp.sx_stat, &sxu->sx_stat)) 635 - return -XFS_ERROR(EFAULT); 636 + return -EFAULT; 636 637 error = mnt_want_write_file(filp); 637 638 if (error) 638 639 return error; 639 640 error = xfs_ioc_swapext(&sxp); 640 641 mnt_drop_write_file(filp); 641 - return -error; 642 + return error; 642 643 } 643 644 case XFS_IOC_FSBULKSTAT_32: 644 645 case XFS_IOC_FSBULKSTAT_SINGLE_32: ··· 650 651 struct xfs_fsop_handlereq hreq; 651 652 652 653 if (xfs_compat_handlereq_copyin(&hreq, arg)) 653 - return -XFS_ERROR(EFAULT); 654 + return -EFAULT; 654 655 cmd = _NATIVE_IOC(cmd, struct xfs_fsop_handlereq); 655 656 return xfs_find_handle(cmd, &hreq); 656 657 } ··· 658 659 struct xfs_fsop_handlereq hreq; 659 660 660 661 if (xfs_compat_handlereq_copyin(&hreq, arg)) 661 - return -XFS_ERROR(EFAULT); 662 + return -EFAULT; 662 663 return xfs_open_by_handle(filp, &hreq); 663 664 } 664 665 case XFS_IOC_READLINK_BY_HANDLE_32: { 665 666 struct xfs_fsop_handlereq hreq; 666 667 667 668 if (xfs_compat_handlereq_copyin(&hreq, arg)) 668 - return -XFS_ERROR(EFAULT); 669 + return -EFAULT; 669 670 return xfs_readlink_by_handle(filp, &hreq); 670 671 } 671 672 case XFS_IOC_ATTRLIST_BY_HANDLE_32: ··· 675 676 case XFS_IOC_FSSETDM_BY_HANDLE_32: 676 677 return xfs_compat_fssetdm_by_handle(filp, arg); 677 678 default: 678 - return -XFS_ERROR(ENOIOCTLCMD); 679 + return -ENOIOCTLCMD; 679 680 } 680 681 }
+31 -23
fs/xfs/xfs_iomap.c
··· 110 110 (unsigned long long)imap->br_startoff, 111 111 (unsigned long long)imap->br_blockcount, 112 112 imap->br_state); 113 - return EFSCORRUPTED; 113 + return -EFSCORRUPTED; 114 114 } 115 115 116 116 int ··· 138 138 139 139 error = xfs_qm_dqattach(ip, 0); 140 140 if (error) 141 - return XFS_ERROR(error); 141 + return error; 142 142 143 143 rt = XFS_IS_REALTIME_INODE(ip); 144 144 extsz = xfs_get_extsz_hint(ip); ··· 148 148 if ((offset + count) > XFS_ISIZE(ip)) { 149 149 error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb); 150 150 if (error) 151 - return XFS_ERROR(error); 151 + return error; 152 152 } else { 153 153 if (nmaps && (imap->br_startblock == HOLESTARTBLOCK)) 154 154 last_fsb = MIN(last_fsb, (xfs_fileoff_t) ··· 188 188 */ 189 189 if (error) { 190 190 xfs_trans_cancel(tp, 0); 191 - return XFS_ERROR(error); 191 + return error; 192 192 } 193 193 194 194 xfs_ilock(ip, XFS_ILOCK_EXCL); ··· 225 225 * Copy any maps to caller's array and return any error. 226 226 */ 227 227 if (nimaps == 0) { 228 - error = XFS_ERROR(ENOSPC); 228 + error = -ENOSPC; 229 229 goto out_unlock; 230 230 } 231 231 ··· 397 397 struct xfs_inode *ip, 398 398 int type, 399 399 xfs_fsblock_t *qblocks, 400 - int *qshift) 400 + int *qshift, 401 + int64_t *qfreesp) 401 402 { 402 403 int64_t freesp; 403 404 int shift = 0; ··· 407 406 /* over hi wmark, squash the prealloc completely */ 408 407 if (dq->q_res_bcount >= dq->q_prealloc_hi_wmark) { 409 408 *qblocks = 0; 409 + *qfreesp = 0; 410 410 return; 411 411 } 412 412 ··· 419 417 if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT]) 420 418 shift += 2; 421 419 } 420 + 421 + if (freesp < *qfreesp) 422 + *qfreesp = freesp; 422 423 423 424 /* only overwrite the throttle values if we are more aggressive */ 424 425 if ((freesp >> shift) < (*qblocks >> *qshift)) { ··· 481 476 } 482 477 483 478 /* 484 - * Check each quota to cap the prealloc size and provide a shift 485 - * value to throttle with. 
479 + * Check each quota to cap the prealloc size, provide a shift value to 480 + * throttle with and adjust amount of available space. 486 481 */ 487 482 if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks)) 488 - xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift); 483 + xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift, 484 + &freesp); 489 485 if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks)) 490 - xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift); 486 + xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift, 487 + &freesp); 491 488 if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks)) 492 - xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift); 489 + xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift, 490 + &freesp); 493 491 494 492 /* 495 493 * The final prealloc size is set to the minimum of free space available ··· 560 552 */ 561 553 error = xfs_qm_dqattach_locked(ip, 0); 562 554 if (error) 563 - return XFS_ERROR(error); 555 + return error; 564 556 565 557 extsz = xfs_get_extsz_hint(ip); 566 558 offset_fsb = XFS_B_TO_FSBT(mp, offset); ··· 604 596 imap, &nimaps, XFS_BMAPI_ENTIRE); 605 597 switch (error) { 606 598 case 0: 607 - case ENOSPC: 608 - case EDQUOT: 599 + case -ENOSPC: 600 + case -EDQUOT: 609 601 break; 610 602 default: 611 - return XFS_ERROR(error); 603 + return error; 612 604 } 613 605 614 606 /* ··· 622 614 error = 0; 623 615 goto retry; 624 616 } 625 - return XFS_ERROR(error ? error : ENOSPC); 617 + return error ? 
error : -ENOSPC; 626 618 } 627 619 628 620 if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip))) ··· 671 663 */ 672 664 error = xfs_qm_dqattach(ip, 0); 673 665 if (error) 674 - return XFS_ERROR(error); 666 + return error; 675 667 676 668 offset_fsb = XFS_B_TO_FSBT(mp, offset); 677 669 count_fsb = imap->br_blockcount; ··· 698 690 nres, 0); 699 691 if (error) { 700 692 xfs_trans_cancel(tp, 0); 701 - return XFS_ERROR(error); 693 + return error; 702 694 } 703 695 xfs_ilock(ip, XFS_ILOCK_EXCL); 704 696 xfs_trans_ijoin(tp, ip, 0); ··· 747 739 if ((map_start_fsb + count_fsb) > last_block) { 748 740 count_fsb = last_block - map_start_fsb; 749 741 if (count_fsb == 0) { 750 - error = EAGAIN; 742 + error = -EAGAIN; 751 743 goto trans_cancel; 752 744 } 753 745 } ··· 801 793 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); 802 794 error0: 803 795 xfs_iunlock(ip, XFS_ILOCK_EXCL); 804 - return XFS_ERROR(error); 796 + return error; 805 797 } 806 798 807 799 int ··· 861 853 resblks, 0); 862 854 if (error) { 863 855 xfs_trans_cancel(tp, 0); 864 - return XFS_ERROR(error); 856 + return error; 865 857 } 866 858 867 859 xfs_ilock(ip, XFS_ILOCK_EXCL); ··· 900 892 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); 901 893 xfs_iunlock(ip, XFS_ILOCK_EXCL); 902 894 if (error) 903 - return XFS_ERROR(error); 895 + return error; 904 896 905 897 if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip))) 906 898 return xfs_alert_fsblock_zero(ip, &imap); ··· 923 915 xfs_bmap_cancel(&free_list); 924 916 xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT)); 925 917 xfs_iunlock(ip, XFS_ILOCK_EXCL); 926 - return XFS_ERROR(error); 918 + return error; 927 919 }
+36 -36
fs/xfs/xfs_iops.c
··· 72 72 int error = 0; 73 73 74 74 for (xattr = xattr_array; xattr->name != NULL; xattr++) { 75 - error = -xfs_attr_set(ip, xattr->name, xattr->value, 75 + error = xfs_attr_set(ip, xattr->name, xattr->value, 76 76 xattr->value_len, ATTR_SECURE); 77 77 if (error < 0) 78 78 break; ··· 93 93 struct inode *dir, 94 94 const struct qstr *qstr) 95 95 { 96 - return -security_inode_init_security(inode, dir, qstr, 96 + return security_inode_init_security(inode, dir, qstr, 97 97 &xfs_initxattrs, NULL); 98 98 } 99 99 ··· 173 173 174 174 #ifdef CONFIG_XFS_POSIX_ACL 175 175 if (default_acl) { 176 - error = -xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); 176 + error = xfs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); 177 177 if (error) 178 178 goto out_cleanup_inode; 179 179 } 180 180 if (acl) { 181 - error = -xfs_set_acl(inode, acl, ACL_TYPE_ACCESS); 181 + error = xfs_set_acl(inode, acl, ACL_TYPE_ACCESS); 182 182 if (error) 183 183 goto out_cleanup_inode; 184 184 } ··· 194 194 posix_acl_release(default_acl); 195 195 if (acl) 196 196 posix_acl_release(acl); 197 - return -error; 197 + return error; 198 198 199 199 out_cleanup_inode: 200 200 if (!tmpfile) ··· 248 248 xfs_dentry_to_name(&name, dentry, 0); 249 249 error = xfs_lookup(XFS_I(dir), &name, &cip, NULL); 250 250 if (unlikely(error)) { 251 - if (unlikely(error != ENOENT)) 252 - return ERR_PTR(-error); 251 + if (unlikely(error != -ENOENT)) 252 + return ERR_PTR(error); 253 253 d_add(dentry, NULL); 254 254 return NULL; 255 255 } ··· 275 275 xfs_dentry_to_name(&xname, dentry, 0); 276 276 error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name); 277 277 if (unlikely(error)) { 278 - if (unlikely(error != ENOENT)) 279 - return ERR_PTR(-error); 278 + if (unlikely(error != -ENOENT)) 279 + return ERR_PTR(error); 280 280 /* 281 281 * call d_add(dentry, NULL) here when d_drop_negative_children 282 282 * is called in xfs_vn_mknod (ie. 
allow negative dentries ··· 311 311 312 312 error = xfs_link(XFS_I(dir), XFS_I(inode), &name); 313 313 if (unlikely(error)) 314 - return -error; 314 + return error; 315 315 316 316 ihold(inode); 317 317 d_instantiate(dentry, inode); ··· 328 328 329 329 xfs_dentry_to_name(&name, dentry, 0); 330 330 331 - error = -xfs_remove(XFS_I(dir), &name, XFS_I(dentry->d_inode)); 331 + error = xfs_remove(XFS_I(dir), &name, XFS_I(dentry->d_inode)); 332 332 if (error) 333 333 return error; 334 334 ··· 375 375 xfs_cleanup_inode(dir, inode, dentry); 376 376 iput(inode); 377 377 out: 378 - return -error; 378 + return error; 379 379 } 380 380 381 381 STATIC int ··· 392 392 xfs_dentry_to_name(&oname, odentry, 0); 393 393 xfs_dentry_to_name(&nname, ndentry, odentry->d_inode->i_mode); 394 394 395 - return -xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode), 396 - XFS_I(ndir), &nname, new_inode ? 395 + return xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode), 396 + XFS_I(ndir), &nname, new_inode ? 397 397 XFS_I(new_inode) : NULL); 398 398 } 399 399 ··· 414 414 if (!link) 415 415 goto out_err; 416 416 417 - error = -xfs_readlink(XFS_I(dentry->d_inode), link); 417 + error = xfs_readlink(XFS_I(dentry->d_inode), link); 418 418 if (unlikely(error)) 419 419 goto out_kfree; 420 420 ··· 441 441 trace_xfs_getattr(ip); 442 442 443 443 if (XFS_FORCED_SHUTDOWN(mp)) 444 - return -XFS_ERROR(EIO); 444 + return -EIO; 445 445 446 446 stat->size = XFS_ISIZE(ip); 447 447 stat->dev = inode->i_sb->s_dev; ··· 546 546 /* If acls are being inherited, we already have this checked */ 547 547 if (!(flags & XFS_ATTR_NOACL)) { 548 548 if (mp->m_flags & XFS_MOUNT_RDONLY) 549 - return XFS_ERROR(EROFS); 549 + return -EROFS; 550 550 551 551 if (XFS_FORCED_SHUTDOWN(mp)) 552 - return XFS_ERROR(EIO); 552 + return -EIO; 553 553 554 - error = -inode_change_ok(inode, iattr); 554 + error = inode_change_ok(inode, iattr); 555 555 if (error) 556 - return XFS_ERROR(error); 556 + return error; 557 557 } 558 558 559 559 
ASSERT((mask & ATTR_SIZE) == 0); ··· 703 703 xfs_qm_dqrele(gdqp); 704 704 705 705 if (error) 706 - return XFS_ERROR(error); 706 + return error; 707 707 708 708 /* 709 709 * XXX(hch): Updating the ACL entries is not atomic vs the i_mode ··· 713 713 * Posix ACL code seems to care about this issue either. 714 714 */ 715 715 if ((mask & ATTR_MODE) && !(flags & XFS_ATTR_NOACL)) { 716 - error = -posix_acl_chmod(inode, inode->i_mode); 716 + error = posix_acl_chmod(inode, inode->i_mode); 717 717 if (error) 718 - return XFS_ERROR(error); 718 + return error; 719 719 } 720 720 721 721 return 0; ··· 748 748 trace_xfs_setattr(ip); 749 749 750 750 if (mp->m_flags & XFS_MOUNT_RDONLY) 751 - return XFS_ERROR(EROFS); 751 + return -EROFS; 752 752 753 753 if (XFS_FORCED_SHUTDOWN(mp)) 754 - return XFS_ERROR(EIO); 754 + return -EIO; 755 755 756 - error = -inode_change_ok(inode, iattr); 756 + error = inode_change_ok(inode, iattr); 757 757 if (error) 758 - return XFS_ERROR(error); 758 + return error; 759 759 760 760 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); 761 761 ASSERT(S_ISREG(ip->i_d.di_mode)); ··· 818 818 * care about here. 819 819 */ 820 820 if (oldsize != ip->i_d.di_size && newsize > ip->i_d.di_size) { 821 - error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping, 821 + error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, 822 822 ip->i_d.di_size, newsize); 823 823 if (error) 824 824 return error; ··· 844 844 * much we can do about this, except to hope that the caller sees ENOMEM 845 845 * and retries the truncate operation. 
846 846 */ 847 - error = -block_truncate_page(inode->i_mapping, newsize, xfs_get_blocks); 847 + error = block_truncate_page(inode->i_mapping, newsize, xfs_get_blocks); 848 848 if (error) 849 849 return error; 850 850 truncate_setsize(inode, newsize); ··· 950 950 error = xfs_setattr_nonsize(ip, iattr, 0); 951 951 } 952 952 953 - return -error; 953 + return error; 954 954 } 955 955 956 956 STATIC int ··· 970 970 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0); 971 971 if (error) { 972 972 xfs_trans_cancel(tp, 0); 973 - return -error; 973 + return error; 974 974 } 975 975 976 976 xfs_ilock(ip, XFS_ILOCK_EXCL); ··· 991 991 } 992 992 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); 993 993 xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP); 994 - return -xfs_trans_commit(tp, 0); 994 + return xfs_trans_commit(tp, 0); 995 995 } 996 996 997 997 #define XFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) ··· 1036 1036 *full = 1; /* user array now full */ 1037 1037 } 1038 1038 1039 - return -error; 1039 + return error; 1040 1040 } 1041 1041 1042 1042 STATIC int ··· 1055 1055 return error; 1056 1056 1057 1057 /* Set up bmap header for xfs internal routine */ 1058 - bm.bmv_offset = BTOBB(start); 1058 + bm.bmv_offset = BTOBBT(start); 1059 1059 /* Special case for whole file */ 1060 1060 if (length == FIEMAP_MAX_OFFSET) 1061 1061 bm.bmv_length = -1LL; 1062 1062 else 1063 - bm.bmv_length = BTOBB(length); 1063 + bm.bmv_length = BTOBB(start + length) - bm.bmv_offset; 1064 1064 1065 1065 /* We add one because in getbmap world count includes the header */ 1066 1066 bm.bmv_count = !fieinfo->fi_extents_max ? MAXEXTNUM : ··· 1075 1075 1076 1076 error = xfs_getbmap(ip, &bm, xfs_fiemap_format, fieinfo); 1077 1077 if (error) 1078 - return -error; 1078 + return error; 1079 1079 1080 1080 return 0; 1081 1081 }
+266 -307
fs/xfs/xfs_itable.c
··· 67 67 *stat = BULKSTAT_RV_NOTHING; 68 68 69 69 if (!buffer || xfs_internal_inum(mp, ino)) 70 - return XFS_ERROR(EINVAL); 70 + return -EINVAL; 71 71 72 72 buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL); 73 73 if (!buf) 74 - return XFS_ERROR(ENOMEM); 74 + return -ENOMEM; 75 75 76 76 error = xfs_iget(mp, NULL, ino, 77 77 (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED), 78 78 XFS_ILOCK_SHARED, &ip); 79 - if (error) { 80 - *stat = BULKSTAT_RV_NOTHING; 79 + if (error) 81 80 goto out_free; 82 - } 83 81 84 82 ASSERT(ip != NULL); 85 83 ASSERT(ip->i_imap.im_blkno != 0); ··· 134 136 IRELE(ip); 135 137 136 138 error = formatter(buffer, ubsize, ubused, buf); 137 - 138 139 if (!error) 139 140 *stat = BULKSTAT_RV_DIDONE; 140 141 ··· 151 154 const xfs_bstat_t *buffer) 152 155 { 153 156 if (ubsize < sizeof(*buffer)) 154 - return XFS_ERROR(ENOMEM); 157 + return -ENOMEM; 155 158 if (copy_to_user(ubuffer, buffer, sizeof(*buffer))) 156 - return XFS_ERROR(EFAULT); 159 + return -EFAULT; 157 160 if (ubused) 158 161 *ubused = sizeof(*buffer); 159 162 return 0; ··· 172 175 xfs_bulkstat_one_fmt, ubused, stat); 173 176 } 174 177 178 + /* 179 + * Loop over all clusters in a chunk for a given incore inode allocation btree 180 + * record. Do a readahead if there are any allocated inodes in that cluster. 
181 + */ 182 + STATIC void 183 + xfs_bulkstat_ichunk_ra( 184 + struct xfs_mount *mp, 185 + xfs_agnumber_t agno, 186 + struct xfs_inobt_rec_incore *irec) 187 + { 188 + xfs_agblock_t agbno; 189 + struct blk_plug plug; 190 + int blks_per_cluster; 191 + int inodes_per_cluster; 192 + int i; /* inode chunk index */ 193 + 194 + agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino); 195 + blks_per_cluster = xfs_icluster_size_fsb(mp); 196 + inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog; 197 + 198 + blk_start_plug(&plug); 199 + for (i = 0; i < XFS_INODES_PER_CHUNK; 200 + i += inodes_per_cluster, agbno += blks_per_cluster) { 201 + if (xfs_inobt_maskn(i, inodes_per_cluster) & ~irec->ir_free) { 202 + xfs_btree_reada_bufs(mp, agno, agbno, blks_per_cluster, 203 + &xfs_inode_buf_ops); 204 + } 205 + } 206 + blk_finish_plug(&plug); 207 + } 208 + 209 + /* 210 + * Lookup the inode chunk that the given inode lives in and then get the record 211 + * if we found the chunk. If the inode was not the last in the chunk and there 212 + * are some left allocated, update the data for the pointed-to record as well as 213 + * return the count of grabbed inodes. 
214 + */ 215 + STATIC int 216 + xfs_bulkstat_grab_ichunk( 217 + struct xfs_btree_cur *cur, /* btree cursor */ 218 + xfs_agino_t agino, /* starting inode of chunk */ 219 + int *icount,/* return # of inodes grabbed */ 220 + struct xfs_inobt_rec_incore *irec) /* btree record */ 221 + { 222 + int idx; /* index into inode chunk */ 223 + int stat; 224 + int error = 0; 225 + 226 + /* Lookup the inode chunk that this inode lives in */ 227 + error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &stat); 228 + if (error) 229 + return error; 230 + if (!stat) { 231 + *icount = 0; 232 + return error; 233 + } 234 + 235 + /* Get the record, should always work */ 236 + error = xfs_inobt_get_rec(cur, irec, &stat); 237 + if (error) 238 + return error; 239 + XFS_WANT_CORRUPTED_RETURN(stat == 1); 240 + 241 + /* Check if the record contains the inode in request */ 242 + if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) 243 + return -EINVAL; 244 + 245 + idx = agino - irec->ir_startino + 1; 246 + if (idx < XFS_INODES_PER_CHUNK && 247 + (xfs_inobt_maskn(idx, XFS_INODES_PER_CHUNK - idx) & ~irec->ir_free)) { 248 + int i; 249 + 250 + /* We got a right chunk with some left inodes allocated at it. 251 + * Grab the chunk record. Mark all the uninteresting inodes 252 + * free -- because they're before our start point. 253 + */ 254 + for (i = 0; i < idx; i++) { 255 + if (XFS_INOBT_MASK(i) & ~irec->ir_free) 256 + irec->ir_freecount++; 257 + } 258 + 259 + irec->ir_free |= xfs_inobt_maskn(0, idx); 260 + *icount = XFS_INODES_PER_CHUNK - irec->ir_freecount; 261 + } 262 + 263 + return 0; 264 + } 265 + 175 266 #define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size) 267 + 268 + /* 269 + * Process inodes in chunk with a pointer to a formatter function 270 + * that will iget the inode and fill in the appropriate structure. 
271 + */ 272 + int 273 + xfs_bulkstat_ag_ichunk( 274 + struct xfs_mount *mp, 275 + xfs_agnumber_t agno, 276 + struct xfs_inobt_rec_incore *irbp, 277 + bulkstat_one_pf formatter, 278 + size_t statstruct_size, 279 + struct xfs_bulkstat_agichunk *acp) 280 + { 281 + xfs_ino_t lastino = acp->ac_lastino; 282 + char __user **ubufp = acp->ac_ubuffer; 283 + int ubleft = acp->ac_ubleft; 284 + int ubelem = acp->ac_ubelem; 285 + int chunkidx, clustidx; 286 + int error = 0; 287 + xfs_agino_t agino; 288 + 289 + for (agino = irbp->ir_startino, chunkidx = clustidx = 0; 290 + XFS_BULKSTAT_UBLEFT(ubleft) && 291 + irbp->ir_freecount < XFS_INODES_PER_CHUNK; 292 + chunkidx++, clustidx++, agino++) { 293 + int fmterror; /* bulkstat formatter result */ 294 + int ubused; 295 + xfs_ino_t ino = XFS_AGINO_TO_INO(mp, agno, agino); 296 + 297 + ASSERT(chunkidx < XFS_INODES_PER_CHUNK); 298 + 299 + /* Skip if this inode is free */ 300 + if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) { 301 + lastino = ino; 302 + continue; 303 + } 304 + 305 + /* 306 + * Count used inodes as free so we can tell when the 307 + * chunk is used up. 308 + */ 309 + irbp->ir_freecount++; 310 + 311 + /* Get the inode and fill in a single buffer */ 312 + ubused = statstruct_size; 313 + error = formatter(mp, ino, *ubufp, ubleft, &ubused, &fmterror); 314 + if (fmterror == BULKSTAT_RV_NOTHING) { 315 + if (error && error != -ENOENT && error != -EINVAL) { 316 + ubleft = 0; 317 + break; 318 + } 319 + lastino = ino; 320 + continue; 321 + } 322 + if (fmterror == BULKSTAT_RV_GIVEUP) { 323 + ubleft = 0; 324 + ASSERT(error); 325 + break; 326 + } 327 + if (*ubufp) 328 + *ubufp += ubused; 329 + ubleft -= ubused; 330 + ubelem++; 331 + lastino = ino; 332 + } 333 + 334 + acp->ac_lastino = lastino; 335 + acp->ac_ubleft = ubleft; 336 + acp->ac_ubelem = ubelem; 337 + 338 + return error; 339 + } 176 340 177 341 /* 178 342 * Return stat information in bulk (by-inode) for the filesystem. 
··· 348 190 char __user *ubuffer, /* buffer with inode stats */ 349 191 int *done) /* 1 if there are more stats to get */ 350 192 { 351 - xfs_agblock_t agbno=0;/* allocation group block number */ 352 193 xfs_buf_t *agbp; /* agi header buffer */ 353 194 xfs_agi_t *agi; /* agi header data */ 354 195 xfs_agino_t agino; /* inode # in allocation group */ 355 196 xfs_agnumber_t agno; /* allocation group number */ 356 - int chunkidx; /* current index into inode chunk */ 357 - int clustidx; /* current index into inode cluster */ 358 197 xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */ 359 198 int end_of_ag; /* set if we've seen the ag end */ 360 199 int error; /* error code */ ··· 364 209 xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */ 365 210 xfs_inobt_rec_incore_t *irbufend; /* end of good irec buffer entries */ 366 211 xfs_ino_t lastino; /* last inode number returned */ 367 - int blks_per_cluster; /* # of blocks per cluster */ 368 - int inodes_per_cluster;/* # of inodes per cluster */ 369 212 int nirbuf; /* size of irbuf */ 370 213 int rval; /* return value error code */ 371 214 int tmp; /* result value from btree calls */ ··· 371 218 int ubleft; /* bytes left in user's buffer */ 372 219 char __user *ubufp; /* pointer into user's buffer */ 373 220 int ubelem; /* spaces used in user's buffer */ 374 - int ubused; /* bytes used by formatter */ 375 221 376 222 /* 377 223 * Get the last inode value, see if there's nothing to do. 
··· 385 233 *ubcountp = 0; 386 234 return 0; 387 235 } 388 - if (!ubcountp || *ubcountp <= 0) { 389 - return EINVAL; 390 - } 236 + 391 237 ubcount = *ubcountp; /* statstruct's */ 392 238 ubleft = ubcount * statstruct_size; /* bytes */ 393 239 *ubcountp = ubelem = 0; 394 240 *done = 0; 395 241 fmterror = 0; 396 242 ubufp = ubuffer; 397 - blks_per_cluster = xfs_icluster_size_fsb(mp); 398 - inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog; 399 243 irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4); 400 244 if (!irbuf) 401 - return ENOMEM; 245 + return -ENOMEM; 402 246 403 247 nirbuf = irbsize / sizeof(*irbuf); 404 248 ··· 406 258 while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) { 407 259 cond_resched(); 408 260 error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp); 409 - if (error) { 410 - /* 411 - * Skip this allocation group and go to the next one. 412 - */ 413 - agno++; 414 - agino = 0; 415 - continue; 416 - } 261 + if (error) 262 + break; 417 263 agi = XFS_BUF_TO_AGI(agbp); 418 264 /* 419 265 * Allocate and initialize a btree cursor for ialloc btree. ··· 417 275 irbp = irbuf; 418 276 irbufend = irbuf + nirbuf; 419 277 end_of_ag = 0; 420 - /* 421 - * If we're returning in the middle of an allocation group, 422 - * we need to get the remainder of the chunk we're in. 423 - */ 278 + icount = 0; 424 279 if (agino > 0) { 425 - xfs_inobt_rec_incore_t r; 426 - 427 280 /* 428 - * Lookup the inode chunk that this inode lives in. 281 + * In the middle of an allocation group, we need to get 282 + * the remainder of the chunk we're in. 
429 283 */ 430 - error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, 431 - &tmp); 432 - if (!error && /* no I/O error */ 433 - tmp && /* lookup succeeded */ 434 - /* got the record, should always work */ 435 - !(error = xfs_inobt_get_rec(cur, &r, &i)) && 436 - i == 1 && 437 - /* this is the right chunk */ 438 - agino < r.ir_startino + XFS_INODES_PER_CHUNK && 439 - /* lastino was not last in chunk */ 440 - (chunkidx = agino - r.ir_startino + 1) < 441 - XFS_INODES_PER_CHUNK && 442 - /* there are some left allocated */ 443 - xfs_inobt_maskn(chunkidx, 444 - XFS_INODES_PER_CHUNK - chunkidx) & 445 - ~r.ir_free) { 446 - /* 447 - * Grab the chunk record. Mark all the 448 - * uninteresting inodes (because they're 449 - * before our start point) free. 450 - */ 451 - for (i = 0; i < chunkidx; i++) { 452 - if (XFS_INOBT_MASK(i) & ~r.ir_free) 453 - r.ir_freecount++; 454 - } 455 - r.ir_free |= xfs_inobt_maskn(0, chunkidx); 284 + struct xfs_inobt_rec_incore r; 285 + 286 + error = xfs_bulkstat_grab_ichunk(cur, agino, &icount, &r); 287 + if (error) 288 + break; 289 + if (icount) { 456 290 irbp->ir_startino = r.ir_startino; 457 291 irbp->ir_freecount = r.ir_freecount; 458 292 irbp->ir_free = r.ir_free; 459 293 irbp++; 460 294 agino = r.ir_startino + XFS_INODES_PER_CHUNK; 461 - icount = XFS_INODES_PER_CHUNK - r.ir_freecount; 462 - } else { 463 - /* 464 - * If any of those tests failed, bump the 465 - * inode number (just in case). 466 - */ 467 - agino++; 468 - icount = 0; 469 295 } 470 - /* 471 - * In any case, increment to the next record. 472 - */ 473 - if (!error) 474 - error = xfs_btree_increment(cur, 0, &tmp); 296 + /* Increment to the next record */ 297 + error = xfs_btree_increment(cur, 0, &tmp); 475 298 } else { 476 - /* 477 - * Start of ag. Lookup the first inode chunk. 478 - */ 299 + /* Start of ag. 
Lookup the first inode chunk */ 479 300 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp); 480 - icount = 0; 481 301 } 302 + if (error) 303 + break; 304 + 482 305 /* 483 306 * Loop through inode btree records in this ag, 484 307 * until we run out of inodes or space in the buffer. 485 308 */ 486 309 while (irbp < irbufend && icount < ubcount) { 487 - xfs_inobt_rec_incore_t r; 488 - 489 - /* 490 - * Loop as long as we're unable to read the 491 - * inode btree. 492 - */ 493 - while (error) { 494 - agino += XFS_INODES_PER_CHUNK; 495 - if (XFS_AGINO_TO_AGBNO(mp, agino) >= 496 - be32_to_cpu(agi->agi_length)) 497 - break; 498 - error = xfs_inobt_lookup(cur, agino, 499 - XFS_LOOKUP_GE, &tmp); 500 - cond_resched(); 501 - } 502 - /* 503 - * If ran off the end of the ag either with an error, 504 - * or the normal way, set end and stop collecting. 505 - */ 506 - if (error) { 507 - end_of_ag = 1; 508 - break; 509 - } 310 + struct xfs_inobt_rec_incore r; 510 311 511 312 error = xfs_inobt_get_rec(cur, &r, &i); 512 313 if (error || i == 0) { ··· 462 377 * Also start read-ahead now for this chunk. 463 378 */ 464 379 if (r.ir_freecount < XFS_INODES_PER_CHUNK) { 465 - struct blk_plug plug; 466 - /* 467 - * Loop over all clusters in the next chunk. 468 - * Do a readahead if there are any allocated 469 - * inodes in that cluster. 
470 - */ 471 - blk_start_plug(&plug); 472 - agbno = XFS_AGINO_TO_AGBNO(mp, r.ir_startino); 473 - for (chunkidx = 0; 474 - chunkidx < XFS_INODES_PER_CHUNK; 475 - chunkidx += inodes_per_cluster, 476 - agbno += blks_per_cluster) { 477 - if (xfs_inobt_maskn(chunkidx, 478 - inodes_per_cluster) & ~r.ir_free) 479 - xfs_btree_reada_bufs(mp, agno, 480 - agbno, blks_per_cluster, 481 - &xfs_inode_buf_ops); 482 - } 483 - blk_finish_plug(&plug); 380 + xfs_bulkstat_ichunk_ra(mp, agno, &r); 484 381 irbp->ir_startino = r.ir_startino; 485 382 irbp->ir_freecount = r.ir_freecount; 486 383 irbp->ir_free = r.ir_free; ··· 489 422 irbufend = irbp; 490 423 for (irbp = irbuf; 491 424 irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) { 492 - /* 493 - * Now process this chunk of inodes. 494 - */ 495 - for (agino = irbp->ir_startino, chunkidx = clustidx = 0; 496 - XFS_BULKSTAT_UBLEFT(ubleft) && 497 - irbp->ir_freecount < XFS_INODES_PER_CHUNK; 498 - chunkidx++, clustidx++, agino++) { 499 - ASSERT(chunkidx < XFS_INODES_PER_CHUNK); 425 + struct xfs_bulkstat_agichunk ac; 500 426 501 - ino = XFS_AGINO_TO_INO(mp, agno, agino); 502 - /* 503 - * Skip if this inode is free. 504 - */ 505 - if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) { 506 - lastino = ino; 507 - continue; 508 - } 509 - /* 510 - * Count used inodes as free so we can tell 511 - * when the chunk is used up. 512 - */ 513 - irbp->ir_freecount++; 427 + ac.ac_lastino = lastino; 428 + ac.ac_ubuffer = &ubuffer; 429 + ac.ac_ubleft = ubleft; 430 + ac.ac_ubelem = ubelem; 431 + error = xfs_bulkstat_ag_ichunk(mp, agno, irbp, 432 + formatter, statstruct_size, &ac); 433 + if (error) 434 + rval = error; 514 435 515 - /* 516 - * Get the inode and fill in a single buffer. 
517 - */ 518 - ubused = statstruct_size; 519 - error = formatter(mp, ino, ubufp, ubleft, 520 - &ubused, &fmterror); 521 - if (fmterror == BULKSTAT_RV_NOTHING) { 522 - if (error && error != ENOENT && 523 - error != EINVAL) { 524 - ubleft = 0; 525 - rval = error; 526 - break; 527 - } 528 - lastino = ino; 529 - continue; 530 - } 531 - if (fmterror == BULKSTAT_RV_GIVEUP) { 532 - ubleft = 0; 533 - ASSERT(error); 534 - rval = error; 535 - break; 536 - } 537 - if (ubufp) 538 - ubufp += ubused; 539 - ubleft -= ubused; 540 - ubelem++; 541 - lastino = ino; 542 - } 436 + lastino = ac.ac_lastino; 437 + ubleft = ac.ac_ubleft; 438 + ubelem = ac.ac_ubelem; 543 439 544 440 cond_resched(); 545 441 } ··· 542 512 return rval; 543 513 } 544 514 545 - /* 546 - * Return stat information in bulk (by-inode) for the filesystem. 547 - * Special case for non-sequential one inode bulkstat. 548 - */ 549 - int /* error status */ 550 - xfs_bulkstat_single( 551 - xfs_mount_t *mp, /* mount point for filesystem */ 552 - xfs_ino_t *lastinop, /* inode to return */ 553 - char __user *buffer, /* buffer with inode stats */ 554 - int *done) /* 1 if there are more stats to get */ 555 - { 556 - int count; /* count value for bulkstat call */ 557 - int error; /* return value */ 558 - xfs_ino_t ino; /* filesystem inode number */ 559 - int res; /* result from bs1 */ 560 - 561 - /* 562 - * note that requesting valid inode numbers which are not allocated 563 - * to inodes will most likely cause xfs_imap_to_bp to generate warning 564 - * messages about bad magic numbers. This is ok. The fact that 565 - * the inode isn't actually an inode is handled by the 566 - * error check below. Done this way to make the usual case faster 567 - * at the expense of the error case. 568 - */ 569 - 570 - ino = *lastinop; 571 - error = xfs_bulkstat_one(mp, ino, buffer, sizeof(xfs_bstat_t), 572 - NULL, &res); 573 - if (error) { 574 - /* 575 - * Special case way failed, do it the "long" way 576 - * to see if that works. 
577 - */ 578 - (*lastinop)--; 579 - count = 1; 580 - if (xfs_bulkstat(mp, lastinop, &count, xfs_bulkstat_one, 581 - sizeof(xfs_bstat_t), buffer, done)) 582 - return error; 583 - if (count == 0 || (xfs_ino_t)*lastinop != ino) 584 - return error == EFSCORRUPTED ? 585 - XFS_ERROR(EINVAL) : error; 586 - else 587 - return 0; 588 - } 589 - *done = 0; 590 - return 0; 591 - } 592 - 593 515 int 594 516 xfs_inumbers_fmt( 595 517 void __user *ubuffer, /* buffer to write to */ 596 - const xfs_inogrp_t *buffer, /* buffer to read from */ 518 + const struct xfs_inogrp *buffer, /* buffer to read from */ 597 519 long count, /* # of elements to read */ 598 520 long *written) /* # of bytes written */ 599 521 { ··· 560 578 */ 561 579 int /* error status */ 562 580 xfs_inumbers( 563 - xfs_mount_t *mp, /* mount point for filesystem */ 564 - xfs_ino_t *lastino, /* last inode returned */ 565 - int *count, /* size of buffer/count returned */ 566 - void __user *ubuffer,/* buffer with inode descriptions */ 567 - inumbers_fmt_pf formatter) 581 + struct xfs_mount *mp,/* mount point for filesystem */ 582 + xfs_ino_t *lastino,/* last inode returned */ 583 + int *count,/* size of buffer/count returned */ 584 + void __user *ubuffer,/* buffer with inode descriptions */ 585 + inumbers_fmt_pf formatter) 568 586 { 569 - xfs_buf_t *agbp; 570 - xfs_agino_t agino; 571 - xfs_agnumber_t agno; 572 - int bcount; 573 - xfs_inogrp_t *buffer; 574 - int bufidx; 575 - xfs_btree_cur_t *cur; 576 - int error; 577 - xfs_inobt_rec_incore_t r; 578 - int i; 579 - xfs_ino_t ino; 580 - int left; 581 - int tmp; 587 + xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, *lastino); 588 + xfs_agino_t agino = XFS_INO_TO_AGINO(mp, *lastino); 589 + struct xfs_btree_cur *cur = NULL; 590 + struct xfs_buf *agbp = NULL; 591 + struct xfs_inogrp *buffer; 592 + int bcount; 593 + int left = *count; 594 + int bufidx = 0; 595 + int error = 0; 582 596 583 - ino = (xfs_ino_t)*lastino; 584 - agno = XFS_INO_TO_AGNO(mp, ino); 585 - agino = 
XFS_INO_TO_AGINO(mp, ino); 586 - left = *count; 587 597 *count = 0; 598 + if (agno >= mp->m_sb.sb_agcount || 599 + *lastino != XFS_AGINO_TO_INO(mp, agno, agino)) 600 + return error; 601 + 588 602 bcount = MIN(left, (int)(PAGE_SIZE / sizeof(*buffer))); 589 603 buffer = kmem_alloc(bcount * sizeof(*buffer), KM_SLEEP); 590 - error = bufidx = 0; 591 - cur = NULL; 592 - agbp = NULL; 593 - while (left > 0 && agno < mp->m_sb.sb_agcount) { 594 - if (agbp == NULL) { 604 + do { 605 + struct xfs_inobt_rec_incore r; 606 + int stat; 607 + 608 + if (!agbp) { 595 609 error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp); 596 - if (error) { 597 - /* 598 - * If we can't read the AGI of this ag, 599 - * then just skip to the next one. 600 - */ 601 - ASSERT(cur == NULL); 602 - agbp = NULL; 603 - agno++; 604 - agino = 0; 605 - continue; 606 - } 610 + if (error) 611 + break; 612 + 607 613 cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, 608 614 XFS_BTNUM_INO); 609 615 error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_GE, 610 - &tmp); 611 - if (error) { 612 - xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 613 - cur = NULL; 614 - xfs_buf_relse(agbp); 615 - agbp = NULL; 616 - /* 617 - * Move up the last inode in the current 618 - * chunk. The lookup_ge will always get 619 - * us the first inode in the next chunk. 
620 - */ 621 - agino += XFS_INODES_PER_CHUNK - 1; 622 - continue; 623 - } 616 + &stat); 617 + if (error) 618 + break; 619 + if (!stat) 620 + goto next_ag; 624 621 } 625 - error = xfs_inobt_get_rec(cur, &r, &i); 626 - if (error || i == 0) { 627 - xfs_buf_relse(agbp); 628 - agbp = NULL; 629 - xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 630 - cur = NULL; 631 - agno++; 632 - agino = 0; 633 - continue; 634 - } 622 + 623 + error = xfs_inobt_get_rec(cur, &r, &stat); 624 + if (error) 625 + break; 626 + if (!stat) 627 + goto next_ag; 628 + 635 629 agino = r.ir_startino + XFS_INODES_PER_CHUNK - 1; 636 630 buffer[bufidx].xi_startino = 637 631 XFS_AGINO_TO_INO(mp, agno, r.ir_startino); 638 632 buffer[bufidx].xi_alloccount = 639 633 XFS_INODES_PER_CHUNK - r.ir_freecount; 640 634 buffer[bufidx].xi_allocmask = ~r.ir_free; 641 - bufidx++; 642 - left--; 643 - if (bufidx == bcount) { 644 - long written; 645 - if (formatter(ubuffer, buffer, bufidx, &written)) { 646 - error = XFS_ERROR(EFAULT); 635 + if (++bufidx == bcount) { 636 + long written; 637 + 638 + error = formatter(ubuffer, buffer, bufidx, &written); 639 + if (error) 647 640 break; 648 - } 649 641 ubuffer += written; 650 642 *count += bufidx; 651 643 bufidx = 0; 652 644 } 653 - if (left) { 654 - error = xfs_btree_increment(cur, 0, &tmp); 655 - if (error) { 656 - xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 657 - cur = NULL; 658 - xfs_buf_relse(agbp); 659 - agbp = NULL; 660 - /* 661 - * The agino value has already been bumped. 662 - * Just try to skip up to it. 
663 - */ 664 - agino += XFS_INODES_PER_CHUNK; 665 - continue; 666 - } 667 - } 668 - } 645 + if (!--left) 646 + break; 647 + 648 + error = xfs_btree_increment(cur, 0, &stat); 649 + if (error) 650 + break; 651 + if (stat) 652 + continue; 653 + 654 + next_ag: 655 + xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); 656 + cur = NULL; 657 + xfs_buf_relse(agbp); 658 + agbp = NULL; 659 + agino = 0; 660 + } while (++agno < mp->m_sb.sb_agcount); 661 + 669 662 if (!error) { 670 663 if (bufidx) { 671 - long written; 672 - if (formatter(ubuffer, buffer, bufidx, &written)) 673 - error = XFS_ERROR(EFAULT); 674 - else 664 + long written; 665 + 666 + error = formatter(ubuffer, buffer, bufidx, &written); 667 + if (!error) 675 668 *count += bufidx; 676 669 } 677 670 *lastino = XFS_AGINO_TO_INO(mp, agno, agino); 678 671 } 672 + 679 673 kmem_free(buffer); 680 674 if (cur) 681 675 xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR : 682 676 XFS_BTREE_NOERROR)); 683 677 if (agbp) 684 678 xfs_buf_relse(agbp); 679 + 685 680 return error; 686 681 }
+16 -7
fs/xfs/xfs_itable.h
··· 30 30 int *ubused, 31 31 int *stat); 32 32 33 + struct xfs_bulkstat_agichunk { 34 + xfs_ino_t ac_lastino; /* last inode returned */ 35 + char __user **ac_ubuffer;/* pointer into user's buffer */ 36 + int ac_ubleft; /* bytes left in user's buffer */ 37 + int ac_ubelem; /* spaces used in user's buffer */ 38 + }; 39 + 40 + int 41 + xfs_bulkstat_ag_ichunk( 42 + struct xfs_mount *mp, 43 + xfs_agnumber_t agno, 44 + struct xfs_inobt_rec_incore *irbp, 45 + bulkstat_one_pf formatter, 46 + size_t statstruct_size, 47 + struct xfs_bulkstat_agichunk *acp); 48 + 33 49 /* 34 50 * Values for stat return value. 35 51 */ ··· 65 49 size_t statstruct_size,/* sizeof struct that we're filling */ 66 50 char __user *ubuffer,/* buffer with inode stats */ 67 51 int *done); /* 1 if there are more stats to get */ 68 - 69 - int 70 - xfs_bulkstat_single( 71 - xfs_mount_t *mp, 72 - xfs_ino_t *lastinop, 73 - char __user *buffer, 74 - int *done); 75 52 76 53 typedef int (*bulkstat_one_fmt_pf)( /* used size in bytes or negative error */ 77 54 void __user *ubuffer, /* buffer to write to */
+13 -14
fs/xfs/xfs_linux.h
··· 21 21 #include <linux/types.h> 22 22 23 23 /* 24 - * XFS_BIG_BLKNOS needs block layer disk addresses to be 64 bits. 25 - * XFS_BIG_INUMS requires XFS_BIG_BLKNOS to be set. 26 - */ 27 - #if defined(CONFIG_LBDAF) || (BITS_PER_LONG == 64) 28 - # define XFS_BIG_BLKNOS 1 29 - # define XFS_BIG_INUMS 1 30 - #else 31 - # define XFS_BIG_BLKNOS 0 32 - # define XFS_BIG_INUMS 0 33 - #endif 34 - 35 - /* 36 24 * Kernel specific type declarations for XFS 37 25 */ 38 26 typedef signed char __int8_t; ··· 101 113 #include <asm/byteorder.h> 102 114 #include <asm/unaligned.h> 103 115 104 - #include "xfs_vnode.h" 116 + #include "xfs_fs.h" 105 117 #include "xfs_stats.h" 106 118 #include "xfs_sysctl.h" 107 119 #include "xfs_iops.h" ··· 178 190 #define MIN(a,b) (min(a,b)) 179 191 #define MAX(a,b) (max(a,b)) 180 192 #define howmany(x, y) (((x)+((y)-1))/(y)) 193 + 194 + /* 195 + * XFS wrapper structure for sysfs support. It depends on external data 196 + * structures and is embedded in various internal data structures to implement 197 + * the XFS sysfs object heirarchy. Define it here for broad access throughout 198 + * the codebase. 199 + */ 200 + struct xfs_kobj { 201 + struct kobject kobject; 202 + struct completion complete; 203 + }; 181 204 182 205 /* Kernel uid/gid conversion. These are used to convert to/from the on disk 183 206 * uid_t/gid_t types to the kuid_t/kgid_t types that the kernel uses internally. ··· 330 331 { 331 332 x += y - 1; 332 333 do_div(x, y); 333 - return(x * y); 334 + return x * y; 334 335 } 335 336 336 337 static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
+42 -27
fs/xfs/xfs_log.c
··· 34 34 #include "xfs_trace.h" 35 35 #include "xfs_fsops.h" 36 36 #include "xfs_cksum.h" 37 + #include "xfs_sysfs.h" 37 38 38 39 kmem_zone_t *xfs_log_ticket_zone; 39 40 ··· 284 283 return 0; 285 284 shutdown: 286 285 list_del_init(&tic->t_queue); 287 - return XFS_ERROR(EIO); 286 + return -EIO; 288 287 } 289 288 290 289 /* ··· 378 377 int error = 0; 379 378 380 379 if (XLOG_FORCED_SHUTDOWN(log)) 381 - return XFS_ERROR(EIO); 380 + return -EIO; 382 381 383 382 XFS_STATS_INC(xs_try_logspace); 384 383 ··· 447 446 ASSERT(client == XFS_TRANSACTION || client == XFS_LOG); 448 447 449 448 if (XLOG_FORCED_SHUTDOWN(log)) 450 - return XFS_ERROR(EIO); 449 + return -EIO; 451 450 452 451 XFS_STATS_INC(xs_try_logspace); 453 452 ··· 455 454 tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent, 456 455 KM_SLEEP | KM_MAYFAIL); 457 456 if (!tic) 458 - return XFS_ERROR(ENOMEM); 457 + return -ENOMEM; 459 458 460 459 tic->t_trans_type = t_type; 461 460 *ticp = tic; ··· 591 590 { 592 591 if (xlog_state_release_iclog(mp->m_log, iclog)) { 593 592 xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR); 594 - return EIO; 593 + return -EIO; 595 594 } 596 595 597 596 return 0; ··· 629 628 630 629 mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks); 631 630 if (IS_ERR(mp->m_log)) { 632 - error = -PTR_ERR(mp->m_log); 631 + error = PTR_ERR(mp->m_log); 633 632 goto out; 634 633 } 635 634 ··· 653 652 xfs_warn(mp, 654 653 "Log size %d blocks too small, minimum size is %d blocks", 655 654 mp->m_sb.sb_logblocks, min_logfsbs); 656 - error = EINVAL; 655 + error = -EINVAL; 657 656 } else if (mp->m_sb.sb_logblocks > XFS_MAX_LOG_BLOCKS) { 658 657 xfs_warn(mp, 659 658 "Log size %d blocks too large, maximum size is %lld blocks", 660 659 mp->m_sb.sb_logblocks, XFS_MAX_LOG_BLOCKS); 661 - error = EINVAL; 660 + error = -EINVAL; 662 661 } else if (XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks) > XFS_MAX_LOG_BYTES) { 663 662 xfs_warn(mp, 664 663 "log size %lld bytes too large, maximum size is %lld bytes", 
665 664 XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks), 666 665 XFS_MAX_LOG_BYTES); 667 - error = EINVAL; 666 + error = -EINVAL; 668 667 } 669 668 if (error) { 670 669 if (xfs_sb_version_hascrc(&mp->m_sb)) { ··· 707 706 goto out_destroy_ail; 708 707 } 709 708 } 709 + 710 + error = xfs_sysfs_init(&mp->m_log->l_kobj, &xfs_log_ktype, &mp->m_kobj, 711 + "log"); 712 + if (error) 713 + goto out_destroy_ail; 710 714 711 715 /* Normal transactions can now occur */ 712 716 mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY; ··· 953 947 xfs_log_quiesce(mp); 954 948 955 949 xfs_trans_ail_destroy(mp); 950 + 951 + xfs_sysfs_del(&mp->m_log->l_kobj); 952 + 956 953 xlog_dealloc_log(mp->m_log); 957 954 } 958 955 ··· 1322 1313 xlog_in_core_t *iclog, *prev_iclog=NULL; 1323 1314 xfs_buf_t *bp; 1324 1315 int i; 1325 - int error = ENOMEM; 1316 + int error = -ENOMEM; 1326 1317 uint log2_size = 0; 1327 1318 1328 1319 log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL); ··· 1349 1340 xlog_grant_head_init(&log->l_reserve_head); 1350 1341 xlog_grant_head_init(&log->l_write_head); 1351 1342 1352 - error = EFSCORRUPTED; 1343 + error = -EFSCORRUPTED; 1353 1344 if (xfs_sb_version_hassector(&mp->m_sb)) { 1354 1345 log2_size = mp->m_sb.sb_logsectlog; 1355 1346 if (log2_size < BBSHIFT) { ··· 1378 1369 1379 1370 xlog_get_iclog_buffer_size(mp, log); 1380 1371 1381 - error = ENOMEM; 1382 - bp = xfs_buf_alloc(mp->m_logdev_targp, 0, BTOBB(log->l_iclog_size), 0); 1372 + /* 1373 + * Use a NULL block for the extra log buffer used during splits so that 1374 + * it will trigger errors if we ever try to do IO on it without first 1375 + * having set it up properly. 
1376 + */ 1377 + error = -ENOMEM; 1378 + bp = xfs_buf_alloc(mp->m_logdev_targp, XFS_BUF_DADDR_NULL, 1379 + BTOBB(log->l_iclog_size), 0); 1383 1380 if (!bp) 1384 1381 goto out_free_log; 1385 1382 ··· 1478 1463 out_free_log: 1479 1464 kmem_free(log); 1480 1465 out: 1481 - return ERR_PTR(-error); 1466 + return ERR_PTR(error); 1482 1467 } /* xlog_alloc_log */ 1483 1468 1484 1469 ··· 1676 1661 1677 1662 xfs_buf_lock(bp); 1678 1663 if (iclog->ic_state & XLOG_STATE_IOERROR) { 1679 - xfs_buf_ioerror(bp, EIO); 1664 + xfs_buf_ioerror(bp, -EIO); 1680 1665 xfs_buf_stale(bp); 1681 1666 xfs_buf_ioend(bp, 0); 1682 1667 /* ··· 2375 2360 2376 2361 ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags); 2377 2362 if (!ophdr) 2378 - return XFS_ERROR(EIO); 2363 + return -EIO; 2379 2364 2380 2365 xlog_write_adv_cnt(&ptr, &len, &log_offset, 2381 2366 sizeof(struct xlog_op_header)); ··· 2874 2859 spin_lock(&log->l_icloglock); 2875 2860 if (XLOG_FORCED_SHUTDOWN(log)) { 2876 2861 spin_unlock(&log->l_icloglock); 2877 - return XFS_ERROR(EIO); 2862 + return -EIO; 2878 2863 } 2879 2864 2880 2865 iclog = log->l_iclog; ··· 3062 3047 int sync = 0; /* do we sync? 
*/ 3063 3048 3064 3049 if (iclog->ic_state & XLOG_STATE_IOERROR) 3065 - return XFS_ERROR(EIO); 3050 + return -EIO; 3066 3051 3067 3052 ASSERT(atomic_read(&iclog->ic_refcnt) > 0); 3068 3053 if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) ··· 3070 3055 3071 3056 if (iclog->ic_state & XLOG_STATE_IOERROR) { 3072 3057 spin_unlock(&log->l_icloglock); 3073 - return XFS_ERROR(EIO); 3058 + return -EIO; 3074 3059 } 3075 3060 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE || 3076 3061 iclog->ic_state == XLOG_STATE_WANT_SYNC); ··· 3187 3172 iclog = log->l_iclog; 3188 3173 if (iclog->ic_state & XLOG_STATE_IOERROR) { 3189 3174 spin_unlock(&log->l_icloglock); 3190 - return XFS_ERROR(EIO); 3175 + return -EIO; 3191 3176 } 3192 3177 3193 3178 /* If the head iclog is not active nor dirty, we just attach ··· 3225 3210 spin_unlock(&log->l_icloglock); 3226 3211 3227 3212 if (xlog_state_release_iclog(log, iclog)) 3228 - return XFS_ERROR(EIO); 3213 + return -EIO; 3229 3214 3230 3215 if (log_flushed) 3231 3216 *log_flushed = 1; ··· 3261 3246 */ 3262 3247 if (iclog->ic_state & XLOG_STATE_IOERROR) { 3263 3248 spin_unlock(&log->l_icloglock); 3264 - return XFS_ERROR(EIO); 3249 + return -EIO; 3265 3250 } 3266 3251 XFS_STATS_INC(xs_log_force_sleep); 3267 3252 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); ··· 3271 3256 * and the memory read should be atomic. 
3272 3257 */ 3273 3258 if (iclog->ic_state & XLOG_STATE_IOERROR) 3274 - return XFS_ERROR(EIO); 3259 + return -EIO; 3275 3260 if (log_flushed) 3276 3261 *log_flushed = 1; 3277 3262 } else { ··· 3339 3324 iclog = log->l_iclog; 3340 3325 if (iclog->ic_state & XLOG_STATE_IOERROR) { 3341 3326 spin_unlock(&log->l_icloglock); 3342 - return XFS_ERROR(EIO); 3327 + return -EIO; 3343 3328 } 3344 3329 3345 3330 do { ··· 3390 3375 xlog_state_switch_iclogs(log, iclog, 0); 3391 3376 spin_unlock(&log->l_icloglock); 3392 3377 if (xlog_state_release_iclog(log, iclog)) 3393 - return XFS_ERROR(EIO); 3378 + return -EIO; 3394 3379 if (log_flushed) 3395 3380 *log_flushed = 1; 3396 3381 spin_lock(&log->l_icloglock); ··· 3405 3390 */ 3406 3391 if (iclog->ic_state & XLOG_STATE_IOERROR) { 3407 3392 spin_unlock(&log->l_icloglock); 3408 - return XFS_ERROR(EIO); 3393 + return -EIO; 3409 3394 } 3410 3395 XFS_STATS_INC(xs_log_force_sleep); 3411 3396 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); ··· 3415 3400 * and the memory read should be atomic. 3416 3401 */ 3417 3402 if (iclog->ic_state & XLOG_STATE_IOERROR) 3418 - return XFS_ERROR(EIO); 3403 + return -EIO; 3419 3404 3420 3405 if (log_flushed) 3421 3406 *log_flushed = 1;
+3 -5
fs/xfs/xfs_log_cil.c
··· 78 78 { 79 79 log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log); 80 80 log->l_cilp->xc_ctx->sequence = 1; 81 - log->l_cilp->xc_ctx->commit_lsn = xlog_assign_lsn(log->l_curr_cycle, 82 - log->l_curr_block); 83 81 } 84 82 85 83 /* ··· 632 634 xfs_log_ticket_put(tic); 633 635 out_abort: 634 636 xlog_cil_committed(ctx, XFS_LI_ABORTED); 635 - return XFS_ERROR(EIO); 637 + return -EIO; 636 638 } 637 639 638 640 static void ··· 926 928 927 929 cil = kmem_zalloc(sizeof(*cil), KM_SLEEP|KM_MAYFAIL); 928 930 if (!cil) 929 - return ENOMEM; 931 + return -ENOMEM; 930 932 931 933 ctx = kmem_zalloc(sizeof(*ctx), KM_SLEEP|KM_MAYFAIL); 932 934 if (!ctx) { 933 935 kmem_free(cil); 934 - return ENOMEM; 936 + return -ENOMEM; 935 937 } 936 938 937 939 INIT_WORK(&cil->xc_push_work, xlog_cil_push_work);
+2 -2
fs/xfs/xfs_log_format.h fs/xfs/libxfs/xfs_log_format.h
··· 380 380 xfs_ictimestamp_t di_mtime; /* time last modified */ 381 381 xfs_ictimestamp_t di_ctime; /* time created/inode modified */ 382 382 xfs_fsize_t di_size; /* number of bytes in file */ 383 - xfs_drfsbno_t di_nblocks; /* # of direct & btree blocks used */ 383 + xfs_rfsblock_t di_nblocks; /* # of direct & btree blocks used */ 384 384 xfs_extlen_t di_extsize; /* basic/minimum extent size for file */ 385 385 xfs_extnum_t di_nextents; /* number of extents in data fork */ 386 386 xfs_aextnum_t di_anextents; /* number of extents in attribute fork*/ ··· 516 516 * EFI/EFD log format definitions 517 517 */ 518 518 typedef struct xfs_extent { 519 - xfs_dfsbno_t ext_start; 519 + xfs_fsblock_t ext_start; 520 520 xfs_extlen_t ext_len; 521 521 } xfs_extent_t; 522 522
+2
fs/xfs/xfs_log_priv.h
··· 405 405 struct xlog_grant_head l_reserve_head; 406 406 struct xlog_grant_head l_write_head; 407 407 408 + struct xfs_kobj l_kobj; 409 + 408 410 /* The following field are used for debugging; need to hold icloglock */ 409 411 #ifdef DEBUG 410 412 char *l_iclog_bak[XLOG_MAX_ICLOGS];
+149 -135
fs/xfs/xfs_log_recover.c
··· 179 179 xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer", 180 180 nbblks); 181 181 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp); 182 - return EFSCORRUPTED; 182 + return -EFSCORRUPTED; 183 183 } 184 184 185 185 blk_no = round_down(blk_no, log->l_sectBBsize); ··· 194 194 bp->b_error = 0; 195 195 196 196 if (XFS_FORCED_SHUTDOWN(log->l_mp)) 197 - return XFS_ERROR(EIO); 197 + return -EIO; 198 198 199 199 xfs_buf_iorequest(bp); 200 200 error = xfs_buf_iowait(bp); ··· 268 268 xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer", 269 269 nbblks); 270 270 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp); 271 - return EFSCORRUPTED; 271 + return -EFSCORRUPTED; 272 272 } 273 273 274 274 blk_no = round_down(blk_no, log->l_sectBBsize); ··· 330 330 xlog_header_check_dump(mp, head); 331 331 XFS_ERROR_REPORT("xlog_header_check_recover(1)", 332 332 XFS_ERRLEVEL_HIGH, mp); 333 - return XFS_ERROR(EFSCORRUPTED); 333 + return -EFSCORRUPTED; 334 334 } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) { 335 335 xfs_warn(mp, 336 336 "dirty log entry has mismatched uuid - can't recover"); 337 337 xlog_header_check_dump(mp, head); 338 338 XFS_ERROR_REPORT("xlog_header_check_recover(2)", 339 339 XFS_ERRLEVEL_HIGH, mp); 340 - return XFS_ERROR(EFSCORRUPTED); 340 + return -EFSCORRUPTED; 341 341 } 342 342 return 0; 343 343 } ··· 364 364 xlog_header_check_dump(mp, head); 365 365 XFS_ERROR_REPORT("xlog_header_check_mount", 366 366 XFS_ERRLEVEL_HIGH, mp); 367 - return XFS_ERROR(EFSCORRUPTED); 367 + return -EFSCORRUPTED; 368 368 } 369 369 return 0; 370 370 } ··· 462 462 while (!(bp = xlog_get_bp(log, bufblks))) { 463 463 bufblks >>= 1; 464 464 if (bufblks < log->l_sectBBsize) 465 - return ENOMEM; 465 + return -ENOMEM; 466 466 } 467 467 468 468 for (i = start_blk; i < start_blk + nbblks; i += bufblks) { ··· 524 524 525 525 if (!(bp = xlog_get_bp(log, num_blks))) { 526 526 if (!(bp = xlog_get_bp(log, 1))) 527 - return ENOMEM; 527 + return 
-ENOMEM; 528 528 smallmem = 1; 529 529 } else { 530 530 error = xlog_bread(log, start_blk, num_blks, bp, &offset); ··· 539 539 xfs_warn(log->l_mp, 540 540 "Log inconsistent (didn't find previous header)"); 541 541 ASSERT(0); 542 - error = XFS_ERROR(EIO); 542 + error = -EIO; 543 543 goto out; 544 544 } 545 545 ··· 564 564 * will be called again for the end of the physical log. 565 565 */ 566 566 if (i == -1) { 567 - error = -1; 567 + error = 1; 568 568 goto out; 569 569 } 570 570 ··· 628 628 int error, log_bbnum = log->l_logBBsize; 629 629 630 630 /* Is the end of the log device zeroed? */ 631 - if ((error = xlog_find_zeroed(log, &first_blk)) == -1) { 631 + error = xlog_find_zeroed(log, &first_blk); 632 + if (error < 0) { 633 + xfs_warn(log->l_mp, "empty log check failed"); 634 + return error; 635 + } 636 + if (error == 1) { 632 637 *return_head_blk = first_blk; 633 638 634 639 /* Is the whole lot zeroed? */ ··· 646 641 } 647 642 648 643 return 0; 649 - } else if (error) { 650 - xfs_warn(log->l_mp, "empty log check failed"); 651 - return error; 652 644 } 653 645 654 646 first_blk = 0; /* get cycle # of 1st block */ 655 647 bp = xlog_get_bp(log, 1); 656 648 if (!bp) 657 - return ENOMEM; 649 + return -ENOMEM; 658 650 659 651 error = xlog_bread(log, 0, 1, bp, &offset); 660 652 if (error) ··· 820 818 start_blk = head_blk - num_scan_bblks; /* don't read head_blk */ 821 819 822 820 /* start ptr at last block ptr before head_blk */ 823 - if ((error = xlog_find_verify_log_record(log, start_blk, 824 - &head_blk, 0)) == -1) { 825 - error = XFS_ERROR(EIO); 826 - goto bp_err; 827 - } else if (error) 821 + error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0); 822 + if (error == 1) 823 + error = -EIO; 824 + if (error) 828 825 goto bp_err; 829 826 } else { 830 827 start_blk = 0; 831 828 ASSERT(head_blk <= INT_MAX); 832 - if ((error = xlog_find_verify_log_record(log, start_blk, 833 - &head_blk, 0)) == -1) { 829 + error = xlog_find_verify_log_record(log, start_blk, 
&head_blk, 0); 830 + if (error < 0) 831 + goto bp_err; 832 + if (error == 1) { 834 833 /* We hit the beginning of the log during our search */ 835 834 start_blk = log_bbnum - (num_scan_bblks - head_blk); 836 835 new_blk = log_bbnum; 837 836 ASSERT(start_blk <= INT_MAX && 838 837 (xfs_daddr_t) log_bbnum-start_blk >= 0); 839 838 ASSERT(head_blk <= INT_MAX); 840 - if ((error = xlog_find_verify_log_record(log, 841 - start_blk, &new_blk, 842 - (int)head_blk)) == -1) { 843 - error = XFS_ERROR(EIO); 844 - goto bp_err; 845 - } else if (error) 839 + error = xlog_find_verify_log_record(log, start_blk, 840 + &new_blk, (int)head_blk); 841 + if (error == 1) 842 + error = -EIO; 843 + if (error) 846 844 goto bp_err; 847 845 if (new_blk != log_bbnum) 848 846 head_blk = new_blk; ··· 913 911 914 912 bp = xlog_get_bp(log, 1); 915 913 if (!bp) 916 - return ENOMEM; 914 + return -ENOMEM; 917 915 if (*head_blk == 0) { /* special case */ 918 916 error = xlog_bread(log, 0, 1, bp, &offset); 919 917 if (error) ··· 963 961 xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__); 964 962 xlog_put_bp(bp); 965 963 ASSERT(0); 966 - return XFS_ERROR(EIO); 964 + return -EIO; 967 965 } 968 966 969 967 /* find blk_no of tail of log */ ··· 1094 1092 * 1095 1093 * Return: 1096 1094 * 0 => the log is completely written to 1097 - * -1 => use *blk_no as the first block of the log 1098 - * >0 => error has occurred 1095 + * 1 => use *blk_no as the first block of the log 1096 + * <0 => error has occurred 1099 1097 */ 1100 1098 STATIC int 1101 1099 xlog_find_zeroed( ··· 1114 1112 /* check totally zeroed log */ 1115 1113 bp = xlog_get_bp(log, 1); 1116 1114 if (!bp) 1117 - return ENOMEM; 1115 + return -ENOMEM; 1118 1116 error = xlog_bread(log, 0, 1, bp, &offset); 1119 1117 if (error) 1120 1118 goto bp_err; ··· 1123 1121 if (first_cycle == 0) { /* completely zeroed log */ 1124 1122 *blk_no = 0; 1125 1123 xlog_put_bp(bp); 1126 - return -1; 1124 + return 1; 1127 1125 } 1128 1126 1129 1127 /* check partially 
zeroed log */ ··· 1143 1141 */ 1144 1142 xfs_warn(log->l_mp, 1145 1143 "Log inconsistent or not a log (last==0, first!=1)"); 1146 - error = XFS_ERROR(EINVAL); 1144 + error = -EINVAL; 1147 1145 goto bp_err; 1148 1146 } 1149 1147 ··· 1181 1179 * Potentially backup over partial log record write. We don't need 1182 1180 * to search the end of the log because we know it is zero. 1183 1181 */ 1184 - if ((error = xlog_find_verify_log_record(log, start_blk, 1185 - &last_blk, 0)) == -1) { 1186 - error = XFS_ERROR(EIO); 1187 - goto bp_err; 1188 - } else if (error) 1189 - goto bp_err; 1182 + error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0); 1183 + if (error == 1) 1184 + error = -EIO; 1185 + if (error) 1186 + goto bp_err; 1190 1187 1191 1188 *blk_no = last_blk; 1192 1189 bp_err: 1193 1190 xlog_put_bp(bp); 1194 1191 if (error) 1195 1192 return error; 1196 - return -1; 1193 + return 1; 1197 1194 } 1198 1195 1199 1196 /* ··· 1252 1251 while (!(bp = xlog_get_bp(log, bufblks))) { 1253 1252 bufblks >>= 1; 1254 1253 if (bufblks < sectbb) 1255 - return ENOMEM; 1254 + return -ENOMEM; 1256 1255 } 1257 1256 1258 1257 /* We may need to do a read at the start to fill in part of ··· 1355 1354 if (unlikely(head_block < tail_block || head_block >= log->l_logBBsize)) { 1356 1355 XFS_ERROR_REPORT("xlog_clear_stale_blocks(1)", 1357 1356 XFS_ERRLEVEL_LOW, log->l_mp); 1358 - return XFS_ERROR(EFSCORRUPTED); 1357 + return -EFSCORRUPTED; 1359 1358 } 1360 1359 tail_distance = tail_block + (log->l_logBBsize - head_block); 1361 1360 } else { ··· 1367 1366 if (unlikely(head_block >= tail_block || head_cycle != (tail_cycle + 1))){ 1368 1367 XFS_ERROR_REPORT("xlog_clear_stale_blocks(2)", 1369 1368 XFS_ERRLEVEL_LOW, log->l_mp); 1370 - return XFS_ERROR(EFSCORRUPTED); 1369 + return -EFSCORRUPTED; 1371 1370 } 1372 1371 tail_distance = tail_block - head_block; 1373 1372 } ··· 1552 1551 xfs_warn(log->l_mp, "%s: bad header magic number", 1553 1552 __func__); 1554 1553 ASSERT(0); 1555 - return 
XFS_ERROR(EIO); 1554 + return -EIO; 1556 1555 } 1557 1556 if (len == sizeof(xfs_trans_header_t)) 1558 1557 xlog_recover_add_item(&trans->r_itemq); ··· 1582 1581 in_f->ilf_size); 1583 1582 ASSERT(0); 1584 1583 kmem_free(ptr); 1585 - return XFS_ERROR(EIO); 1584 + return -EIO; 1586 1585 } 1587 1586 1588 1587 item->ri_total = in_f->ilf_size; ··· 1703 1702 */ 1704 1703 if (!list_empty(&sort_list)) 1705 1704 list_splice_init(&sort_list, &trans->r_itemq); 1706 - error = XFS_ERROR(EIO); 1705 + error = -EIO; 1707 1706 goto out; 1708 1707 } 1709 1708 } ··· 1944 1943 item, bp); 1945 1944 XFS_ERROR_REPORT("xlog_recover_do_inode_buf", 1946 1945 XFS_ERRLEVEL_LOW, mp); 1947 - return XFS_ERROR(EFSCORRUPTED); 1946 + return -EFSCORRUPTED; 1948 1947 } 1949 1948 1950 1949 buffer_nextp = (xfs_agino_t *)xfs_buf_offset(bp, ··· 2126 2125 __uint16_t magic16; 2127 2126 __uint16_t magicda; 2128 2127 2128 + /* 2129 + * We can only do post recovery validation on items on CRC enabled 2130 + * fielsystems as we need to know when the buffer was written to be able 2131 + * to determine if we should have replayed the item. If we replay old 2132 + * metadata over a newer buffer, then it will enter a temporarily 2133 + * inconsistent state resulting in verification failures. 
Hence for now 2134 + * just avoid the verification stage for non-crc filesystems 2135 + */ 2136 + if (!xfs_sb_version_hascrc(&mp->m_sb)) 2137 + return; 2138 + 2129 2139 magic32 = be32_to_cpu(*(__be32 *)bp->b_addr); 2130 2140 magic16 = be16_to_cpu(*(__be16*)bp->b_addr); 2131 2141 magicda = be16_to_cpu(info->magic); ··· 2174 2162 bp->b_ops = &xfs_agf_buf_ops; 2175 2163 break; 2176 2164 case XFS_BLFT_AGFL_BUF: 2177 - if (!xfs_sb_version_hascrc(&mp->m_sb)) 2178 - break; 2179 2165 if (magic32 != XFS_AGFL_MAGIC) { 2180 2166 xfs_warn(mp, "Bad AGFL block magic!"); 2181 2167 ASSERT(0); ··· 2206 2196 #endif 2207 2197 break; 2208 2198 case XFS_BLFT_DINO_BUF: 2209 - /* 2210 - * we get here with inode allocation buffers, not buffers that 2211 - * track unlinked list changes. 2212 - */ 2213 2199 if (magic16 != XFS_DINODE_MAGIC) { 2214 2200 xfs_warn(mp, "Bad INODE block magic!"); 2215 2201 ASSERT(0); ··· 2285 2279 bp->b_ops = &xfs_attr3_leaf_buf_ops; 2286 2280 break; 2287 2281 case XFS_BLFT_ATTR_RMT_BUF: 2288 - if (!xfs_sb_version_hascrc(&mp->m_sb)) 2289 - break; 2290 2282 if (magic32 != XFS_ATTR3_RMT_MAGIC) { 2291 2283 xfs_warn(mp, "Bad attr remote magic!"); 2292 2284 ASSERT(0); ··· 2391 2387 /* Shouldn't be any more regions */ 2392 2388 ASSERT(i == item->ri_total); 2393 2389 2394 - /* 2395 - * We can only do post recovery validation on items on CRC enabled 2396 - * fielsystems as we need to know when the buffer was written to be able 2397 - * to determine if we should have replayed the item. If we replay old 2398 - * metadata over a newer buffer, then it will enter a temporarily 2399 - * inconsistent state resulting in verification failures. 
Hence for now 2400 - * just avoid the verification stage for non-crc filesystems 2401 - */ 2402 - if (xfs_sb_version_hascrc(&mp->m_sb)) 2403 - xlog_recover_validate_buf_type(mp, bp, buf_f); 2390 + xlog_recover_validate_buf_type(mp, bp, buf_f); 2404 2391 } 2405 2392 2406 2393 /* ··· 2399 2404 * Simple algorithm: if we have found a QUOTAOFF log item of the same type 2400 2405 * (ie. USR or GRP), then just toss this buffer away; don't recover it. 2401 2406 * Else, treat it as a regular buffer and do recovery. 2407 + * 2408 + * Return false if the buffer was tossed and true if we recovered the buffer to 2409 + * indicate to the caller if the buffer needs writing. 2402 2410 */ 2403 - STATIC void 2411 + STATIC bool 2404 2412 xlog_recover_do_dquot_buffer( 2405 2413 struct xfs_mount *mp, 2406 2414 struct xlog *log, ··· 2418 2420 /* 2419 2421 * Filesystems are required to send in quota flags at mount time. 2420 2422 */ 2421 - if (mp->m_qflags == 0) { 2422 - return; 2423 - } 2423 + if (!mp->m_qflags) 2424 + return false; 2424 2425 2425 2426 type = 0; 2426 2427 if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF) ··· 2432 2435 * This type of quotas was turned off, so ignore this buffer 2433 2436 */ 2434 2437 if (log->l_quotaoffs_flag & type) 2435 - return; 2438 + return false; 2436 2439 2437 2440 xlog_recover_do_reg_buffer(mp, item, bp, buf_f); 2441 + return true; 2438 2442 } 2439 2443 2440 2444 /* ··· 2494 2496 bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len, 2495 2497 buf_flags, NULL); 2496 2498 if (!bp) 2497 - return XFS_ERROR(ENOMEM); 2499 + return -ENOMEM; 2498 2500 error = bp->b_error; 2499 2501 if (error) { 2500 2502 xfs_buf_ioerror_alert(bp, "xlog_recover_do..(read#1)"); ··· 2502 2504 } 2503 2505 2504 2506 /* 2505 - * recover the buffer only if we get an LSN from it and it's less than 2507 + * Recover the buffer only if we get an LSN from it and it's less than 2506 2508 * the lsn of the transaction we are replaying. 
2509 + * 2510 + * Note that we have to be extremely careful of readahead here. 2511 + * Readahead does not attach verfiers to the buffers so if we don't 2512 + * actually do any replay after readahead because of the LSN we found 2513 + * in the buffer if more recent than that current transaction then we 2514 + * need to attach the verifier directly. Failure to do so can lead to 2515 + * future recovery actions (e.g. EFI and unlinked list recovery) can 2516 + * operate on the buffers and they won't get the verifier attached. This 2517 + * can lead to blocks on disk having the correct content but a stale 2518 + * CRC. 2519 + * 2520 + * It is safe to assume these clean buffers are currently up to date. 2521 + * If the buffer is dirtied by a later transaction being replayed, then 2522 + * the verifier will be reset to match whatever recover turns that 2523 + * buffer into. 2507 2524 */ 2508 2525 lsn = xlog_recover_get_buf_lsn(mp, bp); 2509 - if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) 2526 + if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) { 2527 + xlog_recover_validate_buf_type(mp, bp, buf_f); 2510 2528 goto out_release; 2529 + } 2511 2530 2512 2531 if (buf_f->blf_flags & XFS_BLF_INODE_BUF) { 2513 2532 error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f); 2533 + if (error) 2534 + goto out_release; 2514 2535 } else if (buf_f->blf_flags & 2515 2536 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) { 2516 - xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f); 2537 + bool dirty; 2538 + 2539 + dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f); 2540 + if (!dirty) 2541 + goto out_release; 2517 2542 } else { 2518 2543 xlog_recover_do_reg_buffer(mp, item, bp, buf_f); 2519 2544 } 2520 - if (error) 2521 - goto out_release; 2522 2545 2523 2546 /* 2524 2547 * Perform delayed write on the buffer. 
Asynchronous writes will be ··· 2617 2598 2618 2599 ip = xfs_inode_alloc(mp, in_f->ilf_ino); 2619 2600 if (!ip) 2620 - return ENOMEM; 2601 + return -ENOMEM; 2621 2602 2622 2603 /* instantiate the inode */ 2623 2604 xfs_dinode_from_disk(&ip->i_d, dip); ··· 2695 2676 bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len, 0, 2696 2677 &xfs_inode_buf_ops); 2697 2678 if (!bp) { 2698 - error = ENOMEM; 2679 + error = -ENOMEM; 2699 2680 goto error; 2700 2681 } 2701 2682 error = bp->b_error; ··· 2716 2697 __func__, dip, bp, in_f->ilf_ino); 2717 2698 XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)", 2718 2699 XFS_ERRLEVEL_LOW, mp); 2719 - error = EFSCORRUPTED; 2700 + error = -EFSCORRUPTED; 2720 2701 goto out_release; 2721 2702 } 2722 2703 dicp = item->ri_buf[1].i_addr; ··· 2726 2707 __func__, item, in_f->ilf_ino); 2727 2708 XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)", 2728 2709 XFS_ERRLEVEL_LOW, mp); 2729 - error = EFSCORRUPTED; 2710 + error = -EFSCORRUPTED; 2730 2711 goto out_release; 2731 2712 } 2732 2713 ··· 2783 2764 "%s: Bad regular inode log record, rec ptr 0x%p, " 2784 2765 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", 2785 2766 __func__, item, dip, bp, in_f->ilf_ino); 2786 - error = EFSCORRUPTED; 2767 + error = -EFSCORRUPTED; 2787 2768 goto out_release; 2788 2769 } 2789 2770 } else if (unlikely(S_ISDIR(dicp->di_mode))) { ··· 2796 2777 "%s: Bad dir inode log record, rec ptr 0x%p, " 2797 2778 "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", 2798 2779 __func__, item, dip, bp, in_f->ilf_ino); 2799 - error = EFSCORRUPTED; 2780 + error = -EFSCORRUPTED; 2800 2781 goto out_release; 2801 2782 } 2802 2783 } ··· 2809 2790 __func__, item, dip, bp, in_f->ilf_ino, 2810 2791 dicp->di_nextents + dicp->di_anextents, 2811 2792 dicp->di_nblocks); 2812 - error = EFSCORRUPTED; 2793 + error = -EFSCORRUPTED; 2813 2794 goto out_release; 2814 2795 } 2815 2796 if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) { ··· 2819 2800 "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, " 
2820 2801 "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__, 2821 2802 item, dip, bp, in_f->ilf_ino, dicp->di_forkoff); 2822 - error = EFSCORRUPTED; 2803 + error = -EFSCORRUPTED; 2823 2804 goto out_release; 2824 2805 } 2825 2806 isize = xfs_icdinode_size(dicp->di_version); ··· 2829 2810 xfs_alert(mp, 2830 2811 "%s: Bad inode log record length %d, rec ptr 0x%p", 2831 2812 __func__, item->ri_buf[1].i_len, item); 2832 - error = EFSCORRUPTED; 2813 + error = -EFSCORRUPTED; 2833 2814 goto out_release; 2834 2815 } 2835 2816 ··· 2917 2898 default: 2918 2899 xfs_warn(log->l_mp, "%s: Invalid flag", __func__); 2919 2900 ASSERT(0); 2920 - error = EIO; 2901 + error = -EIO; 2921 2902 goto out_release; 2922 2903 } 2923 2904 } ··· 2938 2919 error: 2939 2920 if (need_free) 2940 2921 kmem_free(in_f); 2941 - return XFS_ERROR(error); 2922 + return error; 2942 2923 } 2943 2924 2944 2925 /* ··· 2965 2946 if (qoff_f->qf_flags & XFS_GQUOTA_ACCT) 2966 2947 log->l_quotaoffs_flag |= XFS_DQ_GROUP; 2967 2948 2968 - return (0); 2949 + return 0; 2969 2950 } 2970 2951 2971 2952 /* ··· 2990 2971 * Filesystems are required to send in quota flags at mount time. 2991 2972 */ 2992 2973 if (mp->m_qflags == 0) 2993 - return (0); 2974 + return 0; 2994 2975 2995 2976 recddq = item->ri_buf[1].i_addr; 2996 2977 if (recddq == NULL) { 2997 2978 xfs_alert(log->l_mp, "NULL dquot in %s.", __func__); 2998 - return XFS_ERROR(EIO); 2979 + return -EIO; 2999 2980 } 3000 2981 if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) { 3001 2982 xfs_alert(log->l_mp, "dquot too small (%d) in %s.", 3002 2983 item->ri_buf[1].i_len, __func__); 3003 - return XFS_ERROR(EIO); 2984 + return -EIO; 3004 2985 } 3005 2986 3006 2987 /* ··· 3009 2990 type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP); 3010 2991 ASSERT(type); 3011 2992 if (log->l_quotaoffs_flag & type) 3012 - return (0); 2993 + return 0; 3013 2994 3014 2995 /* 3015 2996 * At this point we know that quota was _not_ turned off. 
··· 3026 3007 error = xfs_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN, 3027 3008 "xlog_recover_dquot_pass2 (log copy)"); 3028 3009 if (error) 3029 - return XFS_ERROR(EIO); 3010 + return -EIO; 3030 3011 ASSERT(dq_f->qlf_len == 1); 3031 3012 3013 + /* 3014 + * At this point we are assuming that the dquots have been allocated 3015 + * and hence the buffer has valid dquots stamped in it. It should, 3016 + * therefore, pass verifier validation. If the dquot is bad, then the 3017 + * we'll return an error here, so we don't need to specifically check 3018 + * the dquot in the buffer after the verifier has run. 3019 + */ 3032 3020 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno, 3033 3021 XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp, 3034 - NULL); 3022 + &xfs_dquot_buf_ops); 3035 3023 if (error) 3036 3024 return error; 3037 3025 3038 3026 ASSERT(bp); 3039 3027 ddq = (xfs_disk_dquot_t *)xfs_buf_offset(bp, dq_f->qlf_boffset); 3040 - 3041 - /* 3042 - * At least the magic num portion should be on disk because this 3043 - * was among a chunk of dquots created earlier, and we did some 3044 - * minimal initialization then. 
3045 - */ 3046 - error = xfs_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN, 3047 - "xlog_recover_dquot_pass2"); 3048 - if (error) { 3049 - xfs_buf_relse(bp); 3050 - return XFS_ERROR(EIO); 3051 - } 3052 3028 3053 3029 /* 3054 3030 * If the dquot has an LSN in it, recover the dquot only if it's less ··· 3192 3178 icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr; 3193 3179 if (icl->icl_type != XFS_LI_ICREATE) { 3194 3180 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type"); 3195 - return EINVAL; 3181 + return -EINVAL; 3196 3182 } 3197 3183 3198 3184 if (icl->icl_size != 1) { 3199 3185 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size"); 3200 - return EINVAL; 3186 + return -EINVAL; 3201 3187 } 3202 3188 3203 3189 agno = be32_to_cpu(icl->icl_ag); 3204 3190 if (agno >= mp->m_sb.sb_agcount) { 3205 3191 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno"); 3206 - return EINVAL; 3192 + return -EINVAL; 3207 3193 } 3208 3194 agbno = be32_to_cpu(icl->icl_agbno); 3209 3195 if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) { 3210 3196 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno"); 3211 - return EINVAL; 3197 + return -EINVAL; 3212 3198 } 3213 3199 isize = be32_to_cpu(icl->icl_isize); 3214 3200 if (isize != mp->m_sb.sb_inodesize) { 3215 3201 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize"); 3216 - return EINVAL; 3202 + return -EINVAL; 3217 3203 } 3218 3204 count = be32_to_cpu(icl->icl_count); 3219 3205 if (!count) { 3220 3206 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count"); 3221 - return EINVAL; 3207 + return -EINVAL; 3222 3208 } 3223 3209 length = be32_to_cpu(icl->icl_length); 3224 3210 if (!length || length >= mp->m_sb.sb_agblocks) { 3225 3211 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length"); 3226 - return EINVAL; 3212 + return -EINVAL; 3227 3213 } 3228 3214 3229 3215 /* existing allocation is fixed value */ ··· 3232 3218 if (count != 
mp->m_ialloc_inos || 3233 3219 length != mp->m_ialloc_blks) { 3234 3220 xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count 2"); 3235 - return EINVAL; 3221 + return -EINVAL; 3236 3222 } 3237 3223 3238 3224 /* ··· 3403 3389 xfs_warn(log->l_mp, "%s: invalid item type (%d)", 3404 3390 __func__, ITEM_TYPE(item)); 3405 3391 ASSERT(0); 3406 - return XFS_ERROR(EIO); 3392 + return -EIO; 3407 3393 } 3408 3394 } 3409 3395 ··· 3439 3425 xfs_warn(log->l_mp, "%s: invalid item type (%d)", 3440 3426 __func__, ITEM_TYPE(item)); 3441 3427 ASSERT(0); 3442 - return XFS_ERROR(EIO); 3428 + return -EIO; 3443 3429 } 3444 3430 } 3445 3431 ··· 3574 3560 3575 3561 /* check the log format matches our own - else we can't recover */ 3576 3562 if (xlog_header_check_recover(log->l_mp, rhead)) 3577 - return (XFS_ERROR(EIO)); 3563 + return -EIO; 3578 3564 3579 3565 while ((dp < lp) && num_logops) { 3580 3566 ASSERT(dp + sizeof(xlog_op_header_t) <= lp); ··· 3585 3571 xfs_warn(log->l_mp, "%s: bad clientid 0x%x", 3586 3572 __func__, ohead->oh_clientid); 3587 3573 ASSERT(0); 3588 - return (XFS_ERROR(EIO)); 3574 + return -EIO; 3589 3575 } 3590 3576 tid = be32_to_cpu(ohead->oh_tid); 3591 3577 hash = XLOG_RHASH(tid); ··· 3599 3585 xfs_warn(log->l_mp, "%s: bad length 0x%x", 3600 3586 __func__, be32_to_cpu(ohead->oh_len)); 3601 3587 WARN_ON(1); 3602 - return (XFS_ERROR(EIO)); 3588 + return -EIO; 3603 3589 } 3604 3590 flags = ohead->oh_flags & ~XLOG_END_TRANS; 3605 3591 if (flags & XLOG_WAS_CONT_TRANS) ··· 3621 3607 xfs_warn(log->l_mp, "%s: bad transaction", 3622 3608 __func__); 3623 3609 ASSERT(0); 3624 - error = XFS_ERROR(EIO); 3610 + error = -EIO; 3625 3611 break; 3626 3612 case 0: 3627 3613 case XLOG_CONTINUE_TRANS: ··· 3632 3618 xfs_warn(log->l_mp, "%s: bad flag 0x%x", 3633 3619 __func__, flags); 3634 3620 ASSERT(0); 3635 - error = XFS_ERROR(EIO); 3621 + error = -EIO; 3636 3622 break; 3637 3623 } 3638 3624 if (error) { ··· 3683 3669 */ 3684 3670 set_bit(XFS_EFI_RECOVERED, &efip->efi_flags); 
3685 3671 xfs_efi_release(efip, efip->efi_format.efi_nextents); 3686 - return XFS_ERROR(EIO); 3672 + return -EIO; 3687 3673 } 3688 3674 } 3689 3675 ··· 3983 3969 * CRC protection by punting an error back up the stack. 3984 3970 */ 3985 3971 if (xfs_sb_version_hascrc(&log->l_mp->m_sb)) 3986 - return EFSCORRUPTED; 3972 + return -EFSCORRUPTED; 3987 3973 } 3988 3974 3989 3975 return 0; ··· 4032 4018 if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) { 4033 4019 XFS_ERROR_REPORT("xlog_valid_rec_header(1)", 4034 4020 XFS_ERRLEVEL_LOW, log->l_mp); 4035 - return XFS_ERROR(EFSCORRUPTED); 4021 + return -EFSCORRUPTED; 4036 4022 } 4037 4023 if (unlikely( 4038 4024 (!rhead->h_version || 4039 4025 (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) { 4040 4026 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).", 4041 4027 __func__, be32_to_cpu(rhead->h_version)); 4042 - return XFS_ERROR(EIO); 4028 + return -EIO; 4043 4029 } 4044 4030 4045 4031 /* LR body must have data or it wouldn't have been written */ ··· 4047 4033 if (unlikely( hlen <= 0 || hlen > INT_MAX )) { 4048 4034 XFS_ERROR_REPORT("xlog_valid_rec_header(2)", 4049 4035 XFS_ERRLEVEL_LOW, log->l_mp); 4050 - return XFS_ERROR(EFSCORRUPTED); 4036 + return -EFSCORRUPTED; 4051 4037 } 4052 4038 if (unlikely( blkno > log->l_logBBsize || blkno > INT_MAX )) { 4053 4039 XFS_ERROR_REPORT("xlog_valid_rec_header(3)", 4054 4040 XFS_ERRLEVEL_LOW, log->l_mp); 4055 - return XFS_ERROR(EFSCORRUPTED); 4041 + return -EFSCORRUPTED; 4056 4042 } 4057 4043 return 0; 4058 4044 } ··· 4095 4081 */ 4096 4082 hbp = xlog_get_bp(log, 1); 4097 4083 if (!hbp) 4098 - return ENOMEM; 4084 + return -ENOMEM; 4099 4085 4100 4086 error = xlog_bread(log, tail_blk, 1, hbp, &offset); 4101 4087 if (error) ··· 4124 4110 } 4125 4111 4126 4112 if (!hbp) 4127 - return ENOMEM; 4113 + return -ENOMEM; 4128 4114 dbp = xlog_get_bp(log, BTOBB(h_size)); 4129 4115 if (!dbp) { 4130 4116 xlog_put_bp(hbp); 4131 - return ENOMEM; 4117 + return 
-ENOMEM; 4132 4118 } 4133 4119 4134 4120 memset(rhash, 0, sizeof(rhash)); ··· 4402 4388 * If IO errors happened during recovery, bail out. 4403 4389 */ 4404 4390 if (XFS_FORCED_SHUTDOWN(log->l_mp)) { 4405 - return (EIO); 4391 + return -EIO; 4406 4392 } 4407 4393 4408 4394 /* ··· 4429 4415 4430 4416 if (XFS_FORCED_SHUTDOWN(log->l_mp)) { 4431 4417 xfs_buf_relse(bp); 4432 - return XFS_ERROR(EIO); 4418 + return -EIO; 4433 4419 } 4434 4420 4435 4421 xfs_buf_iorequest(bp); ··· 4506 4492 "Please recover the log on a kernel that supports the unknown features.", 4507 4493 (log->l_mp->m_sb.sb_features_log_incompat & 4508 4494 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)); 4509 - return EINVAL; 4495 + return -EINVAL; 4510 4496 } 4511 4497 4512 4498 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
fs/xfs/xfs_log_recover.h fs/xfs/libxfs/xfs_log_recover.h
fs/xfs/xfs_log_rlimit.c fs/xfs/libxfs/xfs_log_rlimit.c
+52 -45
fs/xfs/xfs_mount.c
··· 42 42 #include "xfs_trace.h" 43 43 #include "xfs_icache.h" 44 44 #include "xfs_dinode.h" 45 + #include "xfs_sysfs.h" 45 46 46 47 47 48 #ifdef HAVE_PERCPU_SB ··· 61 60 static int xfs_uuid_table_size; 62 61 static uuid_t *xfs_uuid_table; 63 62 63 + extern struct kset *xfs_kset; 64 + 64 65 /* 65 66 * See if the UUID is unique among mounted XFS filesystems. 66 67 * Mount fails if UUID is nil or a FS with the same UUID is already mounted. ··· 79 76 80 77 if (uuid_is_nil(uuid)) { 81 78 xfs_warn(mp, "Filesystem has nil UUID - can't mount"); 82 - return XFS_ERROR(EINVAL); 79 + return -EINVAL; 83 80 } 84 81 85 82 mutex_lock(&xfs_uuid_table_mutex); ··· 107 104 out_duplicate: 108 105 mutex_unlock(&xfs_uuid_table_mutex); 109 106 xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid); 110 - return XFS_ERROR(EINVAL); 107 + return -EINVAL; 111 108 } 112 109 113 110 STATIC void ··· 176 173 ASSERT(PAGE_SHIFT >= sbp->sb_blocklog); 177 174 ASSERT(sbp->sb_blocklog >= BBSHIFT); 178 175 179 - #if XFS_BIG_BLKNOS /* Limited by ULONG_MAX of page cache index */ 176 + /* Limited by ULONG_MAX of page cache index */ 180 177 if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX) 181 - return EFBIG; 182 - #else /* Limited by UINT_MAX of sectors */ 183 - if (nblocks << (sbp->sb_blocklog - BBSHIFT) > UINT_MAX) 184 - return EFBIG; 185 - #endif 178 + return -EFBIG; 186 179 return 0; 187 180 } 188 181 ··· 249 250 mp->m_flags &= ~XFS_MOUNT_32BITINODES; 250 251 251 252 if (mp->m_flags & XFS_MOUNT_32BITINODES) 252 - index = xfs_set_inode32(mp); 253 + index = xfs_set_inode32(mp, agcount); 253 254 else 254 - index = xfs_set_inode64(mp); 255 + index = xfs_set_inode64(mp, agcount); 255 256 256 257 if (maxagi) 257 258 *maxagi = index; ··· 307 308 if (!bp) { 308 309 if (loud) 309 310 xfs_warn(mp, "SB buffer read failed"); 310 - return EIO; 311 + return -EIO; 311 312 } 312 313 if (bp->b_error) { 313 314 error = bp->b_error; 314 315 if (loud) 315 316 xfs_warn(mp, "SB validate 
failed with error %d.", error); 316 317 /* bad CRC means corrupted metadata */ 317 - if (error == EFSBADCRC) 318 - error = EFSCORRUPTED; 318 + if (error == -EFSBADCRC) 319 + error = -EFSCORRUPTED; 319 320 goto release_buf; 320 321 } 321 322 ··· 323 324 * Initialize the mount structure from the superblock. 324 325 */ 325 326 xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp)); 326 - xfs_sb_quota_from_disk(sbp); 327 327 328 328 /* 329 329 * If we haven't validated the superblock, do so now before we try ··· 331 333 if (sbp->sb_magicnum != XFS_SB_MAGIC) { 332 334 if (loud) 333 335 xfs_warn(mp, "Invalid superblock magic number"); 334 - error = EINVAL; 336 + error = -EINVAL; 335 337 goto release_buf; 336 338 } 337 339 ··· 342 344 if (loud) 343 345 xfs_warn(mp, "device supports %u byte sectors (not %u)", 344 346 sector_size, sbp->sb_sectsize); 345 - error = ENOSYS; 347 + error = -ENOSYS; 346 348 goto release_buf; 347 349 } 348 350 ··· 390 392 xfs_warn(mp, 391 393 "alignment check failed: sunit/swidth vs. blocksize(%d)", 392 394 sbp->sb_blocksize); 393 - return XFS_ERROR(EINVAL); 395 + return -EINVAL; 394 396 } else { 395 397 /* 396 398 * Convert the stripe unit and width to FSBs. ··· 400 402 xfs_warn(mp, 401 403 "alignment check failed: sunit/swidth vs. 
agsize(%d)", 402 404 sbp->sb_agblocks); 403 - return XFS_ERROR(EINVAL); 405 + return -EINVAL; 404 406 } else if (mp->m_dalign) { 405 407 mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth); 406 408 } else { 407 409 xfs_warn(mp, 408 410 "alignment check failed: sunit(%d) less than bsize(%d)", 409 411 mp->m_dalign, sbp->sb_blocksize); 410 - return XFS_ERROR(EINVAL); 412 + return -EINVAL; 411 413 } 412 414 } 413 415 ··· 427 429 } else { 428 430 xfs_warn(mp, 429 431 "cannot change alignment: superblock does not support data alignment"); 430 - return XFS_ERROR(EINVAL); 432 + return -EINVAL; 431 433 } 432 434 } else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN && 433 435 xfs_sb_version_hasdalign(&mp->m_sb)) { ··· 554 556 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks); 555 557 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) { 556 558 xfs_warn(mp, "filesystem size mismatch detected"); 557 - return XFS_ERROR(EFBIG); 559 + return -EFBIG; 558 560 } 559 561 bp = xfs_buf_read_uncached(mp->m_ddev_targp, 560 562 d - XFS_FSS_TO_BB(mp, 1), 561 563 XFS_FSS_TO_BB(mp, 1), 0, NULL); 562 564 if (!bp) { 563 565 xfs_warn(mp, "last sector read failed"); 564 - return EIO; 566 + return -EIO; 565 567 } 566 568 xfs_buf_relse(bp); 567 569 ··· 569 571 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks); 570 572 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) { 571 573 xfs_warn(mp, "log size mismatch detected"); 572 - return XFS_ERROR(EFBIG); 574 + return -EFBIG; 573 575 } 574 576 bp = xfs_buf_read_uncached(mp->m_logdev_targp, 575 577 d - XFS_FSB_TO_BB(mp, 1), 576 578 XFS_FSB_TO_BB(mp, 1), 0, NULL); 577 579 if (!bp) { 578 580 xfs_warn(mp, "log device read failed"); 579 - return EIO; 581 + return -EIO; 580 582 } 581 583 xfs_buf_relse(bp); 582 584 } ··· 729 731 730 732 xfs_set_maxicount(mp); 731 733 732 - error = xfs_uuid_mount(mp); 734 + mp->m_kobj.kobject.kset = xfs_kset; 735 + error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname); 733 736 if (error) 
734 737 goto out; 738 + 739 + error = xfs_uuid_mount(mp); 740 + if (error) 741 + goto out_remove_sysfs; 735 742 736 743 /* 737 744 * Set the minimum read and write sizes ··· 819 816 if (!sbp->sb_logblocks) { 820 817 xfs_warn(mp, "no log defined"); 821 818 XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp); 822 - error = XFS_ERROR(EFSCORRUPTED); 819 + error = -EFSCORRUPTED; 823 820 goto out_free_perag; 824 821 } 825 822 ··· 858 855 !mp->m_sb.sb_inprogress) { 859 856 error = xfs_initialize_perag_data(mp, sbp->sb_agcount); 860 857 if (error) 861 - goto out_fail_wait; 858 + goto out_log_dealloc; 862 859 } 863 860 864 861 /* ··· 879 876 xfs_iunlock(rip, XFS_ILOCK_EXCL); 880 877 XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW, 881 878 mp); 882 - error = XFS_ERROR(EFSCORRUPTED); 879 + error = -EFSCORRUPTED; 883 880 goto out_rele_rip; 884 881 } 885 882 mp->m_rootip = rip; /* save it */ ··· 930 927 xfs_notice(mp, "resetting quota flags"); 931 928 error = xfs_mount_reset_sbqflags(mp); 932 929 if (error) 933 - return error; 930 + goto out_rtunmount; 934 931 } 935 932 } 936 933 ··· 992 989 xfs_da_unmount(mp); 993 990 out_remove_uuid: 994 991 xfs_uuid_unmount(mp); 992 + out_remove_sysfs: 993 + xfs_sysfs_del(&mp->m_kobj); 995 994 out: 996 995 return error; 997 996 } ··· 1076 1071 xfs_errortag_clearall(mp, 0); 1077 1072 #endif 1078 1073 xfs_free_perag(mp); 1074 + 1075 + xfs_sysfs_del(&mp->m_kobj); 1079 1076 } 1080 1077 1081 1078 int ··· 1159 1152 lcounter += delta; 1160 1153 if (lcounter < 0) { 1161 1154 ASSERT(0); 1162 - return XFS_ERROR(EINVAL); 1155 + return -EINVAL; 1163 1156 } 1164 1157 mp->m_sb.sb_icount = lcounter; 1165 1158 return 0; ··· 1168 1161 lcounter += delta; 1169 1162 if (lcounter < 0) { 1170 1163 ASSERT(0); 1171 - return XFS_ERROR(EINVAL); 1164 + return -EINVAL; 1172 1165 } 1173 1166 mp->m_sb.sb_ifree = lcounter; 1174 1167 return 0; ··· 1198 1191 * blocks if were allowed to. 
1199 1192 */ 1200 1193 if (!rsvd) 1201 - return XFS_ERROR(ENOSPC); 1194 + return -ENOSPC; 1202 1195 1203 1196 lcounter = (long long)mp->m_resblks_avail + delta; 1204 1197 if (lcounter >= 0) { ··· 1209 1202 "Filesystem \"%s\": reserve blocks depleted! " 1210 1203 "Consider increasing reserve pool size.", 1211 1204 mp->m_fsname); 1212 - return XFS_ERROR(ENOSPC); 1205 + return -ENOSPC; 1213 1206 } 1214 1207 1215 1208 mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp); ··· 1218 1211 lcounter = (long long)mp->m_sb.sb_frextents; 1219 1212 lcounter += delta; 1220 1213 if (lcounter < 0) { 1221 - return XFS_ERROR(ENOSPC); 1214 + return -ENOSPC; 1222 1215 } 1223 1216 mp->m_sb.sb_frextents = lcounter; 1224 1217 return 0; ··· 1227 1220 lcounter += delta; 1228 1221 if (lcounter < 0) { 1229 1222 ASSERT(0); 1230 - return XFS_ERROR(EINVAL); 1223 + return -EINVAL; 1231 1224 } 1232 1225 mp->m_sb.sb_dblocks = lcounter; 1233 1226 return 0; ··· 1236 1229 scounter += delta; 1237 1230 if (scounter < 0) { 1238 1231 ASSERT(0); 1239 - return XFS_ERROR(EINVAL); 1232 + return -EINVAL; 1240 1233 } 1241 1234 mp->m_sb.sb_agcount = scounter; 1242 1235 return 0; ··· 1245 1238 scounter += delta; 1246 1239 if (scounter < 0) { 1247 1240 ASSERT(0); 1248 - return XFS_ERROR(EINVAL); 1241 + return -EINVAL; 1249 1242 } 1250 1243 mp->m_sb.sb_imax_pct = scounter; 1251 1244 return 0; ··· 1254 1247 scounter += delta; 1255 1248 if (scounter < 0) { 1256 1249 ASSERT(0); 1257 - return XFS_ERROR(EINVAL); 1250 + return -EINVAL; 1258 1251 } 1259 1252 mp->m_sb.sb_rextsize = scounter; 1260 1253 return 0; ··· 1263 1256 scounter += delta; 1264 1257 if (scounter < 0) { 1265 1258 ASSERT(0); 1266 - return XFS_ERROR(EINVAL); 1259 + return -EINVAL; 1267 1260 } 1268 1261 mp->m_sb.sb_rbmblocks = scounter; 1269 1262 return 0; ··· 1272 1265 lcounter += delta; 1273 1266 if (lcounter < 0) { 1274 1267 ASSERT(0); 1275 - return XFS_ERROR(EINVAL); 1268 + return -EINVAL; 1276 1269 } 1277 1270 mp->m_sb.sb_rblocks = lcounter; 1278 
1271 return 0; ··· 1281 1274 lcounter += delta; 1282 1275 if (lcounter < 0) { 1283 1276 ASSERT(0); 1284 - return XFS_ERROR(EINVAL); 1277 + return -EINVAL; 1285 1278 } 1286 1279 mp->m_sb.sb_rextents = lcounter; 1287 1280 return 0; ··· 1290 1283 scounter += delta; 1291 1284 if (scounter < 0) { 1292 1285 ASSERT(0); 1293 - return XFS_ERROR(EINVAL); 1286 + return -EINVAL; 1294 1287 } 1295 1288 mp->m_sb.sb_rextslog = scounter; 1296 1289 return 0; 1297 1290 default: 1298 1291 ASSERT(0); 1299 - return XFS_ERROR(EINVAL); 1292 + return -EINVAL; 1300 1293 } 1301 1294 } 1302 1295 ··· 1459 1452 (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) { 1460 1453 xfs_notice(mp, "%s required on read-only device.", message); 1461 1454 xfs_notice(mp, "write access unavailable, cannot proceed."); 1462 - return EROFS; 1455 + return -EROFS; 1463 1456 } 1464 1457 return 0; 1465 1458 } ··· 2002 1995 * (e.g. lots of space just got freed). After that 2003 1996 * we are done. 2004 1997 */ 2005 - if (ret != ENOSPC) 1998 + if (ret != -ENOSPC) 2006 1999 xfs_icsb_balance_counter(mp, field, 0); 2007 2000 xfs_icsb_unlock(mp); 2008 2001 return ret;
+1
fs/xfs/xfs_mount.h
··· 166 166 on the next remount,rw */ 167 167 int64_t m_low_space[XFS_LOWSP_MAX]; 168 168 /* low free space thresholds */ 169 + struct xfs_kobj m_kobj; 169 170 170 171 struct workqueue_struct *m_data_workqueue; 171 172 struct workqueue_struct *m_unwritten_workqueue;
+7 -7
fs/xfs/xfs_mru_cache.c
··· 337 337 *mrup = NULL; 338 338 339 339 if (!mrup || !grp_count || !lifetime_ms || !free_func) 340 - return EINVAL; 340 + return -EINVAL; 341 341 342 342 if (!(grp_time = msecs_to_jiffies(lifetime_ms) / grp_count)) 343 - return EINVAL; 343 + return -EINVAL; 344 344 345 345 if (!(mru = kmem_zalloc(sizeof(*mru), KM_SLEEP))) 346 - return ENOMEM; 346 + return -ENOMEM; 347 347 348 348 /* An extra list is needed to avoid reaping up to a grp_time early. */ 349 349 mru->grp_count = grp_count + 1; 350 350 mru->lists = kmem_zalloc(mru->grp_count * sizeof(*mru->lists), KM_SLEEP); 351 351 352 352 if (!mru->lists) { 353 - err = ENOMEM; 353 + err = -ENOMEM; 354 354 goto exit; 355 355 } 356 356 ··· 434 434 435 435 ASSERT(mru && mru->lists); 436 436 if (!mru || !mru->lists) 437 - return EINVAL; 437 + return -EINVAL; 438 438 439 439 if (radix_tree_preload(GFP_KERNEL)) 440 - return ENOMEM; 440 + return -ENOMEM; 441 441 442 442 INIT_LIST_HEAD(&elem->list_node); 443 443 elem->key = key; 444 444 445 445 spin_lock(&mru->lock); 446 - error = -radix_tree_insert(&mru->store, key, elem); 446 + error = radix_tree_insert(&mru->store, key, elem); 447 447 radix_tree_preload_end(); 448 448 if (!error) 449 449 _xfs_mru_cache_list_insert(mru, elem);
+117 -112
fs/xfs/xfs_qm.c
··· 98 98 next_index = be32_to_cpu(dqp->q_core.d_id) + 1; 99 99 100 100 error = execute(batch[i], data); 101 - if (error == EAGAIN) { 101 + if (error == -EAGAIN) { 102 102 skipped++; 103 103 continue; 104 104 } 105 - if (error && last_error != EFSCORRUPTED) 105 + if (error && last_error != -EFSCORRUPTED) 106 106 last_error = error; 107 107 } 108 108 109 109 mutex_unlock(&qi->qi_tree_lock); 110 110 111 111 /* bail out if the filesystem is corrupted. */ 112 - if (last_error == EFSCORRUPTED) { 112 + if (last_error == -EFSCORRUPTED) { 113 113 skipped = 0; 114 114 break; 115 115 } ··· 138 138 xfs_dqlock(dqp); 139 139 if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) { 140 140 xfs_dqunlock(dqp); 141 - return EAGAIN; 141 + return -EAGAIN; 142 142 } 143 143 144 144 dqp->dq_flags |= XFS_DQ_FREEING; ··· 218 218 if (mp->m_quotainfo) { 219 219 xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL); 220 220 xfs_qm_destroy_quotainfo(mp); 221 - } 222 - } 223 - 224 - 225 - /* 226 - * This is called from xfs_mountfs to start quotas and initialize all 227 - * necessary data structures like quotainfo. This is also responsible for 228 - * running a quotacheck as necessary. We are guaranteed that the superblock 229 - * is consistently read in at this point. 230 - * 231 - * If we fail here, the mount will continue with quota turned off. We don't 232 - * need to inidicate success or failure at all. 233 - */ 234 - void 235 - xfs_qm_mount_quotas( 236 - xfs_mount_t *mp) 237 - { 238 - int error = 0; 239 - uint sbf; 240 - 241 - /* 242 - * If quotas on realtime volumes is not supported, we disable 243 - * quotas immediately. 244 - */ 245 - if (mp->m_sb.sb_rextents) { 246 - xfs_notice(mp, "Cannot turn on quotas for realtime filesystem"); 247 - mp->m_qflags = 0; 248 - goto write_changes; 249 - } 250 - 251 - ASSERT(XFS_IS_QUOTA_RUNNING(mp)); 252 - 253 - /* 254 - * Allocate the quotainfo structure inside the mount struct, and 255 - * create quotainode(s), and change/rev superblock if necessary. 
256 - */ 257 - error = xfs_qm_init_quotainfo(mp); 258 - if (error) { 259 - /* 260 - * We must turn off quotas. 261 - */ 262 - ASSERT(mp->m_quotainfo == NULL); 263 - mp->m_qflags = 0; 264 - goto write_changes; 265 - } 266 - /* 267 - * If any of the quotas are not consistent, do a quotacheck. 268 - */ 269 - if (XFS_QM_NEED_QUOTACHECK(mp)) { 270 - error = xfs_qm_quotacheck(mp); 271 - if (error) { 272 - /* Quotacheck failed and disabled quotas. */ 273 - return; 274 - } 275 - } 276 - /* 277 - * If one type of quotas is off, then it will lose its 278 - * quotachecked status, since we won't be doing accounting for 279 - * that type anymore. 280 - */ 281 - if (!XFS_IS_UQUOTA_ON(mp)) 282 - mp->m_qflags &= ~XFS_UQUOTA_CHKD; 283 - if (!XFS_IS_GQUOTA_ON(mp)) 284 - mp->m_qflags &= ~XFS_GQUOTA_CHKD; 285 - if (!XFS_IS_PQUOTA_ON(mp)) 286 - mp->m_qflags &= ~XFS_PQUOTA_CHKD; 287 - 288 - write_changes: 289 - /* 290 - * We actually don't have to acquire the m_sb_lock at all. 291 - * This can only be called from mount, and that's single threaded. XXX 292 - */ 293 - spin_lock(&mp->m_sb_lock); 294 - sbf = mp->m_sb.sb_qflags; 295 - mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL; 296 - spin_unlock(&mp->m_sb_lock); 297 - 298 - if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) { 299 - if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) { 300 - /* 301 - * We could only have been turning quotas off. 302 - * We aren't in very good shape actually because 303 - * the incore structures are convinced that quotas are 304 - * off, but the on disk superblock doesn't know that ! 
305 - */ 306 - ASSERT(!(XFS_IS_QUOTA_RUNNING(mp))); 307 - xfs_alert(mp, "%s: Superblock update failed!", 308 - __func__); 309 - } 310 - } 311 - 312 - if (error) { 313 - xfs_warn(mp, "Failed to initialize disk quotas."); 314 - return; 315 221 } 316 222 } 317 223 ··· 577 671 578 672 qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP); 579 673 580 - error = -list_lru_init(&qinf->qi_lru); 674 + error = list_lru_init(&qinf->qi_lru); 581 675 if (error) 582 676 goto out_free_qinf; 583 677 ··· 901 995 * will leave a trace in the log indicating corruption has 902 996 * been detected. 903 997 */ 904 - if (error == EFSCORRUPTED) { 998 + if (error == -EFSCORRUPTED) { 905 999 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, 906 1000 XFS_FSB_TO_DADDR(mp, bno), 907 1001 mp->m_quotainfo->qi_dqchunklen, 0, &bp, ··· 911 1005 if (error) 912 1006 break; 913 1007 1008 + /* 1009 + * A corrupt buffer might not have a verifier attached, so 1010 + * make sure we have the correct one attached before writeback 1011 + * occurs. 1012 + */ 1013 + bp->b_ops = &xfs_dquot_buf_ops; 914 1014 xfs_qm_reset_dqcounts(mp, bp, firstid, type); 915 1015 xfs_buf_delwri_queue(bp, buffer_list); 916 1016 xfs_buf_relse(bp); ··· 1002 1090 xfs_buf_readahead(mp->m_ddev_targp, 1003 1091 XFS_FSB_TO_DADDR(mp, rablkno), 1004 1092 mp->m_quotainfo->qi_dqchunklen, 1005 - NULL); 1093 + &xfs_dquot_buf_ops); 1006 1094 rablkno++; 1007 1095 } 1008 1096 } ··· 1050 1138 /* 1051 1139 * Shouldn't be able to turn off quotas here. 
1052 1140 */ 1053 - ASSERT(error != ESRCH); 1054 - ASSERT(error != ENOENT); 1141 + ASSERT(error != -ESRCH); 1142 + ASSERT(error != -ENOENT); 1055 1143 return error; 1056 1144 } 1057 1145 ··· 1138 1226 */ 1139 1227 if (xfs_is_quota_inode(&mp->m_sb, ino)) { 1140 1228 *res = BULKSTAT_RV_NOTHING; 1141 - return XFS_ERROR(EINVAL); 1229 + return -EINVAL; 1142 1230 } 1143 1231 1144 1232 /* ··· 1242 1330 * Walk thru all the filesystem inodes and construct a consistent view 1243 1331 * of the disk quota world. If the quotacheck fails, disable quotas. 1244 1332 */ 1245 - int 1333 + STATIC int 1246 1334 xfs_qm_quotacheck( 1247 1335 xfs_mount_t *mp) 1248 1336 { ··· 1375 1463 } 1376 1464 } else 1377 1465 xfs_notice(mp, "Quotacheck: Done."); 1378 - return (error); 1466 + return error; 1467 + } 1468 + 1469 + /* 1470 + * This is called from xfs_mountfs to start quotas and initialize all 1471 + * necessary data structures like quotainfo. This is also responsible for 1472 + * running a quotacheck as necessary. We are guaranteed that the superblock 1473 + * is consistently read in at this point. 1474 + * 1475 + * If we fail here, the mount will continue with quota turned off. We don't 1476 + * need to inidicate success or failure at all. 1477 + */ 1478 + void 1479 + xfs_qm_mount_quotas( 1480 + struct xfs_mount *mp) 1481 + { 1482 + int error = 0; 1483 + uint sbf; 1484 + 1485 + /* 1486 + * If quotas on realtime volumes is not supported, we disable 1487 + * quotas immediately. 1488 + */ 1489 + if (mp->m_sb.sb_rextents) { 1490 + xfs_notice(mp, "Cannot turn on quotas for realtime filesystem"); 1491 + mp->m_qflags = 0; 1492 + goto write_changes; 1493 + } 1494 + 1495 + ASSERT(XFS_IS_QUOTA_RUNNING(mp)); 1496 + 1497 + /* 1498 + * Allocate the quotainfo structure inside the mount struct, and 1499 + * create quotainode(s), and change/rev superblock if necessary. 1500 + */ 1501 + error = xfs_qm_init_quotainfo(mp); 1502 + if (error) { 1503 + /* 1504 + * We must turn off quotas. 
1505 + */ 1506 + ASSERT(mp->m_quotainfo == NULL); 1507 + mp->m_qflags = 0; 1508 + goto write_changes; 1509 + } 1510 + /* 1511 + * If any of the quotas are not consistent, do a quotacheck. 1512 + */ 1513 + if (XFS_QM_NEED_QUOTACHECK(mp)) { 1514 + error = xfs_qm_quotacheck(mp); 1515 + if (error) { 1516 + /* Quotacheck failed and disabled quotas. */ 1517 + return; 1518 + } 1519 + } 1520 + /* 1521 + * If one type of quotas is off, then it will lose its 1522 + * quotachecked status, since we won't be doing accounting for 1523 + * that type anymore. 1524 + */ 1525 + if (!XFS_IS_UQUOTA_ON(mp)) 1526 + mp->m_qflags &= ~XFS_UQUOTA_CHKD; 1527 + if (!XFS_IS_GQUOTA_ON(mp)) 1528 + mp->m_qflags &= ~XFS_GQUOTA_CHKD; 1529 + if (!XFS_IS_PQUOTA_ON(mp)) 1530 + mp->m_qflags &= ~XFS_PQUOTA_CHKD; 1531 + 1532 + write_changes: 1533 + /* 1534 + * We actually don't have to acquire the m_sb_lock at all. 1535 + * This can only be called from mount, and that's single threaded. XXX 1536 + */ 1537 + spin_lock(&mp->m_sb_lock); 1538 + sbf = mp->m_sb.sb_qflags; 1539 + mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL; 1540 + spin_unlock(&mp->m_sb_lock); 1541 + 1542 + if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) { 1543 + if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) { 1544 + /* 1545 + * We could only have been turning quotas off. 1546 + * We aren't in very good shape actually because 1547 + * the incore structures are convinced that quotas are 1548 + * off, but the on disk superblock doesn't know that ! 
1549 + */ 1550 + ASSERT(!(XFS_IS_QUOTA_RUNNING(mp))); 1551 + xfs_alert(mp, "%s: Superblock update failed!", 1552 + __func__); 1553 + } 1554 + } 1555 + 1556 + if (error) { 1557 + xfs_warn(mp, "Failed to initialize disk quotas."); 1558 + return; 1559 + } 1379 1560 } 1380 1561 1381 1562 /* ··· 1498 1493 error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 1499 1494 0, 0, &uip); 1500 1495 if (error) 1501 - return XFS_ERROR(error); 1496 + return error; 1502 1497 } 1503 1498 if (XFS_IS_GQUOTA_ON(mp) && 1504 1499 mp->m_sb.sb_gquotino != NULLFSINO) { ··· 1568 1563 IRELE(gip); 1569 1564 if (pip) 1570 1565 IRELE(pip); 1571 - return XFS_ERROR(error); 1566 + return error; 1572 1567 } 1573 1568 1574 1569 STATIC void ··· 1684 1679 XFS_QMOPT_DOWARN, 1685 1680 &uq); 1686 1681 if (error) { 1687 - ASSERT(error != ENOENT); 1682 + ASSERT(error != -ENOENT); 1688 1683 return error; 1689 1684 } 1690 1685 /* ··· 1711 1706 XFS_QMOPT_DOWARN, 1712 1707 &gq); 1713 1708 if (error) { 1714 - ASSERT(error != ENOENT); 1709 + ASSERT(error != -ENOENT); 1715 1710 goto error_rele; 1716 1711 } 1717 1712 xfs_dqunlock(gq); ··· 1731 1726 XFS_QMOPT_DOWARN, 1732 1727 &pq); 1733 1728 if (error) { 1734 - ASSERT(error != ENOENT); 1729 + ASSERT(error != -ENOENT); 1735 1730 goto error_rele; 1736 1731 } 1737 1732 xfs_dqunlock(pq); ··· 1900 1895 -((xfs_qcnt_t)delblks), 0, blkflags); 1901 1896 } 1902 1897 1903 - return (0); 1898 + return 0; 1904 1899 } 1905 1900 1906 1901 int
-1
fs/xfs/xfs_qm.h
··· 157 157 #define XFS_QM_RTBWARNLIMIT 5 158 158 159 159 extern void xfs_qm_destroy_quotainfo(struct xfs_mount *); 160 - extern int xfs_qm_quotacheck(struct xfs_mount *); 161 160 extern int xfs_qm_write_sb_changes(struct xfs_mount *, __int64_t); 162 161 163 162 /* dquot stuff */
+1 -1
fs/xfs/xfs_qm_bhv.c
··· 117 117 (uquotaondisk ? " usrquota" : ""), 118 118 (gquotaondisk ? " grpquota" : ""), 119 119 (pquotaondisk ? " prjquota" : "")); 120 - return XFS_ERROR(EPERM); 120 + return -EPERM; 121 121 } 122 122 123 123 if (XFS_IS_QUOTA_ON(mp) || quotaondisk) {
+23 -23
fs/xfs/xfs_qm_syscalls.c
··· 64 64 /* 65 65 * No file system can have quotas enabled on disk but not in core. 66 66 * Note that quota utilities (like quotaoff) _expect_ 67 - * errno == EEXIST here. 67 + * errno == -EEXIST here. 68 68 */ 69 69 if ((mp->m_qflags & flags) == 0) 70 - return XFS_ERROR(EEXIST); 70 + return -EEXIST; 71 71 error = 0; 72 72 73 73 flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD); ··· 94 94 95 95 /* XXX what to do if error ? Revert back to old vals incore ? */ 96 96 error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS); 97 - return (error); 97 + return error; 98 98 } 99 99 100 100 dqtype = 0; ··· 198 198 if (mp->m_qflags == 0) { 199 199 mutex_unlock(&q->qi_quotaofflock); 200 200 xfs_qm_destroy_quotainfo(mp); 201 - return (0); 201 + return 0; 202 202 } 203 203 204 204 /* ··· 278 278 xfs_mount_t *mp, 279 279 uint flags) 280 280 { 281 - int error = EINVAL; 281 + int error = -EINVAL; 282 282 283 283 if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0 || 284 284 (flags & ~XFS_DQ_ALLTYPES)) { 285 285 xfs_debug(mp, "%s: flags=%x m_qflags=%x", 286 286 __func__, flags, mp->m_qflags); 287 - return XFS_ERROR(EINVAL); 287 + return -EINVAL; 288 288 } 289 289 290 290 if (flags & XFS_DQ_USER) { ··· 328 328 if (flags == 0) { 329 329 xfs_debug(mp, "%s: zero flags, m_qflags=%x", 330 330 __func__, mp->m_qflags); 331 - return XFS_ERROR(EINVAL); 331 + return -EINVAL; 332 332 } 333 333 334 334 /* No fs can turn on quotas with a delayed effect */ ··· 351 351 xfs_debug(mp, 352 352 "%s: Can't enforce without acct, flags=%x sbflags=%x", 353 353 __func__, flags, mp->m_sb.sb_qflags); 354 - return XFS_ERROR(EINVAL); 354 + return -EINVAL; 355 355 } 356 356 /* 357 357 * If everything's up to-date incore, then don't waste time. 358 358 */ 359 359 if ((mp->m_qflags & flags) == flags) 360 - return XFS_ERROR(EEXIST); 360 + return -EEXIST; 361 361 362 362 /* 363 363 * Change sb_qflags on disk but not incore mp->qflags ··· 372 372 * There's nothing to change if it's the same. 
373 373 */ 374 374 if ((qf & flags) == flags && sbflags == 0) 375 - return XFS_ERROR(EEXIST); 375 + return -EEXIST; 376 376 sbflags |= XFS_SB_QFLAGS; 377 377 378 378 if ((error = xfs_qm_write_sb_changes(mp, sbflags))) 379 - return (error); 379 + return error; 380 380 /* 381 381 * If we aren't trying to switch on quota enforcement, we are done. 382 382 */ ··· 387 387 ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) != 388 388 (mp->m_qflags & XFS_GQUOTA_ACCT)) || 389 389 (flags & XFS_ALL_QUOTA_ENFD) == 0) 390 - return (0); 390 + return 0; 391 391 392 392 if (! XFS_IS_QUOTA_RUNNING(mp)) 393 - return XFS_ERROR(ESRCH); 393 + return -ESRCH; 394 394 395 395 /* 396 396 * Switch on quota enforcement in core. ··· 399 399 mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD); 400 400 mutex_unlock(&mp->m_quotainfo->qi_quotaofflock); 401 401 402 - return (0); 402 + return 0; 403 403 } 404 404 405 405 ··· 426 426 if (!xfs_sb_version_hasquota(&mp->m_sb)) { 427 427 out->qs_uquota.qfs_ino = NULLFSINO; 428 428 out->qs_gquota.qfs_ino = NULLFSINO; 429 - return (0); 429 + return 0; 430 430 } 431 431 432 432 out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags & ··· 514 514 out->qs_uquota.qfs_ino = NULLFSINO; 515 515 out->qs_gquota.qfs_ino = NULLFSINO; 516 516 out->qs_pquota.qfs_ino = NULLFSINO; 517 - return (0); 517 + return 0; 518 518 } 519 519 520 520 out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags & ··· 595 595 xfs_qcnt_t hard, soft; 596 596 597 597 if (newlim->d_fieldmask & ~XFS_DQ_MASK) 598 - return EINVAL; 598 + return -EINVAL; 599 599 if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0) 600 600 return 0; 601 601 ··· 615 615 */ 616 616 error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp); 617 617 if (error) { 618 - ASSERT(error != ENOENT); 618 + ASSERT(error != -ENOENT); 619 619 goto out_unlock; 620 620 } 621 621 xfs_dqunlock(dqp); ··· 758 758 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_equotaoff, 0, 0); 759 759 if (error) { 760 760 xfs_trans_cancel(tp, 0); 761 - 
return (error); 761 + return error; 762 762 } 763 763 764 764 qoffi = xfs_trans_get_qoff_item(tp, startqoff, ··· 772 772 */ 773 773 xfs_trans_set_sync(tp); 774 774 error = xfs_trans_commit(tp, 0); 775 - return (error); 775 + return error; 776 776 } 777 777 778 778 ··· 822 822 spin_unlock(&mp->m_sb_lock); 823 823 } 824 824 *qoffstartp = qoffi; 825 - return (error); 825 + return error; 826 826 } 827 827 828 828 ··· 850 850 * our utility programs are concerned. 851 851 */ 852 852 if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) { 853 - error = XFS_ERROR(ENOENT); 853 + error = -ENOENT; 854 854 goto out_put; 855 855 } 856 856 ··· 953 953 uflags |= FS_QUOTA_GDQ_ENFD; 954 954 if (flags & XFS_PQUOTA_ENFD) 955 955 uflags |= FS_QUOTA_PDQ_ENFD; 956 - return (uflags); 956 + return uflags; 957 957 } 958 958 959 959
-2
fs/xfs/xfs_quota_defs.h fs/xfs/libxfs/xfs_quota_defs.h
··· 98 98 #define XFS_IS_QUOTA_ON(mp) ((mp)->m_qflags & (XFS_UQUOTA_ACTIVE | \ 99 99 XFS_GQUOTA_ACTIVE | \ 100 100 XFS_PQUOTA_ACTIVE)) 101 - #define XFS_IS_OQUOTA_ON(mp) ((mp)->m_qflags & (XFS_GQUOTA_ACTIVE | \ 102 - XFS_PQUOTA_ACTIVE)) 103 101 #define XFS_IS_UQUOTA_ON(mp) ((mp)->m_qflags & XFS_UQUOTA_ACTIVE) 104 102 #define XFS_IS_GQUOTA_ON(mp) ((mp)->m_qflags & XFS_GQUOTA_ACTIVE) 105 103 #define XFS_IS_PQUOTA_ON(mp) ((mp)->m_qflags & XFS_PQUOTA_ACTIVE)
+10 -10
fs/xfs/xfs_quotaops.c
··· 51 51 52 52 if (!XFS_IS_QUOTA_RUNNING(mp)) 53 53 return -ENOSYS; 54 - return -xfs_qm_scall_getqstat(mp, fqs); 54 + return xfs_qm_scall_getqstat(mp, fqs); 55 55 } 56 56 57 57 STATIC int ··· 63 63 64 64 if (!XFS_IS_QUOTA_RUNNING(mp)) 65 65 return -ENOSYS; 66 - return -xfs_qm_scall_getqstatv(mp, fqs); 66 + return xfs_qm_scall_getqstatv(mp, fqs); 67 67 } 68 68 69 69 STATIC int ··· 95 95 96 96 switch (op) { 97 97 case Q_XQUOTAON: 98 - return -xfs_qm_scall_quotaon(mp, flags); 98 + return xfs_qm_scall_quotaon(mp, flags); 99 99 case Q_XQUOTAOFF: 100 100 if (!XFS_IS_QUOTA_ON(mp)) 101 101 return -EINVAL; 102 - return -xfs_qm_scall_quotaoff(mp, flags); 102 + return xfs_qm_scall_quotaoff(mp, flags); 103 103 } 104 104 105 105 return -EINVAL; ··· 112 112 { 113 113 struct xfs_mount *mp = XFS_M(sb); 114 114 unsigned int flags = 0; 115 - 115 + 116 116 if (sb->s_flags & MS_RDONLY) 117 117 return -EROFS; 118 118 ··· 123 123 flags |= XFS_DQ_USER; 124 124 if (uflags & FS_GROUP_QUOTA) 125 125 flags |= XFS_DQ_GROUP; 126 - if (uflags & FS_USER_QUOTA) 126 + if (uflags & FS_PROJ_QUOTA) 127 127 flags |= XFS_DQ_PROJ; 128 128 129 - return -xfs_qm_scall_trunc_qfiles(mp, flags); 130 - } 129 + return xfs_qm_scall_trunc_qfiles(mp, flags); 130 + } 131 131 132 132 STATIC int 133 133 xfs_fs_get_dqblk( ··· 142 142 if (!XFS_IS_QUOTA_ON(mp)) 143 143 return -ESRCH; 144 144 145 - return -xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid), 145 + return xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid), 146 146 xfs_quota_type(qid.type), fdq); 147 147 } 148 148 ··· 161 161 if (!XFS_IS_QUOTA_ON(mp)) 162 162 return -ESRCH; 163 163 164 - return -xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid), 164 + return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid), 165 165 xfs_quota_type(qid.type), fdq); 166 166 } 167 167
+12 -12
fs/xfs/xfs_rtalloc.c
··· 863 863 XFS_BMAPI_METADATA, &firstblock, 864 864 resblks, &map, &nmap, &flist); 865 865 if (!error && nmap < 1) 866 - error = XFS_ERROR(ENOSPC); 866 + error = -ENOSPC; 867 867 if (error) 868 868 goto error_cancel; 869 869 /* ··· 903 903 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, 904 904 mp->m_bsize, 0); 905 905 if (bp == NULL) { 906 - error = XFS_ERROR(EIO); 906 + error = -EIO; 907 907 error_cancel: 908 908 xfs_trans_cancel(tp, cancelflags); 909 909 goto error; ··· 944 944 xfs_buf_t *bp; /* temporary buffer */ 945 945 int error; /* error return value */ 946 946 xfs_mount_t *nmp; /* new (fake) mount structure */ 947 - xfs_drfsbno_t nrblocks; /* new number of realtime blocks */ 947 + xfs_rfsblock_t nrblocks; /* new number of realtime blocks */ 948 948 xfs_extlen_t nrbmblocks; /* new number of rt bitmap blocks */ 949 - xfs_drtbno_t nrextents; /* new number of realtime extents */ 949 + xfs_rtblock_t nrextents; /* new number of realtime extents */ 950 950 uint8_t nrextslog; /* new log2 of sb_rextents */ 951 951 xfs_extlen_t nrsumblocks; /* new number of summary blocks */ 952 952 uint nrsumlevels; /* new rt summary levels */ ··· 962 962 * Initial error checking. 963 963 */ 964 964 if (!capable(CAP_SYS_ADMIN)) 965 - return XFS_ERROR(EPERM); 965 + return -EPERM; 966 966 if (mp->m_rtdev_targp == NULL || mp->m_rbmip == NULL || 967 967 (nrblocks = in->newblocks) <= sbp->sb_rblocks || 968 968 (sbp->sb_rblocks && (in->extsize != sbp->sb_rextsize))) 969 - return XFS_ERROR(EINVAL); 969 + return -EINVAL; 970 970 if ((error = xfs_sb_validate_fsb_count(sbp, nrblocks))) 971 971 return error; 972 972 /* ··· 976 976 XFS_FSB_TO_BB(mp, nrblocks - 1), 977 977 XFS_FSB_TO_BB(mp, 1), 0, NULL); 978 978 if (!bp) 979 - return EIO; 979 + return -EIO; 980 980 if (bp->b_error) { 981 981 error = bp->b_error; 982 982 xfs_buf_relse(bp); ··· 1001 1001 * since we'll log basically the whole summary file at once. 
1002 1002 */ 1003 1003 if (nrsumblocks > (mp->m_sb.sb_logblocks >> 1)) 1004 - return XFS_ERROR(EINVAL); 1004 + return -EINVAL; 1005 1005 /* 1006 1006 * Get the old block counts for bitmap and summary inodes. 1007 1007 * These can't change since other growfs callers are locked out. ··· 1208 1208 len, &sumbp, &sb, prod, &r); 1209 1209 break; 1210 1210 default: 1211 - error = EIO; 1211 + error = -EIO; 1212 1212 ASSERT(0); 1213 1213 } 1214 1214 if (error) ··· 1247 1247 if (mp->m_rtdev_targp == NULL) { 1248 1248 xfs_warn(mp, 1249 1249 "Filesystem has a realtime volume, use rtdev=device option"); 1250 - return XFS_ERROR(ENODEV); 1250 + return -ENODEV; 1251 1251 } 1252 1252 mp->m_rsumlevels = sbp->sb_rextslog + 1; 1253 1253 mp->m_rsumsize = ··· 1263 1263 xfs_warn(mp, "realtime mount -- %llu != %llu", 1264 1264 (unsigned long long) XFS_BB_TO_FSB(mp, d), 1265 1265 (unsigned long long) mp->m_sb.sb_rblocks); 1266 - return XFS_ERROR(EFBIG); 1266 + return -EFBIG; 1267 1267 } 1268 1268 bp = xfs_buf_read_uncached(mp->m_rtdev_targp, 1269 1269 d - XFS_FSB_TO_BB(mp, 1), ··· 1272 1272 xfs_warn(mp, "realtime device size check failed"); 1273 1273 if (bp) 1274 1274 xfs_buf_relse(bp); 1275 - return EIO; 1275 + return -EIO; 1276 1276 } 1277 1277 xfs_buf_relse(bp); 1278 1278 return 0;
+1 -1
fs/xfs/xfs_rtalloc.h
··· 132 132 return 0; 133 133 134 134 xfs_warn(mp, "Not built with CONFIG_XFS_RT"); 135 - return ENOSYS; 135 + return -ENOSYS; 136 136 } 137 137 # define xfs_rtmount_inodes(m) (((mp)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS)) 138 138 # define xfs_rtunmount_inodes(m)
fs/xfs/xfs_rtbitmap.c fs/xfs/libxfs/xfs_rtbitmap.c
+36 -20
fs/xfs/xfs_sb.c fs/xfs/libxfs/xfs_sb.c
··· 186 186 */ 187 187 if (sbp->sb_magicnum != XFS_SB_MAGIC) { 188 188 xfs_warn(mp, "bad magic number"); 189 - return XFS_ERROR(EWRONGFS); 189 + return -EWRONGFS; 190 190 } 191 191 192 192 193 193 if (!xfs_sb_good_version(sbp)) { 194 194 xfs_warn(mp, "bad version"); 195 - return XFS_ERROR(EWRONGFS); 195 + return -EWRONGFS; 196 196 } 197 197 198 198 /* ··· 220 220 xfs_warn(mp, 221 221 "Attempted to mount read-only compatible filesystem read-write.\n" 222 222 "Filesystem can only be safely mounted read only."); 223 - return XFS_ERROR(EINVAL); 223 + return -EINVAL; 224 224 } 225 225 } 226 226 if (xfs_sb_has_incompat_feature(sbp, ··· 230 230 "Filesystem can not be safely mounted by this kernel.", 231 231 (sbp->sb_features_incompat & 232 232 XFS_SB_FEAT_INCOMPAT_UNKNOWN)); 233 - return XFS_ERROR(EINVAL); 233 + return -EINVAL; 234 234 } 235 235 } 236 236 ··· 238 238 if (sbp->sb_qflags & (XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD)) { 239 239 xfs_notice(mp, 240 240 "Version 5 of Super block has XFS_OQUOTA bits."); 241 - return XFS_ERROR(EFSCORRUPTED); 241 + return -EFSCORRUPTED; 242 242 } 243 243 } else if (sbp->sb_qflags & (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD | 244 244 XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD)) { 245 245 xfs_notice(mp, 246 246 "Superblock earlier than Version 5 has XFS_[PQ]UOTA_{ENFD|CHKD} bits."); 247 - return XFS_ERROR(EFSCORRUPTED); 247 + return -EFSCORRUPTED; 248 248 } 249 249 250 250 if (unlikely( ··· 252 252 xfs_warn(mp, 253 253 "filesystem is marked as having an external log; " 254 254 "specify logdev on the mount command line."); 255 - return XFS_ERROR(EINVAL); 255 + return -EINVAL; 256 256 } 257 257 258 258 if (unlikely( ··· 260 260 xfs_warn(mp, 261 261 "filesystem is marked as having an internal log; " 262 262 "do not specify logdev on the mount command line."); 263 - return XFS_ERROR(EINVAL); 263 + return -EINVAL; 264 264 } 265 265 266 266 /* ··· 294 294 sbp->sb_dblocks < XFS_MIN_DBLOCKS(sbp) || 295 295 sbp->sb_shared_vn != 0)) { 296 296 xfs_notice(mp, "SB 
sanity check failed"); 297 - return XFS_ERROR(EFSCORRUPTED); 297 + return -EFSCORRUPTED; 298 298 } 299 299 300 300 /* ··· 305 305 "File system with blocksize %d bytes. " 306 306 "Only pagesize (%ld) or less will currently work.", 307 307 sbp->sb_blocksize, PAGE_SIZE); 308 - return XFS_ERROR(ENOSYS); 308 + return -ENOSYS; 309 309 } 310 310 311 311 /* ··· 320 320 default: 321 321 xfs_warn(mp, "inode size of %d bytes not supported", 322 322 sbp->sb_inodesize); 323 - return XFS_ERROR(ENOSYS); 323 + return -ENOSYS; 324 324 } 325 325 326 326 if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) || 327 327 xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) { 328 328 xfs_warn(mp, 329 329 "file system too large to be mounted on this system."); 330 - return XFS_ERROR(EFBIG); 330 + return -EFBIG; 331 331 } 332 332 333 333 if (check_inprogress && sbp->sb_inprogress) { 334 334 xfs_warn(mp, "Offline file system operation in progress!"); 335 - return XFS_ERROR(EFSCORRUPTED); 335 + return -EFSCORRUPTED; 336 336 } 337 337 return 0; 338 338 } ··· 386 386 } 387 387 } 388 388 389 - void 390 - xfs_sb_from_disk( 389 + static void 390 + __xfs_sb_from_disk( 391 391 struct xfs_sb *to, 392 - xfs_dsb_t *from) 392 + xfs_dsb_t *from, 393 + bool convert_xquota) 393 394 { 394 395 to->sb_magicnum = be32_to_cpu(from->sb_magicnum); 395 396 to->sb_blocksize = be32_to_cpu(from->sb_blocksize); ··· 446 445 to->sb_pad = 0; 447 446 to->sb_pquotino = be64_to_cpu(from->sb_pquotino); 448 447 to->sb_lsn = be64_to_cpu(from->sb_lsn); 448 + /* Convert on-disk flags to in-memory flags? 
*/ 449 + if (convert_xquota) 450 + xfs_sb_quota_from_disk(to); 451 + } 452 + 453 + void 454 + xfs_sb_from_disk( 455 + struct xfs_sb *to, 456 + xfs_dsb_t *from) 457 + { 458 + __xfs_sb_from_disk(to, from, true); 449 459 } 450 460 451 461 static inline void ··· 589 577 struct xfs_mount *mp = bp->b_target->bt_mount; 590 578 struct xfs_sb sb; 591 579 592 - xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp)); 580 + /* 581 + * Use call variant which doesn't convert quota flags from disk 582 + * format, because xfs_mount_validate_sb checks the on-disk flags. 583 + */ 584 + __xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp), false); 593 585 594 586 /* 595 587 * Only check the in progress field for the primary superblock as ··· 636 620 /* Only fail bad secondaries on a known V5 filesystem */ 637 621 if (bp->b_bn == XFS_SB_DADDR || 638 622 xfs_sb_version_hascrc(&mp->m_sb)) { 639 - error = EFSBADCRC; 623 + error = -EFSBADCRC; 640 624 goto out_error; 641 625 } 642 626 } ··· 646 630 out_error: 647 631 if (error) { 648 632 xfs_buf_ioerror(bp, error); 649 - if (error == EFSCORRUPTED || error == EFSBADCRC) 633 + if (error == -EFSCORRUPTED || error == -EFSBADCRC) 650 634 xfs_verifier_error(bp); 651 635 } 652 636 } ··· 669 653 return; 670 654 } 671 655 /* quietly fail */ 672 - xfs_buf_ioerror(bp, EWRONGFS); 656 + xfs_buf_ioerror(bp, -EWRONGFS); 673 657 } 674 658 675 659 static void
+4 -4
fs/xfs/xfs_sb.h fs/xfs/libxfs/xfs_sb.h
··· 87 87 typedef struct xfs_sb { 88 88 __uint32_t sb_magicnum; /* magic number == XFS_SB_MAGIC */ 89 89 __uint32_t sb_blocksize; /* logical block size, bytes */ 90 - xfs_drfsbno_t sb_dblocks; /* number of data blocks */ 91 - xfs_drfsbno_t sb_rblocks; /* number of realtime blocks */ 92 - xfs_drtbno_t sb_rextents; /* number of realtime extents */ 90 + xfs_rfsblock_t sb_dblocks; /* number of data blocks */ 91 + xfs_rfsblock_t sb_rblocks; /* number of realtime blocks */ 92 + xfs_rtblock_t sb_rextents; /* number of realtime extents */ 93 93 uuid_t sb_uuid; /* file system unique id */ 94 - xfs_dfsbno_t sb_logstart; /* starting block of log if internal */ 94 + xfs_fsblock_t sb_logstart; /* starting block of log if internal */ 95 95 xfs_ino_t sb_rootino; /* root inode number */ 96 96 xfs_ino_t sb_rbmino; /* bitmap inode for realtime extents */ 97 97 xfs_ino_t sb_rsumino; /* summary inode for rt bitmap */
fs/xfs/xfs_shared.h fs/xfs/libxfs/xfs_shared.h
+71 -61
fs/xfs/xfs_super.c
··· 61 61 static const struct super_operations xfs_super_operations; 62 62 static kmem_zone_t *xfs_ioend_zone; 63 63 mempool_t *xfs_ioend_pool; 64 + struct kset *xfs_kset; 64 65 65 66 #define MNTOPT_LOGBUFS "logbufs" /* number of XFS log buffers */ 66 67 #define MNTOPT_LOGBSIZE "logbsize" /* size of XFS log buffers */ ··· 186 185 */ 187 186 mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL); 188 187 if (!mp->m_fsname) 189 - return ENOMEM; 188 + return -ENOMEM; 190 189 mp->m_fsname_len = strlen(mp->m_fsname) + 1; 191 190 192 191 /* ··· 205 204 */ 206 205 mp->m_flags |= XFS_MOUNT_BARRIER; 207 206 mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE; 208 - #if !XFS_BIG_INUMS 209 - mp->m_flags |= XFS_MOUNT_SMALL_INUMS; 210 - #endif 211 207 212 208 /* 213 209 * These can be overridden by the mount option parsing. ··· 225 227 if (!value || !*value) { 226 228 xfs_warn(mp, "%s option requires an argument", 227 229 this_char); 228 - return EINVAL; 230 + return -EINVAL; 229 231 } 230 232 if (kstrtoint(value, 10, &mp->m_logbufs)) 231 - return EINVAL; 233 + return -EINVAL; 232 234 } else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) { 233 235 if (!value || !*value) { 234 236 xfs_warn(mp, "%s option requires an argument", 235 237 this_char); 236 - return EINVAL; 238 + return -EINVAL; 237 239 } 238 240 if (suffix_kstrtoint(value, 10, &mp->m_logbsize)) 239 - return EINVAL; 241 + return -EINVAL; 240 242 } else if (!strcmp(this_char, MNTOPT_LOGDEV)) { 241 243 if (!value || !*value) { 242 244 xfs_warn(mp, "%s option requires an argument", 243 245 this_char); 244 - return EINVAL; 246 + return -EINVAL; 245 247 } 246 248 mp->m_logname = kstrndup(value, MAXNAMELEN, GFP_KERNEL); 247 249 if (!mp->m_logname) 248 - return ENOMEM; 250 + return -ENOMEM; 249 251 } else if (!strcmp(this_char, MNTOPT_MTPT)) { 250 252 xfs_warn(mp, "%s option not allowed on this system", 251 253 this_char); 252 - return EINVAL; 254 + return -EINVAL; 253 255 } else if (!strcmp(this_char, MNTOPT_RTDEV)) { 254 256 if (!value || 
!*value) { 255 257 xfs_warn(mp, "%s option requires an argument", 256 258 this_char); 257 - return EINVAL; 259 + return -EINVAL; 258 260 } 259 261 mp->m_rtname = kstrndup(value, MAXNAMELEN, GFP_KERNEL); 260 262 if (!mp->m_rtname) 261 - return ENOMEM; 263 + return -ENOMEM; 262 264 } else if (!strcmp(this_char, MNTOPT_BIOSIZE)) { 263 265 if (!value || !*value) { 264 266 xfs_warn(mp, "%s option requires an argument", 265 267 this_char); 266 - return EINVAL; 268 + return -EINVAL; 267 269 } 268 270 if (kstrtoint(value, 10, &iosize)) 269 - return EINVAL; 271 + return -EINVAL; 270 272 iosizelog = ffs(iosize) - 1; 271 273 } else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) { 272 274 if (!value || !*value) { 273 275 xfs_warn(mp, "%s option requires an argument", 274 276 this_char); 275 - return EINVAL; 277 + return -EINVAL; 276 278 } 277 279 if (suffix_kstrtoint(value, 10, &iosize)) 278 - return EINVAL; 280 + return -EINVAL; 279 281 iosizelog = ffs(iosize) - 1; 280 282 } else if (!strcmp(this_char, MNTOPT_GRPID) || 281 283 !strcmp(this_char, MNTOPT_BSDGROUPS)) { ··· 295 297 if (!value || !*value) { 296 298 xfs_warn(mp, "%s option requires an argument", 297 299 this_char); 298 - return EINVAL; 300 + return -EINVAL; 299 301 } 300 302 if (kstrtoint(value, 10, &dsunit)) 301 - return EINVAL; 303 + return -EINVAL; 302 304 } else if (!strcmp(this_char, MNTOPT_SWIDTH)) { 303 305 if (!value || !*value) { 304 306 xfs_warn(mp, "%s option requires an argument", 305 307 this_char); 306 - return EINVAL; 308 + return -EINVAL; 307 309 } 308 310 if (kstrtoint(value, 10, &dswidth)) 309 - return EINVAL; 311 + return -EINVAL; 310 312 } else if (!strcmp(this_char, MNTOPT_32BITINODE)) { 311 313 mp->m_flags |= XFS_MOUNT_SMALL_INUMS; 312 314 } else if (!strcmp(this_char, MNTOPT_64BITINODE)) { 313 315 mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS; 314 - #if !XFS_BIG_INUMS 315 - xfs_warn(mp, "%s option not allowed on this system", 316 - this_char); 317 - return EINVAL; 318 - #endif 319 316 } else if 
(!strcmp(this_char, MNTOPT_NOUUID)) { 320 317 mp->m_flags |= XFS_MOUNT_NOUUID; 321 318 } else if (!strcmp(this_char, MNTOPT_BARRIER)) { ··· 383 390 "irixsgid is now a sysctl(2) variable, option is deprecated."); 384 391 } else { 385 392 xfs_warn(mp, "unknown mount option [%s].", this_char); 386 - return EINVAL; 393 + return -EINVAL; 387 394 } 388 395 } 389 396 ··· 393 400 if ((mp->m_flags & XFS_MOUNT_NORECOVERY) && 394 401 !(mp->m_flags & XFS_MOUNT_RDONLY)) { 395 402 xfs_warn(mp, "no-recovery mounts must be read-only."); 396 - return EINVAL; 403 + return -EINVAL; 397 404 } 398 405 399 406 if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) { 400 407 xfs_warn(mp, 401 408 "sunit and swidth options incompatible with the noalign option"); 402 - return EINVAL; 409 + return -EINVAL; 403 410 } 404 411 405 412 #ifndef CONFIG_XFS_QUOTA 406 413 if (XFS_IS_QUOTA_RUNNING(mp)) { 407 414 xfs_warn(mp, "quota support not available in this kernel."); 408 - return EINVAL; 415 + return -EINVAL; 409 416 } 410 417 #endif 411 418 412 419 if ((dsunit && !dswidth) || (!dsunit && dswidth)) { 413 420 xfs_warn(mp, "sunit and swidth must be specified together"); 414 - return EINVAL; 421 + return -EINVAL; 415 422 } 416 423 417 424 if (dsunit && (dswidth % dsunit != 0)) { 418 425 xfs_warn(mp, 419 426 "stripe width (%d) must be a multiple of the stripe unit (%d)", 420 427 dswidth, dsunit); 421 - return EINVAL; 428 + return -EINVAL; 422 429 } 423 430 424 431 done: ··· 439 446 mp->m_logbufs > XLOG_MAX_ICLOGS)) { 440 447 xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]", 441 448 mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS); 442 - return XFS_ERROR(EINVAL); 449 + return -EINVAL; 443 450 } 444 451 if (mp->m_logbsize != -1 && 445 452 mp->m_logbsize != 0 && ··· 449 456 xfs_warn(mp, 450 457 "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]", 451 458 mp->m_logbsize); 452 - return XFS_ERROR(EINVAL); 459 + return -EINVAL; 453 460 } 454 461 455 462 if (iosizelog) { ··· 458 465 
xfs_warn(mp, "invalid log iosize: %d [not %d-%d]", 459 466 iosizelog, XFS_MIN_IO_LOG, 460 467 XFS_MAX_IO_LOG); 461 - return XFS_ERROR(EINVAL); 468 + return -EINVAL; 462 469 } 463 470 464 471 mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE; ··· 590 597 return (((__uint64_t)pagefactor) << bitshift) - 1; 591 598 } 592 599 600 + /* 601 + * xfs_set_inode32() and xfs_set_inode64() are passed an agcount 602 + * because in the growfs case, mp->m_sb.sb_agcount is not updated 603 + * yet to the potentially higher ag count. 604 + */ 593 605 xfs_agnumber_t 594 - xfs_set_inode32(struct xfs_mount *mp) 606 + xfs_set_inode32(struct xfs_mount *mp, xfs_agnumber_t agcount) 595 607 { 596 608 xfs_agnumber_t index = 0; 597 609 xfs_agnumber_t maxagi = 0; 598 610 xfs_sb_t *sbp = &mp->m_sb; 599 611 xfs_agnumber_t max_metadata; 600 - xfs_agino_t agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks -1, 0); 601 - xfs_ino_t ino = XFS_AGINO_TO_INO(mp, sbp->sb_agcount -1, agino); 612 + xfs_agino_t agino; 613 + xfs_ino_t ino; 602 614 xfs_perag_t *pag; 603 615 604 616 /* Calculate how much should be reserved for inodes to meet ··· 618 620 do_div(icount, sbp->sb_agblocks); 619 621 max_metadata = icount; 620 622 } else { 621 - max_metadata = sbp->sb_agcount; 623 + max_metadata = agcount; 622 624 } 623 625 624 - for (index = 0; index < sbp->sb_agcount; index++) { 626 + agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0); 627 + 628 + for (index = 0; index < agcount; index++) { 625 629 ino = XFS_AGINO_TO_INO(mp, index, agino); 626 630 627 631 if (ino > XFS_MAXINUMBER_32) { ··· 648 648 } 649 649 650 650 xfs_agnumber_t 651 - xfs_set_inode64(struct xfs_mount *mp) 651 + xfs_set_inode64(struct xfs_mount *mp, xfs_agnumber_t agcount) 652 652 { 653 653 xfs_agnumber_t index = 0; 654 654 655 - for (index = 0; index < mp->m_sb.sb_agcount; index++) { 655 + for (index = 0; index < agcount; index++) { 656 656 struct xfs_perag *pag; 657 657 658 658 pag = xfs_perag_get(mp, index); ··· 686 686 xfs_warn(mp, "Invalid device [%s], 
error=%d\n", name, error); 687 687 } 688 688 689 - return -error; 689 + return error; 690 690 } 691 691 692 692 STATIC void ··· 756 756 if (rtdev == ddev || rtdev == logdev) { 757 757 xfs_warn(mp, 758 758 "Cannot mount filesystem with identical rtdev and ddev/logdev."); 759 - error = EINVAL; 759 + error = -EINVAL; 760 760 goto out_close_rtdev; 761 761 } 762 762 } ··· 764 764 /* 765 765 * Setup xfs_mount buffer target pointers 766 766 */ 767 - error = ENOMEM; 767 + error = -ENOMEM; 768 768 mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev); 769 769 if (!mp->m_ddev_targp) 770 770 goto out_close_rtdev; ··· 1188 1188 char *options) 1189 1189 { 1190 1190 struct xfs_mount *mp = XFS_M(sb); 1191 + xfs_sb_t *sbp = &mp->m_sb; 1191 1192 substring_t args[MAX_OPT_ARGS]; 1192 1193 char *p; 1193 1194 int error; ··· 1209 1208 mp->m_flags &= ~XFS_MOUNT_BARRIER; 1210 1209 break; 1211 1210 case Opt_inode64: 1212 - mp->m_maxagi = xfs_set_inode64(mp); 1211 + mp->m_maxagi = xfs_set_inode64(mp, sbp->sb_agcount); 1213 1212 break; 1214 1213 case Opt_inode32: 1215 - mp->m_maxagi = xfs_set_inode32(mp); 1214 + mp->m_maxagi = xfs_set_inode32(mp, sbp->sb_agcount); 1216 1215 break; 1217 1216 default: 1218 1217 /* ··· 1296 1295 1297 1296 xfs_save_resvblks(mp); 1298 1297 xfs_quiesce_attr(mp); 1299 - return -xfs_fs_log_dummy(mp); 1298 + return xfs_fs_log_dummy(mp); 1300 1299 } 1301 1300 1302 1301 STATIC int ··· 1315 1314 struct seq_file *m, 1316 1315 struct dentry *root) 1317 1316 { 1318 - return -xfs_showargs(XFS_M(root->d_sb), m); 1317 + return xfs_showargs(XFS_M(root->d_sb), m); 1319 1318 } 1320 1319 1321 1320 /* ··· 1337 1336 mp->m_logbsize < mp->m_sb.sb_logsunit) { 1338 1337 xfs_warn(mp, 1339 1338 "logbuf size must be greater than or equal to log stripe size"); 1340 - return XFS_ERROR(EINVAL); 1339 + return -EINVAL; 1341 1340 } 1342 1341 } else { 1343 1342 /* Fail a mount if the logbuf is larger than 32K */ 1344 1343 if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) { 1345 1344 xfs_warn(mp, 1346 1345 
"logbuf size for version 1 logs must be 16K or 32K"); 1347 - return XFS_ERROR(EINVAL); 1346 + return -EINVAL; 1348 1347 } 1349 1348 } 1350 1349 ··· 1356 1355 xfs_warn(mp, 1357 1356 "Cannot mount a V5 filesystem as %s. %s is always enabled for V5 filesystems.", 1358 1357 MNTOPT_NOATTR2, MNTOPT_ATTR2); 1359 - return XFS_ERROR(EINVAL); 1358 + return -EINVAL; 1360 1359 } 1361 1360 1362 1361 /* ··· 1373 1372 if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) { 1374 1373 xfs_warn(mp, 1375 1374 "cannot mount a read-only filesystem as read-write"); 1376 - return XFS_ERROR(EROFS); 1375 + return -EROFS; 1377 1376 } 1378 1377 1379 1378 if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) && ··· 1381 1380 !xfs_sb_version_has_pquotino(&mp->m_sb)) { 1382 1381 xfs_warn(mp, 1383 1382 "Super block does not support project and group quota together"); 1384 - return XFS_ERROR(EINVAL); 1383 + return -EINVAL; 1385 1384 } 1386 1385 1387 1386 return 0; ··· 1395 1394 { 1396 1395 struct inode *root; 1397 1396 struct xfs_mount *mp = NULL; 1398 - int flags = 0, error = ENOMEM; 1397 + int flags = 0, error = -ENOMEM; 1399 1398 1400 1399 mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL); 1401 1400 if (!mp) ··· 1429 1428 if (error) 1430 1429 goto out_free_fsname; 1431 1430 1432 - error = -xfs_init_mount_workqueues(mp); 1431 + error = xfs_init_mount_workqueues(mp); 1433 1432 if (error) 1434 1433 goto out_close_devices; 1435 1434 1436 - error = -xfs_icsb_init_counters(mp); 1435 + error = xfs_icsb_init_counters(mp); 1437 1436 if (error) 1438 1437 goto out_destroy_workqueues; 1439 1438 ··· 1475 1474 1476 1475 root = igrab(VFS_I(mp->m_rootip)); 1477 1476 if (!root) { 1478 - error = ENOENT; 1477 + error = -ENOENT; 1479 1478 goto out_unmount; 1480 1479 } 1481 1480 sb->s_root = d_make_root(root); 1482 1481 if (!sb->s_root) { 1483 - error = ENOMEM; 1482 + error = -ENOMEM; 1484 1483 goto out_unmount; 1485 1484 } 1486 1485 ··· 1500 1499 xfs_free_fsname(mp); 1501 1500 kfree(mp); 1502 1501 out: 1503 
- return -error; 1502 + return error; 1504 1503 1505 1504 out_unmount: 1506 1505 xfs_filestream_unmount(mp); ··· 1762 1761 if (error) 1763 1762 goto out_cleanup_procfs; 1764 1763 1764 + xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj); 1765 + if (!xfs_kset) { 1766 + error = -ENOMEM; 1767 + goto out_sysctl_unregister;; 1768 + } 1769 + 1765 1770 error = xfs_qm_init(); 1766 1771 if (error) 1767 - goto out_sysctl_unregister; 1772 + goto out_kset_unregister; 1768 1773 1769 1774 error = register_filesystem(&xfs_fs_type); 1770 1775 if (error) ··· 1779 1772 1780 1773 out_qm_exit: 1781 1774 xfs_qm_exit(); 1775 + out_kset_unregister: 1776 + kset_unregister(xfs_kset); 1782 1777 out_sysctl_unregister: 1783 1778 xfs_sysctl_unregister(); 1784 1779 out_cleanup_procfs: ··· 1802 1793 { 1803 1794 xfs_qm_exit(); 1804 1795 unregister_filesystem(&xfs_fs_type); 1796 + kset_unregister(xfs_kset); 1805 1797 xfs_sysctl_unregister(); 1806 1798 xfs_cleanup_procfs(); 1807 1799 xfs_buf_terminate();
+2 -13
fs/xfs/xfs_super.h
··· 44 44 # define XFS_REALTIME_STRING 45 45 #endif 46 46 47 - #if XFS_BIG_BLKNOS 48 - # if XFS_BIG_INUMS 49 - # define XFS_BIGFS_STRING "large block/inode numbers, " 50 - # else 51 - # define XFS_BIGFS_STRING "large block numbers, " 52 - # endif 53 - #else 54 - # define XFS_BIGFS_STRING 55 - #endif 56 - 57 47 #ifdef DEBUG 58 48 # define XFS_DBG_STRING "debug" 59 49 #else ··· 54 64 #define XFS_BUILD_OPTIONS XFS_ACL_STRING \ 55 65 XFS_SECURITY_STRING \ 56 66 XFS_REALTIME_STRING \ 57 - XFS_BIGFS_STRING \ 58 67 XFS_DBG_STRING /* DBG must be last */ 59 68 60 69 struct xfs_inode; ··· 65 76 66 77 extern void xfs_flush_inodes(struct xfs_mount *mp); 67 78 extern void xfs_blkdev_issue_flush(struct xfs_buftarg *); 68 - extern xfs_agnumber_t xfs_set_inode32(struct xfs_mount *); 69 - extern xfs_agnumber_t xfs_set_inode64(struct xfs_mount *); 79 + extern xfs_agnumber_t xfs_set_inode32(struct xfs_mount *, xfs_agnumber_t agcount); 80 + extern xfs_agnumber_t xfs_set_inode64(struct xfs_mount *, xfs_agnumber_t agcount); 70 81 71 82 extern const struct export_operations xfs_export_operations; 72 83 extern const struct xattr_handler *xfs_xattr_handlers[];
+15 -15
fs/xfs/xfs_symlink.c
··· 76 76 bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), 0, 77 77 &xfs_symlink_buf_ops); 78 78 if (!bp) 79 - return XFS_ERROR(ENOMEM); 79 + return -ENOMEM; 80 80 error = bp->b_error; 81 81 if (error) { 82 82 xfs_buf_ioerror_alert(bp, __func__); 83 83 xfs_buf_relse(bp); 84 84 85 85 /* bad CRC means corrupted metadata */ 86 - if (error == EFSBADCRC) 87 - error = EFSCORRUPTED; 86 + if (error == -EFSBADCRC) 87 + error = -EFSCORRUPTED; 88 88 goto out; 89 89 } 90 90 byte_cnt = XFS_SYMLINK_BUF_SPACE(mp, byte_cnt); ··· 95 95 if (xfs_sb_version_hascrc(&mp->m_sb)) { 96 96 if (!xfs_symlink_hdr_ok(ip->i_ino, offset, 97 97 byte_cnt, bp)) { 98 - error = EFSCORRUPTED; 98 + error = -EFSCORRUPTED; 99 99 xfs_alert(mp, 100 100 "symlink header does not match required off/len/owner (0x%x/Ox%x,0x%llx)", 101 101 offset, byte_cnt, ip->i_ino); ··· 135 135 trace_xfs_readlink(ip); 136 136 137 137 if (XFS_FORCED_SHUTDOWN(mp)) 138 - return XFS_ERROR(EIO); 138 + return -EIO; 139 139 140 140 xfs_ilock(ip, XFS_ILOCK_SHARED); 141 141 ··· 148 148 __func__, (unsigned long long) ip->i_ino, 149 149 (long long) pathlen); 150 150 ASSERT(0); 151 - error = XFS_ERROR(EFSCORRUPTED); 151 + error = -EFSCORRUPTED; 152 152 goto out; 153 153 } 154 154 ··· 203 203 trace_xfs_symlink(dp, link_name); 204 204 205 205 if (XFS_FORCED_SHUTDOWN(mp)) 206 - return XFS_ERROR(EIO); 206 + return -EIO; 207 207 208 208 /* 209 209 * Check component lengths of the target path name. 
210 210 */ 211 211 pathlen = strlen(target_path); 212 212 if (pathlen >= MAXPATHLEN) /* total string too long */ 213 - return XFS_ERROR(ENAMETOOLONG); 213 + return -ENAMETOOLONG; 214 214 215 215 udqp = gdqp = NULL; 216 216 prid = xfs_get_initial_prid(dp); ··· 238 238 fs_blocks = xfs_symlink_blocks(mp, pathlen); 239 239 resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks); 240 240 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_symlink, resblks, 0); 241 - if (error == ENOSPC && fs_blocks == 0) { 241 + if (error == -ENOSPC && fs_blocks == 0) { 242 242 resblks = 0; 243 243 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_symlink, 0, 0); 244 244 } ··· 254 254 * Check whether the directory allows new symlinks or not. 255 255 */ 256 256 if (dp->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) { 257 - error = XFS_ERROR(EPERM); 257 + error = -EPERM; 258 258 goto error_return; 259 259 } 260 260 ··· 284 284 error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0, 285 285 prid, resblks > 0, &ip, NULL); 286 286 if (error) { 287 - if (error == ENOSPC) 287 + if (error == -ENOSPC) 288 288 goto error_return; 289 289 goto error1; 290 290 } ··· 348 348 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d, 349 349 BTOBB(byte_cnt), 0); 350 350 if (!bp) { 351 - error = ENOMEM; 351 + error = -ENOMEM; 352 352 goto error2; 353 353 } 354 354 bp->b_ops = &xfs_symlink_buf_ops; ··· 489 489 XFS_FSB_TO_DADDR(mp, mval[i].br_startblock), 490 490 XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0); 491 491 if (!bp) { 492 - error = ENOMEM; 492 + error = -ENOMEM; 493 493 goto error_bmap_cancel; 494 494 } 495 495 xfs_trans_binval(tp, bp); ··· 562 562 trace_xfs_inactive_symlink(ip); 563 563 564 564 if (XFS_FORCED_SHUTDOWN(mp)) 565 - return XFS_ERROR(EIO); 565 + return -EIO; 566 566 567 567 xfs_ilock(ip, XFS_ILOCK_EXCL); 568 568 ··· 580 580 __func__, (unsigned long long)ip->i_ino, pathlen); 581 581 xfs_iunlock(ip, XFS_ILOCK_EXCL); 582 582 ASSERT(0); 583 - return XFS_ERROR(EFSCORRUPTED); 583 + return -EFSCORRUPTED; 
584 584 } 585 585 586 586 if (ip->i_df.if_flags & XFS_IFINLINE) {
+165
fs/xfs/xfs_sysfs.c
··· 1 + /* 2 + * Copyright (c) 2014 Red Hat, Inc. 3 + * All Rights Reserved. 4 + * 5 + * This program is free software; you can redistribute it and/or 6 + * modify it under the terms of the GNU General Public License as 7 + * published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope that it would be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program; if not, write the Free Software Foundation, 16 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 + */ 18 + 19 + #include "xfs.h" 20 + #include "xfs_sysfs.h" 21 + #include "xfs_log_format.h" 22 + #include "xfs_log.h" 23 + #include "xfs_log_priv.h" 24 + 25 + struct xfs_sysfs_attr { 26 + struct attribute attr; 27 + ssize_t (*show)(char *buf, void *data); 28 + ssize_t (*store)(const char *buf, size_t count, void *data); 29 + }; 30 + 31 + static inline struct xfs_sysfs_attr * 32 + to_attr(struct attribute *attr) 33 + { 34 + return container_of(attr, struct xfs_sysfs_attr, attr); 35 + } 36 + 37 + #define XFS_SYSFS_ATTR_RW(name) \ 38 + static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RW(name) 39 + #define XFS_SYSFS_ATTR_RO(name) \ 40 + static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RO(name) 41 + 42 + #define ATTR_LIST(name) &xfs_sysfs_attr_##name.attr 43 + 44 + /* 45 + * xfs_mount kobject. This currently has no attributes and thus no need for show 46 + * and store helpers. The mp kobject serves as the per-mount parent object that 47 + * is identified by the fsname under sysfs. 
48 + */ 49 + 50 + struct kobj_type xfs_mp_ktype = { 51 + .release = xfs_sysfs_release, 52 + }; 53 + 54 + /* xlog */ 55 + 56 + STATIC ssize_t 57 + log_head_lsn_show( 58 + char *buf, 59 + void *data) 60 + { 61 + struct xlog *log = data; 62 + int cycle; 63 + int block; 64 + 65 + spin_lock(&log->l_icloglock); 66 + cycle = log->l_curr_cycle; 67 + block = log->l_curr_block; 68 + spin_unlock(&log->l_icloglock); 69 + 70 + return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, block); 71 + } 72 + XFS_SYSFS_ATTR_RO(log_head_lsn); 73 + 74 + STATIC ssize_t 75 + log_tail_lsn_show( 76 + char *buf, 77 + void *data) 78 + { 79 + struct xlog *log = data; 80 + int cycle; 81 + int block; 82 + 83 + xlog_crack_atomic_lsn(&log->l_tail_lsn, &cycle, &block); 84 + return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, block); 85 + } 86 + XFS_SYSFS_ATTR_RO(log_tail_lsn); 87 + 88 + STATIC ssize_t 89 + reserve_grant_head_show( 90 + char *buf, 91 + void *data) 92 + { 93 + struct xlog *log = data; 94 + int cycle; 95 + int bytes; 96 + 97 + xlog_crack_grant_head(&log->l_reserve_head.grant, &cycle, &bytes); 98 + return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, bytes); 99 + } 100 + XFS_SYSFS_ATTR_RO(reserve_grant_head); 101 + 102 + STATIC ssize_t 103 + write_grant_head_show( 104 + char *buf, 105 + void *data) 106 + { 107 + struct xlog *log = data; 108 + int cycle; 109 + int bytes; 110 + 111 + xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &bytes); 112 + return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, bytes); 113 + } 114 + XFS_SYSFS_ATTR_RO(write_grant_head); 115 + 116 + static struct attribute *xfs_log_attrs[] = { 117 + ATTR_LIST(log_head_lsn), 118 + ATTR_LIST(log_tail_lsn), 119 + ATTR_LIST(reserve_grant_head), 120 + ATTR_LIST(write_grant_head), 121 + NULL, 122 + }; 123 + 124 + static inline struct xlog * 125 + to_xlog(struct kobject *kobject) 126 + { 127 + struct xfs_kobj *kobj = to_kobj(kobject); 128 + return container_of(kobj, struct xlog, l_kobj); 129 + } 130 + 131 + STATIC ssize_t 132 + 
xfs_log_show( 133 + struct kobject *kobject, 134 + struct attribute *attr, 135 + char *buf) 136 + { 137 + struct xlog *log = to_xlog(kobject); 138 + struct xfs_sysfs_attr *xfs_attr = to_attr(attr); 139 + 140 + return xfs_attr->show ? xfs_attr->show(buf, log) : 0; 141 + } 142 + 143 + STATIC ssize_t 144 + xfs_log_store( 145 + struct kobject *kobject, 146 + struct attribute *attr, 147 + const char *buf, 148 + size_t count) 149 + { 150 + struct xlog *log = to_xlog(kobject); 151 + struct xfs_sysfs_attr *xfs_attr = to_attr(attr); 152 + 153 + return xfs_attr->store ? xfs_attr->store(buf, count, log) : 0; 154 + } 155 + 156 + static struct sysfs_ops xfs_log_ops = { 157 + .show = xfs_log_show, 158 + .store = xfs_log_store, 159 + }; 160 + 161 + struct kobj_type xfs_log_ktype = { 162 + .release = xfs_sysfs_release, 163 + .sysfs_ops = &xfs_log_ops, 164 + .default_attrs = xfs_log_attrs, 165 + };
+59
fs/xfs/xfs_sysfs.h
··· 1 + /* 2 + * Copyright (c) 2014 Red Hat, Inc. 3 + * All Rights Reserved. 4 + * 5 + * This program is free software; you can redistribute it and/or 6 + * modify it under the terms of the GNU General Public License as 7 + * published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope that it would be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program; if not, write the Free Software Foundation, 16 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 + */ 18 + 19 + #ifndef __XFS_SYSFS_H__ 20 + #define __XFS_SYSFS_H__ 21 + 22 + extern struct kobj_type xfs_mp_ktype; /* xfs_mount */ 23 + extern struct kobj_type xfs_log_ktype; /* xlog */ 24 + 25 + static inline struct xfs_kobj * 26 + to_kobj(struct kobject *kobject) 27 + { 28 + return container_of(kobject, struct xfs_kobj, kobject); 29 + } 30 + 31 + static inline void 32 + xfs_sysfs_release(struct kobject *kobject) 33 + { 34 + struct xfs_kobj *kobj = to_kobj(kobject); 35 + complete(&kobj->complete); 36 + } 37 + 38 + static inline int 39 + xfs_sysfs_init( 40 + struct xfs_kobj *kobj, 41 + struct kobj_type *ktype, 42 + struct xfs_kobj *parent_kobj, 43 + const char *name) 44 + { 45 + init_completion(&kobj->complete); 46 + return kobject_init_and_add(&kobj->kobject, ktype, 47 + &parent_kobj->kobject, "%s", name); 48 + } 49 + 50 + static inline void 51 + xfs_sysfs_del( 52 + struct xfs_kobj *kobj) 53 + { 54 + kobject_del(&kobj->kobject); 55 + kobject_put(&kobj->kobject); 56 + wait_for_completion(&kobj->complete); 57 + } 58 + 59 + #endif /* __XFS_SYSFS_H__ */
+5 -5
fs/xfs/xfs_trans.c
··· 190 190 -((int64_t)blocks), rsvd); 191 191 if (error != 0) { 192 192 current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS); 193 - return (XFS_ERROR(ENOSPC)); 193 + return -ENOSPC; 194 194 } 195 195 tp->t_blk_res += blocks; 196 196 } ··· 241 241 error = xfs_mod_incore_sb(tp->t_mountp, XFS_SBS_FREXTENTS, 242 242 -((int64_t)rtextents), rsvd); 243 243 if (error) { 244 - error = XFS_ERROR(ENOSPC); 244 + error = -ENOSPC; 245 245 goto undo_log; 246 246 } 247 247 tp->t_rtx_res += rtextents; ··· 874 874 goto out_unreserve; 875 875 876 876 if (XFS_FORCED_SHUTDOWN(mp)) { 877 - error = XFS_ERROR(EIO); 877 + error = -EIO; 878 878 goto out_unreserve; 879 879 } 880 880 ··· 917 917 if (tp->t_ticket) { 918 918 commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, log_flags); 919 919 if (commit_lsn == -1 && !error) 920 - error = XFS_ERROR(EIO); 920 + error = -EIO; 921 921 } 922 922 current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS); 923 923 xfs_trans_free_items(tp, NULLCOMMITLSN, error ? XFS_TRANS_ABORT : 0); ··· 1024 1024 */ 1025 1025 error = xfs_trans_commit(trans, 0); 1026 1026 if (error) 1027 - return (error); 1027 + return error; 1028 1028 1029 1029 trans = *tpp; 1030 1030
+2 -2
fs/xfs/xfs_trans_ail.c
··· 762 762 763 763 ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL); 764 764 if (!ailp) 765 - return ENOMEM; 765 + return -ENOMEM; 766 766 767 767 ailp->xa_mount = mp; 768 768 INIT_LIST_HEAD(&ailp->xa_ail); ··· 781 781 782 782 out_free_ailp: 783 783 kmem_free(ailp); 784 - return ENOMEM; 784 + return -ENOMEM; 785 785 } 786 786 787 787 void
+18 -19
fs/xfs/xfs_trans_buf.c
··· 166 166 ASSERT(atomic_read(&bip->bli_refcount) > 0); 167 167 bip->bli_recur++; 168 168 trace_xfs_trans_get_buf_recur(bip); 169 - return (bp); 169 + return bp; 170 170 } 171 171 172 172 bp = xfs_buf_get_map(target, map, nmaps, flags); ··· 178 178 179 179 _xfs_trans_bjoin(tp, bp, 1); 180 180 trace_xfs_trans_get_buf(bp->b_fspriv); 181 - return (bp); 181 + return bp; 182 182 } 183 183 184 184 /* ··· 201 201 * Default to just trying to lock the superblock buffer 202 202 * if tp is NULL. 203 203 */ 204 - if (tp == NULL) { 205 - return (xfs_getsb(mp, flags)); 206 - } 204 + if (tp == NULL) 205 + return xfs_getsb(mp, flags); 207 206 208 207 /* 209 208 * If the superblock buffer already has this transaction ··· 217 218 ASSERT(atomic_read(&bip->bli_refcount) > 0); 218 219 bip->bli_recur++; 219 220 trace_xfs_trans_getsb_recur(bip); 220 - return (bp); 221 + return bp; 221 222 } 222 223 223 224 bp = xfs_getsb(mp, flags); ··· 226 227 227 228 _xfs_trans_bjoin(tp, bp, 1); 228 229 trace_xfs_trans_getsb(bp->b_fspriv); 229 - return (bp); 230 + return bp; 230 231 } 231 232 232 233 #ifdef DEBUG ··· 266 267 bp = xfs_buf_read_map(target, map, nmaps, flags, ops); 267 268 if (!bp) 268 269 return (flags & XBF_TRYLOCK) ? 
269 - EAGAIN : XFS_ERROR(ENOMEM); 270 + -EAGAIN : -ENOMEM; 270 271 271 272 if (bp->b_error) { 272 273 error = bp->b_error; ··· 276 277 xfs_buf_relse(bp); 277 278 278 279 /* bad CRC means corrupted metadata */ 279 - if (error == EFSBADCRC) 280 - error = EFSCORRUPTED; 280 + if (error == -EFSBADCRC) 281 + error = -EFSCORRUPTED; 281 282 return error; 282 283 } 283 284 #ifdef DEBUG ··· 286 287 if (((xfs_req_num++) % xfs_error_mod) == 0) { 287 288 xfs_buf_relse(bp); 288 289 xfs_debug(mp, "Returning error!"); 289 - return XFS_ERROR(EIO); 290 + return -EIO; 290 291 } 291 292 } 292 293 } ··· 342 343 xfs_force_shutdown(tp->t_mountp, 343 344 SHUTDOWN_META_IO_ERROR); 344 345 /* bad CRC means corrupted metadata */ 345 - if (error == EFSBADCRC) 346 - error = EFSCORRUPTED; 346 + if (error == -EFSBADCRC) 347 + error = -EFSCORRUPTED; 347 348 return error; 348 349 } 349 350 } ··· 354 355 if (XFS_FORCED_SHUTDOWN(mp)) { 355 356 trace_xfs_trans_read_buf_shut(bp, _RET_IP_); 356 357 *bpp = NULL; 357 - return XFS_ERROR(EIO); 358 + return -EIO; 358 359 } 359 360 360 361 ··· 371 372 if (bp == NULL) { 372 373 *bpp = NULL; 373 374 return (flags & XBF_TRYLOCK) ? 374 - 0 : XFS_ERROR(ENOMEM); 375 + 0 : -ENOMEM; 375 376 } 376 377 if (bp->b_error) { 377 378 error = bp->b_error; ··· 383 384 xfs_buf_relse(bp); 384 385 385 386 /* bad CRC means corrupted metadata */ 386 - if (error == EFSBADCRC) 387 - error = EFSCORRUPTED; 387 + if (error == -EFSBADCRC) 388 + error = -EFSCORRUPTED; 388 389 return error; 389 390 } 390 391 #ifdef DEBUG ··· 395 396 SHUTDOWN_META_IO_ERROR); 396 397 xfs_buf_relse(bp); 397 398 xfs_debug(mp, "Returning trans error!"); 398 - return XFS_ERROR(EIO); 399 + return -EIO; 399 400 } 400 401 } 401 402 } ··· 413 414 trace_xfs_trans_read_buf_shut(bp, _RET_IP_); 414 415 xfs_buf_relse(bp); 415 416 *bpp = NULL; 416 - return XFS_ERROR(EIO); 417 + return -EIO; 417 418 } 418 419 419 420 /*
+2 -2
fs/xfs/xfs_trans_dquot.c
··· 722 722 error_return: 723 723 xfs_dqunlock(dqp); 724 724 if (flags & XFS_QMOPT_ENOSPC) 725 - return ENOSPC; 726 - return EDQUOT; 725 + return -ENOSPC; 726 + return -EDQUOT; 727 727 } 728 728 729 729
fs/xfs/xfs_trans_resv.c fs/xfs/libxfs/xfs_trans_resv.c
fs/xfs/xfs_trans_resv.h fs/xfs/libxfs/xfs_trans_resv.h
fs/xfs/xfs_trans_space.h fs/xfs/libxfs/xfs_trans_space.h
+2 -27
fs/xfs/xfs_types.h
··· 38 38 typedef __uint32_t xfs_dablk_t; /* dir/attr block number (in file) */ 39 39 typedef __uint32_t xfs_dahash_t; /* dir/attr hash value */ 40 40 41 - /* 42 - * These types are 64 bits on disk but are either 32 or 64 bits in memory. 43 - * Disk based types: 44 - */ 45 - typedef __uint64_t xfs_dfsbno_t; /* blockno in filesystem (agno|agbno) */ 46 - typedef __uint64_t xfs_drfsbno_t; /* blockno in filesystem (raw) */ 47 - typedef __uint64_t xfs_drtbno_t; /* extent (block) in realtime area */ 48 - typedef __uint64_t xfs_dfiloff_t; /* block number in a file */ 49 - typedef __uint64_t xfs_dfilblks_t; /* number of blocks in a file */ 50 - 51 - /* 52 - * Memory based types are conditional. 53 - */ 54 - #if XFS_BIG_BLKNOS 55 41 typedef __uint64_t xfs_fsblock_t; /* blockno in filesystem (agno|agbno) */ 56 42 typedef __uint64_t xfs_rfsblock_t; /* blockno in filesystem (raw) */ 57 43 typedef __uint64_t xfs_rtblock_t; /* extent (block) in realtime area */ 58 - typedef __int64_t xfs_srtblock_t; /* signed version of xfs_rtblock_t */ 59 - #else 60 - typedef __uint32_t xfs_fsblock_t; /* blockno in filesystem (agno|agbno) */ 61 - typedef __uint32_t xfs_rfsblock_t; /* blockno in filesystem (raw) */ 62 - typedef __uint32_t xfs_rtblock_t; /* extent (block) in realtime area */ 63 - typedef __int32_t xfs_srtblock_t; /* signed version of xfs_rtblock_t */ 64 - #endif 65 44 typedef __uint64_t xfs_fileoff_t; /* block number in a file */ 66 - typedef __int64_t xfs_sfiloff_t; /* signed block number in a file */ 67 45 typedef __uint64_t xfs_filblks_t; /* number of blocks in a file */ 68 46 47 + typedef __int64_t xfs_srtblock_t; /* signed version of xfs_rtblock_t */ 48 + typedef __int64_t xfs_sfiloff_t; /* signed block number in a file */ 69 49 70 50 /* 71 51 * Null values for the types. 
72 52 */ 73 - #define NULLDFSBNO ((xfs_dfsbno_t)-1) 74 - #define NULLDRFSBNO ((xfs_drfsbno_t)-1) 75 - #define NULLDRTBNO ((xfs_drtbno_t)-1) 76 - #define NULLDFILOFF ((xfs_dfiloff_t)-1) 77 - 78 53 #define NULLFSBLOCK ((xfs_fsblock_t)-1) 79 54 #define NULLRFSBLOCK ((xfs_rfsblock_t)-1) 80 55 #define NULLRTBLOCK ((xfs_rtblock_t)-1)
-46
fs/xfs/xfs_vnode.h
··· 1 - /* 2 - * Copyright (c) 2000-2005 Silicon Graphics, Inc. 3 - * All Rights Reserved. 4 - * 5 - * This program is free software; you can redistribute it and/or 6 - * modify it under the terms of the GNU General Public License as 7 - * published by the Free Software Foundation. 8 - * 9 - * This program is distributed in the hope that it would be useful, 10 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 - * GNU General Public License for more details. 13 - * 14 - * You should have received a copy of the GNU General Public License 15 - * along with this program; if not, write the Free Software Foundation, 16 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 - */ 18 - #ifndef __XFS_VNODE_H__ 19 - #define __XFS_VNODE_H__ 20 - 21 - #include "xfs_fs.h" 22 - 23 - struct file; 24 - struct xfs_inode; 25 - struct attrlist_cursor_kern; 26 - 27 - /* 28 - * Flags for read/write calls - same values as IRIX 29 - */ 30 - #define IO_ISDIRECT 0x00004 /* bypass page cache */ 31 - #define IO_INVIS 0x00020 /* don't update inode timestamps */ 32 - 33 - #define XFS_IO_FLAGS \ 34 - { IO_ISDIRECT, "DIRECT" }, \ 35 - { IO_INVIS, "INVIS"} 36 - 37 - /* 38 - * Some useful predicates. 39 - */ 40 - #define VN_MAPPED(vp) mapping_mapped(vp->i_mapping) 41 - #define VN_CACHED(vp) (vp->i_mapping->nrpages) 42 - #define VN_DIRTY(vp) mapping_tagged(vp->i_mapping, \ 43 - PAGECACHE_TAG_DIRTY) 44 - 45 - 46 - #endif /* __XFS_VNODE_H__ */
+3 -3
fs/xfs/xfs_xattr.c
··· 49 49 value = NULL; 50 50 } 51 51 52 - error = -xfs_attr_get(ip, (unsigned char *)name, value, &asize, xflags); 52 + error = xfs_attr_get(ip, (unsigned char *)name, value, &asize, xflags); 53 53 if (error) 54 54 return error; 55 55 return asize; ··· 71 71 xflags |= ATTR_REPLACE; 72 72 73 73 if (!value) 74 - return -xfs_attr_remove(ip, (unsigned char *)name, xflags); 75 - return -xfs_attr_set(ip, (unsigned char *)name, 74 + return xfs_attr_remove(ip, (unsigned char *)name, xflags); 75 + return xfs_attr_set(ip, (unsigned char *)name, 76 76 (void *)value, size, xflags); 77 77 } 78 78