Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs

* 'for-linus' of git://oss.sgi.com/xfs/xfs: (23 commits)
xfs: don't name variables "panic"
xfs: factor agf counter updates into a helper
xfs: clean up the xfs_alloc_compute_aligned calling convention
xfs: kill support/debug.[ch]
xfs: Convert remaining cmn_err() callers to new API
xfs: convert the quota debug prints to new API
xfs: rename xfs_cmn_err_fsblock_zero()
xfs: convert xfs_fs_cmn_err to new error logging API
xfs: kill xfs_fs_mount_cmn_err() macro
xfs: kill xfs_fs_repair_cmn_err() macro
xfs: convert xfs_cmn_err to xfs_alert_tag
xfs: Convert xlog_warn to new logging interface
xfs: Convert linux-2.6/ files to new logging interface
xfs: introduce new logging API.
xfs: zero proper structure size for geometry calls
xfs: enable delaylog by default
xfs: more sensible inode refcounting for ialloc
xfs: stop using xfs_trans_iget in the RT allocator
xfs: check if device support discard in xfs_ioc_trim()
xfs: prevent leaking uninitialized stack memory in FSGEOMETRY_V1
...

+946 -1078
-7
Documentation/filesystems/xfs-delayed-logging-design.txt
··· 791 be able to swap methods automatically and transparently depending on load 792 characteristics, but this should not be necessary if delayed logging works as 793 designed. 794 - 795 - Roadmap: 796 - 797 - 2.6.39 Switch default mount option to use delayed logging 798 - => should be roughly 12 months after initial merge 799 - => enough time to shake out remaining problems before next round of 800 - enterprise distro kernel rebases
··· 791 be able to swap methods automatically and transparently depending on load 792 characteristics, but this should not be necessary if delayed logging works as 793 designed.
+2 -3
fs/xfs/Makefile
··· 102 xfs_globals.o \ 103 xfs_ioctl.o \ 104 xfs_iops.o \ 105 xfs_super.o \ 106 xfs_sync.o \ 107 xfs_xattr.o) 108 109 # Objects in support/ 110 - xfs-y += $(addprefix support/, \ 111 - debug.o \ 112 - uuid.o)
··· 102 xfs_globals.o \ 103 xfs_ioctl.o \ 104 xfs_iops.o \ 105 + xfs_message.o \ 106 xfs_super.o \ 107 xfs_sync.o \ 108 xfs_xattr.o) 109 110 # Objects in support/ 111 + xfs-y += support/uuid.o
+5 -4
fs/xfs/linux-2.6/kmem.c
··· 23 #include <linux/backing-dev.h> 24 #include "time.h" 25 #include "kmem.h" 26 27 /* 28 * Greedy allocation. May fail and may return vmalloced memory. ··· 57 if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP))) 58 return ptr; 59 if (!(++retries % 100)) 60 - printk(KERN_ERR "XFS: possible memory allocation " 61 - "deadlock in %s (mode:0x%x)\n", 62 __func__, lflags); 63 congestion_wait(BLK_RW_ASYNC, HZ/50); 64 } while (1); ··· 113 if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP))) 114 return ptr; 115 if (!(++retries % 100)) 116 - printk(KERN_ERR "XFS: possible memory allocation " 117 - "deadlock in %s (mode:0x%x)\n", 118 __func__, lflags); 119 congestion_wait(BLK_RW_ASYNC, HZ/50); 120 } while (1);
··· 23 #include <linux/backing-dev.h> 24 #include "time.h" 25 #include "kmem.h" 26 + #include "xfs_message.h" 27 28 /* 29 * Greedy allocation. May fail and may return vmalloced memory. ··· 56 if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP))) 57 return ptr; 58 if (!(++retries % 100)) 59 + xfs_err(NULL, 60 + "possible memory allocation deadlock in %s (mode:0x%x)", 61 __func__, lflags); 62 congestion_wait(BLK_RW_ASYNC, HZ/50); 63 } while (1); ··· 112 if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP))) 113 return ptr; 114 if (!(++retries % 100)) 115 + xfs_err(NULL, 116 + "possible memory allocation deadlock in %s (mode:0x%x)", 117 __func__, lflags); 118 congestion_wait(BLK_RW_ASYNC, HZ/50); 119 } while (1);
+3 -3
fs/xfs/linux-2.6/xfs_aops.c
··· 854 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) 855 goto out_invalidate; 856 857 - xfs_fs_cmn_err(CE_ALERT, ip->i_mount, 858 "page discard on page %p, inode 0x%llx, offset %llu.", 859 page, ip->i_ino, offset); 860 ··· 872 if (error) { 873 /* something screwed, just bail */ 874 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { 875 - xfs_fs_cmn_err(CE_ALERT, ip->i_mount, 876 "page discard unable to remove delalloc mapping."); 877 } 878 break; ··· 1411 if (error) { 1412 /* something screwed, just bail */ 1413 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { 1414 - xfs_fs_cmn_err(CE_ALERT, ip->i_mount, 1415 "xfs_vm_write_failed: unable to clean up ino %lld", 1416 ip->i_ino); 1417 }
··· 854 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) 855 goto out_invalidate; 856 857 + xfs_alert(ip->i_mount, 858 "page discard on page %p, inode 0x%llx, offset %llu.", 859 page, ip->i_ino, offset); 860 ··· 872 if (error) { 873 /* something screwed, just bail */ 874 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { 875 + xfs_alert(ip->i_mount, 876 "page discard unable to remove delalloc mapping."); 877 } 878 break; ··· 1411 if (error) { 1412 /* something screwed, just bail */ 1413 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { 1414 + xfs_alert(ip->i_mount, 1415 "xfs_vm_write_failed: unable to clean up ino %lld", 1416 ip->i_ino); 1417 }
+8 -9
fs/xfs/linux-2.6/xfs_buf.c
··· 401 * handle buffer allocation failures we can't do much. 402 */ 403 if (!(++retries % 100)) 404 - printk(KERN_ERR 405 - "XFS: possible memory allocation " 406 - "deadlock in %s (mode:0x%x)\n", 407 __func__, gfp_mask); 408 409 XFS_STATS_INC(xb_page_retries); ··· 614 if (!(bp->b_flags & XBF_MAPPED)) { 615 error = _xfs_buf_map_pages(bp, flags); 616 if (unlikely(error)) { 617 - printk(KERN_WARNING "%s: failed to map pages\n", 618 - __func__); 619 goto no_buffer; 620 } 621 } ··· 849 850 error = _xfs_buf_map_pages(bp, XBF_MAPPED); 851 if (unlikely(error)) { 852 - printk(KERN_WARNING "%s: failed to map pages\n", 853 - __func__); 854 goto fail_free_mem; 855 } 856 ··· 1616 btp->bt_smask = sectorsize - 1; 1617 1618 if (set_blocksize(btp->bt_bdev, sectorsize)) { 1619 - printk(KERN_WARNING 1620 - "XFS: Cannot set_blocksize to %u on device %s\n", 1621 sectorsize, XFS_BUFTARG_NAME(btp)); 1622 return EINVAL; 1623 }
··· 401 * handle buffer allocation failures we can't do much. 402 */ 403 if (!(++retries % 100)) 404 + xfs_err(NULL, 405 + "possible memory allocation deadlock in %s (mode:0x%x)", 406 __func__, gfp_mask); 407 408 XFS_STATS_INC(xb_page_retries); ··· 615 if (!(bp->b_flags & XBF_MAPPED)) { 616 error = _xfs_buf_map_pages(bp, flags); 617 if (unlikely(error)) { 618 + xfs_warn(target->bt_mount, 619 + "%s: failed to map pages\n", __func__); 620 goto no_buffer; 621 } 622 } ··· 850 851 error = _xfs_buf_map_pages(bp, XBF_MAPPED); 852 if (unlikely(error)) { 853 + xfs_warn(target->bt_mount, 854 + "%s: failed to map pages\n", __func__); 855 goto fail_free_mem; 856 } 857 ··· 1617 btp->bt_smask = sectorsize - 1; 1618 1619 if (set_blocksize(btp->bt_bdev, sectorsize)) { 1620 + xfs_warn(btp->bt_mount, 1621 + "Cannot set_blocksize to %u on device %s\n", 1622 sectorsize, XFS_BUFTARG_NAME(btp)); 1623 return EINVAL; 1624 }
+22 -1
fs/xfs/linux-2.6/xfs_linux.h
··· 39 #include <mrlock.h> 40 #include <time.h> 41 42 - #include <support/debug.h> 43 #include <support/uuid.h> 44 45 #include <linux/semaphore.h> ··· 85 #include <xfs_aops.h> 86 #include <xfs_super.h> 87 #include <xfs_buf.h> 88 89 /* 90 * Feature macros (disable/enable) ··· 279 #else 280 #define __arch_pack 281 #endif 282 283 #endif /* __XFS_LINUX__ */
··· 39 #include <mrlock.h> 40 #include <time.h> 41 42 #include <support/uuid.h> 43 44 #include <linux/semaphore.h> ··· 86 #include <xfs_aops.h> 87 #include <xfs_super.h> 88 #include <xfs_buf.h> 89 + #include <xfs_message.h> 90 91 /* 92 * Feature macros (disable/enable) ··· 279 #else 280 #define __arch_pack 281 #endif 282 + 283 + #define ASSERT_ALWAYS(expr) \ 284 + (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) 285 + 286 + #ifndef DEBUG 287 + #define ASSERT(expr) ((void)0) 288 + 289 + #ifndef STATIC 290 + # define STATIC static noinline 291 + #endif 292 + 293 + #else /* DEBUG */ 294 + 295 + #define ASSERT(expr) \ 296 + (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) 297 + 298 + #ifndef STATIC 299 + # define STATIC noinline 300 + #endif 301 + 302 + #endif /* DEBUG */ 303 304 #endif /* __XFS_LINUX__ */
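The assertion and STATIC annotations previously supplied by support/debug.h now live at the bottom of xfs_linux.h, as shown above. As a minimal sketch of their semantics (the helper below is hypothetical and not part of the diff): ASSERT() compiles away in non-DEBUG builds, while ASSERT_ALWAYS() always evaluates its expression and calls assfail() (now provided by xfs_message.c) on failure.

/* Hypothetical helper, only illustrating the relocated macros. */
STATIC void
xfs_sanity_check_len(int len)
{
	ASSERT(len >= 0);		/* no-op unless built with DEBUG */
	ASSERT_ALWAYS(len < 1024);	/* checked in all builds; BUG()s via assfail() if false */
}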
+133
fs/xfs/linux-2.6/xfs_message.c
···
··· 1 + /* 2 + * Copyright (c) 2011 Red Hat, Inc. All Rights Reserved. 3 + * 4 + * This program is free software; you can redistribute it and/or 5 + * modify it under the terms of the GNU General Public License as 6 + * published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it would be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + * 13 + * You should have received a copy of the GNU General Public License 14 + * along with this program; if not, write the Free Software Foundation, 15 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 16 + */ 17 + 18 + #include "xfs.h" 19 + #include "xfs_fs.h" 20 + #include "xfs_types.h" 21 + #include "xfs_log.h" 22 + #include "xfs_inum.h" 23 + #include "xfs_trans.h" 24 + #include "xfs_sb.h" 25 + #include "xfs_ag.h" 26 + #include "xfs_mount.h" 27 + 28 + /* 29 + * XFS logging functions 30 + */ 31 + static int 32 + __xfs_printk( 33 + const char *level, 34 + const struct xfs_mount *mp, 35 + struct va_format *vaf) 36 + { 37 + if (mp && mp->m_fsname) 38 + return printk("%sXFS (%s): %pV\n", level, mp->m_fsname, vaf); 39 + return printk("%sXFS: %pV\n", level, vaf); 40 + } 41 + 42 + int xfs_printk( 43 + const char *level, 44 + const struct xfs_mount *mp, 45 + const char *fmt, ...) 46 + { 47 + struct va_format vaf; 48 + va_list args; 49 + int r; 50 + 51 + va_start(args, fmt); 52 + 53 + vaf.fmt = fmt; 54 + vaf.va = &args; 55 + 56 + r = __xfs_printk(level, mp, &vaf); 57 + va_end(args); 58 + 59 + return r; 60 + } 61 + 62 + #define define_xfs_printk_level(func, kern_level) \ 63 + int func(const struct xfs_mount *mp, const char *fmt, ...) \ 64 + { \ 65 + struct va_format vaf; \ 66 + va_list args; \ 67 + int r; \ 68 + \ 69 + va_start(args, fmt); \ 70 + \ 71 + vaf.fmt = fmt; \ 72 + vaf.va = &args; \ 73 + \ 74 + r = __xfs_printk(kern_level, mp, &vaf); \ 75 + va_end(args); \ 76 + \ 77 + return r; \ 78 + } \ 79 + 80 + define_xfs_printk_level(xfs_emerg, KERN_EMERG); 81 + define_xfs_printk_level(xfs_alert, KERN_ALERT); 82 + define_xfs_printk_level(xfs_crit, KERN_CRIT); 83 + define_xfs_printk_level(xfs_err, KERN_ERR); 84 + define_xfs_printk_level(xfs_warn, KERN_WARNING); 85 + define_xfs_printk_level(xfs_notice, KERN_NOTICE); 86 + define_xfs_printk_level(xfs_info, KERN_INFO); 87 + #ifdef DEBUG 88 + define_xfs_printk_level(xfs_debug, KERN_DEBUG); 89 + #endif 90 + 91 + int 92 + xfs_alert_tag( 93 + const struct xfs_mount *mp, 94 + int panic_tag, 95 + const char *fmt, ...) 96 + { 97 + struct va_format vaf; 98 + va_list args; 99 + int do_panic = 0; 100 + int r; 101 + 102 + if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) { 103 + xfs_printk(KERN_ALERT, mp, 104 + "XFS: Transforming an alert into a BUG."); 105 + do_panic = 1; 106 + } 107 + 108 + va_start(args, fmt); 109 + 110 + vaf.fmt = fmt; 111 + vaf.va = &args; 112 + 113 + r = __xfs_printk(KERN_ALERT, mp, &vaf); 114 + va_end(args); 115 + 116 + BUG_ON(do_panic); 117 + 118 + return r; 119 + } 120 + 121 + void 122 + assfail(char *expr, char *file, int line) 123 + { 124 + xfs_emerg(NULL, "Assertion failed: %s, file: %s, line: %d", 125 + expr, file, line); 126 + BUG(); 127 + } 128 + 129 + void 130 + xfs_hex_dump(void *p, int length) 131 + { 132 + print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_ADDRESS, 16, 1, p, length, 1); 133 + }
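xfs_alert_tag() above keeps the old xfs_cmn_err() behaviour of escalating a tagged alert into a BUG() when the matching bit is set in the fs.xfs.panic_mask sysctl (xfs_panic_mask). A hedged sketch of a caller follows; the wrapper function and message text are made up for illustration, and XFS_PTAG_IFLUSH is assumed to be one of the existing panic tags from xfs_error.h.

/* Illustrative only: report corruption while honouring the panic mask. */
STATIC void
xfs_report_inode_corruption(struct xfs_mount *mp, unsigned long long ino)
{
	/*
	 * If the XFS_PTAG_IFLUSH bit is set in xfs_panic_mask,
	 * xfs_alert_tag() prints the alert and then BUG()s.
	 */
	xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
		"corruption detected on inode 0x%llx", ino);
}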
+38
fs/xfs/linux-2.6/xfs_message.h
···
··· 1 + #ifndef __XFS_MESSAGE_H 2 + #define __XFS_MESSAGE_H 1 3 + 4 + struct xfs_mount; 5 + 6 + extern int xfs_printk(const char *level, const struct xfs_mount *mp, 7 + const char *fmt, ...) 8 + __attribute__ ((format (printf, 3, 4))); 9 + extern int xfs_emerg(const struct xfs_mount *mp, const char *fmt, ...) 10 + __attribute__ ((format (printf, 2, 3))); 11 + extern int xfs_alert(const struct xfs_mount *mp, const char *fmt, ...) 12 + __attribute__ ((format (printf, 2, 3))); 13 + extern int xfs_alert_tag(const struct xfs_mount *mp, int tag, 14 + const char *fmt, ...) 15 + __attribute__ ((format (printf, 3, 4))); 16 + extern int xfs_crit(const struct xfs_mount *mp, const char *fmt, ...) 17 + __attribute__ ((format (printf, 2, 3))); 18 + extern int xfs_err(const struct xfs_mount *mp, const char *fmt, ...) 19 + __attribute__ ((format (printf, 2, 3))); 20 + extern int xfs_warn(const struct xfs_mount *mp, const char *fmt, ...) 21 + __attribute__ ((format (printf, 2, 3))); 22 + extern int xfs_notice(const struct xfs_mount *mp, const char *fmt, ...) 23 + __attribute__ ((format (printf, 2, 3))); 24 + extern int xfs_info(const struct xfs_mount *mp, const char *fmt, ...) 25 + __attribute__ ((format (printf, 2, 3))); 26 + 27 + #ifdef DEBUG 28 + extern int xfs_debug(const struct xfs_mount *mp, const char *fmt, ...) 29 + __attribute__ ((format (printf, 2, 3))); 30 + #else 31 + #define xfs_debug(mp, fmt, ...) (0) 32 + #endif 33 + 34 + extern void assfail(char *expr, char *f, int l); 35 + 36 + extern void xfs_hex_dump(void *p, int length); 37 + 38 + #endif /* __XFS_MESSAGE_H */
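The header above is what converted callers pick up (via xfs_linux.h): the log level is carried in the function name, and the "XFS (devname):" prefix is added automatically from mp->m_fsname by __xfs_printk(). A rough before/after sketch of a typical conversion, mirroring the xfs_super.c hunks below (the surrounding function is hypothetical):

/* Hypothetical mount-option check, showing the conversion pattern. */
STATIC int
xfs_check_logbufs(struct xfs_mount *mp, int logbufs)
{
	if (logbufs < XLOG_MIN_ICLOGS || logbufs > XLOG_MAX_ICLOGS) {
		/* old: cmn_err(CE_WARN, "XFS: invalid logbufs value: %d", logbufs); */
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			 logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return XFS_ERROR(EINVAL);
	}
	return 0;
}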
+58 -70
fs/xfs/linux-2.6/xfs_super.c
··· 173 __uint8_t iosizelog = 0; 174 175 /* 176 * Copy binary VFS mount flags we are interested in. 177 */ 178 if (sb->s_flags & MS_RDONLY) ··· 198 mp->m_flags |= XFS_MOUNT_BARRIER; 199 mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE; 200 mp->m_flags |= XFS_MOUNT_SMALL_INUMS; 201 202 /* 203 * These can be overridden by the mount option parsing. ··· 217 218 if (!strcmp(this_char, MNTOPT_LOGBUFS)) { 219 if (!value || !*value) { 220 - cmn_err(CE_WARN, 221 - "XFS: %s option requires an argument", 222 this_char); 223 return EINVAL; 224 } 225 mp->m_logbufs = simple_strtoul(value, &eov, 10); 226 } else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) { 227 if (!value || !*value) { 228 - cmn_err(CE_WARN, 229 - "XFS: %s option requires an argument", 230 this_char); 231 return EINVAL; 232 } 233 mp->m_logbsize = suffix_strtoul(value, &eov, 10); 234 } else if (!strcmp(this_char, MNTOPT_LOGDEV)) { 235 if (!value || !*value) { 236 - cmn_err(CE_WARN, 237 - "XFS: %s option requires an argument", 238 this_char); 239 return EINVAL; 240 } ··· 239 if (!mp->m_logname) 240 return ENOMEM; 241 } else if (!strcmp(this_char, MNTOPT_MTPT)) { 242 - cmn_err(CE_WARN, 243 - "XFS: %s option not allowed on this system", 244 this_char); 245 return EINVAL; 246 } else if (!strcmp(this_char, MNTOPT_RTDEV)) { 247 if (!value || !*value) { 248 - cmn_err(CE_WARN, 249 - "XFS: %s option requires an argument", 250 this_char); 251 return EINVAL; 252 } ··· 253 return ENOMEM; 254 } else if (!strcmp(this_char, MNTOPT_BIOSIZE)) { 255 if (!value || !*value) { 256 - cmn_err(CE_WARN, 257 - "XFS: %s option requires an argument", 258 this_char); 259 return EINVAL; 260 } ··· 261 iosizelog = ffs(iosize) - 1; 262 } else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) { 263 if (!value || !*value) { 264 - cmn_err(CE_WARN, 265 - "XFS: %s option requires an argument", 266 this_char); 267 return EINVAL; 268 } ··· 283 mp->m_flags |= XFS_MOUNT_SWALLOC; 284 } else if (!strcmp(this_char, MNTOPT_SUNIT)) { 285 if (!value || !*value) { 286 - cmn_err(CE_WARN, 287 - "XFS: %s option requires an argument", 288 this_char); 289 return EINVAL; 290 } 291 dsunit = simple_strtoul(value, &eov, 10); 292 } else if (!strcmp(this_char, MNTOPT_SWIDTH)) { 293 if (!value || !*value) { 294 - cmn_err(CE_WARN, 295 - "XFS: %s option requires an argument", 296 this_char); 297 return EINVAL; 298 } ··· 298 } else if (!strcmp(this_char, MNTOPT_64BITINODE)) { 299 mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS; 300 #if !XFS_BIG_INUMS 301 - cmn_err(CE_WARN, 302 - "XFS: %s option not allowed on this system", 303 this_char); 304 return EINVAL; 305 #endif ··· 356 } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) { 357 mp->m_flags &= ~XFS_MOUNT_DELAYLOG; 358 } else if (!strcmp(this_char, "ihashsize")) { 359 - cmn_err(CE_WARN, 360 - "XFS: ihashsize no longer used, option is deprecated."); 361 } else if (!strcmp(this_char, "osyncisdsync")) { 362 - cmn_err(CE_WARN, 363 - "XFS: osyncisdsync has no effect, option is deprecated."); 364 } else if (!strcmp(this_char, "osyncisosync")) { 365 - cmn_err(CE_WARN, 366 - "XFS: osyncisosync has no effect, option is deprecated."); 367 } else if (!strcmp(this_char, "irixsgid")) { 368 - cmn_err(CE_WARN, 369 - "XFS: irixsgid is now a sysctl(2) variable, option is deprecated."); 370 } else { 371 - cmn_err(CE_WARN, 372 - "XFS: unknown mount option [%s].", this_char); 373 return EINVAL; 374 } 375 } ··· 378 */ 379 if ((mp->m_flags & XFS_MOUNT_NORECOVERY) && 380 !(mp->m_flags & XFS_MOUNT_RDONLY)) { 381 - cmn_err(CE_WARN, "XFS: no-recovery mounts must be read-only."); 382 return EINVAL; 383 } 384 
385 if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) { 386 - cmn_err(CE_WARN, 387 - "XFS: sunit and swidth options incompatible with the noalign option"); 388 return EINVAL; 389 } 390 391 #ifndef CONFIG_XFS_QUOTA 392 if (XFS_IS_QUOTA_RUNNING(mp)) { 393 - cmn_err(CE_WARN, 394 - "XFS: quota support not available in this kernel."); 395 return EINVAL; 396 } 397 #endif 398 399 if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) && 400 (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) { 401 - cmn_err(CE_WARN, 402 - "XFS: cannot mount with both project and group quota"); 403 return EINVAL; 404 } 405 406 if ((dsunit && !dswidth) || (!dsunit && dswidth)) { 407 - cmn_err(CE_WARN, 408 - "XFS: sunit and swidth must be specified together"); 409 return EINVAL; 410 } 411 412 if (dsunit && (dswidth % dsunit != 0)) { 413 - cmn_err(CE_WARN, 414 - "XFS: stripe width (%d) must be a multiple of the stripe unit (%d)", 415 dswidth, dsunit); 416 return EINVAL; 417 } ··· 434 mp->m_logbufs != 0 && 435 (mp->m_logbufs < XLOG_MIN_ICLOGS || 436 mp->m_logbufs > XLOG_MAX_ICLOGS)) { 437 - cmn_err(CE_WARN, 438 - "XFS: invalid logbufs value: %d [not %d-%d]", 439 mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS); 440 return XFS_ERROR(EINVAL); 441 } ··· 443 (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE || 444 mp->m_logbsize > XLOG_MAX_RECORD_BSIZE || 445 !is_power_of_2(mp->m_logbsize))) { 446 - cmn_err(CE_WARN, 447 - "XFS: invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]", 448 mp->m_logbsize); 449 return XFS_ERROR(EINVAL); 450 } 451 452 - mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL); 453 - if (!mp->m_fsname) 454 - return ENOMEM; 455 - mp->m_fsname_len = strlen(mp->m_fsname) + 1; 456 - 457 if (iosizelog) { 458 if (iosizelog > XFS_MAX_IO_LOG || 459 iosizelog < XFS_MIN_IO_LOG) { 460 - cmn_err(CE_WARN, 461 - "XFS: invalid log iosize: %d [not %d-%d]", 462 iosizelog, XFS_MIN_IO_LOG, 463 XFS_MAX_IO_LOG); 464 return XFS_ERROR(EINVAL); ··· 599 mp); 600 if (IS_ERR(*bdevp)) { 601 error = PTR_ERR(*bdevp); 602 - printk("XFS: Invalid device [%s], error=%d\n", name, error); 603 } 604 605 return -error; ··· 653 int error; 654 655 if (mp->m_logdev_targp != mp->m_ddev_targp) { 656 - xfs_fs_cmn_err(CE_NOTE, mp, 657 "Disabling barriers, not supported with external log device"); 658 mp->m_flags &= ~XFS_MOUNT_BARRIER; 659 return; 660 } 661 662 if (xfs_readonly_buftarg(mp->m_ddev_targp)) { 663 - xfs_fs_cmn_err(CE_NOTE, mp, 664 - "Disabling barriers, underlying device is readonly"); 665 mp->m_flags &= ~XFS_MOUNT_BARRIER; 666 return; 667 } 668 669 error = xfs_barrier_test(mp); 670 if (error) { 671 - xfs_fs_cmn_err(CE_NOTE, mp, 672 - "Disabling barriers, trial barrier write failed"); 673 mp->m_flags &= ~XFS_MOUNT_BARRIER; 674 return; 675 } ··· 732 goto out_close_logdev; 733 734 if (rtdev == ddev || rtdev == logdev) { 735 - cmn_err(CE_WARN, 736 - "XFS: Cannot mount filesystem with identical rtdev and ddev/logdev."); 737 error = EINVAL; 738 goto out_close_rtdev; 739 } ··· 1334 * options that we can't actually change. 
1335 */ 1336 #if 0 1337 - printk(KERN_INFO 1338 - "XFS: mount option \"%s\" not supported for remount\n", p); 1339 return -EINVAL; 1340 #else 1341 break; ··· 1356 if (mp->m_update_flags) { 1357 error = xfs_mount_log_sb(mp, mp->m_update_flags); 1358 if (error) { 1359 - cmn_err(CE_WARN, 1360 - "XFS: failed to write sb changes"); 1361 return error; 1362 } 1363 mp->m_update_flags = 0; ··· 1440 mp->m_logbsize = mp->m_sb.sb_logsunit; 1441 } else if (mp->m_logbsize > 0 && 1442 mp->m_logbsize < mp->m_sb.sb_logsunit) { 1443 - cmn_err(CE_WARN, 1444 - "XFS: logbuf size must be greater than or equal to log stripe size"); 1445 return XFS_ERROR(EINVAL); 1446 } 1447 } else { 1448 /* Fail a mount if the logbuf is larger than 32K */ 1449 if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) { 1450 - cmn_err(CE_WARN, 1451 - "XFS: logbuf size for version 1 logs must be 16K or 32K"); 1452 return XFS_ERROR(EINVAL); 1453 } 1454 } ··· 1465 * prohibit r/w mounts of read-only filesystems 1466 */ 1467 if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) { 1468 - cmn_err(CE_WARN, 1469 - "XFS: cannot mount a read-only filesystem as read-write"); 1470 return XFS_ERROR(EROFS); 1471 } 1472
··· 173 __uint8_t iosizelog = 0; 174 175 /* 176 + * set up the mount name first so all the errors will refer to the 177 + * correct device. 178 + */ 179 + mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL); 180 + if (!mp->m_fsname) 181 + return ENOMEM; 182 + mp->m_fsname_len = strlen(mp->m_fsname) + 1; 183 + 184 + /* 185 * Copy binary VFS mount flags we are interested in. 186 */ 187 if (sb->s_flags & MS_RDONLY) ··· 189 mp->m_flags |= XFS_MOUNT_BARRIER; 190 mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE; 191 mp->m_flags |= XFS_MOUNT_SMALL_INUMS; 192 + mp->m_flags |= XFS_MOUNT_DELAYLOG; 193 194 /* 195 * These can be overridden by the mount option parsing. ··· 207 208 if (!strcmp(this_char, MNTOPT_LOGBUFS)) { 209 if (!value || !*value) { 210 + xfs_warn(mp, "%s option requires an argument", 211 this_char); 212 return EINVAL; 213 } 214 mp->m_logbufs = simple_strtoul(value, &eov, 10); 215 } else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) { 216 if (!value || !*value) { 217 + xfs_warn(mp, "%s option requires an argument", 218 this_char); 219 return EINVAL; 220 } 221 mp->m_logbsize = suffix_strtoul(value, &eov, 10); 222 } else if (!strcmp(this_char, MNTOPT_LOGDEV)) { 223 if (!value || !*value) { 224 + xfs_warn(mp, "%s option requires an argument", 225 this_char); 226 return EINVAL; 227 } ··· 232 if (!mp->m_logname) 233 return ENOMEM; 234 } else if (!strcmp(this_char, MNTOPT_MTPT)) { 235 + xfs_warn(mp, "%s option not allowed on this system", 236 this_char); 237 return EINVAL; 238 } else if (!strcmp(this_char, MNTOPT_RTDEV)) { 239 if (!value || !*value) { 240 + xfs_warn(mp, "%s option requires an argument", 241 this_char); 242 return EINVAL; 243 } ··· 248 return ENOMEM; 249 } else if (!strcmp(this_char, MNTOPT_BIOSIZE)) { 250 if (!value || !*value) { 251 + xfs_warn(mp, "%s option requires an argument", 252 this_char); 253 return EINVAL; 254 } ··· 257 iosizelog = ffs(iosize) - 1; 258 } else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) { 259 if (!value || !*value) { 260 + xfs_warn(mp, "%s option requires an argument", 261 this_char); 262 return EINVAL; 263 } ··· 280 mp->m_flags |= XFS_MOUNT_SWALLOC; 281 } else if (!strcmp(this_char, MNTOPT_SUNIT)) { 282 if (!value || !*value) { 283 + xfs_warn(mp, "%s option requires an argument", 284 this_char); 285 return EINVAL; 286 } 287 dsunit = simple_strtoul(value, &eov, 10); 288 } else if (!strcmp(this_char, MNTOPT_SWIDTH)) { 289 if (!value || !*value) { 290 + xfs_warn(mp, "%s option requires an argument", 291 this_char); 292 return EINVAL; 293 } ··· 297 } else if (!strcmp(this_char, MNTOPT_64BITINODE)) { 298 mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS; 299 #if !XFS_BIG_INUMS 300 + xfs_warn(mp, "%s option not allowed on this system", 301 this_char); 302 return EINVAL; 303 #endif ··· 356 } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) { 357 mp->m_flags &= ~XFS_MOUNT_DELAYLOG; 358 } else if (!strcmp(this_char, "ihashsize")) { 359 + xfs_warn(mp, 360 + "ihashsize no longer used, option is deprecated."); 361 } else if (!strcmp(this_char, "osyncisdsync")) { 362 + xfs_warn(mp, 363 + "osyncisdsync has no effect, option is deprecated."); 364 } else if (!strcmp(this_char, "osyncisosync")) { 365 + xfs_warn(mp, 366 + "osyncisosync has no effect, option is deprecated."); 367 } else if (!strcmp(this_char, "irixsgid")) { 368 + xfs_warn(mp, 369 + "irixsgid is now a sysctl(2) variable, option is deprecated."); 370 } else { 371 + xfs_warn(mp, "unknown mount option [%s].", this_char); 372 return EINVAL; 373 } 374 } ··· 379 */ 380 if ((mp->m_flags & XFS_MOUNT_NORECOVERY) && 381 !(mp->m_flags 
& XFS_MOUNT_RDONLY)) { 382 + xfs_warn(mp, "no-recovery mounts must be read-only."); 383 return EINVAL; 384 } 385 386 if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) { 387 + xfs_warn(mp, 388 + "sunit and swidth options incompatible with the noalign option"); 389 return EINVAL; 390 } 391 392 #ifndef CONFIG_XFS_QUOTA 393 if (XFS_IS_QUOTA_RUNNING(mp)) { 394 + xfs_warn(mp, "quota support not available in this kernel."); 395 return EINVAL; 396 } 397 #endif 398 399 if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) && 400 (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) { 401 + xfs_warn(mp, "cannot mount with both project and group quota"); 402 return EINVAL; 403 } 404 405 if ((dsunit && !dswidth) || (!dsunit && dswidth)) { 406 + xfs_warn(mp, "sunit and swidth must be specified together"); 407 return EINVAL; 408 } 409 410 if (dsunit && (dswidth % dsunit != 0)) { 411 + xfs_warn(mp, 412 + "stripe width (%d) must be a multiple of the stripe unit (%d)", 413 dswidth, dsunit); 414 return EINVAL; 415 } ··· 438 mp->m_logbufs != 0 && 439 (mp->m_logbufs < XLOG_MIN_ICLOGS || 440 mp->m_logbufs > XLOG_MAX_ICLOGS)) { 441 + xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]", 442 mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS); 443 return XFS_ERROR(EINVAL); 444 } ··· 448 (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE || 449 mp->m_logbsize > XLOG_MAX_RECORD_BSIZE || 450 !is_power_of_2(mp->m_logbsize))) { 451 + xfs_warn(mp, 452 + "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]", 453 mp->m_logbsize); 454 return XFS_ERROR(EINVAL); 455 } 456 457 if (iosizelog) { 458 if (iosizelog > XFS_MAX_IO_LOG || 459 iosizelog < XFS_MIN_IO_LOG) { 460 + xfs_warn(mp, "invalid log iosize: %d [not %d-%d]", 461 iosizelog, XFS_MIN_IO_LOG, 462 XFS_MAX_IO_LOG); 463 return XFS_ERROR(EINVAL); ··· 610 mp); 611 if (IS_ERR(*bdevp)) { 612 error = PTR_ERR(*bdevp); 613 + xfs_warn(mp, "Invalid device [%s], error=%d\n", name, error); 614 } 615 616 return -error; ··· 664 int error; 665 666 if (mp->m_logdev_targp != mp->m_ddev_targp) { 667 + xfs_notice(mp, 668 "Disabling barriers, not supported with external log device"); 669 mp->m_flags &= ~XFS_MOUNT_BARRIER; 670 return; 671 } 672 673 if (xfs_readonly_buftarg(mp->m_ddev_targp)) { 674 + xfs_notice(mp, 675 + "Disabling barriers, underlying device is readonly"); 676 mp->m_flags &= ~XFS_MOUNT_BARRIER; 677 return; 678 } 679 680 error = xfs_barrier_test(mp); 681 if (error) { 682 + xfs_notice(mp, 683 + "Disabling barriers, trial barrier write failed"); 684 mp->m_flags &= ~XFS_MOUNT_BARRIER; 685 return; 686 } ··· 743 goto out_close_logdev; 744 745 if (rtdev == ddev || rtdev == logdev) { 746 + xfs_warn(mp, 747 + "Cannot mount filesystem with identical rtdev and ddev/logdev."); 748 error = EINVAL; 749 goto out_close_rtdev; 750 } ··· 1345 * options that we can't actually change. 
1346 */ 1347 #if 0 1348 + xfs_info(mp, 1349 + "mount option \"%s\" not supported for remount\n", p); 1350 return -EINVAL; 1351 #else 1352 break; ··· 1367 if (mp->m_update_flags) { 1368 error = xfs_mount_log_sb(mp, mp->m_update_flags); 1369 if (error) { 1370 + xfs_warn(mp, "failed to write sb changes"); 1371 return error; 1372 } 1373 mp->m_update_flags = 0; ··· 1452 mp->m_logbsize = mp->m_sb.sb_logsunit; 1453 } else if (mp->m_logbsize > 0 && 1454 mp->m_logbsize < mp->m_sb.sb_logsunit) { 1455 + xfs_warn(mp, 1456 + "logbuf size must be greater than or equal to log stripe size"); 1457 return XFS_ERROR(EINVAL); 1458 } 1459 } else { 1460 /* Fail a mount if the logbuf is larger than 32K */ 1461 if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) { 1462 + xfs_warn(mp, 1463 + "logbuf size for version 1 logs must be 16K or 32K"); 1464 return XFS_ERROR(EINVAL); 1465 } 1466 } ··· 1477 * prohibit r/w mounts of read-only filesystems 1478 */ 1479 if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) { 1480 + xfs_warn(mp, 1481 + "cannot mount a read-only filesystem as read-write"); 1482 return XFS_ERROR(EROFS); 1483 } 1484
+2 -3
fs/xfs/linux-2.6/xfs_sync.c
··· 425 /* Push the superblock and write an unmount record */ 426 error = xfs_log_sbcount(mp, 1); 427 if (error) 428 - xfs_fs_cmn_err(CE_WARN, mp, 429 - "xfs_attr_quiesce: failed to log sb changes. " 430 "Frozen image may not be consistent."); 431 xfs_log_unmount_write(mp); 432 xfs_unmountfs_writesb(mp); ··· 805 * pass on the error. 806 */ 807 if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) { 808 - xfs_fs_cmn_err(CE_WARN, ip->i_mount, 809 "inode 0x%llx background reclaim flush failed with %d", 810 (long long)ip->i_ino, error); 811 }
··· 425 /* Push the superblock and write an unmount record */ 426 error = xfs_log_sbcount(mp, 1); 427 if (error) 428 + xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. " 429 "Frozen image may not be consistent."); 430 xfs_log_unmount_write(mp); 431 xfs_unmountfs_writesb(mp); ··· 806 * pass on the error. 807 */ 808 if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) { 809 + xfs_warn(ip->i_mount, 810 "inode 0x%llx background reclaim flush failed with %d", 811 (long long)ip->i_ino, error); 812 }
+1 -1
fs/xfs/linux-2.6/xfs_sysctl.c
··· 37 ret = proc_dointvec_minmax(ctl, write, buffer, lenp, ppos); 38 39 if (!ret && write && *valp) { 40 - printk("XFS Clearing xfsstats\n"); 41 for_each_possible_cpu(c) { 42 preempt_disable(); 43 /* save vn_active, it's a universal truth! */
··· 37 ret = proc_dointvec_minmax(ctl, write, buffer, lenp, ppos); 38 39 if (!ret && write && *valp) { 40 + xfs_notice(NULL, "Clearing xfsstats"); 41 for_each_possible_cpu(c) { 42 preempt_disable(); 43 /* save vn_active, it's a universal truth! */
+26 -22
fs/xfs/quota/xfs_dquot.c
··· 544 /* 545 * A simple sanity check in case we got a corrupted dquot... 546 */ 547 - if (xfs_qm_dqcheck(ddq, id, dqp->dq_flags & XFS_DQ_ALLTYPES, 548 flags & (XFS_QMOPT_DQREPAIR|XFS_QMOPT_DOWARN), 549 - "dqtobp")) { 550 if (!(flags & XFS_QMOPT_DQREPAIR)) { 551 xfs_trans_brelse(tp, bp); 552 return XFS_ERROR(EIO); ··· 828 if (xfs_do_dqerror) { 829 if ((xfs_dqerror_target == mp->m_ddev_targp) && 830 (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) { 831 - cmn_err(CE_DEBUG, "Returning error in dqget"); 832 return (EIO); 833 } 834 } ··· 1208 /* 1209 * A simple sanity check in case we got a corrupted dquot.. 1210 */ 1211 - if (xfs_qm_dqcheck(&dqp->q_core, be32_to_cpu(ddqp->d_id), 0, 1212 - XFS_QMOPT_DOWARN, "dqflush (incore copy)")) { 1213 xfs_buf_relse(bp); 1214 xfs_dqfunlock(dqp); 1215 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); ··· 1393 */ 1394 error = xfs_qm_dqflush(dqp, SYNC_WAIT); 1395 if (error) 1396 - xfs_fs_cmn_err(CE_WARN, mp, 1397 - "xfs_qm_dqpurge: dquot %p flush failed", dqp); 1398 xfs_dqflock(dqp); 1399 } 1400 ASSERT(atomic_read(&dqp->q_pincount) == 0); ··· 1427 void 1428 xfs_qm_dqprint(xfs_dquot_t *dqp) 1429 { 1430 - cmn_err(CE_DEBUG, "-----------KERNEL DQUOT----------------"); 1431 - cmn_err(CE_DEBUG, "---- dquotID = %d", 1432 (int)be32_to_cpu(dqp->q_core.d_id)); 1433 - cmn_err(CE_DEBUG, "---- type = %s", DQFLAGTO_TYPESTR(dqp)); 1434 - cmn_err(CE_DEBUG, "---- fs = 0x%p", dqp->q_mount); 1435 - cmn_err(CE_DEBUG, "---- blkno = 0x%x", (int) dqp->q_blkno); 1436 - cmn_err(CE_DEBUG, "---- boffset = 0x%x", (int) dqp->q_bufoffset); 1437 - cmn_err(CE_DEBUG, "---- blkhlimit = %Lu (0x%x)", 1438 be64_to_cpu(dqp->q_core.d_blk_hardlimit), 1439 (int)be64_to_cpu(dqp->q_core.d_blk_hardlimit)); 1440 - cmn_err(CE_DEBUG, "---- blkslimit = %Lu (0x%x)", 1441 be64_to_cpu(dqp->q_core.d_blk_softlimit), 1442 (int)be64_to_cpu(dqp->q_core.d_blk_softlimit)); 1443 - cmn_err(CE_DEBUG, "---- inohlimit = %Lu (0x%x)", 1444 be64_to_cpu(dqp->q_core.d_ino_hardlimit), 1445 (int)be64_to_cpu(dqp->q_core.d_ino_hardlimit)); 1446 - cmn_err(CE_DEBUG, "---- inoslimit = %Lu (0x%x)", 1447 be64_to_cpu(dqp->q_core.d_ino_softlimit), 1448 (int)be64_to_cpu(dqp->q_core.d_ino_softlimit)); 1449 - cmn_err(CE_DEBUG, "---- bcount = %Lu (0x%x)", 1450 be64_to_cpu(dqp->q_core.d_bcount), 1451 (int)be64_to_cpu(dqp->q_core.d_bcount)); 1452 - cmn_err(CE_DEBUG, "---- icount = %Lu (0x%x)", 1453 be64_to_cpu(dqp->q_core.d_icount), 1454 (int)be64_to_cpu(dqp->q_core.d_icount)); 1455 - cmn_err(CE_DEBUG, "---- btimer = %d", 1456 (int)be32_to_cpu(dqp->q_core.d_btimer)); 1457 - cmn_err(CE_DEBUG, "---- itimer = %d", 1458 (int)be32_to_cpu(dqp->q_core.d_itimer)); 1459 - cmn_err(CE_DEBUG, "---------------------------"); 1460 } 1461 #endif 1462
··· 544 /* 545 * A simple sanity check in case we got a corrupted dquot... 546 */ 547 + error = xfs_qm_dqcheck(mp, ddq, id, dqp->dq_flags & XFS_DQ_ALLTYPES, 548 flags & (XFS_QMOPT_DQREPAIR|XFS_QMOPT_DOWARN), 549 + "dqtobp"); 550 + if (error) { 551 if (!(flags & XFS_QMOPT_DQREPAIR)) { 552 xfs_trans_brelse(tp, bp); 553 return XFS_ERROR(EIO); ··· 827 if (xfs_do_dqerror) { 828 if ((xfs_dqerror_target == mp->m_ddev_targp) && 829 (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) { 830 + xfs_debug(mp, "Returning error in dqget"); 831 return (EIO); 832 } 833 } ··· 1207 /* 1208 * A simple sanity check in case we got a corrupted dquot.. 1209 */ 1210 + error = xfs_qm_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0, 1211 + XFS_QMOPT_DOWARN, "dqflush (incore copy)"); 1212 + if (error) { 1213 xfs_buf_relse(bp); 1214 xfs_dqfunlock(dqp); 1215 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); ··· 1391 */ 1392 error = xfs_qm_dqflush(dqp, SYNC_WAIT); 1393 if (error) 1394 + xfs_warn(mp, "%s: dquot %p flush failed", 1395 + __func__, dqp); 1396 xfs_dqflock(dqp); 1397 } 1398 ASSERT(atomic_read(&dqp->q_pincount) == 0); ··· 1425 void 1426 xfs_qm_dqprint(xfs_dquot_t *dqp) 1427 { 1428 + struct xfs_mount *mp = dqp->q_mount; 1429 + 1430 + xfs_debug(mp, "-----------KERNEL DQUOT----------------"); 1431 + xfs_debug(mp, "---- dquotID = %d", 1432 (int)be32_to_cpu(dqp->q_core.d_id)); 1433 + xfs_debug(mp, "---- type = %s", DQFLAGTO_TYPESTR(dqp)); 1434 + xfs_debug(mp, "---- fs = 0x%p", dqp->q_mount); 1435 + xfs_debug(mp, "---- blkno = 0x%x", (int) dqp->q_blkno); 1436 + xfs_debug(mp, "---- boffset = 0x%x", (int) dqp->q_bufoffset); 1437 + xfs_debug(mp, "---- blkhlimit = %Lu (0x%x)", 1438 be64_to_cpu(dqp->q_core.d_blk_hardlimit), 1439 (int)be64_to_cpu(dqp->q_core.d_blk_hardlimit)); 1440 + xfs_debug(mp, "---- blkslimit = %Lu (0x%x)", 1441 be64_to_cpu(dqp->q_core.d_blk_softlimit), 1442 (int)be64_to_cpu(dqp->q_core.d_blk_softlimit)); 1443 + xfs_debug(mp, "---- inohlimit = %Lu (0x%x)", 1444 be64_to_cpu(dqp->q_core.d_ino_hardlimit), 1445 (int)be64_to_cpu(dqp->q_core.d_ino_hardlimit)); 1446 + xfs_debug(mp, "---- inoslimit = %Lu (0x%x)", 1447 be64_to_cpu(dqp->q_core.d_ino_softlimit), 1448 (int)be64_to_cpu(dqp->q_core.d_ino_softlimit)); 1449 + xfs_debug(mp, "---- bcount = %Lu (0x%x)", 1450 be64_to_cpu(dqp->q_core.d_bcount), 1451 (int)be64_to_cpu(dqp->q_core.d_bcount)); 1452 + xfs_debug(mp, "---- icount = %Lu (0x%x)", 1453 be64_to_cpu(dqp->q_core.d_icount), 1454 (int)be64_to_cpu(dqp->q_core.d_icount)); 1455 + xfs_debug(mp, "---- btimer = %d", 1456 (int)be32_to_cpu(dqp->q_core.d_btimer)); 1457 + xfs_debug(mp, "---- itimer = %d", 1458 (int)be32_to_cpu(dqp->q_core.d_itimer)); 1459 + xfs_debug(mp, "---------------------------"); 1460 } 1461 #endif 1462
+2 -3
fs/xfs/quota/xfs_dquot_item.c
··· 136 */ 137 error = xfs_qm_dqflush(dqp, 0); 138 if (error) 139 - xfs_fs_cmn_err(CE_WARN, dqp->q_mount, 140 - "xfs_qm_dquot_logitem_push: push error %d on dqp %p", 141 - error, dqp); 142 xfs_dqunlock(dqp); 143 } 144
··· 136 */ 137 error = xfs_qm_dqflush(dqp, 0); 138 if (error) 139 + xfs_warn(dqp->q_mount, "%s: push error %d on dqp %p", 140 + __func__, error, dqp); 141 xfs_dqunlock(dqp); 142 } 143
+19 -30
fs/xfs/quota/xfs_qm.c
··· 80 int i = 0; 81 82 list_for_each_entry(dqp, &mp->m_quotainfo->qi_dqlist_lock, qi_mplist) { 83 - cmn_err(CE_DEBUG, " %d. \"%d (%s)\" " 84 "bcnt = %lld, icnt = %lld, refs = %d", 85 i++, be32_to_cpu(dqp->q_core.d_id), 86 DQFLAGTO_TYPESTR(dqp), ··· 205 list_for_each_entry_safe(dqp, n, &xqm->qm_dqfrlist, q_freelist) { 206 xfs_dqlock(dqp); 207 #ifdef QUOTADEBUG 208 - cmn_err(CE_DEBUG, "FREELIST destroy 0x%p", dqp); 209 #endif 210 list_del_init(&dqp->q_freelist); 211 xfs_Gqm->qm_dqfrlist_cnt--; ··· 341 * quotas immediately. 342 */ 343 if (mp->m_sb.sb_rextents) { 344 - cmn_err(CE_NOTE, 345 - "Cannot turn on quotas for realtime filesystem %s", 346 - mp->m_fsname); 347 mp->m_qflags = 0; 348 goto write_changes; 349 } ··· 400 * off, but the on disk superblock doesn't know that ! 401 */ 402 ASSERT(!(XFS_IS_QUOTA_RUNNING(mp))); 403 - xfs_fs_cmn_err(CE_ALERT, mp, 404 - "XFS mount_quotas: Superblock update failed!"); 405 } 406 } 407 408 if (error) { 409 - xfs_fs_cmn_err(CE_WARN, mp, 410 - "Failed to initialize disk quotas."); 411 return; 412 } 413 ··· 1227 } 1228 1229 /* 1230 - * Keep an extra reference to this quota inode. This inode is 1231 - * locked exclusively and joined to the transaction already. 1232 - */ 1233 - ASSERT(xfs_isilocked(*ip, XFS_ILOCK_EXCL)); 1234 - IHOLD(*ip); 1235 - 1236 - /* 1237 * Make the changes in the superblock, and log those too. 1238 * sbfields arg may contain fields other than *QUOTINO; 1239 * VERSIONNUM for example. ··· 1254 xfs_mod_sb(tp, sbfields); 1255 1256 if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) { 1257 - xfs_fs_cmn_err(CE_ALERT, mp, "XFS qino_alloc failed!"); 1258 return error; 1259 } 1260 return 0; ··· 1289 * output any warnings because it's perfectly possible to 1290 * find uninitialised dquot blks. See comment in xfs_qm_dqcheck. 1291 */ 1292 - (void) xfs_qm_dqcheck(ddq, id+j, type, XFS_QMOPT_DQREPAIR, 1293 "xfs_quotacheck"); 1294 ddq->d_bcount = 0; 1295 ddq->d_icount = 0; ··· 1666 */ 1667 ASSERT(list_empty(&mp->m_quotainfo->qi_dqlist)); 1668 1669 - cmn_err(CE_NOTE, "XFS quotacheck %s: Please wait.", mp->m_fsname); 1670 1671 /* 1672 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset ··· 1744 1745 error_return: 1746 if (error) { 1747 - cmn_err(CE_WARN, "XFS quotacheck %s: Unsuccessful (Error %d): " 1748 - "Disabling quotas.", 1749 - mp->m_fsname, error); 1750 /* 1751 * We must turn off quotas. 1752 */ ··· 1754 ASSERT(xfs_Gqm != NULL); 1755 xfs_qm_destroy_quotainfo(mp); 1756 if (xfs_mount_reset_sbqflags(mp)) { 1757 - cmn_err(CE_WARN, "XFS quotacheck %s: " 1758 - "Failed to reset quota flags.", mp->m_fsname); 1759 } 1760 - } else { 1761 - cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname); 1762 - } 1763 return (error); 1764 } 1765 ··· 1926 */ 1927 error = xfs_qm_dqflush(dqp, 0); 1928 if (error) { 1929 - xfs_fs_cmn_err(CE_WARN, mp, 1930 - "xfs_qm_dqreclaim: dquot %p flush failed", dqp); 1931 } 1932 goto dqunlock; 1933 } ··· 2104 int error; 2105 2106 #ifdef QUOTADEBUG 2107 - cmn_err(CE_NOTE, "Writing superblock quota changes :%s", mp->m_fsname); 2108 #endif 2109 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE); 2110 if ((error = xfs_trans_reserve(tp, 0,
··· 80 int i = 0; 81 82 list_for_each_entry(dqp, &mp->m_quotainfo->qi_dqlist_lock, qi_mplist) { 83 + xfs_debug(mp, " %d. \"%d (%s)\" " 84 "bcnt = %lld, icnt = %lld, refs = %d", 85 i++, be32_to_cpu(dqp->q_core.d_id), 86 DQFLAGTO_TYPESTR(dqp), ··· 205 list_for_each_entry_safe(dqp, n, &xqm->qm_dqfrlist, q_freelist) { 206 xfs_dqlock(dqp); 207 #ifdef QUOTADEBUG 208 + xfs_debug(dqp->q_mount, "FREELIST destroy 0x%p", dqp); 209 #endif 210 list_del_init(&dqp->q_freelist); 211 xfs_Gqm->qm_dqfrlist_cnt--; ··· 341 * quotas immediately. 342 */ 343 if (mp->m_sb.sb_rextents) { 344 + xfs_notice(mp, "Cannot turn on quotas for realtime filesystem"); 345 mp->m_qflags = 0; 346 goto write_changes; 347 } ··· 402 * off, but the on disk superblock doesn't know that ! 403 */ 404 ASSERT(!(XFS_IS_QUOTA_RUNNING(mp))); 405 + xfs_alert(mp, "%s: Superblock update failed!", 406 + __func__); 407 } 408 } 409 410 if (error) { 411 + xfs_warn(mp, "Failed to initialize disk quotas."); 412 return; 413 } 414 ··· 1230 } 1231 1232 /* 1233 * Make the changes in the superblock, and log those too. 1234 * sbfields arg may contain fields other than *QUOTINO; 1235 * VERSIONNUM for example. ··· 1264 xfs_mod_sb(tp, sbfields); 1265 1266 if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) { 1267 + xfs_alert(mp, "%s failed (error %d)!", __func__, error); 1268 return error; 1269 } 1270 return 0; ··· 1299 * output any warnings because it's perfectly possible to 1300 * find uninitialised dquot blks. See comment in xfs_qm_dqcheck. 1301 */ 1302 + (void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR, 1303 "xfs_quotacheck"); 1304 ddq->d_bcount = 0; 1305 ddq->d_icount = 0; ··· 1676 */ 1677 ASSERT(list_empty(&mp->m_quotainfo->qi_dqlist)); 1678 1679 + xfs_notice(mp, "Quotacheck needed: Please wait."); 1680 1681 /* 1682 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset ··· 1754 1755 error_return: 1756 if (error) { 1757 + xfs_warn(mp, 1758 + "Quotacheck: Unsuccessful (Error %d): Disabling quotas.", 1759 + error); 1760 /* 1761 * We must turn off quotas. 1762 */ ··· 1764 ASSERT(xfs_Gqm != NULL); 1765 xfs_qm_destroy_quotainfo(mp); 1766 if (xfs_mount_reset_sbqflags(mp)) { 1767 + xfs_warn(mp, 1768 + "Quotacheck: Failed to reset quota flags."); 1769 } 1770 + } else 1771 + xfs_notice(mp, "Quotacheck: Done."); 1772 return (error); 1773 } 1774 ··· 1937 */ 1938 error = xfs_qm_dqflush(dqp, 0); 1939 if (error) { 1940 + xfs_warn(mp, "%s: dquot %p flush failed", 1941 + __func__, dqp); 1942 } 1943 goto dqunlock; 1944 } ··· 2115 int error; 2116 2117 #ifdef QUOTADEBUG 2118 + xfs_notice(mp, "Writing superblock quota changes"); 2119 #endif 2120 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE); 2121 if ((error = xfs_trans_reserve(tp, 0,
+1 -2
fs/xfs/quota/xfs_qm_bhv.c
··· 119 (gquotaondisk && !XFS_IS_GQUOTA_ON(mp)) || 120 (!gquotaondisk && XFS_IS_OQUOTA_ON(mp))) && 121 xfs_dev_is_read_only(mp, "changing quota state")) { 122 - cmn_err(CE_WARN, 123 - "XFS: please mount with%s%s%s%s.", 124 (!quotaondisk ? "out quota" : ""), 125 (uquotaondisk ? " usrquota" : ""), 126 (pquotaondisk ? " prjquota" : ""),
··· 119 (gquotaondisk && !XFS_IS_GQUOTA_ON(mp)) || 120 (!gquotaondisk && XFS_IS_OQUOTA_ON(mp))) && 121 xfs_dev_is_read_only(mp, "changing quota state")) { 122 + xfs_warn(mp, "please mount with%s%s%s%s.", 123 (!quotaondisk ? "out quota" : ""), 124 (uquotaondisk ? " usrquota" : ""), 125 (pquotaondisk ? " prjquota" : ""),
+43 -42
fs/xfs/quota/xfs_qm_syscalls.c
··· 41 #include "xfs_qm.h" 42 #include "xfs_trace.h" 43 44 - #ifdef DEBUG 45 - # define qdprintk(s, args...) cmn_err(CE_DEBUG, s, ## args) 46 - #else 47 - # define qdprintk(s, args...) do { } while (0) 48 - #endif 49 - 50 STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint); 51 STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *, 52 uint); ··· 288 int error = 0, error2 = 0; 289 290 if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) { 291 - qdprintk("qtrunc flags=%x m_qflags=%x\n", flags, mp->m_qflags); 292 return XFS_ERROR(EINVAL); 293 } 294 ··· 326 sbflags = 0; 327 328 if (flags == 0) { 329 - qdprintk("quotaon: zero flags, m_qflags=%x\n", mp->m_qflags); 330 return XFS_ERROR(EINVAL); 331 } 332 ··· 348 (flags & XFS_GQUOTA_ACCT) == 0 && 349 (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 && 350 (flags & XFS_OQUOTA_ENFD))) { 351 - qdprintk("Can't enforce without acct, flags=%x sbflags=%x\n", 352 - flags, mp->m_sb.sb_qflags); 353 return XFS_ERROR(EINVAL); 354 } 355 /* ··· 538 q->qi_bsoftlimit = soft; 539 } 540 } else { 541 - qdprintk("blkhard %Ld < blksoft %Ld\n", hard, soft); 542 } 543 hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ? 544 (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) : ··· 554 q->qi_rtbsoftlimit = soft; 555 } 556 } else { 557 - qdprintk("rtbhard %Ld < rtbsoft %Ld\n", hard, soft); 558 } 559 560 hard = (newlim->d_fieldmask & FS_DQ_IHARD) ? ··· 571 q->qi_isoftlimit = soft; 572 } 573 } else { 574 - qdprintk("ihard %Ld < isoft %Ld\n", hard, soft); 575 } 576 577 /* ··· 936 #define DQTEST_LIST_PRINT(l, NXT, title) \ 937 { \ 938 xfs_dqtest_t *dqp; int i = 0;\ 939 - cmn_err(CE_DEBUG, "%s (#%d)", title, (int) (l)->qh_nelems); \ 940 for (dqp = (xfs_dqtest_t *)(l)->qh_next; dqp != NULL; \ 941 dqp = (xfs_dqtest_t *)dqp->NXT) { \ 942 - cmn_err(CE_DEBUG, " %d. 
\"%d (%s)\" bcnt = %d, icnt = %d", \ 943 ++i, dqp->d_id, DQFLAGTO_TYPESTR(dqp), \ 944 dqp->d_bcount, dqp->d_icount); } \ 945 } ··· 964 } 965 STATIC void 966 xfs_qm_dqtest_print( 967 - xfs_dqtest_t *d) 968 { 969 - cmn_err(CE_DEBUG, "-----------DQTEST DQUOT----------------"); 970 - cmn_err(CE_DEBUG, "---- dquot ID = %d", d->d_id); 971 - cmn_err(CE_DEBUG, "---- fs = 0x%p", d->q_mount); 972 - cmn_err(CE_DEBUG, "---- bcount = %Lu (0x%x)", 973 d->d_bcount, (int)d->d_bcount); 974 - cmn_err(CE_DEBUG, "---- icount = %Lu (0x%x)", 975 d->d_icount, (int)d->d_icount); 976 - cmn_err(CE_DEBUG, "---------------------------"); 977 } 978 979 STATIC void ··· 988 { 989 qmtest_nfails++; 990 if (error) 991 - cmn_err(CE_DEBUG, "quotacheck failed id=%d, err=%d\nreason: %s", 992 - d->d_id, error, reason); 993 else 994 - cmn_err(CE_DEBUG, "quotacheck failed id=%d (%s) [%d != %d]", 995 - d->d_id, reason, (int)a, (int)b); 996 - xfs_qm_dqtest_print(d); 997 if (dqp) 998 xfs_qm_dqprint(dqp); 999 } ··· 1022 be64_to_cpu(dqp->q_core.d_bcount) >= 1023 be64_to_cpu(dqp->q_core.d_blk_softlimit)) { 1024 if (!dqp->q_core.d_btimer && dqp->q_core.d_id) { 1025 - cmn_err(CE_DEBUG, 1026 - "%d [%s] [0x%p] BLK TIMER NOT STARTED", 1027 - d->d_id, DQFLAGTO_TYPESTR(d), d->q_mount); 1028 err++; 1029 } 1030 } ··· 1032 be64_to_cpu(dqp->q_core.d_icount) >= 1033 be64_to_cpu(dqp->q_core.d_ino_softlimit)) { 1034 if (!dqp->q_core.d_itimer && dqp->q_core.d_id) { 1035 - cmn_err(CE_DEBUG, 1036 - "%d [%s] [0x%p] INO TIMER NOT STARTED", 1037 - d->d_id, DQFLAGTO_TYPESTR(d), d->q_mount); 1038 err++; 1039 } 1040 } 1041 #ifdef QUOTADEBUG 1042 if (!err) { 1043 - cmn_err(CE_DEBUG, "%d [%s] [0x%p] qchecked", 1044 - d->d_id, DQFLAGTO_TYPESTR(d), d->q_mount); 1045 } 1046 #endif 1047 return (err); ··· 1138 1139 if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) { 1140 *res = BULKSTAT_RV_NOTHING; 1141 - qdprintk("internalqcheck: ino=%llu, uqino=%llu, gqino=%llu\n", 1142 - (unsigned long long) ino, 1143 (unsigned long long) mp->m_sb.sb_uquotino, 1144 (unsigned long long) mp->m_sb.sb_gquotino); 1145 return XFS_ERROR(EINVAL); ··· 1224 xfs_qm_internalqcheck_adjust, 1225 0, NULL, &done); 1226 if (error) { 1227 - cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error); 1228 break; 1229 } 1230 } while (!done); 1231 1232 - cmn_err(CE_DEBUG, "Checking results against system dquots"); 1233 for (i = 0; i < qmtest_hashmask; i++) { 1234 xfs_dqtest_t *d, *n; 1235 xfs_dqhash_t *h; ··· 1247 } 1248 1249 if (qmtest_nfails) { 1250 - cmn_err(CE_DEBUG, "******** quotacheck failed ********"); 1251 - cmn_err(CE_DEBUG, "failures = %d", qmtest_nfails); 1252 } else { 1253 - cmn_err(CE_DEBUG, "******** quotacheck successful! ********"); 1254 } 1255 kmem_free(qmtest_udqtab); 1256 kmem_free(qmtest_gdqtab);
··· 41 #include "xfs_qm.h" 42 #include "xfs_trace.h" 43 44 STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint); 45 STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *, 46 uint); ··· 294 int error = 0, error2 = 0; 295 296 if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) { 297 + xfs_debug(mp, "%s: flags=%x m_qflags=%x\n", 298 + __func__, flags, mp->m_qflags); 299 return XFS_ERROR(EINVAL); 300 } 301 ··· 331 sbflags = 0; 332 333 if (flags == 0) { 334 + xfs_debug(mp, "%s: zero flags, m_qflags=%x\n", 335 + __func__, mp->m_qflags); 336 return XFS_ERROR(EINVAL); 337 } 338 ··· 352 (flags & XFS_GQUOTA_ACCT) == 0 && 353 (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 && 354 (flags & XFS_OQUOTA_ENFD))) { 355 + xfs_debug(mp, 356 + "%s: Can't enforce without acct, flags=%x sbflags=%x\n", 357 + __func__, flags, mp->m_sb.sb_qflags); 358 return XFS_ERROR(EINVAL); 359 } 360 /* ··· 541 q->qi_bsoftlimit = soft; 542 } 543 } else { 544 + xfs_debug(mp, "blkhard %Ld < blksoft %Ld\n", hard, soft); 545 } 546 hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ? 547 (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) : ··· 557 q->qi_rtbsoftlimit = soft; 558 } 559 } else { 560 + xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld\n", hard, soft); 561 } 562 563 hard = (newlim->d_fieldmask & FS_DQ_IHARD) ? ··· 574 q->qi_isoftlimit = soft; 575 } 576 } else { 577 + xfs_debug(mp, "ihard %Ld < isoft %Ld\n", hard, soft); 578 } 579 580 /* ··· 939 #define DQTEST_LIST_PRINT(l, NXT, title) \ 940 { \ 941 xfs_dqtest_t *dqp; int i = 0;\ 942 + xfs_debug(NULL, "%s (#%d)", title, (int) (l)->qh_nelems); \ 943 for (dqp = (xfs_dqtest_t *)(l)->qh_next; dqp != NULL; \ 944 dqp = (xfs_dqtest_t *)dqp->NXT) { \ 945 + xfs_debug(dqp->q_mount, \ 946 + " %d. \"%d (%s)\" bcnt = %d, icnt = %d", \ 947 ++i, dqp->d_id, DQFLAGTO_TYPESTR(dqp), \ 948 dqp->d_bcount, dqp->d_icount); } \ 949 } ··· 966 } 967 STATIC void 968 xfs_qm_dqtest_print( 969 + struct xfs_mount *mp, 970 + struct dqtest *d) 971 { 972 + xfs_debug(mp, "-----------DQTEST DQUOT----------------"); 973 + xfs_debug(mp, "---- dquot ID = %d", d->d_id); 974 + xfs_debug(mp, "---- fs = 0x%p", d->q_mount); 975 + xfs_debug(mp, "---- bcount = %Lu (0x%x)", 976 d->d_bcount, (int)d->d_bcount); 977 + xfs_debug(mp, "---- icount = %Lu (0x%x)", 978 d->d_icount, (int)d->d_icount); 979 + xfs_debug(mp, "---------------------------"); 980 } 981 982 STATIC void ··· 989 { 990 qmtest_nfails++; 991 if (error) 992 + xfs_debug(dqp->q_mount, 993 + "quotacheck failed id=%d, err=%d\nreason: %s", 994 + d->d_id, error, reason); 995 else 996 + xfs_debug(dqp->q_mount, 997 + "quotacheck failed id=%d (%s) [%d != %d]", 998 + d->d_id, reason, (int)a, (int)b); 999 + xfs_qm_dqtest_print(dqp->q_mount, d); 1000 if (dqp) 1001 xfs_qm_dqprint(dqp); 1002 } ··· 1021 be64_to_cpu(dqp->q_core.d_bcount) >= 1022 be64_to_cpu(dqp->q_core.d_blk_softlimit)) { 1023 if (!dqp->q_core.d_btimer && dqp->q_core.d_id) { 1024 + xfs_debug(dqp->q_mount, 1025 + "%d [%s] BLK TIMER NOT STARTED", 1026 + d->d_id, DQFLAGTO_TYPESTR(d)); 1027 err++; 1028 } 1029 } ··· 1031 be64_to_cpu(dqp->q_core.d_icount) >= 1032 be64_to_cpu(dqp->q_core.d_ino_softlimit)) { 1033 if (!dqp->q_core.d_itimer && dqp->q_core.d_id) { 1034 + xfs_debug(dqp->q_mount, 1035 + "%d [%s] INO TIMER NOT STARTED", 1036 + d->d_id, DQFLAGTO_TYPESTR(d)); 1037 err++; 1038 } 1039 } 1040 #ifdef QUOTADEBUG 1041 if (!err) { 1042 + xfs_debug(dqp->q_mount, "%d [%s] qchecked", 1043 + d->d_id, DQFLAGTO_TYPESTR(d)); 1044 } 1045 #endif 1046 return (err); ··· 1137 1138 if (ino == 
mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) { 1139 *res = BULKSTAT_RV_NOTHING; 1140 + xfs_debug(mp, "%s: ino=%llu, uqino=%llu, gqino=%llu\n", 1141 + __func__, (unsigned long long) ino, 1142 (unsigned long long) mp->m_sb.sb_uquotino, 1143 (unsigned long long) mp->m_sb.sb_gquotino); 1144 return XFS_ERROR(EINVAL); ··· 1223 xfs_qm_internalqcheck_adjust, 1224 0, NULL, &done); 1225 if (error) { 1226 + xfs_debug(mp, "Bulkstat returned error 0x%x", error); 1227 break; 1228 } 1229 } while (!done); 1230 1231 + xfs_debug(mp, "Checking results against system dquots"); 1232 for (i = 0; i < qmtest_hashmask; i++) { 1233 xfs_dqtest_t *d, *n; 1234 xfs_dqhash_t *h; ··· 1246 } 1247 1248 if (qmtest_nfails) { 1249 + xfs_debug(mp, "******** quotacheck failed ********"); 1250 + xfs_debug(mp, "failures = %d", qmtest_nfails); 1251 } else { 1252 + xfs_debug(mp, "******** quotacheck successful! ********"); 1253 } 1254 kmem_free(qmtest_udqtab); 1255 kmem_free(qmtest_gdqtab);
+3 -2
fs/xfs/quota/xfs_trans_dquot.c
··· 643 (XFS_IS_OQUOTA_ENFORCED(dqp->q_mount) && 644 (XFS_QM_ISPDQ(dqp) || XFS_QM_ISGDQ(dqp))))) { 645 #ifdef QUOTADEBUG 646 - cmn_err(CE_DEBUG, "BLK Res: nblks=%ld + resbcount=%Ld" 647 - " > hardlimit=%Ld?", nblks, *resbcountp, hardlimit); 648 #endif 649 if (nblks > 0) { 650 /*
··· 643 (XFS_IS_OQUOTA_ENFORCED(dqp->q_mount) && 644 (XFS_QM_ISPDQ(dqp) || XFS_QM_ISGDQ(dqp))))) { 645 #ifdef QUOTADEBUG 646 + xfs_debug(mp, 647 + "BLK Res: nblks=%ld + resbcount=%Ld > hardlimit=%Ld?", 648 + nblks, *resbcountp, hardlimit); 649 #endif 650 if (nblks > 0) { 651 /*
-107
fs/xfs/support/debug.c
··· 1 - /* 2 - * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc. 3 - * All Rights Reserved. 4 - * 5 - * This program is free software; you can redistribute it and/or 6 - * modify it under the terms of the GNU General Public License as 7 - * published by the Free Software Foundation. 8 - * 9 - * This program is distributed in the hope that it would be useful, 10 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 - * GNU General Public License for more details. 13 - * 14 - * You should have received a copy of the GNU General Public License 15 - * along with this program; if not, write the Free Software Foundation, 16 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 - */ 18 - #include <xfs.h> 19 - #include "debug.h" 20 - 21 - /* xfs_mount.h drags a lot of crap in, sorry.. */ 22 - #include "xfs_sb.h" 23 - #include "xfs_inum.h" 24 - #include "xfs_ag.h" 25 - #include "xfs_mount.h" 26 - #include "xfs_error.h" 27 - 28 - void 29 - cmn_err( 30 - const char *lvl, 31 - const char *fmt, 32 - ...) 33 - { 34 - struct va_format vaf; 35 - va_list args; 36 - 37 - va_start(args, fmt); 38 - vaf.fmt = fmt; 39 - vaf.va = &args; 40 - 41 - printk("%s%pV", lvl, &vaf); 42 - va_end(args); 43 - 44 - BUG_ON(strncmp(lvl, KERN_EMERG, strlen(KERN_EMERG)) == 0); 45 - } 46 - 47 - void 48 - xfs_fs_cmn_err( 49 - const char *lvl, 50 - struct xfs_mount *mp, 51 - const char *fmt, 52 - ...) 53 - { 54 - struct va_format vaf; 55 - va_list args; 56 - 57 - va_start(args, fmt); 58 - vaf.fmt = fmt; 59 - vaf.va = &args; 60 - 61 - printk("%sFilesystem %s: %pV", lvl, mp->m_fsname, &vaf); 62 - va_end(args); 63 - 64 - BUG_ON(strncmp(lvl, KERN_EMERG, strlen(KERN_EMERG)) == 0); 65 - } 66 - 67 - /* All callers to xfs_cmn_err use CE_ALERT, so don't bother testing lvl */ 68 - void 69 - xfs_cmn_err( 70 - int panic_tag, 71 - const char *lvl, 72 - struct xfs_mount *mp, 73 - const char *fmt, 74 - ...) 75 - { 76 - struct va_format vaf; 77 - va_list args; 78 - int do_panic = 0; 79 - 80 - if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) { 81 - printk(KERN_ALERT "XFS: Transforming an alert into a BUG."); 82 - do_panic = 1; 83 - } 84 - 85 - va_start(args, fmt); 86 - vaf.fmt = fmt; 87 - vaf.va = &args; 88 - 89 - printk(KERN_ALERT "Filesystem %s: %pV", mp->m_fsname, &vaf); 90 - va_end(args); 91 - 92 - BUG_ON(do_panic); 93 - } 94 - 95 - void 96 - assfail(char *expr, char *file, int line) 97 - { 98 - printk(KERN_CRIT "Assertion failed: %s, file: %s, line: %d\n", expr, 99 - file, line); 100 - BUG(); 101 - } 102 - 103 - void 104 - xfs_hex_dump(void *p, int length) 105 - { 106 - print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_ADDRESS, 16, 1, p, length, 1); 107 - }
···
-61
fs/xfs/support/debug.h
··· 1 - /* 2 - * Copyright (c) 2000-2005 Silicon Graphics, Inc. 3 - * All Rights Reserved. 4 - * 5 - * This program is free software; you can redistribute it and/or 6 - * modify it under the terms of the GNU General Public License as 7 - * published by the Free Software Foundation. 8 - * 9 - * This program is distributed in the hope that it would be useful, 10 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 - * GNU General Public License for more details. 13 - * 14 - * You should have received a copy of the GNU General Public License 15 - * along with this program; if not, write the Free Software Foundation, 16 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 - */ 18 - #ifndef __XFS_SUPPORT_DEBUG_H__ 19 - #define __XFS_SUPPORT_DEBUG_H__ 20 - 21 - #include <stdarg.h> 22 - 23 - struct xfs_mount; 24 - 25 - #define CE_DEBUG KERN_DEBUG 26 - #define CE_CONT KERN_INFO 27 - #define CE_NOTE KERN_NOTICE 28 - #define CE_WARN KERN_WARNING 29 - #define CE_ALERT KERN_ALERT 30 - #define CE_PANIC KERN_EMERG 31 - 32 - void cmn_err(const char *lvl, const char *fmt, ...) 33 - __attribute__ ((format (printf, 2, 3))); 34 - void xfs_fs_cmn_err( const char *lvl, struct xfs_mount *mp, 35 - const char *fmt, ...) __attribute__ ((format (printf, 3, 4))); 36 - void xfs_cmn_err( int panic_tag, const char *lvl, struct xfs_mount *mp, 37 - const char *fmt, ...) __attribute__ ((format (printf, 4, 5))); 38 - 39 - extern void assfail(char *expr, char *f, int l); 40 - 41 - #define ASSERT_ALWAYS(expr) \ 42 - (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) 43 - 44 - #ifndef DEBUG 45 - #define ASSERT(expr) ((void)0) 46 - 47 - #ifndef STATIC 48 - # define STATIC static noinline 49 - #endif 50 - 51 - #else /* DEBUG */ 52 - 53 - #define ASSERT(expr) \ 54 - (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__)) 55 - 56 - #ifndef STATIC 57 - # define STATIC noinline 58 - #endif 59 - 60 - #endif /* DEBUG */ 61 - #endif /* __XFS_SUPPORT_DEBUG_H__ */
···
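The two support/debug.[ch] files removed above carried the old Irix-style cmn_err()/CE_* logging shims plus the assfail()/ASSERT machinery. The hunks that follow convert every remaining caller to the new mount-aware xfs_* logging helpers. As a rough sketch, inferred only from the call sites visible in this diff (the real declarations live in the new logging code introduced by this series and may differ in detail), the interface looks something like:

	struct xfs_mount;

	extern void xfs_emerg(const struct xfs_mount *mp, const char *fmt, ...)
		__attribute__ ((format (printf, 2, 3)));
	extern void xfs_alert(const struct xfs_mount *mp, const char *fmt, ...)
		__attribute__ ((format (printf, 2, 3)));
	extern void xfs_alert_tag(const struct xfs_mount *mp, int tag,
			const char *fmt, ...)
		__attribute__ ((format (printf, 3, 4)));
	extern void xfs_warn(const struct xfs_mount *mp, const char *fmt, ...)
		__attribute__ ((format (printf, 2, 3)));
	extern void xfs_notice(const struct xfs_mount *mp, const char *fmt, ...)
		__attribute__ ((format (printf, 2, 3)));

The mount pointer may be NULL when no mount context is available (the xfs_error.c hunk below uses xfs_warn(NULL, ...) for the global error-injection table), and the remaining printk levels presumably follow the same pattern.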
+79 -75
fs/xfs/xfs_alloc.c
··· 147 */ 148 STATIC void 149 xfs_alloc_compute_aligned( 150 xfs_agblock_t foundbno, /* starting block in found extent */ 151 xfs_extlen_t foundlen, /* length in found extent */ 152 - xfs_extlen_t alignment, /* alignment for allocation */ 153 - xfs_extlen_t minlen, /* minimum length for allocation */ 154 xfs_agblock_t *resbno, /* result block number */ 155 xfs_extlen_t *reslen) /* result length */ 156 { ··· 157 xfs_extlen_t diff; 158 xfs_extlen_t len; 159 160 - if (alignment > 1 && foundlen >= minlen) { 161 - bno = roundup(foundbno, alignment); 162 diff = bno - foundbno; 163 len = diff >= foundlen ? 0 : foundlen - diff; 164 } else { ··· 463 return 0; 464 } 465 466 /* 467 * Allocation group level functions. 468 */ ··· 525 ASSERT(0); 526 /* NOTREACHED */ 527 } 528 - if (error) 529 return error; 530 - /* 531 - * If the allocation worked, need to change the agf structure 532 - * (and log it), and the superblock. 533 - */ 534 - if (args->agbno != NULLAGBLOCK) { 535 - xfs_agf_t *agf; /* allocation group freelist header */ 536 - long slen = (long)args->len; 537 538 - ASSERT(args->len >= args->minlen && args->len <= args->maxlen); 539 - ASSERT(!(args->wasfromfl) || !args->isfl); 540 - ASSERT(args->agbno % args->alignment == 0); 541 - if (!(args->wasfromfl)) { 542 543 - agf = XFS_BUF_TO_AGF(args->agbp); 544 - be32_add_cpu(&agf->agf_freeblks, -(args->len)); 545 - xfs_trans_agblocks_delta(args->tp, 546 - -((long)(args->len))); 547 - args->pag->pagf_freeblks -= args->len; 548 - ASSERT(be32_to_cpu(agf->agf_freeblks) <= 549 - be32_to_cpu(agf->agf_length)); 550 - xfs_alloc_log_agf(args->tp, args->agbp, 551 - XFS_AGF_FREEBLKS); 552 - /* 553 - * Search the busylist for these blocks and mark the 554 - * transaction as synchronous if blocks are found. This 555 - * avoids the need to block due to a synchronous log 556 - * force to ensure correct ordering as the synchronous 557 - * transaction will guarantee that for us. 558 - */ 559 - if (xfs_alloc_busy_search(args->mp, args->agno, 560 - args->agbno, args->len)) 561 - xfs_trans_set_sync(args->tp); 562 - } 563 - if (!args->isfl) 564 - xfs_trans_mod_sb(args->tp, 565 - args->wasdel ? XFS_TRANS_SB_RES_FDBLOCKS : 566 - XFS_TRANS_SB_FDBLOCKS, -slen); 567 - XFS_STATS_INC(xs_allocx); 568 - XFS_STATS_ADD(xs_allocb, args->len); 569 } 570 - return 0; 571 } 572 573 /* ··· 708 if (error) 709 goto error0; 710 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 711 - xfs_alloc_compute_aligned(*sbno, *slen, args->alignment, 712 - args->minlen, &bno, slena); 713 714 /* 715 * The good extent is closer than this one. 
··· 880 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i))) 881 goto error0; 882 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 883 - xfs_alloc_compute_aligned(ltbno, ltlen, args->alignment, 884 - args->minlen, &ltbnoa, &ltlena); 885 if (ltlena < args->minlen) 886 continue; 887 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); ··· 1001 if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i))) 1002 goto error0; 1003 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1004 - xfs_alloc_compute_aligned(ltbno, ltlen, args->alignment, 1005 - args->minlen, &ltbnoa, &ltlena); 1006 if (ltlena >= args->minlen) 1007 break; 1008 if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i))) ··· 1017 if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i))) 1018 goto error0; 1019 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1020 - xfs_alloc_compute_aligned(gtbno, gtlen, args->alignment, 1021 - args->minlen, &gtbnoa, &gtlena); 1022 if (gtlena >= args->minlen) 1023 break; 1024 if ((error = xfs_btree_increment(bno_cur_gt, 0, &i))) ··· 1197 * once aligned; if not, we search left for something better. 1198 * This can't happen in the second case above. 1199 */ 1200 - xfs_alloc_compute_aligned(fbno, flen, args->alignment, args->minlen, 1201 - &rbno, &rlen); 1202 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen); 1203 XFS_WANT_CORRUPTED_GOTO(rlen == 0 || 1204 (rlen <= flen && rbno + rlen <= fbno + flen), error0); ··· 1222 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1223 if (flen < bestrlen) 1224 break; 1225 - xfs_alloc_compute_aligned(fbno, flen, args->alignment, 1226 - args->minlen, &rbno, &rlen); 1227 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen); 1228 XFS_WANT_CORRUPTED_GOTO(rlen == 0 || 1229 (rlen <= flen && rbno + rlen <= fbno + flen), ··· 1401 xfs_mount_t *mp; /* mount point struct for filesystem */ 1402 xfs_agblock_t nbno; /* new starting block of freespace */ 1403 xfs_extlen_t nlen; /* new length of freespace */ 1404 1405 mp = tp->t_mountp; 1406 /* ··· 1600 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1601 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 1602 cnt_cur = NULL; 1603 /* 1604 * Update the freespace totals in the ag and superblock. 1605 */ 1606 - { 1607 - xfs_agf_t *agf; 1608 - xfs_perag_t *pag; /* per allocation group data */ 1609 1610 - pag = xfs_perag_get(mp, agno); 1611 - pag->pagf_freeblks += len; 1612 - xfs_perag_put(pag); 1613 - 1614 - agf = XFS_BUF_TO_AGF(agbp); 1615 - be32_add_cpu(&agf->agf_freeblks, len); 1616 - xfs_trans_agblocks_delta(tp, len); 1617 - XFS_WANT_CORRUPTED_GOTO( 1618 - be32_to_cpu(agf->agf_freeblks) <= 1619 - be32_to_cpu(agf->agf_length), 1620 - error0); 1621 - xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS); 1622 - if (!isfl) 1623 - xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len); 1624 - XFS_STATS_INC(xs_freex); 1625 - XFS_STATS_ADD(xs_freeb, len); 1626 - } 1627 1628 trace_xfs_free_extent(mp, agno, bno, len, isfl, haveleft, haveright); 1629
··· 147 */ 148 STATIC void 149 xfs_alloc_compute_aligned( 150 + xfs_alloc_arg_t *args, /* allocation argument structure */ 151 xfs_agblock_t foundbno, /* starting block in found extent */ 152 xfs_extlen_t foundlen, /* length in found extent */ 153 xfs_agblock_t *resbno, /* result block number */ 154 xfs_extlen_t *reslen) /* result length */ 155 { ··· 158 xfs_extlen_t diff; 159 xfs_extlen_t len; 160 161 + if (args->alignment > 1 && foundlen >= args->minlen) { 162 + bno = roundup(foundbno, args->alignment); 163 diff = bno - foundbno; 164 len = diff >= foundlen ? 0 : foundlen - diff; 165 } else { ··· 464 return 0; 465 } 466 467 + STATIC int 468 + xfs_alloc_update_counters( 469 + struct xfs_trans *tp, 470 + struct xfs_perag *pag, 471 + struct xfs_buf *agbp, 472 + long len) 473 + { 474 + struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp); 475 + 476 + pag->pagf_freeblks += len; 477 + be32_add_cpu(&agf->agf_freeblks, len); 478 + 479 + xfs_trans_agblocks_delta(tp, len); 480 + if (unlikely(be32_to_cpu(agf->agf_freeblks) > 481 + be32_to_cpu(agf->agf_length))) 482 + return EFSCORRUPTED; 483 + 484 + xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS); 485 + return 0; 486 + } 487 + 488 /* 489 * Allocation group level functions. 490 */ ··· 505 ASSERT(0); 506 /* NOTREACHED */ 507 } 508 + 509 + if (error || args->agbno == NULLAGBLOCK) 510 return error; 511 512 + ASSERT(args->len >= args->minlen); 513 + ASSERT(args->len <= args->maxlen); 514 + ASSERT(!args->wasfromfl || !args->isfl); 515 + ASSERT(args->agbno % args->alignment == 0); 516 517 + if (!args->wasfromfl) { 518 + error = xfs_alloc_update_counters(args->tp, args->pag, 519 + args->agbp, 520 + -((long)(args->len))); 521 + if (error) 522 + return error; 523 + 524 + /* 525 + * Search the busylist for these blocks and mark the 526 + * transaction as synchronous if blocks are found. This 527 + * avoids the need to block due to a synchronous log 528 + * force to ensure correct ordering as the synchronous 529 + * transaction will guarantee that for us. 530 + */ 531 + if (xfs_alloc_busy_search(args->mp, args->agno, 532 + args->agbno, args->len)) 533 + xfs_trans_set_sync(args->tp); 534 } 535 + 536 + if (!args->isfl) { 537 + xfs_trans_mod_sb(args->tp, args->wasdel ? 538 + XFS_TRANS_SB_RES_FDBLOCKS : 539 + XFS_TRANS_SB_FDBLOCKS, 540 + -((long)(args->len))); 541 + } 542 + 543 + XFS_STATS_INC(xs_allocx); 544 + XFS_STATS_ADD(xs_allocb, args->len); 545 + return error; 546 } 547 548 /* ··· 693 if (error) 694 goto error0; 695 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 696 + xfs_alloc_compute_aligned(args, *sbno, *slen, &bno, slena); 697 698 /* 699 * The good extent is closer than this one. 
··· 866 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i))) 867 goto error0; 868 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 869 + xfs_alloc_compute_aligned(args, ltbno, ltlen, 870 + &ltbnoa, &ltlena); 871 if (ltlena < args->minlen) 872 continue; 873 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen); ··· 987 if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i))) 988 goto error0; 989 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 990 + xfs_alloc_compute_aligned(args, ltbno, ltlen, 991 + &ltbnoa, &ltlena); 992 if (ltlena >= args->minlen) 993 break; 994 if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i))) ··· 1003 if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i))) 1004 goto error0; 1005 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1006 + xfs_alloc_compute_aligned(args, gtbno, gtlen, 1007 + &gtbnoa, &gtlena); 1008 if (gtlena >= args->minlen) 1009 break; 1010 if ((error = xfs_btree_increment(bno_cur_gt, 0, &i))) ··· 1183 * once aligned; if not, we search left for something better. 1184 * This can't happen in the second case above. 1185 */ 1186 + xfs_alloc_compute_aligned(args, fbno, flen, &rbno, &rlen); 1187 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen); 1188 XFS_WANT_CORRUPTED_GOTO(rlen == 0 || 1189 (rlen <= flen && rbno + rlen <= fbno + flen), error0); ··· 1209 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1210 if (flen < bestrlen) 1211 break; 1212 + xfs_alloc_compute_aligned(args, fbno, flen, 1213 + &rbno, &rlen); 1214 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen); 1215 XFS_WANT_CORRUPTED_GOTO(rlen == 0 || 1216 (rlen <= flen && rbno + rlen <= fbno + flen), ··· 1388 xfs_mount_t *mp; /* mount point struct for filesystem */ 1389 xfs_agblock_t nbno; /* new starting block of freespace */ 1390 xfs_extlen_t nlen; /* new length of freespace */ 1391 + xfs_perag_t *pag; /* per allocation group data */ 1392 1393 mp = tp->t_mountp; 1394 /* ··· 1586 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1587 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR); 1588 cnt_cur = NULL; 1589 + 1590 /* 1591 * Update the freespace totals in the ag and superblock. 1592 */ 1593 + pag = xfs_perag_get(mp, agno); 1594 + error = xfs_alloc_update_counters(tp, pag, agbp, len); 1595 + xfs_perag_put(pag); 1596 + if (error) 1597 + goto error0; 1598 1599 + if (!isfl) 1600 + xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len); 1601 + XFS_STATS_INC(xs_freex); 1602 + XFS_STATS_ADD(xs_freeb, len); 1603 1604 trace_xfs_free_extent(mp, agno, bno, len, isfl, haveleft, haveright); 1605
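The new xfs_alloc_update_counters() helper above takes a signed block-count delta, so one code path now handles both the allocation and the free side of the AGF accounting, and it reports an over-large free count as EFSCORRUPTED (positive, per XFS convention) instead of relying on the old ASSERT / XFS_WANT_CORRUPTED_GOTO checks. Condensed from the two call sites in the hunks above:

	/* allocating: subtract the extent length from the AGF free space */
	error = xfs_alloc_update_counters(args->tp, args->pag, args->agbp,
					  -((long)(args->len)));

	/* freeing: add it back, under the per-AG reference */
	pag = xfs_perag_get(mp, agno);
	error = xfs_alloc_update_counters(tp, pag, agbp, len);
	xfs_perag_put(pag);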
+15 -9
fs/xfs/xfs_bmap.c
··· 2365 */ 2366 if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN) 2367 ralen = MAXEXTLEN / mp->m_sb.sb_rextsize; 2368 /* 2369 * If it's an allocation to an empty file at offset 0, 2370 * pick an extent that will space things out in the rt area. ··· 3526 3527 if (unlikely(!(gotp->br_startblock) && (*lastxp != NULLEXTNUM) && 3528 !(XFS_IS_REALTIME_INODE(ip) && fork == XFS_DATA_FORK))) { 3529 - xfs_cmn_err(XFS_PTAG_FSBLOCK_ZERO, CE_ALERT, ip->i_mount, 3530 "Access to block zero in inode %llu " 3531 "start_block: %llx start_off: %llx " 3532 "blkcnt: %llx extent-state: %x lastx: %x\n", ··· 4200 num_recs = xfs_btree_get_numrecs(block); 4201 if (unlikely(i + num_recs > room)) { 4202 ASSERT(i + num_recs <= room); 4203 - xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 4204 "corrupt dinode %Lu, (btree extents).", 4205 (unsigned long long) ip->i_ino); 4206 - XFS_ERROR_REPORT("xfs_bmap_read_extents(1)", 4207 - XFS_ERRLEVEL_LOW, 4208 - ip->i_mount); 4209 goto error0; 4210 } 4211 XFS_WANT_CORRUPTED_GOTO( ··· 5778 else 5779 thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr); 5780 if (*thispa == *pp) { 5781 - cmn_err(CE_WARN, "%s: thispa(%d) == pp(%d) %Ld", 5782 __func__, j, i, 5783 (unsigned long long)be64_to_cpu(*thispa)); 5784 panic("%s: ptrs are equal in node\n", ··· 5943 return; 5944 5945 error0: 5946 - cmn_err(CE_WARN, "%s: at error0", __func__); 5947 if (bp_release) 5948 xfs_trans_brelse(NULL, bp); 5949 error_norelse: 5950 - cmn_err(CE_WARN, "%s: BAD after btree leaves for %d extents", 5951 __func__, i); 5952 panic("%s: CORRUPTED BTREE OR SOMETHING", __func__); 5953 return; ··· 6150 if (error) { 6151 /* something screwed, just bail */ 6152 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { 6153 - xfs_fs_cmn_err(CE_ALERT, ip->i_mount, 6154 "Failed delalloc mapping lookup ino %lld fsb %lld.", 6155 ip->i_ino, start_fsb); 6156 }
··· 2365 */ 2366 if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN) 2367 ralen = MAXEXTLEN / mp->m_sb.sb_rextsize; 2368 + 2369 + /* 2370 + * Lock out other modifications to the RT bitmap inode. 2371 + */ 2372 + xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL); 2373 + xfs_trans_ijoin_ref(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL); 2374 + 2375 /* 2376 * If it's an allocation to an empty file at offset 0, 2377 * pick an extent that will space things out in the rt area. ··· 3519 3520 if (unlikely(!(gotp->br_startblock) && (*lastxp != NULLEXTNUM) && 3521 !(XFS_IS_REALTIME_INODE(ip) && fork == XFS_DATA_FORK))) { 3522 + xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO, 3523 "Access to block zero in inode %llu " 3524 "start_block: %llx start_off: %llx " 3525 "blkcnt: %llx extent-state: %x lastx: %x\n", ··· 4193 num_recs = xfs_btree_get_numrecs(block); 4194 if (unlikely(i + num_recs > room)) { 4195 ASSERT(i + num_recs <= room); 4196 + xfs_warn(ip->i_mount, 4197 "corrupt dinode %Lu, (btree extents).", 4198 (unsigned long long) ip->i_ino); 4199 + XFS_CORRUPTION_ERROR("xfs_bmap_read_extents(1)", 4200 + XFS_ERRLEVEL_LOW, ip->i_mount, block); 4201 goto error0; 4202 } 4203 XFS_WANT_CORRUPTED_GOTO( ··· 5772 else 5773 thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr); 5774 if (*thispa == *pp) { 5775 + xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld", 5776 __func__, j, i, 5777 (unsigned long long)be64_to_cpu(*thispa)); 5778 panic("%s: ptrs are equal in node\n", ··· 5937 return; 5938 5939 error0: 5940 + xfs_warn(mp, "%s: at error0", __func__); 5941 if (bp_release) 5942 xfs_trans_brelse(NULL, bp); 5943 error_norelse: 5944 + xfs_warn(mp, "%s: BAD after btree leaves for %d extents", 5945 __func__, i); 5946 panic("%s: CORRUPTED BTREE OR SOMETHING", __func__); 5947 return; ··· 6144 if (error) { 6145 /* something screwed, just bail */ 6146 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { 6147 + xfs_alert(ip->i_mount, 6148 "Failed delalloc mapping lookup ino %lld fsb %lld.", 6149 ip->i_ino, start_fsb); 6150 }
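Besides the message conversions, the first xfs_bmap.c hunk adds real locking: the realtime bitmap inode's ILOCK is now taken and joined to the transaction before the rt allocation proceeds, locking out other modifications to the rt bitmap. Because the inode is joined with its lock flags, the transaction commit (or cancel) path drops the ILOCK rather than this function. The pattern, as used above:

	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin_ref(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);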
+8 -7
fs/xfs/xfs_buf_item.c
··· 130 orig = bip->bli_orig; 131 buffer = XFS_BUF_PTR(bp); 132 for (x = 0; x < XFS_BUF_COUNT(bp); x++) { 133 - if (orig[x] != buffer[x] && !btst(bip->bli_logged, x)) 134 - cmn_err(CE_PANIC, 135 - "xfs_buf_item_log_check bip %x buffer %x orig %x index %d", 136 - bip, bp, orig, x); 137 } 138 } 139 #else ··· 985 if (XFS_BUF_TARGET(bp) != lasttarg || 986 time_after(jiffies, (lasttime + 5*HZ))) { 987 lasttime = jiffies; 988 - cmn_err(CE_ALERT, "Device %s, XFS metadata write error" 989 - " block 0x%llx in %s", 990 XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)), 991 - (__uint64_t)XFS_BUF_ADDR(bp), mp->m_fsname); 992 } 993 lasttarg = XFS_BUF_TARGET(bp); 994
··· 130 orig = bip->bli_orig; 131 buffer = XFS_BUF_PTR(bp); 132 for (x = 0; x < XFS_BUF_COUNT(bp); x++) { 133 + if (orig[x] != buffer[x] && !btst(bip->bli_logged, x)) { 134 + xfs_emerg(bp->b_mount, 135 + "%s: bip %x buffer %x orig %x index %d", 136 + __func__, bip, bp, orig, x); 137 + ASSERT(0); 138 + } 139 } 140 } 141 #else ··· 983 if (XFS_BUF_TARGET(bp) != lasttarg || 984 time_after(jiffies, (lasttime + 5*HZ))) { 985 lasttime = jiffies; 986 + xfs_alert(mp, "Device %s: metadata write error block 0x%llx", 987 XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)), 988 + (__uint64_t)XFS_BUF_ADDR(bp)); 989 } 990 lasttarg = XFS_BUF_TARGET(bp); 991
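One behavioural note on the xfs_buf_item_log_check() hunk: the old cmn_err(CE_PANIC, ...) always ended in BUG() (the removed cmn_err() above promotes any KERN_EMERG message to a BUG), while the replacement logs through xfs_emerg() and then ASSERT(0)s, which only trips on DEBUG kernels, assuming ASSERT keeps the debug-only semantics it had in the removed debug.h. Since this check already sits under a debug-only #ifdef (note the #else immediately after it), the effective behaviour should be unchanged. Side by side:

	/* before: message promoted to an unconditional BUG() */
	cmn_err(CE_PANIC,
		"xfs_buf_item_log_check bip %x buffer %x orig %x index %d",
		bip, bp, orig, x);

	/* after: emergency-level message, BUG only on DEBUG builds */
	xfs_emerg(bp->b_mount, "%s: bip %x buffer %x orig %x index %d",
		  __func__, bip, bp, orig, x);
	ASSERT(0);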
+4 -5
fs/xfs/xfs_da_btree.c
··· 1995 error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED); 1996 if (unlikely(error == EFSCORRUPTED)) { 1997 if (xfs_error_level >= XFS_ERRLEVEL_LOW) { 1998 - cmn_err(CE_ALERT, "xfs_da_do_buf: bno %lld\n", 1999 - (long long)bno); 2000 - cmn_err(CE_ALERT, "dir: inode %lld\n", 2001 (long long)dp->i_ino); 2002 for (i = 0; i < nmap; i++) { 2003 - cmn_err(CE_ALERT, 2004 - "[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d\n", 2005 i, 2006 (long long)mapp[i].br_startoff, 2007 (long long)mapp[i].br_startblock,
··· 1995 error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED); 1996 if (unlikely(error == EFSCORRUPTED)) { 1997 if (xfs_error_level >= XFS_ERRLEVEL_LOW) { 1998 + xfs_alert(mp, "%s: bno %lld dir: inode %lld", 1999 + __func__, (long long)bno, 2000 (long long)dp->i_ino); 2001 for (i = 0; i < nmap; i++) { 2002 + xfs_alert(mp, 2003 + "[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d", 2004 i, 2005 (long long)mapp[i].br_startoff, 2006 (long long)mapp[i].br_startblock,
+2 -2
fs/xfs/xfs_dfrag.c
··· 270 /* check inode formats now that data is flushed */ 271 error = xfs_swap_extents_check_format(ip, tip); 272 if (error) { 273 - xfs_fs_cmn_err(CE_NOTE, mp, 274 "%s: inode 0x%llx format is incompatible for exchanging.", 275 - __FILE__, ip->i_ino); 276 goto out_unlock; 277 } 278
··· 270 /* check inode formats now that data is flushed */ 271 error = xfs_swap_extents_check_format(ip, tip); 272 if (error) { 273 + xfs_notice(mp, 274 "%s: inode 0x%llx format is incompatible for exchanging.", 275 + __func__, ip->i_ino); 276 goto out_unlock; 277 } 278
+1 -1
fs/xfs/xfs_dir2.c
··· 159 XFS_AGINO_TO_INO(mp, agno, agino) == ino; 160 if (unlikely(XFS_TEST_ERROR(!ino_ok, mp, XFS_ERRTAG_DIR_INO_VALIDATE, 161 XFS_RANDOM_DIR_INO_VALIDATE))) { 162 - xfs_fs_cmn_err(CE_WARN, mp, "Invalid inode number 0x%Lx", 163 (unsigned long long) ino); 164 XFS_ERROR_REPORT("xfs_dir_ino_validate", XFS_ERRLEVEL_LOW, mp); 165 return XFS_ERROR(EFSCORRUPTED);
··· 159 XFS_AGINO_TO_INO(mp, agno, agino) == ino; 160 if (unlikely(XFS_TEST_ERROR(!ino_ok, mp, XFS_ERRTAG_DIR_INO_VALIDATE, 161 XFS_RANDOM_DIR_INO_VALIDATE))) { 162 + xfs_warn(mp, "Invalid inode number 0x%Lx", 163 (unsigned long long) ino); 164 XFS_ERROR_REPORT("xfs_dir_ino_validate", XFS_ERRLEVEL_LOW, mp); 165 return XFS_ERROR(EFSCORRUPTED);
+10 -15
fs/xfs/xfs_dir2_node.c
··· 899 if(blk2->index < 0) { 900 state->inleaf = 1; 901 blk2->index = 0; 902 - cmn_err(CE_ALERT, 903 - "xfs_dir2_leafn_rebalance: picked the wrong leaf? reverting original leaf: " 904 - "blk1->index %d\n", 905 - blk1->index); 906 } 907 } 908 ··· 1640 } 1641 1642 if (unlikely(xfs_dir2_db_to_fdb(mp, dbno) != fbno)) { 1643 - cmn_err(CE_ALERT, 1644 - "xfs_dir2_node_addname_int: dir ino " 1645 - "%llu needed freesp block %lld for\n" 1646 - " data block %lld, got %lld\n" 1647 - " ifbno %llu lastfbno %d\n", 1648 - (unsigned long long)dp->i_ino, 1649 (long long)xfs_dir2_db_to_fdb(mp, dbno), 1650 (long long)dbno, (long long)fbno, 1651 (unsigned long long)ifbno, lastfbno); 1652 if (fblk) { 1653 - cmn_err(CE_ALERT, 1654 - " fblk 0x%p blkno %llu " 1655 - "index %d magic 0x%x\n", 1656 fblk, 1657 (unsigned long long)fblk->blkno, 1658 fblk->index, 1659 fblk->magic); 1660 } else { 1661 - cmn_err(CE_ALERT, 1662 - " ... fblk is NULL\n"); 1663 } 1664 XFS_ERROR_REPORT("xfs_dir2_node_addname_int", 1665 XFS_ERRLEVEL_LOW, mp);
··· 899 if(blk2->index < 0) { 900 state->inleaf = 1; 901 blk2->index = 0; 902 + xfs_alert(args->dp->i_mount, 903 + "%s: picked the wrong leaf? reverting original leaf: blk1->index %d\n", 904 + __func__, blk1->index); 905 } 906 } 907 ··· 1641 } 1642 1643 if (unlikely(xfs_dir2_db_to_fdb(mp, dbno) != fbno)) { 1644 + xfs_alert(mp, 1645 + "%s: dir ino " "%llu needed freesp block %lld for\n" 1646 + " data block %lld, got %lld ifbno %llu lastfbno %d", 1647 + __func__, (unsigned long long)dp->i_ino, 1648 (long long)xfs_dir2_db_to_fdb(mp, dbno), 1649 (long long)dbno, (long long)fbno, 1650 (unsigned long long)ifbno, lastfbno); 1651 if (fblk) { 1652 + xfs_alert(mp, 1653 + " fblk 0x%p blkno %llu index %d magic 0x%x", 1654 fblk, 1655 (unsigned long long)fblk->blkno, 1656 fblk->index, 1657 fblk->magic); 1658 } else { 1659 + xfs_alert(mp, " ... fblk is NULL"); 1660 } 1661 XFS_ERROR_REPORT("xfs_dir2_node_addname_int", 1662 XFS_ERRLEVEL_LOW, mp);
+10 -12
fs/xfs/xfs_error.c
··· 48 break; 49 if (e != xfs_etrap[i]) 50 continue; 51 - cmn_err(CE_NOTE, "xfs_error_trap: error %d", e); 52 BUG(); 53 break; 54 } ··· 74 75 for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) { 76 if (xfs_etest[i] == error_tag && xfs_etest_fsid[i] == fsid) { 77 - cmn_err(CE_WARN, 78 "Injecting error (%s) at file %s, line %d, on filesystem \"%s\"", 79 expression, file, line, xfs_etest_fsname[i]); 80 return 1; ··· 95 96 for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) { 97 if (xfs_etest_fsid[i] == fsid && xfs_etest[i] == error_tag) { 98 - cmn_err(CE_WARN, "XFS error tag #%d on", error_tag); 99 return 0; 100 } 101 } 102 103 for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) { 104 if (xfs_etest[i] == 0) { 105 - cmn_err(CE_WARN, "Turned on XFS error tag #%d", 106 error_tag); 107 xfs_etest[i] = error_tag; 108 xfs_etest_fsid[i] = fsid; ··· 114 } 115 } 116 117 - cmn_err(CE_WARN, "error tag overflow, too many turned on"); 118 119 return 1; 120 } ··· 133 if ((fsid == 0LL || xfs_etest_fsid[i] == fsid) && 134 xfs_etest[i] != 0) { 135 cleared = 1; 136 - cmn_err(CE_WARN, "Clearing XFS error tag #%d", 137 xfs_etest[i]); 138 xfs_etest[i] = 0; 139 xfs_etest_fsid[i] = 0LL; ··· 144 } 145 146 if (loud || cleared) 147 - cmn_err(CE_WARN, 148 - "Cleared all XFS error tags for filesystem \"%s\"", 149 - mp->m_fsname); 150 151 return 0; 152 } ··· 160 inst_t *ra) 161 { 162 if (level <= xfs_error_level) { 163 - xfs_cmn_err(XFS_PTAG_ERROR_REPORT, 164 - CE_ALERT, mp, 165 - "XFS internal error %s at line %d of file %s. Caller 0x%p\n", 166 tag, linenum, filename, ra); 167 168 xfs_stack_trace(); ··· 181 if (level <= xfs_error_level) 182 xfs_hex_dump(p, 16); 183 xfs_error_report(tag, level, mp, filename, linenum, ra); 184 }
··· 48 break; 49 if (e != xfs_etrap[i]) 50 continue; 51 + xfs_notice(NULL, "%s: error %d", __func__, e); 52 BUG(); 53 break; 54 } ··· 74 75 for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) { 76 if (xfs_etest[i] == error_tag && xfs_etest_fsid[i] == fsid) { 77 + xfs_warn(NULL, 78 "Injecting error (%s) at file %s, line %d, on filesystem \"%s\"", 79 expression, file, line, xfs_etest_fsname[i]); 80 return 1; ··· 95 96 for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) { 97 if (xfs_etest_fsid[i] == fsid && xfs_etest[i] == error_tag) { 98 + xfs_warn(mp, "error tag #%d on", error_tag); 99 return 0; 100 } 101 } 102 103 for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) { 104 if (xfs_etest[i] == 0) { 105 + xfs_warn(mp, "Turned on XFS error tag #%d", 106 error_tag); 107 xfs_etest[i] = error_tag; 108 xfs_etest_fsid[i] = fsid; ··· 114 } 115 } 116 117 + xfs_warn(mp, "error tag overflow, too many turned on"); 118 119 return 1; 120 } ··· 133 if ((fsid == 0LL || xfs_etest_fsid[i] == fsid) && 134 xfs_etest[i] != 0) { 135 cleared = 1; 136 + xfs_warn(mp, "Clearing XFS error tag #%d", 137 xfs_etest[i]); 138 xfs_etest[i] = 0; 139 xfs_etest_fsid[i] = 0LL; ··· 144 } 145 146 if (loud || cleared) 147 + xfs_warn(mp, "Cleared all XFS error tags for filesystem"); 148 149 return 0; 150 } ··· 162 inst_t *ra) 163 { 164 if (level <= xfs_error_level) { 165 + xfs_alert_tag(mp, XFS_PTAG_ERROR_REPORT, 166 + "Internal error %s at line %d of file %s. Caller 0x%p\n", 167 tag, linenum, filename, ra); 168 169 xfs_stack_trace(); ··· 184 if (level <= xfs_error_level) 185 xfs_hex_dump(p, 16); 186 xfs_error_report(tag, level, mp, filename, linenum, ra); 187 + xfs_alert(mp, "Corruption detected. Unmount and run xfs_repair"); 188 }
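The xfs_fs_repair_cmn_err() wrapper (removed from xfs_error.h just below) used to append "Unmount and run xfs_repair." at every call site; the last hunk above moves that advisory into the corruption-report helper itself, so converted callers (see the xfs_inode.c hunks further down) just pair a plain xfs_warn() with the usual corruption report. The resulting call-site pattern, taken from this diff:

	xfs_warn(ip->i_mount, "corrupt inode %Lu (btree).",
		 (unsigned long long) ip->i_ino);
	XFS_CORRUPTION_ERROR("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
			     ip->i_mount, dip);
	/*
	 * xfs_corruption_error() now adds:
	 *   "Corruption detected. Unmount and run xfs_repair"
	 */
	return XFS_ERROR(EFSCORRUPTED);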
+2 -17
fs/xfs/xfs_error.h
··· 145 #endif /* DEBUG */ 146 147 /* 148 - * XFS panic tags -- allow a call to xfs_cmn_err() be turned into 149 - * a panic by setting xfs_panic_mask in a 150 - * sysctl. update xfs_max[XFS_PARAM] if 151 - * more are added. 152 */ 153 #define XFS_NO_PTAG 0 154 #define XFS_PTAG_IFLUSH 0x00000001 ··· 157 #define XFS_PTAG_SHUTDOWN_IOERROR 0x00000020 158 #define XFS_PTAG_SHUTDOWN_LOGERROR 0x00000040 159 #define XFS_PTAG_FSBLOCK_ZERO 0x00000080 160 - 161 - struct xfs_mount; 162 - 163 - extern void xfs_hex_dump(void *p, int length); 164 - 165 - #define xfs_fs_repair_cmn_err(level, mp, fmt, args...) \ 166 - xfs_fs_cmn_err(level, mp, fmt " Unmount and run xfs_repair.", ## args) 167 - 168 - #define xfs_fs_mount_cmn_err(f, fmt, args...) \ 169 - do { \ 170 - if (!(f & XFS_MFSI_QUIET)) \ 171 - cmn_err(CE_WARN, "XFS: " fmt, ## args); \ 172 - } while (0) 173 174 #endif /* __XFS_ERROR_H__ */
··· 145 #endif /* DEBUG */ 146 147 /* 148 + * XFS panic tags -- allow a call to xfs_alert_tag() be turned into 149 + * a panic by setting xfs_panic_mask in a sysctl. 150 */ 151 #define XFS_NO_PTAG 0 152 #define XFS_PTAG_IFLUSH 0x00000001 ··· 159 #define XFS_PTAG_SHUTDOWN_IOERROR 0x00000020 160 #define XFS_PTAG_SHUTDOWN_LOGERROR 0x00000040 161 #define XFS_PTAG_FSBLOCK_ZERO 0x00000080 162 163 #endif /* __XFS_ERROR_H__ */
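The rewritten comment above says a panic tag now turns an xfs_alert_tag() call into a panic via the xfs_panic_mask sysctl. The removed xfs_cmn_err() (support/debug.c at the top of this diff) implemented that by latching a do_panic flag and BUG_ON()ing after printing; presumably the new xfs_alert_tag() keeps the same semantics, roughly along these lines (sketch only, the real implementation is in the new logging code and is not part of the hunks shown here):

	void
	xfs_alert_tag(
		const struct xfs_mount	*mp,
		int			panic_tag,
		const char		*fmt,
		...)
	{
		struct va_format	vaf;
		va_list			args;
		int			do_panic = 0;

		if (xfs_panic_mask && (xfs_panic_mask & panic_tag)) {
			xfs_alert(mp, "Transforming an alert into a BUG.");
			do_panic = 1;
		}

		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		printk(KERN_ALERT "%pV", &vaf);	/* prefix is illustrative */
		va_end(args);

		BUG_ON(do_panic);		/* same effect as the old code */
	}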
+3 -3
fs/xfs/xfs_fsops.c
··· 385 XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)), 386 XFS_FSS_TO_BB(mp, 1), 0, &bp); 387 if (error) { 388 - xfs_fs_cmn_err(CE_WARN, mp, 389 - "error %d reading secondary superblock for ag %d", 390 error, agno); 391 break; 392 } ··· 399 if (!(error = xfs_bwrite(mp, bp))) { 400 continue; 401 } else { 402 - xfs_fs_cmn_err(CE_WARN, mp, 403 "write error %d updating secondary superblock for ag %d", 404 error, agno); 405 break; /* no point in continuing */
··· 385 XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)), 386 XFS_FSS_TO_BB(mp, 1), 0, &bp); 387 if (error) { 388 + xfs_warn(mp, 389 + "error %d reading secondary superblock for ag %d", 390 error, agno); 391 break; 392 } ··· 399 if (!(error = xfs_bwrite(mp, bp))) { 400 continue; 401 } else { 402 + xfs_warn(mp, 403 "write error %d updating secondary superblock for ag %d", 404 error, agno); 405 break; /* no point in continuing */
+34 -48
fs/xfs/xfs_ialloc.c
··· 1055 */ 1056 agno = XFS_INO_TO_AGNO(mp, inode); 1057 if (agno >= mp->m_sb.sb_agcount) { 1058 - cmn_err(CE_WARN, 1059 - "xfs_difree: agno >= mp->m_sb.sb_agcount (%d >= %d) on %s. Returning EINVAL.", 1060 - agno, mp->m_sb.sb_agcount, mp->m_fsname); 1061 ASSERT(0); 1062 return XFS_ERROR(EINVAL); 1063 } 1064 agino = XFS_INO_TO_AGINO(mp, inode); 1065 if (inode != XFS_AGINO_TO_INO(mp, agno, agino)) { 1066 - cmn_err(CE_WARN, 1067 - "xfs_difree: inode != XFS_AGINO_TO_INO() " 1068 - "(%llu != %llu) on %s. Returning EINVAL.", 1069 - (unsigned long long)inode, 1070 - (unsigned long long)XFS_AGINO_TO_INO(mp, agno, agino), 1071 - mp->m_fsname); 1072 ASSERT(0); 1073 return XFS_ERROR(EINVAL); 1074 } 1075 agbno = XFS_AGINO_TO_AGBNO(mp, agino); 1076 if (agbno >= mp->m_sb.sb_agblocks) { 1077 - cmn_err(CE_WARN, 1078 - "xfs_difree: agbno >= mp->m_sb.sb_agblocks (%d >= %d) on %s. Returning EINVAL.", 1079 - agbno, mp->m_sb.sb_agblocks, mp->m_fsname); 1080 ASSERT(0); 1081 return XFS_ERROR(EINVAL); 1082 } ··· 1080 */ 1081 error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); 1082 if (error) { 1083 - cmn_err(CE_WARN, 1084 - "xfs_difree: xfs_ialloc_read_agi() returned an error %d on %s. Returning error.", 1085 - error, mp->m_fsname); 1086 return error; 1087 } 1088 agi = XFS_BUF_TO_AGI(agbp); ··· 1100 * Look for the entry describing this inode. 1101 */ 1102 if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) { 1103 - cmn_err(CE_WARN, 1104 - "xfs_difree: xfs_inobt_lookup returned() an error %d on %s. Returning error.", 1105 - error, mp->m_fsname); 1106 goto error0; 1107 } 1108 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1109 error = xfs_inobt_get_rec(cur, &rec, &i); 1110 if (error) { 1111 - cmn_err(CE_WARN, 1112 - "xfs_difree: xfs_inobt_get_rec() returned an error %d on %s. 
Returning error.", 1113 - error, mp->m_fsname); 1114 goto error0; 1115 } 1116 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); ··· 1149 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1)); 1150 1151 if ((error = xfs_btree_delete(cur, &i))) { 1152 - cmn_err(CE_WARN, "xfs_difree: xfs_btree_delete returned an error %d on %s.\n", 1153 - error, mp->m_fsname); 1154 goto error0; 1155 } 1156 ··· 1162 1163 error = xfs_inobt_update(cur, &rec); 1164 if (error) { 1165 - cmn_err(CE_WARN, 1166 - "xfs_difree: xfs_inobt_update returned an error %d on %s.", 1167 - error, mp->m_fsname); 1168 goto error0; 1169 } 1170 ··· 1209 1210 error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); 1211 if (error) { 1212 - xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " 1213 - "xfs_ialloc_read_agi() returned " 1214 - "error %d, agno %d", 1215 - error, agno); 1216 return error; 1217 } 1218 ··· 1289 if (flags & XFS_IGET_UNTRUSTED) 1290 return XFS_ERROR(EINVAL); 1291 if (agno >= mp->m_sb.sb_agcount) { 1292 - xfs_fs_cmn_err(CE_ALERT, mp, 1293 - "xfs_imap: agno (%d) >= " 1294 - "mp->m_sb.sb_agcount (%d)", 1295 - agno, mp->m_sb.sb_agcount); 1296 } 1297 if (agbno >= mp->m_sb.sb_agblocks) { 1298 - xfs_fs_cmn_err(CE_ALERT, mp, 1299 - "xfs_imap: agbno (0x%llx) >= " 1300 - "mp->m_sb.sb_agblocks (0x%lx)", 1301 - (unsigned long long) agbno, 1302 - (unsigned long) mp->m_sb.sb_agblocks); 1303 } 1304 if (ino != XFS_AGINO_TO_INO(mp, agno, agino)) { 1305 - xfs_fs_cmn_err(CE_ALERT, mp, 1306 - "xfs_imap: ino (0x%llx) != " 1307 - "XFS_AGINO_TO_INO(mp, agno, agino) " 1308 - "(0x%llx)", 1309 - ino, XFS_AGINO_TO_INO(mp, agno, agino)); 1310 } 1311 xfs_stack_trace(); 1312 #endif /* DEBUG */ ··· 1375 */ 1376 if ((imap->im_blkno + imap->im_len) > 1377 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) { 1378 - xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: " 1379 - "(imap->im_blkno (0x%llx) + imap->im_len (0x%llx)) > " 1380 - " XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) (0x%llx)", 1381 - (unsigned long long) imap->im_blkno, 1382 (unsigned long long) imap->im_len, 1383 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)); 1384 return XFS_ERROR(EINVAL);
··· 1055 */ 1056 agno = XFS_INO_TO_AGNO(mp, inode); 1057 if (agno >= mp->m_sb.sb_agcount) { 1058 + xfs_warn(mp, "%s: agno >= mp->m_sb.sb_agcount (%d >= %d).", 1059 + __func__, agno, mp->m_sb.sb_agcount); 1060 ASSERT(0); 1061 return XFS_ERROR(EINVAL); 1062 } 1063 agino = XFS_INO_TO_AGINO(mp, inode); 1064 if (inode != XFS_AGINO_TO_INO(mp, agno, agino)) { 1065 + xfs_warn(mp, "%s: inode != XFS_AGINO_TO_INO() (%llu != %llu).", 1066 + __func__, (unsigned long long)inode, 1067 + (unsigned long long)XFS_AGINO_TO_INO(mp, agno, agino)); 1068 ASSERT(0); 1069 return XFS_ERROR(EINVAL); 1070 } 1071 agbno = XFS_AGINO_TO_AGBNO(mp, agino); 1072 if (agbno >= mp->m_sb.sb_agblocks) { 1073 + xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).", 1074 + __func__, agbno, mp->m_sb.sb_agblocks); 1075 ASSERT(0); 1076 return XFS_ERROR(EINVAL); 1077 } ··· 1085 */ 1086 error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); 1087 if (error) { 1088 + xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.", 1089 + __func__, error); 1090 return error; 1091 } 1092 agi = XFS_BUF_TO_AGI(agbp); ··· 1106 * Look for the entry describing this inode. 1107 */ 1108 if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) { 1109 + xfs_warn(mp, "%s: xfs_inobt_lookup() returned error %d.", 1110 + __func__, error); 1111 goto error0; 1112 } 1113 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); 1114 error = xfs_inobt_get_rec(cur, &rec, &i); 1115 if (error) { 1116 + xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.", 1117 + __func__, error); 1118 goto error0; 1119 } 1120 XFS_WANT_CORRUPTED_GOTO(i == 1, error0); ··· 1157 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1)); 1158 1159 if ((error = xfs_btree_delete(cur, &i))) { 1160 + xfs_warn(mp, "%s: xfs_btree_delete returned error %d.", 1161 + __func__, error); 1162 goto error0; 1163 } 1164 ··· 1170 1171 error = xfs_inobt_update(cur, &rec); 1172 if (error) { 1173 + xfs_warn(mp, "%s: xfs_inobt_update returned error %d.", 1174 + __func__, error); 1175 goto error0; 1176 } 1177 ··· 1218 1219 error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); 1220 if (error) { 1221 + xfs_alert(mp, 1222 + "%s: xfs_ialloc_read_agi() returned error %d, agno %d", 1223 + __func__, error, agno); 1224 return error; 1225 } 1226 ··· 1299 if (flags & XFS_IGET_UNTRUSTED) 1300 return XFS_ERROR(EINVAL); 1301 if (agno >= mp->m_sb.sb_agcount) { 1302 + xfs_alert(mp, 1303 + "%s: agno (%d) >= mp->m_sb.sb_agcount (%d)", 1304 + __func__, agno, mp->m_sb.sb_agcount); 1305 } 1306 if (agbno >= mp->m_sb.sb_agblocks) { 1307 + xfs_alert(mp, 1308 + "%s: agbno (0x%llx) >= mp->m_sb.sb_agblocks (0x%lx)", 1309 + __func__, (unsigned long long)agbno, 1310 + (unsigned long)mp->m_sb.sb_agblocks); 1311 } 1312 if (ino != XFS_AGINO_TO_INO(mp, agno, agino)) { 1313 + xfs_alert(mp, 1314 + "%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)", 1315 + __func__, ino, 1316 + XFS_AGINO_TO_INO(mp, agno, agino)); 1317 } 1318 xfs_stack_trace(); 1319 #endif /* DEBUG */ ··· 1388 */ 1389 if ((imap->im_blkno + imap->im_len) > 1390 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) { 1391 + xfs_alert(mp, 1392 + "%s: (im_blkno (0x%llx) + im_len (0x%llx)) > sb_dblocks (0x%llx)", 1393 + __func__, (unsigned long long) imap->im_blkno, 1394 (unsigned long long) imap->im_len, 1395 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)); 1396 return XFS_ERROR(EINVAL);
+58 -71
fs/xfs/xfs_inode.c
··· 110 dip = (xfs_dinode_t *)xfs_buf_offset(bp, 111 i * mp->m_sb.sb_inodesize); 112 if (!dip->di_next_unlinked) { 113 - xfs_fs_cmn_err(CE_ALERT, mp, 114 - "Detected a bogus zero next_unlinked field in incore inode buffer 0x%p. About to pop an ASSERT.", 115 bp); 116 ASSERT(dip->di_next_unlinked); 117 } ··· 142 (int)imap->im_len, buf_flags, &bp); 143 if (error) { 144 if (error != EAGAIN) { 145 - cmn_err(CE_WARN, 146 - "xfs_imap_to_bp: xfs_trans_read_buf()returned " 147 - "an error %d on %s. Returning error.", 148 - error, mp->m_fsname); 149 } else { 150 ASSERT(buf_flags & XBF_TRYLOCK); 151 } ··· 179 XFS_CORRUPTION_ERROR("xfs_imap_to_bp", 180 XFS_ERRLEVEL_HIGH, mp, dip); 181 #ifdef DEBUG 182 - cmn_err(CE_PANIC, 183 - "Device %s - bad inode magic/vsn " 184 - "daddr %lld #%d (magic=%x)", 185 - XFS_BUFTARG_NAME(mp->m_ddev_targp), 186 (unsigned long long)imap->im_blkno, i, 187 be16_to_cpu(dip->di_magic)); 188 #endif 189 xfs_trans_brelse(tp, bp); 190 return XFS_ERROR(EFSCORRUPTED); ··· 315 if (unlikely(be32_to_cpu(dip->di_nextents) + 316 be16_to_cpu(dip->di_anextents) > 317 be64_to_cpu(dip->di_nblocks))) { 318 - xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 319 "corrupt dinode %Lu, extent total = %d, nblocks = %Lu.", 320 (unsigned long long)ip->i_ino, 321 (int)(be32_to_cpu(dip->di_nextents) + ··· 328 } 329 330 if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) { 331 - xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 332 - "corrupt dinode %Lu, forkoff = 0x%x.", 333 (unsigned long long)ip->i_ino, 334 dip->di_forkoff); 335 XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW, ··· 338 339 if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) && 340 !ip->i_mount->m_rtdev_targp)) { 341 - xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 342 "corrupt dinode %Lu, has realtime flag set.", 343 ip->i_ino); 344 XFS_CORRUPTION_ERROR("xfs_iformat(realtime)", ··· 370 * no local regular files yet 371 */ 372 if (unlikely((be16_to_cpu(dip->di_mode) & S_IFMT) == S_IFREG)) { 373 - xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 374 - "corrupt inode %Lu " 375 - "(local format for regular file).", 376 (unsigned long long) ip->i_ino); 377 XFS_CORRUPTION_ERROR("xfs_iformat(4)", 378 XFS_ERRLEVEL_LOW, ··· 381 382 di_size = be64_to_cpu(dip->di_size); 383 if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) { 384 - xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 385 - "corrupt inode %Lu " 386 - "(bad size %Ld for local inode).", 387 (unsigned long long) ip->i_ino, 388 (long long) di_size); 389 XFS_CORRUPTION_ERROR("xfs_iformat(5)", ··· 426 size = be16_to_cpu(atp->hdr.totsize); 427 428 if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) { 429 - xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 430 - "corrupt inode %Lu " 431 - "(bad attr fork size %Ld).", 432 (unsigned long long) ip->i_ino, 433 (long long) size); 434 XFS_CORRUPTION_ERROR("xfs_iformat(8)", ··· 482 * kmem_alloc() or memcpy() below. 483 */ 484 if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) { 485 - xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 486 - "corrupt inode %Lu " 487 - "(bad size %d for local fork, size = %d).", 488 (unsigned long long) ip->i_ino, size, 489 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)); 490 XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW, ··· 540 * kmem_alloc() or memcpy() below. 
541 */ 542 if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) { 543 - xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 544 - "corrupt inode %Lu ((a)extents = %d).", 545 (unsigned long long) ip->i_ino, nex); 546 XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW, 547 ip->i_mount, dip); ··· 615 || XFS_BMDR_SPACE_CALC(nrecs) > 616 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork) 617 || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) { 618 - xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount, 619 - "corrupt inode %Lu (btree).", 620 (unsigned long long) ip->i_ino); 621 - XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW, 622 - ip->i_mount); 623 return XFS_ERROR(EFSCORRUPTED); 624 } 625 ··· 804 */ 805 if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC) { 806 #ifdef DEBUG 807 - xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: " 808 - "dip->di_magic (0x%x) != " 809 - "XFS_DINODE_MAGIC (0x%x)", 810 - be16_to_cpu(dip->di_magic), 811 - XFS_DINODE_MAGIC); 812 #endif /* DEBUG */ 813 error = XFS_ERROR(EINVAL); 814 goto out_brelse; ··· 824 error = xfs_iformat(ip, dip); 825 if (error) { 826 #ifdef DEBUG 827 - xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: " 828 - "xfs_iformat() returned error %d", 829 - error); 830 #endif /* DEBUG */ 831 goto out_brelse; 832 } ··· 1004 * This is because we're setting fields here we need 1005 * to prevent others from looking at until we're done. 1006 */ 1007 - error = xfs_trans_iget(tp->t_mountp, tp, ino, 1008 - XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip); 1009 if (error) 1010 return error; 1011 ASSERT(ip != NULL); ··· 1154 /* 1155 * Log the new values stuffed into the inode. 1156 */ 1157 xfs_trans_log_inode(tp, ip, flags); 1158 1159 /* now that we have an i_mode we can setup inode ops and unlock */ ··· 1809 */ 1810 error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK); 1811 if (error) { 1812 - cmn_err(CE_WARN, 1813 - "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.", 1814 - error, mp->m_fsname); 1815 return error; 1816 } 1817 next_agino = be32_to_cpu(dip->di_next_unlinked); ··· 1855 error = xfs_inotobp(mp, tp, next_ino, &last_dip, 1856 &last_ibp, &last_offset, 0); 1857 if (error) { 1858 - cmn_err(CE_WARN, 1859 - "xfs_iunlink_remove: xfs_inotobp() returned an error %d on %s. Returning error.", 1860 - error, mp->m_fsname); 1861 return error; 1862 } 1863 next_agino = be32_to_cpu(last_dip->di_next_unlinked); ··· 1870 */ 1871 error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK); 1872 if (error) { 1873 - cmn_err(CE_WARN, 1874 - "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. 
Returning error.", 1875 - error, mp->m_fsname); 1876 return error; 1877 } 1878 next_agino = be32_to_cpu(dip->di_next_unlinked); ··· 2926 2927 if (XFS_TEST_ERROR(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC, 2928 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) { 2929 - xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 2930 - "xfs_iflush: Bad inode %Lu magic number 0x%x, ptr 0x%p", 2931 - ip->i_ino, be16_to_cpu(dip->di_magic), dip); 2932 goto corrupt_out; 2933 } 2934 if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC, 2935 mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) { 2936 - xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 2937 - "xfs_iflush: Bad inode %Lu, ptr 0x%p, magic number 0x%x", 2938 - ip->i_ino, ip, ip->i_d.di_magic); 2939 goto corrupt_out; 2940 } 2941 if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) { ··· 2943 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && 2944 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE), 2945 mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) { 2946 - xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 2947 - "xfs_iflush: Bad regular inode %Lu, ptr 0x%p", 2948 - ip->i_ino, ip); 2949 goto corrupt_out; 2950 } 2951 } else if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) { ··· 2954 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) && 2955 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL), 2956 mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) { 2957 - xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 2958 - "xfs_iflush: Bad directory inode %Lu, ptr 0x%p", 2959 - ip->i_ino, ip); 2960 goto corrupt_out; 2961 } 2962 } 2963 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents > 2964 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5, 2965 XFS_RANDOM_IFLUSH_5)) { 2966 - xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 2967 - "xfs_iflush: detected corrupt incore inode %Lu, total extents = %d, nblocks = %Ld, ptr 0x%p", 2968 - ip->i_ino, 2969 ip->i_d.di_nextents + ip->i_d.di_anextents, 2970 - ip->i_d.di_nblocks, 2971 - ip); 2972 goto corrupt_out; 2973 } 2974 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize, 2975 mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) { 2976 - xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp, 2977 - "xfs_iflush: bad inode %Lu, forkoff 0x%x, ptr 0x%p", 2978 - ip->i_ino, ip->i_d.di_forkoff, ip); 2979 goto corrupt_out; 2980 } 2981 /*
··· 110 dip = (xfs_dinode_t *)xfs_buf_offset(bp, 111 i * mp->m_sb.sb_inodesize); 112 if (!dip->di_next_unlinked) { 113 + xfs_alert(mp, 114 + "Detected bogus zero next_unlinked field in incore inode buffer 0x%p.", 115 bp); 116 ASSERT(dip->di_next_unlinked); 117 } ··· 142 (int)imap->im_len, buf_flags, &bp); 143 if (error) { 144 if (error != EAGAIN) { 145 + xfs_warn(mp, 146 + "%s: xfs_trans_read_buf() returned error %d.", 147 + __func__, error); 148 } else { 149 ASSERT(buf_flags & XBF_TRYLOCK); 150 } ··· 180 XFS_CORRUPTION_ERROR("xfs_imap_to_bp", 181 XFS_ERRLEVEL_HIGH, mp, dip); 182 #ifdef DEBUG 183 + xfs_emerg(mp, 184 + "bad inode magic/vsn daddr %lld #%d (magic=%x)", 185 (unsigned long long)imap->im_blkno, i, 186 be16_to_cpu(dip->di_magic)); 187 + ASSERT(0); 188 #endif 189 xfs_trans_brelse(tp, bp); 190 return XFS_ERROR(EFSCORRUPTED); ··· 317 if (unlikely(be32_to_cpu(dip->di_nextents) + 318 be16_to_cpu(dip->di_anextents) > 319 be64_to_cpu(dip->di_nblocks))) { 320 + xfs_warn(ip->i_mount, 321 "corrupt dinode %Lu, extent total = %d, nblocks = %Lu.", 322 (unsigned long long)ip->i_ino, 323 (int)(be32_to_cpu(dip->di_nextents) + ··· 330 } 331 332 if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) { 333 + xfs_warn(ip->i_mount, "corrupt dinode %Lu, forkoff = 0x%x.", 334 (unsigned long long)ip->i_ino, 335 dip->di_forkoff); 336 XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW, ··· 341 342 if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) && 343 !ip->i_mount->m_rtdev_targp)) { 344 + xfs_warn(ip->i_mount, 345 "corrupt dinode %Lu, has realtime flag set.", 346 ip->i_ino); 347 XFS_CORRUPTION_ERROR("xfs_iformat(realtime)", ··· 373 * no local regular files yet 374 */ 375 if (unlikely((be16_to_cpu(dip->di_mode) & S_IFMT) == S_IFREG)) { 376 + xfs_warn(ip->i_mount, 377 + "corrupt inode %Lu (local format for regular file).", 378 (unsigned long long) ip->i_ino); 379 XFS_CORRUPTION_ERROR("xfs_iformat(4)", 380 XFS_ERRLEVEL_LOW, ··· 385 386 di_size = be64_to_cpu(dip->di_size); 387 if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) { 388 + xfs_warn(ip->i_mount, 389 + "corrupt inode %Lu (bad size %Ld for local inode).", 390 (unsigned long long) ip->i_ino, 391 (long long) di_size); 392 XFS_CORRUPTION_ERROR("xfs_iformat(5)", ··· 431 size = be16_to_cpu(atp->hdr.totsize); 432 433 if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) { 434 + xfs_warn(ip->i_mount, 435 + "corrupt inode %Lu (bad attr fork size %Ld).", 436 (unsigned long long) ip->i_ino, 437 (long long) size); 438 XFS_CORRUPTION_ERROR("xfs_iformat(8)", ··· 488 * kmem_alloc() or memcpy() below. 489 */ 490 if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) { 491 + xfs_warn(ip->i_mount, 492 + "corrupt inode %Lu (bad size %d for local fork, size = %d).", 493 (unsigned long long) ip->i_ino, size, 494 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)); 495 XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW, ··· 547 * kmem_alloc() or memcpy() below. 
548 */ 549 if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) { 550 + xfs_warn(ip->i_mount, "corrupt inode %Lu ((a)extents = %d).", 551 (unsigned long long) ip->i_ino, nex); 552 XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW, 553 ip->i_mount, dip); ··· 623 || XFS_BMDR_SPACE_CALC(nrecs) > 624 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork) 625 || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) { 626 + xfs_warn(ip->i_mount, "corrupt inode %Lu (btree).", 627 (unsigned long long) ip->i_ino); 628 + XFS_CORRUPTION_ERROR("xfs_iformat_btree", XFS_ERRLEVEL_LOW, 629 + ip->i_mount, dip); 630 return XFS_ERROR(EFSCORRUPTED); 631 } 632 ··· 813 */ 814 if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC) { 815 #ifdef DEBUG 816 + xfs_alert(mp, 817 + "%s: dip->di_magic (0x%x) != XFS_DINODE_MAGIC (0x%x)", 818 + __func__, be16_to_cpu(dip->di_magic), XFS_DINODE_MAGIC); 819 #endif /* DEBUG */ 820 error = XFS_ERROR(EINVAL); 821 goto out_brelse; ··· 835 error = xfs_iformat(ip, dip); 836 if (error) { 837 #ifdef DEBUG 838 + xfs_alert(mp, "%s: xfs_iformat() returned error %d", 839 + __func__, error); 840 #endif /* DEBUG */ 841 goto out_brelse; 842 } ··· 1016 * This is because we're setting fields here we need 1017 * to prevent others from looking at until we're done. 1018 */ 1019 + error = xfs_iget(tp->t_mountp, tp, ino, XFS_IGET_CREATE, 1020 + XFS_ILOCK_EXCL, &ip); 1021 if (error) 1022 return error; 1023 ASSERT(ip != NULL); ··· 1166 /* 1167 * Log the new values stuffed into the inode. 1168 */ 1169 + xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL); 1170 xfs_trans_log_inode(tp, ip, flags); 1171 1172 /* now that we have an i_mode we can setup inode ops and unlock */ ··· 1820 */ 1821 error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK); 1822 if (error) { 1823 + xfs_warn(mp, "%s: xfs_itobp() returned error %d.", 1824 + __func__, error); 1825 return error; 1826 } 1827 next_agino = be32_to_cpu(dip->di_next_unlinked); ··· 1867 error = xfs_inotobp(mp, tp, next_ino, &last_dip, 1868 &last_ibp, &last_offset, 0); 1869 if (error) { 1870 + xfs_warn(mp, 1871 + "%s: xfs_inotobp() returned error %d.", 1872 + __func__, error); 1873 return error; 1874 } 1875 next_agino = be32_to_cpu(last_dip->di_next_unlinked); ··· 1882 */ 1883 error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK); 1884 if (error) { 1885 + xfs_warn(mp, "%s: xfs_itobp(2) returned error %d.", 1886 + __func__, error); 1887 return error; 1888 } 1889 next_agino = be32_to_cpu(dip->di_next_unlinked); ··· 2939 2940 if (XFS_TEST_ERROR(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC, 2941 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) { 2942 + xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 2943 + "%s: Bad inode %Lu magic number 0x%x, ptr 0x%p", 2944 + __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip); 2945 goto corrupt_out; 2946 } 2947 if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC, 2948 mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) { 2949 + xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 2950 + "%s: Bad inode %Lu, ptr 0x%p, magic number 0x%x", 2951 + __func__, ip->i_ino, ip, ip->i_d.di_magic); 2952 goto corrupt_out; 2953 } 2954 if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) { ··· 2956 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) && 2957 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE), 2958 mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) { 2959 + xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 2960 + "%s: Bad regular inode %Lu, ptr 0x%p", 2961 + __func__, ip->i_ino, ip); 2962 goto corrupt_out; 2963 } 2964 } else if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) { ··· 2967 
(ip->i_d.di_format != XFS_DINODE_FMT_BTREE) && 2968 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL), 2969 mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) { 2970 + xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 2971 + "%s: Bad directory inode %Lu, ptr 0x%p", 2972 + __func__, ip->i_ino, ip); 2973 goto corrupt_out; 2974 } 2975 } 2976 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents > 2977 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5, 2978 XFS_RANDOM_IFLUSH_5)) { 2979 + xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 2980 + "%s: detected corrupt incore inode %Lu, " 2981 + "total extents = %d, nblocks = %Ld, ptr 0x%p", 2982 + __func__, ip->i_ino, 2983 ip->i_d.di_nextents + ip->i_d.di_anextents, 2984 + ip->i_d.di_nblocks, ip); 2985 goto corrupt_out; 2986 } 2987 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize, 2988 mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) { 2989 + xfs_alert_tag(mp, XFS_PTAG_IFLUSH, 2990 + "%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p", 2991 + __func__, ip->i_ino, ip->i_d.di_forkoff, ip); 2992 goto corrupt_out; 2993 } 2994 /*
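Not all of the xfs_inode.c changes are message conversions: the inode allocation path now grabs the freshly allocated inode with a plain xfs_iget(..., XFS_IGET_CREATE, XFS_ILOCK_EXCL, ...) instead of xfs_trans_iget(), and explicitly joins it to the transaction before logging it. Condensed from the hunks above:

	error = xfs_iget(tp->t_mountp, tp, ino, XFS_IGET_CREATE,
			 XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;
	ASSERT(ip != NULL);

	/* ... stuff the new on-disk fields into the incore inode ... */

	xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);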
+15 -8
fs/xfs/xfs_inode.h
··· 409 /* 410 * Flags for lockdep annotations. 411 * 412 - * XFS_I[O]LOCK_PARENT - for operations that require locking two inodes 413 - * (ie directory operations that require locking a directory inode and 414 - * an entry inode). The first inode gets locked with this flag so it 415 - * gets a lockdep subclass of 1 and the second lock will have a lockdep 416 - * subclass of 0. 417 * 418 * XFS_LOCK_INUMORDER - for locking several inodes at the some time 419 * with xfs_lock_inodes(). This flag is used as the starting subclass 420 * and each subsequent lock acquired will increment the subclass by one. 421 - * So the first lock acquired will have a lockdep subclass of 2, the 422 - * second lock will have a lockdep subclass of 3, and so on. It is 423 * the responsibility of the class builder to shift this to the correct 424 * portion of the lock_mode lockdep mask. 425 */ 426 #define XFS_LOCK_PARENT 1 427 - #define XFS_LOCK_INUMORDER 2 428 429 #define XFS_IOLOCK_SHIFT 16 430 #define XFS_IOLOCK_PARENT (XFS_LOCK_PARENT << XFS_IOLOCK_SHIFT) 431 432 #define XFS_ILOCK_SHIFT 24 433 #define XFS_ILOCK_PARENT (XFS_LOCK_PARENT << XFS_ILOCK_SHIFT) 434 435 #define XFS_IOLOCK_DEP_MASK 0x00ff0000 436 #define XFS_ILOCK_DEP_MASK 0xff000000
··· 409 /* 410 * Flags for lockdep annotations. 411 * 412 + * XFS_LOCK_PARENT - for directory operations that require locking a 413 + * parent directory inode and a child entry inode. The parent gets locked 414 + * with this flag so it gets a lockdep subclass of 1 and the child entry 415 + * lock will have a lockdep subclass of 0. 416 + * 417 + * XFS_LOCK_RTBITMAP/XFS_LOCK_RTSUM - the realtime device bitmap and summary 418 + * inodes do not participate in the normal lock order, and thus have their 419 + * own subclasses. 420 * 421 * XFS_LOCK_INUMORDER - for locking several inodes at the some time 422 * with xfs_lock_inodes(). This flag is used as the starting subclass 423 * and each subsequent lock acquired will increment the subclass by one. 424 + * So the first lock acquired will have a lockdep subclass of 4, the 425 + * second lock will have a lockdep subclass of 5, and so on. It is 426 * the responsibility of the class builder to shift this to the correct 427 * portion of the lock_mode lockdep mask. 428 */ 429 #define XFS_LOCK_PARENT 1 430 + #define XFS_LOCK_RTBITMAP 2 431 + #define XFS_LOCK_RTSUM 3 432 + #define XFS_LOCK_INUMORDER 4 433 434 #define XFS_IOLOCK_SHIFT 16 435 #define XFS_IOLOCK_PARENT (XFS_LOCK_PARENT << XFS_IOLOCK_SHIFT) 436 437 #define XFS_ILOCK_SHIFT 24 438 #define XFS_ILOCK_PARENT (XFS_LOCK_PARENT << XFS_ILOCK_SHIFT) 439 + #define XFS_ILOCK_RTBITMAP (XFS_LOCK_RTBITMAP << XFS_ILOCK_SHIFT) 440 + #define XFS_ILOCK_RTSUM (XFS_LOCK_RTSUM << XFS_ILOCK_SHIFT) 441 442 #define XFS_IOLOCK_DEP_MASK 0x00ff0000 443 #define XFS_ILOCK_DEP_MASK 0xff000000
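The new XFS_ILOCK_RTBITMAP and XFS_ILOCK_RTSUM lockdep subclasses give the realtime bitmap and summary inodes their own classes, since those two inodes are locked outside the normal parent/child and inumorder ordering (the rt bitmap ILOCK taken in the xfs_bmap.c hunk above, for instance), and XFS_LOCK_INUMORDER moves from 2 to 4 to make room. The rt allocator presumably ORs the subclass into its lock calls, roughly like the hypothetical call sites below (the actual xfs_rtalloc.c changes are not part of the hunks shown here; m_rsumip is the summary inode):

	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL | XFS_ILOCK_RTBITMAP);
	xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL | XFS_ILOCK_RTSUM);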
+6 -6
fs/xfs/xfs_iomap.c
··· 101 } 102 103 STATIC int 104 - xfs_cmn_err_fsblock_zero( 105 xfs_inode_t *ip, 106 xfs_bmbt_irec_t *imap) 107 { 108 - xfs_cmn_err(XFS_PTAG_FSBLOCK_ZERO, CE_ALERT, ip->i_mount, 109 "Access to block zero in inode %llu " 110 "start_block: %llx start_off: %llx " 111 "blkcnt: %llx extent-state: %x\n", ··· 246 } 247 248 if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip))) { 249 - error = xfs_cmn_err_fsblock_zero(ip, imap); 250 goto error_out; 251 } 252 ··· 464 } 465 466 if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip))) 467 - return xfs_cmn_err_fsblock_zero(ip, &imap[0]); 468 469 *ret_imap = imap[0]; 470 return 0; ··· 614 * covers at least part of the callers request 615 */ 616 if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip))) 617 - return xfs_cmn_err_fsblock_zero(ip, imap); 618 619 if ((offset_fsb >= imap->br_startoff) && 620 (offset_fsb < (imap->br_startoff + ··· 724 return XFS_ERROR(error); 725 726 if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip))) 727 - return xfs_cmn_err_fsblock_zero(ip, &imap); 728 729 if ((numblks_fsb = imap.br_blockcount) == 0) { 730 /*
··· 101 } 102 103 STATIC int 104 + xfs_alert_fsblock_zero( 105 xfs_inode_t *ip, 106 xfs_bmbt_irec_t *imap) 107 { 108 + xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO, 109 "Access to block zero in inode %llu " 110 "start_block: %llx start_off: %llx " 111 "blkcnt: %llx extent-state: %x\n", ··· 246 } 247 248 if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip))) { 249 + error = xfs_alert_fsblock_zero(ip, imap); 250 goto error_out; 251 } 252 ··· 464 } 465 466 if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip))) 467 + return xfs_alert_fsblock_zero(ip, &imap[0]); 468 469 *ret_imap = imap[0]; 470 return 0; ··· 614 * covers at least part of the callers request 615 */ 616 if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip))) 617 + return xfs_alert_fsblock_zero(ip, imap); 618 619 if ((offset_fsb >= imap->br_startoff) && 620 (offset_fsb < (imap->br_startoff + ··· 724 return XFS_ERROR(error); 725 726 if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip))) 727 + return xfs_alert_fsblock_zero(ip, &imap); 728 729 if ((numblks_fsb = imap.br_blockcount) == 0) { 730 /*
+59 -65
fs/xfs/xfs_log.c
··· 374 int error; 375 376 if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) 377 - cmn_err(CE_NOTE, "XFS mounting filesystem %s", mp->m_fsname); 378 else { 379 - cmn_err(CE_NOTE, 380 - "Mounting filesystem \"%s\" in no-recovery mode. Filesystem will be inconsistent.", 381 - mp->m_fsname); 382 ASSERT(mp->m_flags & XFS_MOUNT_RDONLY); 383 } 384 ··· 392 */ 393 error = xfs_trans_ail_init(mp); 394 if (error) { 395 - cmn_err(CE_WARN, "XFS: AIL initialisation failed: error %d", error); 396 goto out_free_log; 397 } 398 mp->m_log->l_ailp = mp->m_ail; ··· 412 if (readonly) 413 mp->m_flags |= XFS_MOUNT_RDONLY; 414 if (error) { 415 - cmn_err(CE_WARN, "XFS: log mount/recovery failed: error %d", error); 416 goto out_destroy_ail; 417 } 418 } ··· 542 */ 543 } 544 545 - if (error) { 546 - xfs_fs_cmn_err(CE_ALERT, mp, 547 - "xfs_log_unmount: unmount record failed"); 548 - } 549 550 551 spin_lock(&log->l_icloglock); ··· 850 * In this case we just want to return the size of the 851 * log as the amount of space left. 852 */ 853 - xfs_fs_cmn_err(CE_ALERT, log->l_mp, 854 "xlog_space_left: head behind tail\n" 855 " tail_cycle = %d, tail_bytes = %d\n" 856 " GH cycle = %d, GH bytes = %d", ··· 999 1000 log = kmem_zalloc(sizeof(xlog_t), KM_MAYFAIL); 1001 if (!log) { 1002 - xlog_warn("XFS: Log allocation failed: No memory!"); 1003 goto out; 1004 } 1005 ··· 1027 if (xfs_sb_version_hassector(&mp->m_sb)) { 1028 log2_size = mp->m_sb.sb_logsectlog; 1029 if (log2_size < BBSHIFT) { 1030 - xlog_warn("XFS: Log sector size too small " 1031 - "(0x%x < 0x%x)", log2_size, BBSHIFT); 1032 goto out_free_log; 1033 } 1034 1035 log2_size -= BBSHIFT; 1036 if (log2_size > mp->m_sectbb_log) { 1037 - xlog_warn("XFS: Log sector size too large " 1038 - "(0x%x > 0x%x)", log2_size, mp->m_sectbb_log); 1039 goto out_free_log; 1040 } 1041 1042 /* for larger sector sizes, must have v2 or external log */ 1043 if (log2_size && log->l_logBBstart > 0 && 1044 !xfs_sb_version_haslogv2(&mp->m_sb)) { 1045 - 1046 - xlog_warn("XFS: log sector size (0x%x) invalid " 1047 - "for configuration.", log2_size); 1048 goto out_free_log; 1049 } 1050 } ··· 1561 "SWAPEXT" 1562 }; 1563 1564 - xfs_fs_cmn_err(CE_WARN, mp, 1565 - "xfs_log_write: reservation summary:\n" 1566 - " trans type = %s (%u)\n" 1567 - " unit res = %d bytes\n" 1568 - " current res = %d bytes\n" 1569 - " total reg = %u bytes (o/flow = %u bytes)\n" 1570 - " ophdrs = %u (ophdr space = %u bytes)\n" 1571 - " ophdr + reg = %u bytes\n" 1572 - " num regions = %u\n", 1573 - ((ticket->t_trans_type <= 0 || 1574 - ticket->t_trans_type > XFS_TRANS_TYPE_MAX) ? 1575 - "bad-trans-type" : trans_type_str[ticket->t_trans_type-1]), 1576 - ticket->t_trans_type, 1577 - ticket->t_unit_res, 1578 - ticket->t_curr_res, 1579 - ticket->t_res_arr_sum, ticket->t_res_o_flow, 1580 - ticket->t_res_num_ophdrs, ophdr_spc, 1581 - ticket->t_res_arr_sum + 1582 - ticket->t_res_o_flow + ophdr_spc, 1583 - ticket->t_res_num); 1584 1585 for (i = 0; i < ticket->t_res_num; i++) { 1586 - uint r_type = ticket->t_res_arr[i].r_type; 1587 - cmn_err(CE_WARN, 1588 - "region[%u]: %s - %u bytes\n", 1589 - i, 1590 ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ? 1591 "bad-rtype" : res_type_str[r_type-1]), 1592 ticket->t_res_arr[i].r_len); 1593 } 1594 1595 - xfs_cmn_err(XFS_PTAG_LOGRES, CE_ALERT, mp, 1596 "xfs_log_write: reservation ran out. 
Need to up reservation"); 1597 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 1598 } ··· 1678 case XFS_LOG: 1679 break; 1680 default: 1681 - xfs_fs_cmn_err(CE_WARN, log->l_mp, 1682 "Bad XFS transaction clientid 0x%x in ticket 0x%p", 1683 ophdr->oh_clientid, ticket); 1684 return NULL; ··· 2260 if (repeats > 5000) { 2261 flushcnt += repeats; 2262 repeats = 0; 2263 - xfs_fs_cmn_err(CE_WARN, log->l_mp, 2264 "%s: possible infinite loop (%d iterations)", 2265 __func__, flushcnt); 2266 } ··· 3048 int error; 3049 3050 error = _xfs_log_force(mp, flags, NULL); 3051 - if (error) { 3052 - xfs_fs_cmn_err(CE_WARN, mp, "xfs_log_force: " 3053 - "error %d returned.", error); 3054 - } 3055 } 3056 3057 /* ··· 3198 int error; 3199 3200 error = _xfs_log_force_lsn(mp, lsn, flags, NULL); 3201 - if (error) { 3202 - xfs_fs_cmn_err(CE_WARN, mp, "xfs_log_force: " 3203 - "error %d returned.", error); 3204 - } 3205 } 3206 3207 /* ··· 3404 } 3405 3406 if (!good_ptr) 3407 - xlog_panic("xlog_verify_dest_ptr: invalid ptr"); 3408 } 3409 3410 STATIC void ··· 3440 blocks = 3441 log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn)); 3442 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize)) 3443 - xlog_panic("xlog_verify_tail_lsn: ran out of log space"); 3444 } else { 3445 ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle); 3446 3447 if (BLOCK_LSN(tail_lsn) == log->l_prev_block) 3448 - xlog_panic("xlog_verify_tail_lsn: tail wrapped"); 3449 3450 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block; 3451 if (blocks < BTOBB(iclog->ic_offset) + 1) 3452 - xlog_panic("xlog_verify_tail_lsn: ran out of log space"); 3453 } 3454 } /* xlog_verify_tail_lsn */ 3455 ··· 3489 icptr = log->l_iclog; 3490 for (i=0; i < log->l_iclog_bufs; i++) { 3491 if (icptr == NULL) 3492 - xlog_panic("xlog_verify_iclog: invalid ptr"); 3493 icptr = icptr->ic_next; 3494 } 3495 if (icptr != log->l_iclog) 3496 - xlog_panic("xlog_verify_iclog: corrupt iclog ring"); 3497 spin_unlock(&log->l_icloglock); 3498 3499 /* check log magic numbers */ 3500 if (be32_to_cpu(iclog->ic_header.h_magicno) != XLOG_HEADER_MAGIC_NUM) 3501 - xlog_panic("xlog_verify_iclog: invalid magic num"); 3502 3503 ptr = (xfs_caddr_t) &iclog->ic_header; 3504 for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&iclog->ic_header) + count; 3505 ptr += BBSIZE) { 3506 if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM) 3507 - xlog_panic("xlog_verify_iclog: unexpected magic num"); 3508 } 3509 3510 /* check fields */ ··· 3535 } 3536 } 3537 if (clientid != XFS_TRANSACTION && clientid != XFS_LOG) 3538 - cmn_err(CE_WARN, "xlog_verify_iclog: " 3539 - "invalid clientid %d op 0x%p offset 0x%lx", 3540 - clientid, ophead, (unsigned long)field_offset); 3541 3542 /* check length */ 3543 field_offset = (__psint_t)
··· 374 int error; 375 376 if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) 377 + xfs_notice(mp, "Mounting Filesystem"); 378 else { 379 + xfs_notice(mp, 380 + "Mounting filesystem in no-recovery mode. Filesystem will be inconsistent."); 381 ASSERT(mp->m_flags & XFS_MOUNT_RDONLY); 382 } 383 ··· 393 */ 394 error = xfs_trans_ail_init(mp); 395 if (error) { 396 + xfs_warn(mp, "AIL initialisation failed: error %d", error); 397 goto out_free_log; 398 } 399 mp->m_log->l_ailp = mp->m_ail; ··· 413 if (readonly) 414 mp->m_flags |= XFS_MOUNT_RDONLY; 415 if (error) { 416 + xfs_warn(mp, "log mount/recovery failed: error %d", 417 + error); 418 goto out_destroy_ail; 419 } 420 } ··· 542 */ 543 } 544 545 + if (error) 546 + xfs_alert(mp, "%s: unmount record failed", __func__); 547 548 549 spin_lock(&log->l_icloglock); ··· 852 * In this case we just want to return the size of the 853 * log as the amount of space left. 854 */ 855 + xfs_alert(log->l_mp, 856 "xlog_space_left: head behind tail\n" 857 " tail_cycle = %d, tail_bytes = %d\n" 858 " GH cycle = %d, GH bytes = %d", ··· 1001 1002 log = kmem_zalloc(sizeof(xlog_t), KM_MAYFAIL); 1003 if (!log) { 1004 + xfs_warn(mp, "Log allocation failed: No memory!"); 1005 goto out; 1006 } 1007 ··· 1029 if (xfs_sb_version_hassector(&mp->m_sb)) { 1030 log2_size = mp->m_sb.sb_logsectlog; 1031 if (log2_size < BBSHIFT) { 1032 + xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)", 1033 + log2_size, BBSHIFT); 1034 goto out_free_log; 1035 } 1036 1037 log2_size -= BBSHIFT; 1038 if (log2_size > mp->m_sectbb_log) { 1039 + xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)", 1040 + log2_size, mp->m_sectbb_log); 1041 goto out_free_log; 1042 } 1043 1044 /* for larger sector sizes, must have v2 or external log */ 1045 if (log2_size && log->l_logBBstart > 0 && 1046 !xfs_sb_version_haslogv2(&mp->m_sb)) { 1047 + xfs_warn(mp, 1048 + "log sector size (0x%x) invalid for configuration.", 1049 + log2_size); 1050 goto out_free_log; 1051 } 1052 } ··· 1563 "SWAPEXT" 1564 }; 1565 1566 + xfs_warn(mp, 1567 + "xfs_log_write: reservation summary:\n" 1568 + " trans type = %s (%u)\n" 1569 + " unit res = %d bytes\n" 1570 + " current res = %d bytes\n" 1571 + " total reg = %u bytes (o/flow = %u bytes)\n" 1572 + " ophdrs = %u (ophdr space = %u bytes)\n" 1573 + " ophdr + reg = %u bytes\n" 1574 + " num regions = %u\n", 1575 + ((ticket->t_trans_type <= 0 || 1576 + ticket->t_trans_type > XFS_TRANS_TYPE_MAX) ? 1577 + "bad-trans-type" : trans_type_str[ticket->t_trans_type-1]), 1578 + ticket->t_trans_type, 1579 + ticket->t_unit_res, 1580 + ticket->t_curr_res, 1581 + ticket->t_res_arr_sum, ticket->t_res_o_flow, 1582 + ticket->t_res_num_ophdrs, ophdr_spc, 1583 + ticket->t_res_arr_sum + 1584 + ticket->t_res_o_flow + ophdr_spc, 1585 + ticket->t_res_num); 1586 1587 for (i = 0; i < ticket->t_res_num; i++) { 1588 + uint r_type = ticket->t_res_arr[i].r_type; 1589 + xfs_warn(mp, "region[%u]: %s - %u bytes\n", i, 1590 ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ? 1591 "bad-rtype" : res_type_str[r_type-1]), 1592 ticket->t_res_arr[i].r_len); 1593 } 1594 1595 + xfs_alert_tag(mp, XFS_PTAG_LOGRES, 1596 "xfs_log_write: reservation ran out. 
Need to up reservation"); 1597 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 1598 } ··· 1682 case XFS_LOG: 1683 break; 1684 default: 1685 + xfs_warn(log->l_mp, 1686 "Bad XFS transaction clientid 0x%x in ticket 0x%p", 1687 ophdr->oh_clientid, ticket); 1688 return NULL; ··· 2264 if (repeats > 5000) { 2265 flushcnt += repeats; 2266 repeats = 0; 2267 + xfs_warn(log->l_mp, 2268 "%s: possible infinite loop (%d iterations)", 2269 __func__, flushcnt); 2270 } ··· 3052 int error; 3053 3054 error = _xfs_log_force(mp, flags, NULL); 3055 + if (error) 3056 + xfs_warn(mp, "%s: error %d returned.", __func__, error); 3057 } 3058 3059 /* ··· 3204 int error; 3205 3206 error = _xfs_log_force_lsn(mp, lsn, flags, NULL); 3207 + if (error) 3208 + xfs_warn(mp, "%s: error %d returned.", __func__, error); 3209 } 3210 3211 /* ··· 3412 } 3413 3414 if (!good_ptr) 3415 + xfs_emerg(log->l_mp, "%s: invalid ptr", __func__); 3416 } 3417 3418 STATIC void ··· 3448 blocks = 3449 log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn)); 3450 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize)) 3451 + xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); 3452 } else { 3453 ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle); 3454 3455 if (BLOCK_LSN(tail_lsn) == log->l_prev_block) 3456 + xfs_emerg(log->l_mp, "%s: tail wrapped", __func__); 3457 3458 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block; 3459 if (blocks < BTOBB(iclog->ic_offset) + 1) 3460 + xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); 3461 } 3462 } /* xlog_verify_tail_lsn */ 3463 ··· 3497 icptr = log->l_iclog; 3498 for (i=0; i < log->l_iclog_bufs; i++) { 3499 if (icptr == NULL) 3500 + xfs_emerg(log->l_mp, "%s: invalid ptr", __func__); 3501 icptr = icptr->ic_next; 3502 } 3503 if (icptr != log->l_iclog) 3504 + xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__); 3505 spin_unlock(&log->l_icloglock); 3506 3507 /* check log magic numbers */ 3508 if (be32_to_cpu(iclog->ic_header.h_magicno) != XLOG_HEADER_MAGIC_NUM) 3509 + xfs_emerg(log->l_mp, "%s: invalid magic num", __func__); 3510 3511 ptr = (xfs_caddr_t) &iclog->ic_header; 3512 for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&iclog->ic_header) + count; 3513 ptr += BBSIZE) { 3514 if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM) 3515 + xfs_emerg(log->l_mp, "%s: unexpected magic num", 3516 + __func__); 3517 } 3518 3519 /* check fields */ ··· 3542 } 3543 } 3544 if (clientid != XFS_TRANSACTION && clientid != XFS_LOG) 3545 + xfs_warn(log->l_mp, 3546 + "%s: invalid clientid %d op 0x%p offset 0x%lx", 3547 + __func__, clientid, ophead, 3548 + (unsigned long)field_offset); 3549 3550 /* check length */ 3551 field_offset = (__psint_t)
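The conversions in this file are mechanical: cmn_err(CE_<level>, "XFS: ...") and xfs_fs_cmn_err(CE_<level>, mp, ...) collapse into xfs_<level>(mp, ...), with __func__ replacing hand-written function names in several messages. One before/after pair lifted from the hunks above shows the shape:

	/* before: severity constant plus a hand-written "XFS:" prefix */
	cmn_err(CE_WARN, "XFS: AIL initialisation failed: error %d", error);

	/* after: the per-mount helper identifies the filesystem itself */
	xfs_warn(mp, "AIL initialisation failed: error %d", error);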
-4
fs/xfs/xfs_log_priv.h
··· 87 return be32_to_cpu(i) >> 24; 88 } 89 90 - #define xlog_panic(args...) cmn_err(CE_PANIC, ## args) 91 - #define xlog_exit(args...) cmn_err(CE_PANIC, ## args) 92 - #define xlog_warn(args...) cmn_err(CE_WARN, ## args) 93 - 94 /* 95 * In core log state 96 */
··· 87 return be32_to_cpu(i) >> 24; 88 } 89 90 /* 91 * In core log state 92 */
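With these wrappers gone, the log code calls the mount-aware helpers directly; as the xfs_log.c hunks above show, the old CE_PANIC paths in the debug checks now go through xfs_emerg(), so they appear to log at emergency level rather than panic. A former xlog_panic() site, before and after:

	/* before */
	xlog_panic("xlog_verify_iclog: invalid magic num");

	/* after */
	xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);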
+109 -114
fs/xfs/xfs_log_recover.c
··· 92 int nbblks) 93 { 94 if (!xlog_buf_bbcount_valid(log, nbblks)) { 95 - xlog_warn("XFS: Invalid block length (0x%x) given for buffer", 96 nbblks); 97 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp); 98 return NULL; ··· 160 int error; 161 162 if (!xlog_buf_bbcount_valid(log, nbblks)) { 163 - xlog_warn("XFS: Invalid block length (0x%x) given for buffer", 164 nbblks); 165 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp); 166 return EFSCORRUPTED; ··· 219 int error; 220 221 if (!xlog_buf_bbcount_valid(log, nbblks)) { 222 - xlog_warn("XFS: Invalid block length (0x%x) given for buffer", 223 nbblks); 224 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp); 225 return EFSCORRUPTED; ··· 254 xfs_mount_t *mp, 255 xlog_rec_header_t *head) 256 { 257 - cmn_err(CE_DEBUG, "%s: SB : uuid = %pU, fmt = %d\n", 258 __func__, &mp->m_sb.sb_uuid, XLOG_FMT); 259 - cmn_err(CE_DEBUG, " log : uuid = %pU, fmt = %d\n", 260 &head->h_fs_uuid, be32_to_cpu(head->h_fmt)); 261 } 262 #else ··· 279 * a dirty log created in IRIX. 280 */ 281 if (unlikely(be32_to_cpu(head->h_fmt) != XLOG_FMT)) { 282 - xlog_warn( 283 - "XFS: dirty log written in incompatible format - can't recover"); 284 xlog_header_check_dump(mp, head); 285 XFS_ERROR_REPORT("xlog_header_check_recover(1)", 286 XFS_ERRLEVEL_HIGH, mp); 287 return XFS_ERROR(EFSCORRUPTED); 288 } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) { 289 - xlog_warn( 290 - "XFS: dirty log entry has mismatched uuid - can't recover"); 291 xlog_header_check_dump(mp, head); 292 XFS_ERROR_REPORT("xlog_header_check_recover(2)", 293 XFS_ERRLEVEL_HIGH, mp); ··· 312 * h_fs_uuid is nil, we assume this log was last mounted 313 * by IRIX and continue. 314 */ 315 - xlog_warn("XFS: nil uuid in log - IRIX style log"); 316 } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) { 317 - xlog_warn("XFS: log has mismatched uuid - can't recover"); 318 xlog_header_check_dump(mp, head); 319 XFS_ERROR_REPORT("xlog_header_check_mount", 320 XFS_ERRLEVEL_HIGH, mp); ··· 490 for (i = (*last_blk) - 1; i >= 0; i--) { 491 if (i < start_blk) { 492 /* valid log record not found */ 493 - xlog_warn( 494 - "XFS: Log inconsistent (didn't find previous header)"); 495 ASSERT(0); 496 error = XFS_ERROR(EIO); 497 goto out; ··· 591 * mkfs etc write a dummy unmount record to a fresh 592 * log so we can store the uuid in there 593 */ 594 - xlog_warn("XFS: totally zeroed log"); 595 } 596 597 return 0; 598 } else if (error) { 599 - xlog_warn("XFS: empty log check failed"); 600 return error; 601 } 602 ··· 819 xlog_put_bp(bp); 820 821 if (error) 822 - xlog_warn("XFS: failed to find log head"); 823 return error; 824 } 825 ··· 912 } 913 } 914 if (!found) { 915 - xlog_warn("XFS: xlog_find_tail: couldn't find sync record"); 916 ASSERT(0); 917 return XFS_ERROR(EIO); 918 } ··· 1028 xlog_put_bp(bp); 1029 1030 if (error) 1031 - xlog_warn("XFS: failed to locate log tail"); 1032 return error; 1033 } 1034 ··· 1092 * the first block must be 1. If it's not, maybe we're 1093 * not looking at a log... Bail out. 
1094 */ 1095 - xlog_warn("XFS: Log inconsistent or not a log (last==0, first!=1)"); 1096 return XFS_ERROR(EINVAL); 1097 } 1098 ··· 1507 if (list_empty(&trans->r_itemq)) { 1508 /* we need to catch log corruptions here */ 1509 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) { 1510 - xlog_warn("XFS: xlog_recover_add_to_trans: " 1511 - "bad header magic number"); 1512 ASSERT(0); 1513 return XFS_ERROR(EIO); 1514 } ··· 1535 if (item->ri_total == 0) { /* first region to be added */ 1536 if (in_f->ilf_size == 0 || 1537 in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) { 1538 - xlog_warn( 1539 - "XFS: bad number of regions (%d) in inode log format", 1540 in_f->ilf_size); 1541 ASSERT(0); 1542 return XFS_ERROR(EIO); ··· 1593 list_move_tail(&item->ri_list, &trans->r_itemq); 1594 break; 1595 default: 1596 - xlog_warn( 1597 - "XFS: xlog_recover_reorder_trans: unrecognized type of log operation"); 1598 ASSERT(0); 1599 return XFS_ERROR(EIO); 1600 } ··· 1805 logged_nextp = item->ri_buf[item_index].i_addr + 1806 next_unlinked_offset - reg_buf_offset; 1807 if (unlikely(*logged_nextp == 0)) { 1808 - xfs_fs_cmn_err(CE_ALERT, mp, 1809 - "bad inode buffer log record (ptr = 0x%p, bp = 0x%p). XFS trying to replay bad (0) inode di_next_unlinked field", 1810 item, bp); 1811 XFS_ERROR_REPORT("xlog_recover_do_inode_buf", 1812 XFS_ERRLEVEL_LOW, mp); ··· 1866 if (buf_f->blf_flags & 1867 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) { 1868 if (item->ri_buf[i].i_addr == NULL) { 1869 - cmn_err(CE_ALERT, 1870 "XFS: NULL dquot in %s.", __func__); 1871 goto next; 1872 } 1873 if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) { 1874 - cmn_err(CE_ALERT, 1875 "XFS: dquot too small (%d) in %s.", 1876 item->ri_buf[i].i_len, __func__); 1877 goto next; 1878 } 1879 - error = xfs_qm_dqcheck(item->ri_buf[i].i_addr, 1880 -1, 0, XFS_QMOPT_DOWARN, 1881 "dquot_buf_recover"); 1882 if (error) ··· 1901 */ 1902 int 1903 xfs_qm_dqcheck( 1904 xfs_disk_dquot_t *ddq, 1905 xfs_dqid_t id, 1906 uint type, /* used only when IO_dorepair is true */ ··· 1928 */ 1929 if (be16_to_cpu(ddq->d_magic) != XFS_DQUOT_MAGIC) { 1930 if (flags & XFS_QMOPT_DOWARN) 1931 - cmn_err(CE_ALERT, 1932 "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x", 1933 str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC); 1934 errs++; 1935 } 1936 if (ddq->d_version != XFS_DQUOT_VERSION) { 1937 if (flags & XFS_QMOPT_DOWARN) 1938 - cmn_err(CE_ALERT, 1939 "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x", 1940 str, id, ddq->d_version, XFS_DQUOT_VERSION); 1941 errs++; ··· 1945 ddq->d_flags != XFS_DQ_PROJ && 1946 ddq->d_flags != XFS_DQ_GROUP) { 1947 if (flags & XFS_QMOPT_DOWARN) 1948 - cmn_err(CE_ALERT, 1949 "%s : XFS dquot ID 0x%x, unknown flags 0x%x", 1950 str, id, ddq->d_flags); 1951 errs++; ··· 1953 1954 if (id != -1 && id != be32_to_cpu(ddq->d_id)) { 1955 if (flags & XFS_QMOPT_DOWARN) 1956 - cmn_err(CE_ALERT, 1957 "%s : ondisk-dquot 0x%p, ID mismatch: " 1958 "0x%x expected, found id 0x%x", 1959 str, ddq, id, be32_to_cpu(ddq->d_id)); ··· 1966 be64_to_cpu(ddq->d_blk_softlimit)) { 1967 if (!ddq->d_btimer) { 1968 if (flags & XFS_QMOPT_DOWARN) 1969 - cmn_err(CE_ALERT, 1970 - "%s : Dquot ID 0x%x (0x%p) " 1971 - "BLK TIMER NOT STARTED", 1972 str, (int)be32_to_cpu(ddq->d_id), ddq); 1973 errs++; 1974 } ··· 1977 be64_to_cpu(ddq->d_ino_softlimit)) { 1978 if (!ddq->d_itimer) { 1979 if (flags & XFS_QMOPT_DOWARN) 1980 - cmn_err(CE_ALERT, 1981 - "%s : Dquot ID 0x%x (0x%p) " 1982 - "INODE TIMER NOT STARTED", 1983 str, (int)be32_to_cpu(ddq->d_id), ddq); 1984 errs++; 1985 } ··· 1988 
be64_to_cpu(ddq->d_rtb_softlimit)) { 1989 if (!ddq->d_rtbtimer) { 1990 if (flags & XFS_QMOPT_DOWARN) 1991 - cmn_err(CE_ALERT, 1992 - "%s : Dquot ID 0x%x (0x%p) " 1993 - "RTBLK TIMER NOT STARTED", 1994 str, (int)be32_to_cpu(ddq->d_id), ddq); 1995 errs++; 1996 } ··· 2000 return errs; 2001 2002 if (flags & XFS_QMOPT_DOWARN) 2003 - cmn_err(CE_NOTE, "Re-initializing dquot ID 0x%x", id); 2004 2005 /* 2006 * Typically, a repair is only requested by quotacheck. ··· 2219 */ 2220 if (unlikely(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC)) { 2221 xfs_buf_relse(bp); 2222 - xfs_fs_cmn_err(CE_ALERT, mp, 2223 - "xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld", 2224 - dip, bp, in_f->ilf_ino); 2225 XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)", 2226 XFS_ERRLEVEL_LOW, mp); 2227 error = EFSCORRUPTED; ··· 2230 dicp = item->ri_buf[1].i_addr; 2231 if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) { 2232 xfs_buf_relse(bp); 2233 - xfs_fs_cmn_err(CE_ALERT, mp, 2234 - "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, ino %Ld", 2235 - item, in_f->ilf_ino); 2236 XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)", 2237 XFS_ERRLEVEL_LOW, mp); 2238 error = EFSCORRUPTED; ··· 2264 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)", 2265 XFS_ERRLEVEL_LOW, mp, dicp); 2266 xfs_buf_relse(bp); 2267 - xfs_fs_cmn_err(CE_ALERT, mp, 2268 - "xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", 2269 - item, dip, bp, in_f->ilf_ino); 2270 error = EFSCORRUPTED; 2271 goto error; 2272 } ··· 2278 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)", 2279 XFS_ERRLEVEL_LOW, mp, dicp); 2280 xfs_buf_relse(bp); 2281 - xfs_fs_cmn_err(CE_ALERT, mp, 2282 - "xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", 2283 - item, dip, bp, in_f->ilf_ino); 2284 error = EFSCORRUPTED; 2285 goto error; 2286 } ··· 2290 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)", 2291 XFS_ERRLEVEL_LOW, mp, dicp); 2292 xfs_buf_relse(bp); 2293 - xfs_fs_cmn_err(CE_ALERT, mp, 2294 - "xfs_inode_recover: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld", 2295 - item, dip, bp, in_f->ilf_ino, 2296 dicp->di_nextents + dicp->di_anextents, 2297 dicp->di_nblocks); 2298 error = EFSCORRUPTED; ··· 2303 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)", 2304 XFS_ERRLEVEL_LOW, mp, dicp); 2305 xfs_buf_relse(bp); 2306 - xfs_fs_cmn_err(CE_ALERT, mp, 2307 - "xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x", 2308 item, dip, bp, in_f->ilf_ino, dicp->di_forkoff); 2309 error = EFSCORRUPTED; 2310 goto error; ··· 2314 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)", 2315 XFS_ERRLEVEL_LOW, mp, dicp); 2316 xfs_buf_relse(bp); 2317 - xfs_fs_cmn_err(CE_ALERT, mp, 2318 - "xfs_inode_recover: Bad inode log record length %d, rec ptr 0x%p", 2319 - item->ri_buf[1].i_len, item); 2320 error = EFSCORRUPTED; 2321 goto error; 2322 } ··· 2403 break; 2404 2405 default: 2406 - xlog_warn("XFS: xlog_recover_inode_pass2: Invalid flag"); 2407 ASSERT(0); 2408 xfs_buf_relse(bp); 2409 error = EIO; ··· 2472 2473 recddq = item->ri_buf[1].i_addr; 2474 if (recddq == NULL) { 2475 - cmn_err(CE_ALERT, 2476 - "XFS: NULL dquot in %s.", __func__); 2477 return XFS_ERROR(EIO); 2478 } 2479 if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) { 2480 - cmn_err(CE_ALERT, 2481 - "XFS: dquot too small (%d) in %s.", 2482 item->ri_buf[1].i_len, __func__); 2483 return XFS_ERROR(EIO); 2484 } 
··· 2501 */ 2502 dq_f = item->ri_buf[0].i_addr; 2503 ASSERT(dq_f); 2504 - if ((error = xfs_qm_dqcheck(recddq, 2505 - dq_f->qlf_id, 2506 - 0, XFS_QMOPT_DOWARN, 2507 - "xlog_recover_dquot_pass2 (log copy)"))) { 2508 return XFS_ERROR(EIO); 2509 - } 2510 ASSERT(dq_f->qlf_len == 1); 2511 2512 error = xfs_read_buf(mp, mp->m_ddev_targp, ··· 2524 * was among a chunk of dquots created earlier, and we did some 2525 * minimal initialization then. 2526 */ 2527 - if (xfs_qm_dqcheck(ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN, 2528 - "xlog_recover_dquot_pass2")) { 2529 xfs_buf_relse(bp); 2530 return XFS_ERROR(EIO); 2531 } ··· 2678 /* nothing to do in pass 1 */ 2679 return 0; 2680 default: 2681 - xlog_warn( 2682 - "XFS: invalid item type (%d) xlog_recover_commit_pass1", 2683 - ITEM_TYPE(item)); 2684 ASSERT(0); 2685 return XFS_ERROR(EIO); 2686 } ··· 2708 /* nothing to do in pass2 */ 2709 return 0; 2710 default: 2711 - xlog_warn( 2712 - "XFS: invalid item type (%d) xlog_recover_commit_pass2", 2713 - ITEM_TYPE(item)); 2714 ASSERT(0); 2715 return XFS_ERROR(EIO); 2716 } ··· 2751 2752 STATIC int 2753 xlog_recover_unmount_trans( 2754 xlog_recover_t *trans) 2755 { 2756 /* Do nothing now */ 2757 - xlog_warn("XFS: xlog_recover_unmount_trans: Unmount LR"); 2758 return 0; 2759 } 2760 ··· 2798 dp += sizeof(xlog_op_header_t); 2799 if (ohead->oh_clientid != XFS_TRANSACTION && 2800 ohead->oh_clientid != XFS_LOG) { 2801 - xlog_warn( 2802 - "XFS: xlog_recover_process_data: bad clientid"); 2803 ASSERT(0); 2804 return (XFS_ERROR(EIO)); 2805 } ··· 2812 be64_to_cpu(rhead->h_lsn)); 2813 } else { 2814 if (dp + be32_to_cpu(ohead->oh_len) > lp) { 2815 - xlog_warn( 2816 - "XFS: xlog_recover_process_data: bad length"); 2817 WARN_ON(1); 2818 return (XFS_ERROR(EIO)); 2819 } ··· 2826 trans, pass); 2827 break; 2828 case XLOG_UNMOUNT_TRANS: 2829 - error = xlog_recover_unmount_trans(trans); 2830 break; 2831 case XLOG_WAS_CONT_TRANS: 2832 error = xlog_recover_add_to_cont_trans(log, ··· 2834 be32_to_cpu(ohead->oh_len)); 2835 break; 2836 case XLOG_START_TRANS: 2837 - xlog_warn( 2838 - "XFS: xlog_recover_process_data: bad transaction"); 2839 ASSERT(0); 2840 error = XFS_ERROR(EIO); 2841 break; ··· 2845 dp, be32_to_cpu(ohead->oh_len)); 2846 break; 2847 default: 2848 - xlog_warn( 2849 - "XFS: xlog_recover_process_data: bad flag"); 2850 ASSERT(0); 2851 error = XFS_ERROR(EIO); 2852 break; ··· 3031 out_abort: 3032 xfs_trans_cancel(tp, XFS_TRANS_ABORT); 3033 out_error: 3034 - xfs_fs_cmn_err(CE_WARN, mp, "xlog_recover_clear_agi_bucket: " 3035 - "failed to clear agi %d. Continuing.", agno); 3036 return; 3037 } 3038 ··· 3282 if (unlikely( 3283 (!rhead->h_version || 3284 (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) { 3285 - xlog_warn("XFS: %s: unrecognised log version (%d).", 3286 __func__, be32_to_cpu(rhead->h_version)); 3287 return XFS_ERROR(EIO); 3288 } ··· 3740 return error; 3741 } 3742 3743 - cmn_err(CE_NOTE, 3744 - "Starting XFS recovery on filesystem: %s (logdev: %s)", 3745 - log->l_mp->m_fsname, log->l_mp->m_logname ? 
3746 - log->l_mp->m_logname : "internal"); 3747 3748 error = xlog_do_recover(log, head_blk, tail_blk); 3749 log->l_flags |= XLOG_RECOVERY_NEEDED; ··· 3775 int error; 3776 error = xlog_recover_process_efis(log); 3777 if (error) { 3778 - cmn_err(CE_ALERT, 3779 - "Failed to recover EFIs on filesystem: %s", 3780 - log->l_mp->m_fsname); 3781 return error; 3782 } 3783 /* ··· 3790 3791 xlog_recover_check_summary(log); 3792 3793 - cmn_err(CE_NOTE, 3794 - "Ending XFS recovery on filesystem: %s (logdev: %s)", 3795 - log->l_mp->m_fsname, log->l_mp->m_logname ? 3796 - log->l_mp->m_logname : "internal"); 3797 log->l_flags &= ~XLOG_RECOVERY_NEEDED; 3798 } else { 3799 - cmn_err(CE_DEBUG, 3800 - "Ending clean XFS mount for filesystem: %s\n", 3801 - log->l_mp->m_fsname); 3802 } 3803 return 0; 3804 } ··· 3828 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) { 3829 error = xfs_read_agf(mp, NULL, agno, 0, &agfbp); 3830 if (error) { 3831 - xfs_fs_cmn_err(CE_ALERT, mp, 3832 - "xlog_recover_check_summary(agf)" 3833 - "agf read failed agno %d error %d", 3834 - agno, error); 3835 } else { 3836 agfp = XFS_BUF_TO_AGF(agfbp); 3837 freeblks += be32_to_cpu(agfp->agf_freeblks) + ··· 3838 } 3839 3840 error = xfs_read_agi(mp, NULL, agno, &agibp); 3841 - if (!error) { 3842 struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp); 3843 3844 itotal += be32_to_cpu(agi->agi_count);
··· 92 int nbblks) 93 { 94 if (!xlog_buf_bbcount_valid(log, nbblks)) { 95 + xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer", 96 nbblks); 97 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp); 98 return NULL; ··· 160 int error; 161 162 if (!xlog_buf_bbcount_valid(log, nbblks)) { 163 + xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer", 164 nbblks); 165 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp); 166 return EFSCORRUPTED; ··· 219 int error; 220 221 if (!xlog_buf_bbcount_valid(log, nbblks)) { 222 + xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer", 223 nbblks); 224 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_HIGH, log->l_mp); 225 return EFSCORRUPTED; ··· 254 xfs_mount_t *mp, 255 xlog_rec_header_t *head) 256 { 257 + xfs_debug(mp, "%s: SB : uuid = %pU, fmt = %d\n", 258 __func__, &mp->m_sb.sb_uuid, XLOG_FMT); 259 + xfs_debug(mp, " log : uuid = %pU, fmt = %d\n", 260 &head->h_fs_uuid, be32_to_cpu(head->h_fmt)); 261 } 262 #else ··· 279 * a dirty log created in IRIX. 280 */ 281 if (unlikely(be32_to_cpu(head->h_fmt) != XLOG_FMT)) { 282 + xfs_warn(mp, 283 + "dirty log written in incompatible format - can't recover"); 284 xlog_header_check_dump(mp, head); 285 XFS_ERROR_REPORT("xlog_header_check_recover(1)", 286 XFS_ERRLEVEL_HIGH, mp); 287 return XFS_ERROR(EFSCORRUPTED); 288 } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) { 289 + xfs_warn(mp, 290 + "dirty log entry has mismatched uuid - can't recover"); 291 xlog_header_check_dump(mp, head); 292 XFS_ERROR_REPORT("xlog_header_check_recover(2)", 293 XFS_ERRLEVEL_HIGH, mp); ··· 312 * h_fs_uuid is nil, we assume this log was last mounted 313 * by IRIX and continue. 314 */ 315 + xfs_warn(mp, "nil uuid in log - IRIX style log"); 316 } else if (unlikely(!uuid_equal(&mp->m_sb.sb_uuid, &head->h_fs_uuid))) { 317 + xfs_warn(mp, "log has mismatched uuid - can't recover"); 318 xlog_header_check_dump(mp, head); 319 XFS_ERROR_REPORT("xlog_header_check_mount", 320 XFS_ERRLEVEL_HIGH, mp); ··· 490 for (i = (*last_blk) - 1; i >= 0; i--) { 491 if (i < start_blk) { 492 /* valid log record not found */ 493 + xfs_warn(log->l_mp, 494 + "Log inconsistent (didn't find previous header)"); 495 ASSERT(0); 496 error = XFS_ERROR(EIO); 497 goto out; ··· 591 * mkfs etc write a dummy unmount record to a fresh 592 * log so we can store the uuid in there 593 */ 594 + xfs_warn(log->l_mp, "totally zeroed log"); 595 } 596 597 return 0; 598 } else if (error) { 599 + xfs_warn(log->l_mp, "empty log check failed"); 600 return error; 601 } 602 ··· 819 xlog_put_bp(bp); 820 821 if (error) 822 + xfs_warn(log->l_mp, "failed to find log head"); 823 return error; 824 } 825 ··· 912 } 913 } 914 if (!found) { 915 + xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__); 916 ASSERT(0); 917 return XFS_ERROR(EIO); 918 } ··· 1028 xlog_put_bp(bp); 1029 1030 if (error) 1031 + xfs_warn(log->l_mp, "failed to locate log tail"); 1032 return error; 1033 } 1034 ··· 1092 * the first block must be 1. If it's not, maybe we're 1093 * not looking at a log... Bail out. 
1094 */ 1095 + xfs_warn(log->l_mp, 1096 + "Log inconsistent or not a log (last==0, first!=1)"); 1097 return XFS_ERROR(EINVAL); 1098 } 1099 ··· 1506 if (list_empty(&trans->r_itemq)) { 1507 /* we need to catch log corruptions here */ 1508 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) { 1509 + xfs_warn(log->l_mp, "%s: bad header magic number", 1510 + __func__); 1511 ASSERT(0); 1512 return XFS_ERROR(EIO); 1513 } ··· 1534 if (item->ri_total == 0) { /* first region to be added */ 1535 if (in_f->ilf_size == 0 || 1536 in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) { 1537 + xfs_warn(log->l_mp, 1538 + "bad number of regions (%d) in inode log format", 1539 in_f->ilf_size); 1540 ASSERT(0); 1541 return XFS_ERROR(EIO); ··· 1592 list_move_tail(&item->ri_list, &trans->r_itemq); 1593 break; 1594 default: 1595 + xfs_warn(log->l_mp, 1596 + "%s: unrecognized type of log operation", 1597 + __func__); 1598 ASSERT(0); 1599 return XFS_ERROR(EIO); 1600 } ··· 1803 logged_nextp = item->ri_buf[item_index].i_addr + 1804 next_unlinked_offset - reg_buf_offset; 1805 if (unlikely(*logged_nextp == 0)) { 1806 + xfs_alert(mp, 1807 + "Bad inode buffer log record (ptr = 0x%p, bp = 0x%p). " 1808 + "Trying to replay bad (0) inode di_next_unlinked field.", 1809 item, bp); 1810 XFS_ERROR_REPORT("xlog_recover_do_inode_buf", 1811 XFS_ERRLEVEL_LOW, mp); ··· 1863 if (buf_f->blf_flags & 1864 (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) { 1865 if (item->ri_buf[i].i_addr == NULL) { 1866 + xfs_alert(mp, 1867 "XFS: NULL dquot in %s.", __func__); 1868 goto next; 1869 } 1870 if (item->ri_buf[i].i_len < sizeof(xfs_disk_dquot_t)) { 1871 + xfs_alert(mp, 1872 "XFS: dquot too small (%d) in %s.", 1873 item->ri_buf[i].i_len, __func__); 1874 goto next; 1875 } 1876 + error = xfs_qm_dqcheck(mp, item->ri_buf[i].i_addr, 1877 -1, 0, XFS_QMOPT_DOWARN, 1878 "dquot_buf_recover"); 1879 if (error) ··· 1898 */ 1899 int 1900 xfs_qm_dqcheck( 1901 + struct xfs_mount *mp, 1902 xfs_disk_dquot_t *ddq, 1903 xfs_dqid_t id, 1904 uint type, /* used only when IO_dorepair is true */ ··· 1924 */ 1925 if (be16_to_cpu(ddq->d_magic) != XFS_DQUOT_MAGIC) { 1926 if (flags & XFS_QMOPT_DOWARN) 1927 + xfs_alert(mp, 1928 "%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x", 1929 str, id, be16_to_cpu(ddq->d_magic), XFS_DQUOT_MAGIC); 1930 errs++; 1931 } 1932 if (ddq->d_version != XFS_DQUOT_VERSION) { 1933 if (flags & XFS_QMOPT_DOWARN) 1934 + xfs_alert(mp, 1935 "%s : XFS dquot ID 0x%x, version 0x%x != 0x%x", 1936 str, id, ddq->d_version, XFS_DQUOT_VERSION); 1937 errs++; ··· 1941 ddq->d_flags != XFS_DQ_PROJ && 1942 ddq->d_flags != XFS_DQ_GROUP) { 1943 if (flags & XFS_QMOPT_DOWARN) 1944 + xfs_alert(mp, 1945 "%s : XFS dquot ID 0x%x, unknown flags 0x%x", 1946 str, id, ddq->d_flags); 1947 errs++; ··· 1949 1950 if (id != -1 && id != be32_to_cpu(ddq->d_id)) { 1951 if (flags & XFS_QMOPT_DOWARN) 1952 + xfs_alert(mp, 1953 "%s : ondisk-dquot 0x%p, ID mismatch: " 1954 "0x%x expected, found id 0x%x", 1955 str, ddq, id, be32_to_cpu(ddq->d_id)); ··· 1962 be64_to_cpu(ddq->d_blk_softlimit)) { 1963 if (!ddq->d_btimer) { 1964 if (flags & XFS_QMOPT_DOWARN) 1965 + xfs_alert(mp, 1966 + "%s : Dquot ID 0x%x (0x%p) BLK TIMER NOT STARTED", 1967 str, (int)be32_to_cpu(ddq->d_id), ddq); 1968 errs++; 1969 } ··· 1974 be64_to_cpu(ddq->d_ino_softlimit)) { 1975 if (!ddq->d_itimer) { 1976 if (flags & XFS_QMOPT_DOWARN) 1977 + xfs_alert(mp, 1978 + "%s : Dquot ID 0x%x (0x%p) INODE TIMER NOT STARTED", 1979 str, (int)be32_to_cpu(ddq->d_id), ddq); 1980 errs++; 1981 } ··· 1986 be64_to_cpu(ddq->d_rtb_softlimit)) { 1987 if 
(!ddq->d_rtbtimer) { 1988 if (flags & XFS_QMOPT_DOWARN) 1989 + xfs_alert(mp, 1990 + "%s : Dquot ID 0x%x (0x%p) RTBLK TIMER NOT STARTED", 1991 str, (int)be32_to_cpu(ddq->d_id), ddq); 1992 errs++; 1993 } ··· 1999 return errs; 2000 2001 if (flags & XFS_QMOPT_DOWARN) 2002 + xfs_notice(mp, "Re-initializing dquot ID 0x%x", id); 2003 2004 /* 2005 * Typically, a repair is only requested by quotacheck. ··· 2218 */ 2219 if (unlikely(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC)) { 2220 xfs_buf_relse(bp); 2221 + xfs_alert(mp, 2222 + "%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld", 2223 + __func__, dip, bp, in_f->ilf_ino); 2224 XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)", 2225 XFS_ERRLEVEL_LOW, mp); 2226 error = EFSCORRUPTED; ··· 2229 dicp = item->ri_buf[1].i_addr; 2230 if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) { 2231 xfs_buf_relse(bp); 2232 + xfs_alert(mp, 2233 + "%s: Bad inode log record, rec ptr 0x%p, ino %Ld", 2234 + __func__, item, in_f->ilf_ino); 2235 XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)", 2236 XFS_ERRLEVEL_LOW, mp); 2237 error = EFSCORRUPTED; ··· 2263 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)", 2264 XFS_ERRLEVEL_LOW, mp, dicp); 2265 xfs_buf_relse(bp); 2266 + xfs_alert(mp, 2267 + "%s: Bad regular inode log record, rec ptr 0x%p, " 2268 + "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", 2269 + __func__, item, dip, bp, in_f->ilf_ino); 2270 error = EFSCORRUPTED; 2271 goto error; 2272 } ··· 2276 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)", 2277 XFS_ERRLEVEL_LOW, mp, dicp); 2278 xfs_buf_relse(bp); 2279 + xfs_alert(mp, 2280 + "%s: Bad dir inode log record, rec ptr 0x%p, " 2281 + "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld", 2282 + __func__, item, dip, bp, in_f->ilf_ino); 2283 error = EFSCORRUPTED; 2284 goto error; 2285 } ··· 2287 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)", 2288 XFS_ERRLEVEL_LOW, mp, dicp); 2289 xfs_buf_relse(bp); 2290 + xfs_alert(mp, 2291 + "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, " 2292 + "dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld", 2293 + __func__, item, dip, bp, in_f->ilf_ino, 2294 dicp->di_nextents + dicp->di_anextents, 2295 dicp->di_nblocks); 2296 error = EFSCORRUPTED; ··· 2299 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)", 2300 XFS_ERRLEVEL_LOW, mp, dicp); 2301 xfs_buf_relse(bp); 2302 + xfs_alert(mp, 2303 + "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, " 2304 + "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__, 2305 item, dip, bp, in_f->ilf_ino, dicp->di_forkoff); 2306 error = EFSCORRUPTED; 2307 goto error; ··· 2309 XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)", 2310 XFS_ERRLEVEL_LOW, mp, dicp); 2311 xfs_buf_relse(bp); 2312 + xfs_alert(mp, 2313 + "%s: Bad inode log record length %d, rec ptr 0x%p", 2314 + __func__, item->ri_buf[1].i_len, item); 2315 error = EFSCORRUPTED; 2316 goto error; 2317 } ··· 2398 break; 2399 2400 default: 2401 + xfs_warn(log->l_mp, "%s: Invalid flag", __func__); 2402 ASSERT(0); 2403 xfs_buf_relse(bp); 2404 error = EIO; ··· 2467 2468 recddq = item->ri_buf[1].i_addr; 2469 if (recddq == NULL) { 2470 + xfs_alert(log->l_mp, "NULL dquot in %s.", __func__); 2471 return XFS_ERROR(EIO); 2472 } 2473 if (item->ri_buf[1].i_len < sizeof(xfs_disk_dquot_t)) { 2474 + xfs_alert(log->l_mp, "dquot too small (%d) in %s.", 2475 item->ri_buf[1].i_len, __func__); 2476 return XFS_ERROR(EIO); 2477 } ··· 2498 */ 2499 dq_f = item->ri_buf[0].i_addr; 2500 ASSERT(dq_f); 2501 + error = xfs_qm_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN, 2502 + "xlog_recover_dquot_pass2 (log 
copy)"); 2503 + if (error) 2504 return XFS_ERROR(EIO); 2505 ASSERT(dq_f->qlf_len == 1); 2506 2507 error = xfs_read_buf(mp, mp->m_ddev_targp, ··· 2523 * was among a chunk of dquots created earlier, and we did some 2524 * minimal initialization then. 2525 */ 2526 + error = xfs_qm_dqcheck(mp, ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN, 2527 + "xlog_recover_dquot_pass2"); 2528 + if (error) { 2529 xfs_buf_relse(bp); 2530 return XFS_ERROR(EIO); 2531 } ··· 2676 /* nothing to do in pass 1 */ 2677 return 0; 2678 default: 2679 + xfs_warn(log->l_mp, "%s: invalid item type (%d)", 2680 + __func__, ITEM_TYPE(item)); 2681 ASSERT(0); 2682 return XFS_ERROR(EIO); 2683 } ··· 2707 /* nothing to do in pass2 */ 2708 return 0; 2709 default: 2710 + xfs_warn(log->l_mp, "%s: invalid item type (%d)", 2711 + __func__, ITEM_TYPE(item)); 2712 ASSERT(0); 2713 return XFS_ERROR(EIO); 2714 } ··· 2751 2752 STATIC int 2753 xlog_recover_unmount_trans( 2754 + struct log *log, 2755 xlog_recover_t *trans) 2756 { 2757 /* Do nothing now */ 2758 + xfs_warn(log->l_mp, "%s: Unmount LR", __func__); 2759 return 0; 2760 } 2761 ··· 2797 dp += sizeof(xlog_op_header_t); 2798 if (ohead->oh_clientid != XFS_TRANSACTION && 2799 ohead->oh_clientid != XFS_LOG) { 2800 + xfs_warn(log->l_mp, "%s: bad clientid 0x%x", 2801 + __func__, ohead->oh_clientid); 2802 ASSERT(0); 2803 return (XFS_ERROR(EIO)); 2804 } ··· 2811 be64_to_cpu(rhead->h_lsn)); 2812 } else { 2813 if (dp + be32_to_cpu(ohead->oh_len) > lp) { 2814 + xfs_warn(log->l_mp, "%s: bad length 0x%x", 2815 + __func__, be32_to_cpu(ohead->oh_len)); 2816 WARN_ON(1); 2817 return (XFS_ERROR(EIO)); 2818 } ··· 2825 trans, pass); 2826 break; 2827 case XLOG_UNMOUNT_TRANS: 2828 + error = xlog_recover_unmount_trans(log, trans); 2829 break; 2830 case XLOG_WAS_CONT_TRANS: 2831 error = xlog_recover_add_to_cont_trans(log, ··· 2833 be32_to_cpu(ohead->oh_len)); 2834 break; 2835 case XLOG_START_TRANS: 2836 + xfs_warn(log->l_mp, "%s: bad transaction", 2837 + __func__); 2838 ASSERT(0); 2839 error = XFS_ERROR(EIO); 2840 break; ··· 2844 dp, be32_to_cpu(ohead->oh_len)); 2845 break; 2846 default: 2847 + xfs_warn(log->l_mp, "%s: bad flag 0x%x", 2848 + __func__, flags); 2849 ASSERT(0); 2850 error = XFS_ERROR(EIO); 2851 break; ··· 3030 out_abort: 3031 xfs_trans_cancel(tp, XFS_TRANS_ABORT); 3032 out_error: 3033 + xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno); 3034 return; 3035 } 3036 ··· 3282 if (unlikely( 3283 (!rhead->h_version || 3284 (be32_to_cpu(rhead->h_version) & (~XLOG_VERSION_OKBITS))))) { 3285 + xfs_warn(log->l_mp, "%s: unrecognised log version (%d).", 3286 __func__, be32_to_cpu(rhead->h_version)); 3287 return XFS_ERROR(EIO); 3288 } ··· 3740 return error; 3741 } 3742 3743 + xfs_notice(log->l_mp, "Starting recovery (logdev: %s)", 3744 + log->l_mp->m_logname ? log->l_mp->m_logname 3745 + : "internal"); 3746 3747 error = xlog_do_recover(log, head_blk, tail_blk); 3748 log->l_flags |= XLOG_RECOVERY_NEEDED; ··· 3776 int error; 3777 error = xlog_recover_process_efis(log); 3778 if (error) { 3779 + xfs_alert(log->l_mp, "Failed to recover EFIs"); 3780 return error; 3781 } 3782 /* ··· 3793 3794 xlog_recover_check_summary(log); 3795 3796 + xfs_notice(log->l_mp, "Ending recovery (logdev: %s)", 3797 + log->l_mp->m_logname ? 
log->l_mp->m_logname 3798 + : "internal"); 3799 log->l_flags &= ~XLOG_RECOVERY_NEEDED; 3800 } else { 3801 + xfs_info(log->l_mp, "Ending clean mount"); 3802 } 3803 return 0; 3804 } ··· 3834 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) { 3835 error = xfs_read_agf(mp, NULL, agno, 0, &agfbp); 3836 if (error) { 3837 + xfs_alert(mp, "%s agf read failed agno %d error %d", 3838 + __func__, agno, error); 3839 } else { 3840 agfp = XFS_BUF_TO_AGF(agfbp); 3841 freeblks += be32_to_cpu(agfp->agf_freeblks) + ··· 3846 } 3847 3848 error = xfs_read_agi(mp, NULL, agno, &agibp); 3849 + if (error) { 3850 + xfs_alert(mp, "%s agi read failed agno %d error %d", 3851 + __func__, agno, error); 3852 + } else { 3853 struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp); 3854 3855 itotal += be32_to_cpu(agi->agi_count);
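Beyond the message conversions, this file also picks up the new xfs_qm_dqcheck() signature: the mount point becomes the first argument so the dquot checks can name the filesystem, and the call sites now capture the return value before acting on it. Before/after, taken from the dquot replay path above:

	/* before */
	if ((error = xfs_qm_dqcheck(recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
			"xlog_recover_dquot_pass2 (log copy)"))) {
		return XFS_ERROR(EIO);
	}

	/* after */
	error = xfs_qm_dqcheck(mp, recddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
			"xlog_recover_dquot_pass2 (log copy)");
	if (error)
		return XFS_ERROR(EIO);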
+75 -73
fs/xfs/xfs_mount.c
··· 133 return 0; 134 135 if (uuid_is_nil(uuid)) { 136 - cmn_err(CE_WARN, 137 - "XFS: Filesystem %s has nil UUID - can't mount", 138 - mp->m_fsname); 139 return XFS_ERROR(EINVAL); 140 } 141 ··· 161 162 out_duplicate: 163 mutex_unlock(&xfs_uuid_table_mutex); 164 - cmn_err(CE_WARN, "XFS: Filesystem %s has duplicate UUID - can't mount", 165 - mp->m_fsname); 166 return XFS_ERROR(EINVAL); 167 } 168 ··· 308 xfs_sb_t *sbp, 309 int flags) 310 { 311 /* 312 * If the log device and data device have the 313 * same device number, the log is internal. ··· 318 * a volume filesystem in a non-volume manner. 319 */ 320 if (sbp->sb_magicnum != XFS_SB_MAGIC) { 321 - xfs_fs_mount_cmn_err(flags, "bad magic number"); 322 return XFS_ERROR(EWRONGFS); 323 } 324 325 if (!xfs_sb_good_version(sbp)) { 326 - xfs_fs_mount_cmn_err(flags, "bad version"); 327 return XFS_ERROR(EWRONGFS); 328 } 329 330 if (unlikely( 331 sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) { 332 - xfs_fs_mount_cmn_err(flags, 333 - "filesystem is marked as having an external log; " 334 - "specify logdev on the\nmount command line."); 335 return XFS_ERROR(EINVAL); 336 } 337 338 if (unlikely( 339 sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) { 340 - xfs_fs_mount_cmn_err(flags, 341 - "filesystem is marked as having an internal log; " 342 - "do not specify logdev on\nthe mount command line."); 343 return XFS_ERROR(EINVAL); 344 } 345 ··· 372 (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) || 373 (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) || 374 (sbp->sb_imax_pct > 100 /* zero sb_imax_pct is valid */))) { 375 - xfs_fs_mount_cmn_err(flags, "SB sanity check 1 failed"); 376 return XFS_ERROR(EFSCORRUPTED); 377 } 378 ··· 386 (xfs_drfsbno_t)sbp->sb_agcount * sbp->sb_agblocks || 387 sbp->sb_dblocks < (xfs_drfsbno_t)(sbp->sb_agcount - 1) * 388 sbp->sb_agblocks + XFS_MIN_AG_BLOCKS)) { 389 - xfs_fs_mount_cmn_err(flags, "SB sanity check 2 failed"); 390 return XFS_ERROR(EFSCORRUPTED); 391 } 392 ··· 395 * Until this is fixed only page-sized or smaller data blocks work. 396 */ 397 if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) { 398 - xfs_fs_mount_cmn_err(flags, 399 - "file system with blocksize %d bytes", 400 - sbp->sb_blocksize); 401 - xfs_fs_mount_cmn_err(flags, 402 - "only pagesize (%ld) or less will currently work.", 403 - PAGE_SIZE); 404 return XFS_ERROR(ENOSYS); 405 } 406 ··· 414 case 2048: 415 break; 416 default: 417 - xfs_fs_mount_cmn_err(flags, 418 - "inode size of %d bytes not supported", 419 - sbp->sb_inodesize); 420 return XFS_ERROR(ENOSYS); 421 } 422 423 if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) || 424 xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) { 425 - xfs_fs_mount_cmn_err(flags, 426 - "file system too large to be mounted on this system."); 427 return XFS_ERROR(EFBIG); 428 } 429 430 if (unlikely(sbp->sb_inprogress)) { 431 - xfs_fs_mount_cmn_err(flags, "file system busy"); 432 return XFS_ERROR(EFSCORRUPTED); 433 } 434 ··· 438 * Version 1 directory format has never worked on Linux. 
439 */ 440 if (unlikely(!xfs_sb_version_hasdirv2(sbp))) { 441 - xfs_fs_mount_cmn_err(flags, 442 - "file system using version 1 directory format"); 443 return XFS_ERROR(ENOSYS); 444 } 445 ··· 681 unsigned int sector_size; 682 xfs_buf_t *bp; 683 int error; 684 685 ASSERT(mp->m_sb_bp == NULL); 686 ASSERT(mp->m_ddev_targp != NULL); ··· 697 bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp, 698 XFS_SB_DADDR, sector_size, 0); 699 if (!bp) { 700 - xfs_fs_mount_cmn_err(flags, "SB buffer read failed"); 701 return EIO; 702 } 703 ··· 709 xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp)); 710 error = xfs_mount_validate_sb(mp, &(mp->m_sb), flags); 711 if (error) { 712 - xfs_fs_mount_cmn_err(flags, "SB validate failed"); 713 goto release_buf; 714 } 715 ··· 718 * We must be able to do sector-sized and sector-aligned IO. 719 */ 720 if (sector_size > mp->m_sb.sb_sectsize) { 721 - xfs_fs_mount_cmn_err(flags, 722 - "device supports only %u byte sectors (not %u)", 723 - sector_size, mp->m_sb.sb_sectsize); 724 error = ENOSYS; 725 goto release_buf; 726 } ··· 864 if ((BBTOB(mp->m_dalign) & mp->m_blockmask) || 865 (BBTOB(mp->m_swidth) & mp->m_blockmask)) { 866 if (mp->m_flags & XFS_MOUNT_RETERR) { 867 - cmn_err(CE_WARN, 868 - "XFS: alignment check 1 failed"); 869 return XFS_ERROR(EINVAL); 870 } 871 mp->m_dalign = mp->m_swidth = 0; ··· 877 if (mp->m_flags & XFS_MOUNT_RETERR) { 878 return XFS_ERROR(EINVAL); 879 } 880 - xfs_fs_cmn_err(CE_WARN, mp, 881 - "stripe alignment turned off: sunit(%d)/swidth(%d) incompatible with agsize(%d)", 882 mp->m_dalign, mp->m_swidth, 883 sbp->sb_agblocks); 884 ··· 889 mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth); 890 } else { 891 if (mp->m_flags & XFS_MOUNT_RETERR) { 892 - xfs_fs_cmn_err(CE_WARN, mp, 893 - "stripe alignment turned off: sunit(%d) less than bsize(%d)", 894 - mp->m_dalign, 895 mp->m_blockmask +1); 896 return XFS_ERROR(EINVAL); 897 } ··· 1037 1038 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks); 1039 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) { 1040 - cmn_err(CE_WARN, "XFS: filesystem size mismatch detected"); 1041 return XFS_ERROR(EFBIG); 1042 } 1043 bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp, 1044 d - XFS_FSS_TO_BB(mp, 1), 1045 BBTOB(XFS_FSS_TO_BB(mp, 1)), 0); 1046 if (!bp) { 1047 - cmn_err(CE_WARN, "XFS: last sector read failed"); 1048 return EIO; 1049 } 1050 xfs_buf_relse(bp); ··· 1052 if (mp->m_logdev_targp != mp->m_ddev_targp) { 1053 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks); 1054 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) { 1055 - cmn_err(CE_WARN, "XFS: log size mismatch detected"); 1056 return XFS_ERROR(EFBIG); 1057 } 1058 bp = xfs_buf_read_uncached(mp, mp->m_logdev_targp, 1059 d - XFS_FSB_TO_BB(mp, 1), 1060 XFS_FSB_TO_B(mp, 1), 0); 1061 if (!bp) { 1062 - cmn_err(CE_WARN, "XFS: log device read failed"); 1063 return EIO; 1064 } 1065 xfs_buf_relse(bp); ··· 1097 return 0; 1098 1099 #ifdef QUOTADEBUG 1100 - xfs_fs_cmn_err(CE_NOTE, mp, "Writing superblock quota changes"); 1101 #endif 1102 1103 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE); ··· 1105 XFS_DEFAULT_LOG_COUNT); 1106 if (error) { 1107 xfs_trans_cancel(tp, 0); 1108 - xfs_fs_cmn_err(CE_ALERT, mp, 1109 - "xfs_mount_reset_sbqflags: Superblock update failed!"); 1110 return error; 1111 } 1112 ··· 1171 * transaction subsystem is online. 
1172 */ 1173 if (xfs_sb_has_mismatched_features2(sbp)) { 1174 - cmn_err(CE_WARN, 1175 - "XFS: correcting sb_features alignment problem"); 1176 sbp->sb_features2 |= sbp->sb_bad_features2; 1177 sbp->sb_bad_features2 = sbp->sb_features2; 1178 mp->m_update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2; ··· 1250 */ 1251 error = xfs_rtmount_init(mp); 1252 if (error) { 1253 - cmn_err(CE_WARN, "XFS: RT mount failed"); 1254 goto out_remove_uuid; 1255 } 1256 ··· 1281 INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC); 1282 error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi); 1283 if (error) { 1284 - cmn_err(CE_WARN, "XFS: Failed per-ag init: %d", error); 1285 goto out_remove_uuid; 1286 } 1287 1288 if (!sbp->sb_logblocks) { 1289 - cmn_err(CE_WARN, "XFS: no log defined"); 1290 XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp); 1291 error = XFS_ERROR(EFSCORRUPTED); 1292 goto out_free_perag; ··· 1299 XFS_FSB_TO_DADDR(mp, sbp->sb_logstart), 1300 XFS_FSB_TO_BB(mp, sbp->sb_logblocks)); 1301 if (error) { 1302 - cmn_err(CE_WARN, "XFS: log mount failed"); 1303 goto out_free_perag; 1304 } 1305 ··· 1336 */ 1337 error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip); 1338 if (error) { 1339 - cmn_err(CE_WARN, "XFS: failed to read root inode"); 1340 goto out_log_dealloc; 1341 } 1342 1343 ASSERT(rip != NULL); 1344 1345 if (unlikely((rip->i_d.di_mode & S_IFMT) != S_IFDIR)) { 1346 - cmn_err(CE_WARN, "XFS: corrupted root inode"); 1347 - cmn_err(CE_WARN, "Device %s - root %llu is not a directory", 1348 - XFS_BUFTARG_NAME(mp->m_ddev_targp), 1349 (unsigned long long)rip->i_ino); 1350 xfs_iunlock(rip, XFS_ILOCK_EXCL); 1351 XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW, ··· 1363 /* 1364 * Free up the root inode. 1365 */ 1366 - cmn_err(CE_WARN, "XFS: failed to read RT inodes"); 1367 goto out_rele_rip; 1368 } 1369 ··· 1375 if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) { 1376 error = xfs_mount_log_sb(mp, mp->m_update_flags); 1377 if (error) { 1378 - cmn_err(CE_WARN, "XFS: failed to write sb changes"); 1379 goto out_rtunmount; 1380 } 1381 } ··· 1396 * quotachecked license. 1397 */ 1398 if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) { 1399 - cmn_err(CE_NOTE, 1400 - "XFS: resetting qflags for filesystem %s", 1401 - mp->m_fsname); 1402 - 1403 error = xfs_mount_reset_sbqflags(mp); 1404 if (error) 1405 return error; ··· 1410 */ 1411 error = xfs_log_mount_finish(mp); 1412 if (error) { 1413 - cmn_err(CE_WARN, "XFS: log mount finish failed"); 1414 goto out_rtunmount; 1415 } 1416 ··· 1439 resblks = xfs_default_resblks(mp); 1440 error = xfs_reserve_blocks(mp, &resblks, NULL); 1441 if (error) 1442 - cmn_err(CE_WARN, "XFS: Unable to allocate reserve " 1443 - "blocks. Continuing without a reserve pool."); 1444 } 1445 1446 return 0; ··· 1529 resblks = 0; 1530 error = xfs_reserve_blocks(mp, &resblks, NULL); 1531 if (error) 1532 - cmn_err(CE_WARN, "XFS: Unable to free reserved block pool. " 1533 "Freespace may not be correct on next mount."); 1534 1535 error = xfs_log_sbcount(mp, 1); 1536 if (error) 1537 - cmn_err(CE_WARN, "XFS: Unable to update superblock counters. 
" 1538 "Freespace may not be correct on next mount."); 1539 xfs_unmountfs_writesb(mp); 1540 xfs_unmountfs_wait(mp); /* wait for async bufs */ ··· 2017 if (xfs_readonly_buftarg(mp->m_ddev_targp) || 2018 xfs_readonly_buftarg(mp->m_logdev_targp) || 2019 (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) { 2020 - cmn_err(CE_NOTE, 2021 - "XFS: %s required on read-only device.", message); 2022 - cmn_err(CE_NOTE, 2023 - "XFS: write access unavailable, cannot proceed."); 2024 return EROFS; 2025 } 2026 return 0;
··· 133 return 0; 134 135 if (uuid_is_nil(uuid)) { 136 + xfs_warn(mp, "Filesystem has nil UUID - can't mount"); 137 return XFS_ERROR(EINVAL); 138 } 139 ··· 163 164 out_duplicate: 165 mutex_unlock(&xfs_uuid_table_mutex); 166 + xfs_warn(mp, "Filesystem has duplicate UUID - can't mount"); 167 return XFS_ERROR(EINVAL); 168 } 169 ··· 311 xfs_sb_t *sbp, 312 int flags) 313 { 314 + int loud = !(flags & XFS_MFSI_QUIET); 315 + 316 /* 317 * If the log device and data device have the 318 * same device number, the log is internal. ··· 319 * a volume filesystem in a non-volume manner. 320 */ 321 if (sbp->sb_magicnum != XFS_SB_MAGIC) { 322 + if (loud) 323 + xfs_warn(mp, "bad magic number"); 324 return XFS_ERROR(EWRONGFS); 325 } 326 327 if (!xfs_sb_good_version(sbp)) { 328 + if (loud) 329 + xfs_warn(mp, "bad version"); 330 return XFS_ERROR(EWRONGFS); 331 } 332 333 if (unlikely( 334 sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) { 335 + if (loud) 336 + xfs_warn(mp, 337 + "filesystem is marked as having an external log; " 338 + "specify logdev on the mount command line."); 339 return XFS_ERROR(EINVAL); 340 } 341 342 if (unlikely( 343 sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) { 344 + if (loud) 345 + xfs_warn(mp, 346 + "filesystem is marked as having an internal log; " 347 + "do not specify logdev on the mount command line."); 348 return XFS_ERROR(EINVAL); 349 } 350 ··· 369 (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) || 370 (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) || 371 (sbp->sb_imax_pct > 100 /* zero sb_imax_pct is valid */))) { 372 + if (loud) 373 + xfs_warn(mp, "SB sanity check 1 failed"); 374 return XFS_ERROR(EFSCORRUPTED); 375 } 376 ··· 382 (xfs_drfsbno_t)sbp->sb_agcount * sbp->sb_agblocks || 383 sbp->sb_dblocks < (xfs_drfsbno_t)(sbp->sb_agcount - 1) * 384 sbp->sb_agblocks + XFS_MIN_AG_BLOCKS)) { 385 + if (loud) 386 + xfs_warn(mp, "SB sanity check 2 failed"); 387 return XFS_ERROR(EFSCORRUPTED); 388 } 389 ··· 390 * Until this is fixed only page-sized or smaller data blocks work. 391 */ 392 if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) { 393 + if (loud) { 394 + xfs_warn(mp, 395 + "File system with blocksize %d bytes. " 396 + "Only pagesize (%ld) or less will currently work.", 397 + sbp->sb_blocksize, PAGE_SIZE); 398 + } 399 return XFS_ERROR(ENOSYS); 400 } 401 ··· 409 case 2048: 410 break; 411 default: 412 + if (loud) 413 + xfs_warn(mp, "inode size of %d bytes not supported", 414 + sbp->sb_inodesize); 415 return XFS_ERROR(ENOSYS); 416 } 417 418 if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) || 419 xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) { 420 + if (loud) 421 + xfs_warn(mp, 422 + "file system too large to be mounted on this system."); 423 return XFS_ERROR(EFBIG); 424 } 425 426 if (unlikely(sbp->sb_inprogress)) { 427 + if (loud) 428 + xfs_warn(mp, "file system busy"); 429 return XFS_ERROR(EFSCORRUPTED); 430 } 431 ··· 431 * Version 1 directory format has never worked on Linux. 
432 */ 433 if (unlikely(!xfs_sb_version_hasdirv2(sbp))) { 434 + if (loud) 435 + xfs_warn(mp, 436 + "file system using version 1 directory format"); 437 return XFS_ERROR(ENOSYS); 438 } 439 ··· 673 unsigned int sector_size; 674 xfs_buf_t *bp; 675 int error; 676 + int loud = !(flags & XFS_MFSI_QUIET); 677 678 ASSERT(mp->m_sb_bp == NULL); 679 ASSERT(mp->m_ddev_targp != NULL); ··· 688 bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp, 689 XFS_SB_DADDR, sector_size, 0); 690 if (!bp) { 691 + if (loud) 692 + xfs_warn(mp, "SB buffer read failed"); 693 return EIO; 694 } 695 ··· 699 xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp)); 700 error = xfs_mount_validate_sb(mp, &(mp->m_sb), flags); 701 if (error) { 702 + if (loud) 703 + xfs_warn(mp, "SB validate failed"); 704 goto release_buf; 705 } 706 ··· 707 * We must be able to do sector-sized and sector-aligned IO. 708 */ 709 if (sector_size > mp->m_sb.sb_sectsize) { 710 + if (loud) 711 + xfs_warn(mp, "device supports %u byte sectors (not %u)", 712 + sector_size, mp->m_sb.sb_sectsize); 713 error = ENOSYS; 714 goto release_buf; 715 } ··· 853 if ((BBTOB(mp->m_dalign) & mp->m_blockmask) || 854 (BBTOB(mp->m_swidth) & mp->m_blockmask)) { 855 if (mp->m_flags & XFS_MOUNT_RETERR) { 856 + xfs_warn(mp, "alignment check 1 failed"); 857 return XFS_ERROR(EINVAL); 858 } 859 mp->m_dalign = mp->m_swidth = 0; ··· 867 if (mp->m_flags & XFS_MOUNT_RETERR) { 868 return XFS_ERROR(EINVAL); 869 } 870 + xfs_warn(mp, 871 + "stripe alignment turned off: sunit(%d)/swidth(%d) " 872 + "incompatible with agsize(%d)", 873 mp->m_dalign, mp->m_swidth, 874 sbp->sb_agblocks); 875 ··· 878 mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth); 879 } else { 880 if (mp->m_flags & XFS_MOUNT_RETERR) { 881 + xfs_warn(mp, 882 + "stripe alignment turned off: sunit(%d) less than bsize(%d)", 883 + mp->m_dalign, 884 mp->m_blockmask +1); 885 return XFS_ERROR(EINVAL); 886 } ··· 1026 1027 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks); 1028 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) { 1029 + xfs_warn(mp, "filesystem size mismatch detected"); 1030 return XFS_ERROR(EFBIG); 1031 } 1032 bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp, 1033 d - XFS_FSS_TO_BB(mp, 1), 1034 BBTOB(XFS_FSS_TO_BB(mp, 1)), 0); 1035 if (!bp) { 1036 + xfs_warn(mp, "last sector read failed"); 1037 return EIO; 1038 } 1039 xfs_buf_relse(bp); ··· 1041 if (mp->m_logdev_targp != mp->m_ddev_targp) { 1042 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks); 1043 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) { 1044 + xfs_warn(mp, "log size mismatch detected"); 1045 return XFS_ERROR(EFBIG); 1046 } 1047 bp = xfs_buf_read_uncached(mp, mp->m_logdev_targp, 1048 d - XFS_FSB_TO_BB(mp, 1), 1049 XFS_FSB_TO_B(mp, 1), 0); 1050 if (!bp) { 1051 + xfs_warn(mp, "log device read failed"); 1052 return EIO; 1053 } 1054 xfs_buf_relse(bp); ··· 1086 return 0; 1087 1088 #ifdef QUOTADEBUG 1089 + xfs_notice(mp, "Writing superblock quota changes"); 1090 #endif 1091 1092 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE); ··· 1094 XFS_DEFAULT_LOG_COUNT); 1095 if (error) { 1096 xfs_trans_cancel(tp, 0); 1097 + xfs_alert(mp, "%s: Superblock update failed!", __func__); 1098 return error; 1099 } 1100 ··· 1161 * transaction subsystem is online. 
1162 */ 1163 if (xfs_sb_has_mismatched_features2(sbp)) { 1164 + xfs_warn(mp, "correcting sb_features alignment problem"); 1165 sbp->sb_features2 |= sbp->sb_bad_features2; 1166 sbp->sb_bad_features2 = sbp->sb_features2; 1167 mp->m_update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2; ··· 1241 */ 1242 error = xfs_rtmount_init(mp); 1243 if (error) { 1244 + xfs_warn(mp, "RT mount failed"); 1245 goto out_remove_uuid; 1246 } 1247 ··· 1272 INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC); 1273 error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi); 1274 if (error) { 1275 + xfs_warn(mp, "Failed per-ag init: %d", error); 1276 goto out_remove_uuid; 1277 } 1278 1279 if (!sbp->sb_logblocks) { 1280 + xfs_warn(mp, "no log defined"); 1281 XFS_ERROR_REPORT("xfs_mountfs", XFS_ERRLEVEL_LOW, mp); 1282 error = XFS_ERROR(EFSCORRUPTED); 1283 goto out_free_perag; ··· 1290 XFS_FSB_TO_DADDR(mp, sbp->sb_logstart), 1291 XFS_FSB_TO_BB(mp, sbp->sb_logblocks)); 1292 if (error) { 1293 + xfs_warn(mp, "log mount failed"); 1294 goto out_free_perag; 1295 } 1296 ··· 1327 */ 1328 error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip); 1329 if (error) { 1330 + xfs_warn(mp, "failed to read root inode"); 1331 goto out_log_dealloc; 1332 } 1333 1334 ASSERT(rip != NULL); 1335 1336 if (unlikely((rip->i_d.di_mode & S_IFMT) != S_IFDIR)) { 1337 + xfs_warn(mp, "corrupted root inode %llu: not a directory", 1338 (unsigned long long)rip->i_ino); 1339 xfs_iunlock(rip, XFS_ILOCK_EXCL); 1340 XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW, ··· 1356 /* 1357 * Free up the root inode. 1358 */ 1359 + xfs_warn(mp, "failed to read RT inodes"); 1360 goto out_rele_rip; 1361 } 1362 ··· 1368 if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) { 1369 error = xfs_mount_log_sb(mp, mp->m_update_flags); 1370 if (error) { 1371 + xfs_warn(mp, "failed to write sb changes"); 1372 goto out_rtunmount; 1373 } 1374 } ··· 1389 * quotachecked license. 1390 */ 1391 if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) { 1392 + xfs_notice(mp, "resetting quota flags"); 1393 error = xfs_mount_reset_sbqflags(mp); 1394 if (error) 1395 return error; ··· 1406 */ 1407 error = xfs_log_mount_finish(mp); 1408 if (error) { 1409 + xfs_warn(mp, "log mount finish failed"); 1410 goto out_rtunmount; 1411 } 1412 ··· 1435 resblks = xfs_default_resblks(mp); 1436 error = xfs_reserve_blocks(mp, &resblks, NULL); 1437 if (error) 1438 + xfs_warn(mp, 1439 + "Unable to allocate reserve blocks. Continuing without reserve pool."); 1440 } 1441 1442 return 0; ··· 1525 resblks = 0; 1526 error = xfs_reserve_blocks(mp, &resblks, NULL); 1527 if (error) 1528 + xfs_warn(mp, "Unable to free reserved block pool. " 1529 "Freespace may not be correct on next mount."); 1530 1531 error = xfs_log_sbcount(mp, 1); 1532 if (error) 1533 + xfs_warn(mp, "Unable to update superblock counters. " 1534 "Freespace may not be correct on next mount."); 1535 xfs_unmountfs_writesb(mp); 1536 xfs_unmountfs_wait(mp); /* wait for async bufs */ ··· 2013 if (xfs_readonly_buftarg(mp->m_ddev_targp) || 2014 xfs_readonly_buftarg(mp->m_logdev_targp) || 2015 (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) { 2016 + xfs_notice(mp, "%s required on read-only device.", message); 2017 + xfs_notice(mp, "write access unavailable, cannot proceed."); 2018 return EROFS; 2019 } 2020 return 0;
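This file also drops xfs_fs_mount_cmn_err(): the mount-time quiet behaviour the macro used to hide is now an explicit test of XFS_MFSI_QUIET at each call site. The shape of the conversion in xfs_mount_validate_sb(), as seen above:

	int	loud = !(flags & XFS_MFSI_QUIET);

	/* was: xfs_fs_mount_cmn_err(flags, "bad magic number"); */
	if (loud)
		xfs_warn(mp, "bad magic number");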
+2 -1
fs/xfs/xfs_quota.h
··· 382 xfs_trans_reserve_quota_bydquots(tp, mp, ud, gd, nb, ni, \ 383 f | XFS_QMOPT_RES_REGBLKS) 384 385 - extern int xfs_qm_dqcheck(xfs_disk_dquot_t *, xfs_dqid_t, uint, uint, char *); 386 extern int xfs_mount_reset_sbqflags(struct xfs_mount *); 387 388 #endif /* __KERNEL__ */
··· 382 xfs_trans_reserve_quota_bydquots(tp, mp, ud, gd, nb, ni, \ 383 f | XFS_QMOPT_RES_REGBLKS) 384 385 + extern int xfs_qm_dqcheck(struct xfs_mount *, xfs_disk_dquot_t *, 386 + xfs_dqid_t, uint, uint, char *); 387 extern int xfs_mount_reset_sbqflags(struct xfs_mount *); 388 389 #endif /* __KERNEL__ */
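The only change to xfs_quota.h is the extra struct xfs_mount * argument on xfs_qm_dqcheck(), presumably so dquot verification can report against a specific mount rather than anonymously. A hypothetical illustration of threading such a context pointer through a checker (all names here are invented):

#include <stdio.h>

struct demo_mount { const char *m_fsname; };
struct demo_dquot { unsigned int id; int magic_ok; };

/* Old-style check: can only say "a dquot is bad", not on which fs. */
static int check_dquot_old(const struct demo_dquot *dq)
{
        if (!dq->magic_ok) {
                fprintf(stderr, "bad dquot %u\n", dq->id);
                return -1;
        }
        return 0;
}

/* New-style check: the mount comes along, so the diagnostic identifies
 * the filesystem the corrupt dquot belongs to. */
static int check_dquot(const struct demo_mount *mp,
                       const struct demo_dquot *dq)
{
        if (!dq->magic_ok) {
                fprintf(stderr, "XFS (%s): bad dquot %u\n",
                        mp->m_fsname, dq->id);
                return -1;
        }
        return 0;
}

int main(void)
{
        struct demo_mount mp = { "sdb2" };
        struct demo_dquot dq = { 42, 0 };

        check_dquot_old(&dq);
        check_dquot(&mp, &dq);
        return 0;
}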
+38 -54
fs/xfs/xfs_rtalloc.c
··· 76 xfs_mount_t *mp, /* file system mount point */ 77 xfs_extlen_t oblocks, /* old count of blocks */ 78 xfs_extlen_t nblocks, /* new count of blocks */ 79 - xfs_ino_t ino) /* inode number (bitmap/summary) */ 80 { 81 xfs_fileoff_t bno; /* block number in file */ 82 xfs_buf_t *bp; /* temporary buffer for zeroing */ ··· 86 xfs_fsblock_t firstblock; /* first block allocated in xaction */ 87 xfs_bmap_free_t flist; /* list of freed blocks */ 88 xfs_fsblock_t fsbno; /* filesystem block for bno */ 89 - xfs_inode_t *ip; /* pointer to incore inode */ 90 xfs_bmbt_irec_t map; /* block map output */ 91 int nmap; /* number of block maps */ 92 int resblks; /* space reservation */ ··· 111 /* 112 * Lock the inode. 113 */ 114 - if ((error = xfs_trans_iget(mp, tp, ino, 0, 115 - XFS_ILOCK_EXCL, &ip))) 116 - goto error_cancel; 117 xfs_bmap_init(&flist, &firstblock); 118 /* 119 * Allocate blocks to the bitmap file. ··· 154 /* 155 * Lock the bitmap inode. 156 */ 157 - if ((error = xfs_trans_iget(mp, tp, ino, 0, 158 - XFS_ILOCK_EXCL, &ip))) 159 - goto error_cancel; 160 /* 161 * Get a buffer for the block. 162 */ ··· 1852 xfs_rtblock_t bmbno; /* bitmap block number */ 1853 xfs_buf_t *bp; /* temporary buffer */ 1854 int error; /* error return value */ 1855 - xfs_inode_t *ip; /* bitmap inode, used as lock */ 1856 xfs_mount_t *nmp; /* new (fake) mount structure */ 1857 xfs_drfsbno_t nrblocks; /* new number of realtime blocks */ 1858 xfs_extlen_t nrbmblocks; /* new number of rt bitmap blocks */ ··· 1915 /* 1916 * Allocate space to the bitmap and summary files, as necessary. 1917 */ 1918 - if ((error = xfs_growfs_rt_alloc(mp, rbmblocks, nrbmblocks, 1919 - mp->m_sb.sb_rbmino))) 1920 return error; 1921 - if ((error = xfs_growfs_rt_alloc(mp, rsumblocks, nrsumblocks, 1922 - mp->m_sb.sb_rsumino))) 1923 return error; 1924 /* 1925 * Allocate a new (fake) mount/sb. ··· 1969 /* 1970 * Lock out other callers by grabbing the bitmap inode lock. 1971 */ 1972 - if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0, 1973 - XFS_ILOCK_EXCL, &ip))) 1974 - goto error_cancel; 1975 - ASSERT(ip == mp->m_rbmip); 1976 /* 1977 * Update the bitmap inode's size. 1978 */ ··· 1981 /* 1982 * Get the summary inode into the transaction. 1983 */ 1984 - if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rsumino, 0, 1985 - XFS_ILOCK_EXCL, &ip))) 1986 - goto error_cancel; 1987 - ASSERT(ip == mp->m_rsumip); 1988 /* 1989 * Update the summary inode's size. 1990 */ ··· 2068 xfs_extlen_t prod, /* extent product factor */ 2069 xfs_rtblock_t *rtblock) /* out: start block allocated */ 2070 { 2071 int error; /* error value */ 2072 - xfs_inode_t *ip; /* inode for bitmap file */ 2073 - xfs_mount_t *mp; /* file system mount structure */ 2074 xfs_rtblock_t r; /* result allocated block */ 2075 xfs_fsblock_t sb; /* summary file block number */ 2076 xfs_buf_t *sumbp; /* summary file block buffer */ 2077 2078 ASSERT(minlen > 0 && minlen <= maxlen); 2079 - mp = tp->t_mountp; 2080 /* 2081 * If prod is set then figure out what to do to minlen and maxlen. 2082 */ ··· 2092 return 0; 2093 } 2094 } 2095 - /* 2096 - * Lock out other callers by grabbing the bitmap inode lock. 2097 - */ 2098 - if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0, 2099 - XFS_ILOCK_EXCL, &ip))) 2100 - return error; 2101 sumbp = NULL; 2102 /* 2103 * Allocate by size, or near another block, or exactly at some block. 
··· 2111 len, &sumbp, &sb, prod, &r); 2112 break; 2113 default: 2114 ASSERT(0); 2115 } 2116 - if (error) { 2117 return error; 2118 - } 2119 /* 2120 * If it worked, update the superblock. 2121 */ ··· 2144 xfs_extlen_t len) /* length of extent freed */ 2145 { 2146 int error; /* error value */ 2147 - xfs_inode_t *ip; /* bitmap file inode */ 2148 xfs_mount_t *mp; /* file system mount structure */ 2149 xfs_fsblock_t sb; /* summary file block number */ 2150 xfs_buf_t *sumbp; /* summary file block buffer */ ··· 2152 /* 2153 * Synchronize by locking the bitmap inode. 2154 */ 2155 - if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0, 2156 - XFS_ILOCK_EXCL, &ip))) 2157 - return error; 2158 #if defined(__KERNEL__) && defined(DEBUG) 2159 /* 2160 * Check to see that this whole range is currently allocated. ··· 2187 */ 2188 if (tp->t_frextents_delta + mp->m_sb.sb_frextents == 2189 mp->m_sb.sb_rextents) { 2190 - if (!(ip->i_d.di_flags & XFS_DIFLAG_NEWRTBM)) 2191 - ip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM; 2192 - *(__uint64_t *)&ip->i_d.di_atime = 0; 2193 - xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 2194 } 2195 return 0; 2196 } ··· 2210 if (sbp->sb_rblocks == 0) 2211 return 0; 2212 if (mp->m_rtdev_targp == NULL) { 2213 - cmn_err(CE_WARN, 2214 - "XFS: This filesystem has a realtime volume, use rtdev=device option"); 2215 return XFS_ERROR(ENODEV); 2216 } 2217 mp->m_rsumlevels = sbp->sb_rextslog + 1; ··· 2225 */ 2226 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks); 2227 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_rblocks) { 2228 - cmn_err(CE_WARN, "XFS: realtime mount -- %llu != %llu", 2229 (unsigned long long) XFS_BB_TO_FSB(mp, d), 2230 (unsigned long long) mp->m_sb.sb_rblocks); 2231 return XFS_ERROR(EFBIG); ··· 2234 d - XFS_FSB_TO_BB(mp, 1), 2235 XFS_FSB_TO_B(mp, 1), 0); 2236 if (!bp) { 2237 - cmn_err(CE_WARN, "XFS: realtime device size check failed"); 2238 return EIO; 2239 } 2240 xfs_buf_relse(bp); ··· 2294 xfs_rtblock_t *pick) /* result rt extent */ 2295 { 2296 xfs_rtblock_t b; /* result block */ 2297 - int error; /* error return value */ 2298 - xfs_inode_t *ip; /* bitmap incore inode */ 2299 int log2; /* log of sequence number */ 2300 __uint64_t resid; /* residual after log removed */ 2301 __uint64_t seq; /* sequence number of file creation */ 2302 __uint64_t *seqp; /* pointer to seqno in inode */ 2303 2304 - if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0, 2305 - XFS_ILOCK_EXCL, &ip))) 2306 - return error; 2307 - ASSERT(ip == mp->m_rbmip); 2308 - seqp = (__uint64_t *)&ip->i_d.di_atime; 2309 - if (!(ip->i_d.di_flags & XFS_DIFLAG_NEWRTBM)) { 2310 - ip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM; 2311 *seqp = 0; 2312 } 2313 seq = *seqp; ··· 2319 b = mp->m_sb.sb_rextents - len; 2320 } 2321 *seqp = seq + 1; 2322 - xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); 2323 *pick = b; 2324 return 0; 2325 }
··· 76 xfs_mount_t *mp, /* file system mount point */ 77 xfs_extlen_t oblocks, /* old count of blocks */ 78 xfs_extlen_t nblocks, /* new count of blocks */ 79 + xfs_inode_t *ip) /* inode (bitmap/summary) */ 80 { 81 xfs_fileoff_t bno; /* block number in file */ 82 xfs_buf_t *bp; /* temporary buffer for zeroing */ ··· 86 xfs_fsblock_t firstblock; /* first block allocated in xaction */ 87 xfs_bmap_free_t flist; /* list of freed blocks */ 88 xfs_fsblock_t fsbno; /* filesystem block for bno */ 89 xfs_bmbt_irec_t map; /* block map output */ 90 int nmap; /* number of block maps */ 91 int resblks; /* space reservation */ ··· 112 /* 113 * Lock the inode. 114 */ 115 + xfs_ilock(ip, XFS_ILOCK_EXCL); 116 + xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL); 117 + 118 xfs_bmap_init(&flist, &firstblock); 119 /* 120 * Allocate blocks to the bitmap file. ··· 155 /* 156 * Lock the bitmap inode. 157 */ 158 + xfs_ilock(ip, XFS_ILOCK_EXCL); 159 + xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL); 160 /* 161 * Get a buffer for the block. 162 */ ··· 1854 xfs_rtblock_t bmbno; /* bitmap block number */ 1855 xfs_buf_t *bp; /* temporary buffer */ 1856 int error; /* error return value */ 1857 xfs_mount_t *nmp; /* new (fake) mount structure */ 1858 xfs_drfsbno_t nrblocks; /* new number of realtime blocks */ 1859 xfs_extlen_t nrbmblocks; /* new number of rt bitmap blocks */ ··· 1918 /* 1919 * Allocate space to the bitmap and summary files, as necessary. 1920 */ 1921 + error = xfs_growfs_rt_alloc(mp, rbmblocks, nrbmblocks, mp->m_rbmip); 1922 + if (error) 1923 return error; 1924 + error = xfs_growfs_rt_alloc(mp, rsumblocks, nrsumblocks, mp->m_rsumip); 1925 + if (error) 1926 return error; 1927 /* 1928 * Allocate a new (fake) mount/sb. ··· 1972 /* 1973 * Lock out other callers by grabbing the bitmap inode lock. 1974 */ 1975 + xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL); 1976 + xfs_trans_ijoin_ref(tp, mp->m_rbmip, XFS_ILOCK_EXCL); 1977 /* 1978 * Update the bitmap inode's size. 1979 */ ··· 1986 /* 1987 * Get the summary inode into the transaction. 1988 */ 1989 + xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL); 1990 + xfs_trans_ijoin_ref(tp, mp->m_rsumip, XFS_ILOCK_EXCL); 1991 /* 1992 * Update the summary inode's size. 1993 */ ··· 2075 xfs_extlen_t prod, /* extent product factor */ 2076 xfs_rtblock_t *rtblock) /* out: start block allocated */ 2077 { 2078 + xfs_mount_t *mp = tp->t_mountp; 2079 int error; /* error value */ 2080 xfs_rtblock_t r; /* result allocated block */ 2081 xfs_fsblock_t sb; /* summary file block number */ 2082 xfs_buf_t *sumbp; /* summary file block buffer */ 2083 2084 + ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL)); 2085 ASSERT(minlen > 0 && minlen <= maxlen); 2086 + 2087 /* 2088 * If prod is set then figure out what to do to minlen and maxlen. 2089 */ ··· 2099 return 0; 2100 } 2101 } 2102 + 2103 sumbp = NULL; 2104 /* 2105 * Allocate by size, or near another block, or exactly at some block. ··· 2123 len, &sumbp, &sb, prod, &r); 2124 break; 2125 default: 2126 + error = EIO; 2127 ASSERT(0); 2128 } 2129 + if (error) 2130 return error; 2131 + 2132 /* 2133 * If it worked, update the superblock. 2134 */ ··· 2155 xfs_extlen_t len) /* length of extent freed */ 2156 { 2157 int error; /* error value */ 2158 xfs_mount_t *mp; /* file system mount structure */ 2159 xfs_fsblock_t sb; /* summary file block number */ 2160 xfs_buf_t *sumbp; /* summary file block buffer */ ··· 2164 /* 2165 * Synchronize by locking the bitmap inode. 
2166 */ 2167 + xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL); 2168 + xfs_trans_ijoin_ref(tp, mp->m_rbmip, XFS_ILOCK_EXCL); 2169 + 2170 #if defined(__KERNEL__) && defined(DEBUG) 2171 /* 2172 * Check to see that this whole range is currently allocated. ··· 2199 */ 2200 if (tp->t_frextents_delta + mp->m_sb.sb_frextents == 2201 mp->m_sb.sb_rextents) { 2202 + if (!(mp->m_rbmip->i_d.di_flags & XFS_DIFLAG_NEWRTBM)) 2203 + mp->m_rbmip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM; 2204 + *(__uint64_t *)&mp->m_rbmip->i_d.di_atime = 0; 2205 + xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE); 2206 } 2207 return 0; 2208 } ··· 2222 if (sbp->sb_rblocks == 0) 2223 return 0; 2224 if (mp->m_rtdev_targp == NULL) { 2225 + xfs_warn(mp, 2226 + "Filesystem has a realtime volume, use rtdev=device option"); 2227 return XFS_ERROR(ENODEV); 2228 } 2229 mp->m_rsumlevels = sbp->sb_rextslog + 1; ··· 2237 */ 2238 d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks); 2239 if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_rblocks) { 2240 + xfs_warn(mp, "realtime mount -- %llu != %llu", 2241 (unsigned long long) XFS_BB_TO_FSB(mp, d), 2242 (unsigned long long) mp->m_sb.sb_rblocks); 2243 return XFS_ERROR(EFBIG); ··· 2246 d - XFS_FSB_TO_BB(mp, 1), 2247 XFS_FSB_TO_B(mp, 1), 0); 2248 if (!bp) { 2249 + xfs_warn(mp, "realtime device size check failed"); 2250 return EIO; 2251 } 2252 xfs_buf_relse(bp); ··· 2306 xfs_rtblock_t *pick) /* result rt extent */ 2307 { 2308 xfs_rtblock_t b; /* result block */ 2309 int log2; /* log of sequence number */ 2310 __uint64_t resid; /* residual after log removed */ 2311 __uint64_t seq; /* sequence number of file creation */ 2312 __uint64_t *seqp; /* pointer to seqno in inode */ 2313 2314 + ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL)); 2315 + 2316 + seqp = (__uint64_t *)&mp->m_rbmip->i_d.di_atime; 2317 + if (!(mp->m_rbmip->i_d.di_flags & XFS_DIFLAG_NEWRTBM)) { 2318 + mp->m_rbmip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM; 2319 *seqp = 0; 2320 } 2321 seq = *seqp; ··· 2335 b = mp->m_sb.sb_rextents - len; 2336 } 2337 *seqp = seq + 1; 2338 + xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE); 2339 *pick = b; 2340 return 0; 2341 }
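The realtime allocator hunks drop the xfs_trans_iget() lookups in favour of locking the cached bitmap/summary inodes (mp->m_rbmip, mp->m_rsumip) directly and joining them to the transaction with xfs_trans_ijoin_ref(); downstream helpers then merely assert the lock via xfs_isilocked(). A rough user-space analogue of that lock, join, assert-in-callee pattern, with a pthread mutex standing in for the inode lock (all demo_* names are invented):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

/* Stand-in for a cached metadata inode: a lock plus a "locked" flag so
 * helpers can assert the caller did its job, like xfs_isilocked(). */
struct demo_inode {
        pthread_mutex_t lock;
        int locked;
};

/* Stand-in for a transaction that remembers which inode it must release. */
struct demo_trans {
        struct demo_inode *joined;
};

static void demo_ilock(struct demo_inode *ip)
{
        pthread_mutex_lock(&ip->lock);
        ip->locked = 1;
}

/* Join the already-locked inode to the transaction; commit or cancel will
 * drop the lock, so the caller must not unlock it by hand. */
static void demo_trans_ijoin(struct demo_trans *tp, struct demo_inode *ip)
{
        tp->joined = ip;
}

/* A helper deep in the allocator: it no longer takes the lock itself,
 * it just asserts that the caller already holds it. */
static void demo_alloc_extent(struct demo_trans *tp, struct demo_inode *bmip)
{
        assert(bmip->locked);
        printf("allocating with bitmap inode locked\n");
        (void)tp;
}

static void demo_trans_commit(struct demo_trans *tp)
{
        if (tp->joined) {
                tp->joined->locked = 0;
                pthread_mutex_unlock(&tp->joined->lock);
                tp->joined = NULL;
        }
}

int main(void)
{
        struct demo_inode rbmip = { .locked = 0 };
        struct demo_trans tp = { NULL };

        pthread_mutex_init(&rbmip.lock, NULL);
        demo_ilock(&rbmip);             /* xfs_ilock(mp->m_rbmip, ...)      */
        demo_trans_ijoin(&tp, &rbmip);  /* xfs_trans_ijoin_ref(tp, ip, ...) */
        demo_alloc_extent(&tp, &rbmip); /* callee only asserts the lock     */
        demo_trans_commit(&tp);         /* unlock happens at commit time    */
        return 0;
}

Because the locked inode is joined to the transaction, the unlock happens at commit or cancel time, which is why the converted callers above no longer unlock by hand.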
+1 -1
fs/xfs/xfs_rtalloc.h
··· 154 if (mp->m_sb.sb_rblocks == 0) 155 return 0; 156 157 - cmn_err(CE_WARN, "XFS: Not built with CONFIG_XFS_RT"); 158 return ENOSYS; 159 } 160 # define xfs_rtmount_inodes(m) (((mp)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS))
··· 154 if (mp->m_sb.sb_rblocks == 0) 155 return 0; 156 157 + xfs_warn(mp, "Not built with CONFIG_XFS_RT"); 158 return ENOSYS; 159 } 160 # define xfs_rtmount_inodes(m) (((mp)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS))
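Here only the message moves to xfs_warn(); the surrounding stub is the usual compile-out pattern: when realtime support is not built, mounting succeeds if the filesystem has no rt blocks and otherwise warns and fails with ENOSYS. A self-contained imitation of that pattern (DEMO_CONFIG_RT and the demo_* names are invented):

#include <errno.h>
#include <stdio.h>

/* Build with -DDEMO_CONFIG_RT to get the "real" implementation. */

struct demo_mount { unsigned long rblocks; const char *name; };

#ifdef DEMO_CONFIG_RT
static int demo_rtmount_init(struct demo_mount *mp)
{
        printf("initialising realtime volume on %s\n", mp->name);
        return 0;
}
#else
/* Feature compiled out: harmless if the fs has no rt blocks, otherwise
 * warn and refuse with ENOSYS, as the header stub above does. */
static int demo_rtmount_init(struct demo_mount *mp)
{
        if (mp->rblocks == 0)
                return 0;
        fprintf(stderr, "demo (%s): not built with realtime support\n",
                mp->name);
        return ENOSYS;
}
#endif

int main(void)
{
        struct demo_mount plain = { 0, "sda1" };
        struct demo_mount rt = { 1024, "sdb1" };

        printf("plain: %d\n", demo_rtmount_init(&plain));
        printf("rt:    %d\n", demo_rtmount_init(&rt));
        return 0;
}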
+22 -36
fs/xfs/xfs_rw.c
··· 49 logerror = flags & SHUTDOWN_LOG_IO_ERROR; 50 51 if (!(flags & SHUTDOWN_FORCE_UMOUNT)) { 52 - cmn_err(CE_NOTE, "xfs_force_shutdown(%s,0x%x) called from " 53 - "line %d of file %s. Return address = 0x%p", 54 - mp->m_fsname, flags, lnnum, fname, __return_address); 55 } 56 /* 57 * No need to duplicate efforts. ··· 69 return; 70 71 if (flags & SHUTDOWN_CORRUPT_INCORE) { 72 - xfs_cmn_err(XFS_PTAG_SHUTDOWN_CORRUPT, CE_ALERT, mp, 73 - "Corruption of in-memory data detected. Shutting down filesystem: %s", 74 - mp->m_fsname); 75 - if (XFS_ERRLEVEL_HIGH <= xfs_error_level) { 76 xfs_stack_trace(); 77 - } 78 } else if (!(flags & SHUTDOWN_FORCE_UMOUNT)) { 79 if (logerror) { 80 - xfs_cmn_err(XFS_PTAG_SHUTDOWN_LOGERROR, CE_ALERT, mp, 81 - "Log I/O Error Detected. Shutting down filesystem: %s", 82 - mp->m_fsname); 83 } else if (flags & SHUTDOWN_DEVICE_REQ) { 84 - xfs_cmn_err(XFS_PTAG_SHUTDOWN_IOERROR, CE_ALERT, mp, 85 - "All device paths lost. Shutting down filesystem: %s", 86 - mp->m_fsname); 87 } else if (!(flags & SHUTDOWN_REMOTE_REQ)) { 88 - xfs_cmn_err(XFS_PTAG_SHUTDOWN_IOERROR, CE_ALERT, mp, 89 - "I/O Error Detected. Shutting down filesystem: %s", 90 - mp->m_fsname); 91 } 92 } 93 if (!(flags & SHUTDOWN_FORCE_UMOUNT)) { 94 - cmn_err(CE_ALERT, "Please umount the filesystem, " 95 - "and rectify the problem(s)"); 96 } 97 } 98 ··· 101 xfs_buf_t *bp, 102 xfs_daddr_t blkno) 103 { 104 - cmn_err(CE_ALERT, 105 - "I/O error in filesystem (\"%s\") meta-data dev %s block 0x%llx" 106 - " (\"%s\") error %d buf count %zd", 107 - (!mp || !mp->m_fsname) ? "(fs name not set)" : mp->m_fsname, 108 XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)), 109 (__uint64_t)blkno, func, 110 XFS_BUF_GETERROR(bp), XFS_BUF_COUNT(bp)); ··· 167 xfs_get_extsz_hint( 168 struct xfs_inode *ip) 169 { 170 - xfs_extlen_t extsz; 171 - 172 - if (unlikely(XFS_IS_REALTIME_INODE(ip))) { 173 - extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) 174 - ? ip->i_d.di_extsize 175 - : ip->i_mount->m_sb.sb_rextsize; 176 - ASSERT(extsz); 177 - } else { 178 - extsz = (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) 179 - ? ip->i_d.di_extsize : 0; 180 - } 181 - 182 - return extsz; 183 }
··· 49 logerror = flags & SHUTDOWN_LOG_IO_ERROR; 50 51 if (!(flags & SHUTDOWN_FORCE_UMOUNT)) { 52 + xfs_notice(mp, 53 + "%s(0x%x) called from line %d of file %s. Return address = 0x%p", 54 + __func__, flags, lnnum, fname, __return_address); 55 } 56 /* 57 * No need to duplicate efforts. ··· 69 return; 70 71 if (flags & SHUTDOWN_CORRUPT_INCORE) { 72 + xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_CORRUPT, 73 + "Corruption of in-memory data detected. Shutting down filesystem"); 74 + if (XFS_ERRLEVEL_HIGH <= xfs_error_level) 75 xfs_stack_trace(); 76 } else if (!(flags & SHUTDOWN_FORCE_UMOUNT)) { 77 if (logerror) { 78 + xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR, 79 + "Log I/O Error Detected. Shutting down filesystem"); 80 } else if (flags & SHUTDOWN_DEVICE_REQ) { 81 + xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR, 82 + "All device paths lost. Shutting down filesystem"); 83 } else if (!(flags & SHUTDOWN_REMOTE_REQ)) { 84 + xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR, 85 + "I/O Error Detected. Shutting down filesystem"); 86 } 87 } 88 if (!(flags & SHUTDOWN_FORCE_UMOUNT)) { 89 + xfs_alert(mp, 90 + "Please umount the filesystem and rectify the problem(s)"); 91 } 92 } 93 ··· 106 xfs_buf_t *bp, 107 xfs_daddr_t blkno) 108 { 109 + xfs_alert(mp, 110 + "I/O error occurred: meta-data dev %s block 0x%llx" 111 + " (\"%s\") error %d buf count %zd", 112 XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)), 113 (__uint64_t)blkno, func, 114 XFS_BUF_GETERROR(bp), XFS_BUF_COUNT(bp)); ··· 173 xfs_get_extsz_hint( 174 struct xfs_inode *ip) 175 { 176 + if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize) 177 + return ip->i_d.di_extsize; 178 + if (XFS_IS_REALTIME_INODE(ip)) 179 + return ip->i_mount->m_sb.sb_rextsize; 180 + return 0; 181 }
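Besides moving the shutdown and I/O-error reports to xfs_notice()/xfs_alert()/xfs_alert_tag(), the rewrite above flattens xfs_get_extsz_hint() into three early returns: a non-zero per-inode hint wins when XFS_DIFLAG_EXTSIZE is set, realtime inodes fall back to the filesystem's rt extent size, and everything else gets no hint. A standalone sketch of that decision order, with invented stub types instead of the real inode layout:

#include <stdio.h>

#define DEMO_DIFLAG_EXTSIZE     0x1     /* per-inode extent size hint set  */
#define DEMO_DIFLAG_REALTIME    0x2     /* inode lives on the rt subvolume */

struct demo_inode {
        unsigned int    di_flags;       /* on-disk style flag bits         */
        unsigned int    di_extsize;     /* per-inode hint, may be zero     */
        unsigned int    sb_rextsize;    /* fs-wide rt extent size stand-in */
};

/* Same decision order as the rewritten helper: explicit hint first,
 * then the realtime default, then "no hint". */
static unsigned int demo_extsz_hint(const struct demo_inode *ip)
{
        if ((ip->di_flags & DEMO_DIFLAG_EXTSIZE) && ip->di_extsize)
                return ip->di_extsize;
        if (ip->di_flags & DEMO_DIFLAG_REALTIME)
                return ip->sb_rextsize;
        return 0;
}

int main(void)
{
        struct demo_inode a = { DEMO_DIFLAG_EXTSIZE, 16, 64 };
        struct demo_inode b = { DEMO_DIFLAG_REALTIME, 0, 64 };
        struct demo_inode c = { 0, 0, 64 };

        printf("%u %u %u\n", demo_extsz_hint(&a), demo_extsz_hint(&b),
               demo_extsz_hint(&c));    /* prints: 16 64 0 */
        return 0;
}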
-2
fs/xfs/xfs_trans.h
··· 469 void xfs_trans_stale_inode_buf(xfs_trans_t *, struct xfs_buf *); 470 void xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint); 471 void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *); 472 - int xfs_trans_iget(struct xfs_mount *, xfs_trans_t *, 473 - xfs_ino_t , uint, uint, struct xfs_inode **); 474 void xfs_trans_ichgtime(struct xfs_trans *, struct xfs_inode *, int); 475 void xfs_trans_ijoin_ref(struct xfs_trans *, struct xfs_inode *, uint); 476 void xfs_trans_ijoin(struct xfs_trans *, struct xfs_inode *);
··· 469 void xfs_trans_stale_inode_buf(xfs_trans_t *, struct xfs_buf *); 470 void xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint); 471 void xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *); 472 void xfs_trans_ichgtime(struct xfs_trans *, struct xfs_inode *, int); 473 void xfs_trans_ijoin_ref(struct xfs_trans *, struct xfs_inode *, uint); 474 void xfs_trans_ijoin(struct xfs_trans *, struct xfs_inode *);
+1 -1
fs/xfs/xfs_trans_ail.c
··· 563 564 spin_unlock(&ailp->xa_lock); 565 if (!XFS_FORCED_SHUTDOWN(mp)) { 566 - xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp, 567 "%s: attempting to delete a log item that is not in the AIL", 568 __func__); 569 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
··· 563 564 spin_unlock(&ailp->xa_lock); 565 if (!XFS_FORCED_SHUTDOWN(mp)) { 566 + xfs_alert_tag(mp, XFS_PTAG_AILDELETE, 567 "%s: attempting to delete a log item that is not in the AIL", 568 __func__); 569 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+3 -3
fs/xfs/xfs_trans_buf.c
··· 305 if (xfs_error_target == target) { 306 if (((xfs_req_num++) % xfs_error_mod) == 0) { 307 xfs_buf_relse(bp); 308 - cmn_err(CE_DEBUG, "Returning error!\n"); 309 return XFS_ERROR(EIO); 310 } 311 } ··· 403 xfs_force_shutdown(tp->t_mountp, 404 SHUTDOWN_META_IO_ERROR); 405 xfs_buf_relse(bp); 406 - cmn_err(CE_DEBUG, "Returning trans error!\n"); 407 return XFS_ERROR(EIO); 408 } 409 } ··· 427 */ 428 #if defined(DEBUG) 429 if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp)) 430 - cmn_err(CE_NOTE, "about to pop assert, bp == 0x%p", bp); 431 #endif 432 ASSERT((XFS_BUF_BFLAGS(bp) & (XBF_STALE|XBF_DELWRI)) != 433 (XBF_STALE|XBF_DELWRI));
··· 305 if (xfs_error_target == target) { 306 if (((xfs_req_num++) % xfs_error_mod) == 0) { 307 xfs_buf_relse(bp); 308 + xfs_debug(mp, "Returning error!"); 309 return XFS_ERROR(EIO); 310 } 311 } ··· 403 xfs_force_shutdown(tp->t_mountp, 404 SHUTDOWN_META_IO_ERROR); 405 xfs_buf_relse(bp); 406 + xfs_debug(mp, "Returning trans error!"); 407 return XFS_ERROR(EIO); 408 } 409 } ··· 427 */ 428 #if defined(DEBUG) 429 if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp)) 430 + xfs_notice(mp, "about to pop assert, bp == 0x%p", bp); 431 #endif 432 ASSERT((XFS_BUF_BFLAGS(bp) & (XBF_STALE|XBF_DELWRI)) != 433 (XBF_STALE|XBF_DELWRI));
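Only the messages change in xfs_trans_buf.c; the surrounding DEBUG code is the error-injection path that fails every xfs_error_mod-th buffer read against a chosen target via the (xfs_req_num++ % xfs_error_mod) check. A minimal stand-alone version of that injection idea (the demo knobs are invented, not the kernel's):

#include <errno.h>
#include <stdio.h>

/* Knobs a debug build might expose: which device to poison and how often. */
static const void *error_target;
static unsigned int error_mod = 3;      /* fail every 3rd request */
static unsigned int req_num;

/* Pretend buffer read that injects EIO on every error_mod-th request
 * against the poisoned target, like the DEBUG path in the hunk above.
 * Positive errno return matches the XFS_ERROR(EIO) convention shown. */
static int demo_read_buf(const void *target)
{
        if (target == error_target && (req_num++ % error_mod) == 0) {
                fprintf(stderr, "demo: returning injected error\n");
                return EIO;
        }
        return 0;
}

int main(void)
{
        int dev;                        /* address used as the "target" id */
        int i;

        error_target = &dev;
        for (i = 0; i < 6; i++)
                printf("read %d -> %d\n", i, demo_read_buf(&dev));
        return 0;
}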
-22
fs/xfs/xfs_trans_inode.c
··· 44 #endif 45 46 /* 47 - * Get an inode and join it to the transaction. 48 - */ 49 - int 50 - xfs_trans_iget( 51 - xfs_mount_t *mp, 52 - xfs_trans_t *tp, 53 - xfs_ino_t ino, 54 - uint flags, 55 - uint lock_flags, 56 - xfs_inode_t **ipp) 57 - { 58 - int error; 59 - 60 - error = xfs_iget(mp, tp, ino, flags, lock_flags, ipp); 61 - if (!error && tp) { 62 - xfs_trans_ijoin(tp, *ipp); 63 - (*ipp)->i_itemp->ili_lock_flags = lock_flags; 64 - } 65 - return error; 66 - } 67 - 68 - /* 69 * Add a locked inode to the transaction. 70 * 71 * The inode must be locked, and it cannot be associated with any transaction.
··· 44 #endif 45 46 /* 47 * Add a locked inode to the transaction. 48 * 49 * The inode must be locked, and it cannot be associated with any transaction.
+23 -53
fs/xfs/xfs_vnodeops.c
··· 1189 * inode might be lost for a long time or forever. 1190 */ 1191 if (!XFS_FORCED_SHUTDOWN(mp)) { 1192 - cmn_err(CE_NOTE, 1193 - "xfs_inactive: xfs_ifree() returned an error = %d on %s", 1194 - error, mp->m_fsname); 1195 xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); 1196 } 1197 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT); ··· 1207 */ 1208 error = xfs_bmap_finish(&tp, &free_list, &committed); 1209 if (error) 1210 - xfs_fs_cmn_err(CE_NOTE, mp, "xfs_inactive: " 1211 - "xfs_bmap_finish() returned error %d", error); 1212 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); 1213 if (error) 1214 - xfs_fs_cmn_err(CE_NOTE, mp, "xfs_inactive: " 1215 - "xfs_trans_commit() returned error %d", error); 1216 } 1217 1218 /* ··· 1309 error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid, 1310 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp); 1311 if (error) 1312 - goto std_return; 1313 1314 if (is_dir) { 1315 rdev = 0; ··· 1389 } 1390 1391 /* 1392 - * At this point, we've gotten a newly allocated inode. 1393 - * It is locked (and joined to the transaction). 1394 - */ 1395 - ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); 1396 - 1397 - /* 1398 * Now we join the directory inode to the transaction. We do not do it 1399 * earlier because xfs_dir_ialloc might commit the previous transaction 1400 * (and release all the locks). An error from here on will result in ··· 1433 */ 1434 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp); 1435 1436 - /* 1437 - * xfs_trans_commit normally decrements the vnode ref count 1438 - * when it unlocks the inode. Since we want to return the 1439 - * vnode to the caller, we bump the vnode ref count now. 1440 - */ 1441 - IHOLD(ip); 1442 - 1443 error = xfs_bmap_finish(&tp, &free_list, &committed); 1444 if (error) 1445 - goto out_abort_rele; 1446 1447 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); 1448 - if (error) { 1449 - IRELE(ip); 1450 - goto out_dqrele; 1451 - } 1452 1453 xfs_qm_dqrele(udqp); 1454 xfs_qm_dqrele(gdqp); ··· 1453 cancel_flags |= XFS_TRANS_ABORT; 1454 out_trans_cancel: 1455 xfs_trans_cancel(tp, cancel_flags); 1456 - out_dqrele: 1457 - xfs_qm_dqrele(udqp); 1458 - xfs_qm_dqrele(gdqp); 1459 - 1460 - if (unlock_dp_on_error) 1461 - xfs_iunlock(dp, XFS_ILOCK_EXCL); 1462 - std_return: 1463 - return error; 1464 - 1465 - out_abort_rele: 1466 /* 1467 * Wait until after the current transaction is aborted to 1468 * release the inode. This prevents recursive transactions 1469 * and deadlocks from xfs_inactive. 1470 */ 1471 - xfs_bmap_cancel(&free_list); 1472 - cancel_flags |= XFS_TRANS_ABORT; 1473 - xfs_trans_cancel(tp, cancel_flags); 1474 - IRELE(ip); 1475 - unlock_dp_on_error = B_FALSE; 1476 - goto out_dqrele; 1477 } 1478 1479 #ifdef DEBUG ··· 2092 XFS_BMAPI_WRITE | XFS_BMAPI_METADATA, 2093 &first_block, resblks, mval, &nmaps, 2094 &free_list); 2095 - if (error) { 2096 - goto error1; 2097 - } 2098 2099 if (resblks) 2100 resblks -= fs_blocks; ··· 2125 error = xfs_dir_createname(tp, dp, link_name, ip->i_ino, 2126 &first_block, &free_list, resblks); 2127 if (error) 2128 - goto error1; 2129 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 2130 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); 2131 ··· 2137 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) { 2138 xfs_trans_set_sync(tp); 2139 } 2140 - 2141 - /* 2142 - * xfs_trans_commit normally decrements the vnode ref count 2143 - * when it unlocks the inode. Since we want to return the 2144 - * vnode to the caller, we bump the vnode ref count now. 
2145 - */ 2146 - IHOLD(ip); 2147 2148 error = xfs_bmap_finish(&tp, &free_list, &committed); 2149 if (error) {
··· 1189 * inode might be lost for a long time or forever. 1190 */ 1191 if (!XFS_FORCED_SHUTDOWN(mp)) { 1192 + xfs_notice(mp, "%s: xfs_ifree returned error %d", 1193 + __func__, error); 1194 xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); 1195 } 1196 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT); ··· 1208 */ 1209 error = xfs_bmap_finish(&tp, &free_list, &committed); 1210 if (error) 1211 + xfs_notice(mp, "%s: xfs_bmap_finish returned error %d", 1212 + __func__, error); 1213 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); 1214 if (error) 1215 + xfs_notice(mp, "%s: xfs_trans_commit returned error %d", 1216 + __func__, error); 1217 } 1218 1219 /* ··· 1310 error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid, 1311 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp); 1312 if (error) 1313 + return error; 1314 1315 if (is_dir) { 1316 rdev = 0; ··· 1390 } 1391 1392 /* 1393 * Now we join the directory inode to the transaction. We do not do it 1394 * earlier because xfs_dir_ialloc might commit the previous transaction 1395 * (and release all the locks). An error from here on will result in ··· 1440 */ 1441 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp); 1442 1443 error = xfs_bmap_finish(&tp, &free_list, &committed); 1444 if (error) 1445 + goto out_bmap_cancel; 1446 1447 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); 1448 + if (error) 1449 + goto out_release_inode; 1450 1451 xfs_qm_dqrele(udqp); 1452 xfs_qm_dqrele(gdqp); ··· 1469 cancel_flags |= XFS_TRANS_ABORT; 1470 out_trans_cancel: 1471 xfs_trans_cancel(tp, cancel_flags); 1472 + out_release_inode: 1473 /* 1474 * Wait until after the current transaction is aborted to 1475 * release the inode. This prevents recursive transactions 1476 * and deadlocks from xfs_inactive. 1477 */ 1478 + if (ip) 1479 + IRELE(ip); 1480 + 1481 + xfs_qm_dqrele(udqp); 1482 + xfs_qm_dqrele(gdqp); 1483 + 1484 + if (unlock_dp_on_error) 1485 + xfs_iunlock(dp, XFS_ILOCK_EXCL); 1486 + return error; 1487 } 1488 1489 #ifdef DEBUG ··· 2114 XFS_BMAPI_WRITE | XFS_BMAPI_METADATA, 2115 &first_block, resblks, mval, &nmaps, 2116 &free_list); 2117 + if (error) 2118 + goto error2; 2119 2120 if (resblks) 2121 resblks -= fs_blocks; ··· 2148 error = xfs_dir_createname(tp, dp, link_name, ip->i_ino, 2149 &first_block, &free_list, resblks); 2150 if (error) 2151 + goto error2; 2152 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); 2153 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); 2154 ··· 2160 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) { 2161 xfs_trans_set_sync(tp); 2162 } 2163 2164 error = xfs_bmap_finish(&tp, &free_list, &committed); 2165 if (error) {
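The xfs_create() rework above is mostly an error-path cleanup: failures now unwind through ordered labels (out_bmap_cancel, out_trans_cancel, out_release_inode), and the freshly allocated inode is released only after the transaction has been cancelled, so xfs_inactive cannot be entered with a transaction still outstanding. A generic, hypothetical sketch of that goto-unwind shape in plain C:

#include <stdio.h>
#include <stdlib.h>

/* Each step's undo action; the bodies just narrate what would happen. */
static void cancel_mapping(void)   { puts("cancel block mapping"); }
static void cancel_trans(void)     { puts("cancel transaction");   }
static void release_inode(void *i) { puts("release inode");        free(i); }

/* A create-like operation: every failure jumps to the label that undoes
 * everything done so far, in reverse order, and the "inode" is released
 * only after the transaction is gone, mirroring the reordered cleanup. */
static int demo_create(int fail_at)
{
        void *ip;
        int error = 0;

        ip = malloc(64);                /* allocate the new "inode" */
        if (!ip)
                return -1;

        if (fail_at == 1) { error = -1; goto out_cancel_mapping; }
        if (fail_at == 2) { error = -1; goto out_cancel_trans; }

        puts("commit");                 /* success path */
        free(ip);                       /* real code hands ip to the caller */
        return 0;

out_cancel_mapping:
        cancel_mapping();
out_cancel_trans:
        cancel_trans();
        /* Only now, with no transaction outstanding, drop the inode. */
        release_inode(ip);
        return error;
}

int main(void)
{
        demo_create(0);
        demo_create(1);
        demo_create(2);
        return 0;
}

The fall-through ordering of the labels is what keeps the cleanup in strict reverse order of setup without duplicating any of it.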