Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
fs/xfs/xfs_buf_item.c at v3.1-rc4
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"


kmem_zone_t     *xfs_buf_item_zone;

static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
        return container_of(lip, struct xfs_buf_log_item, bli_item);
}


#ifdef XFS_TRANS_DEBUG
/*
 * This function uses an alternate strategy for tracking the bytes
 * that the user requests to be logged.  This can then be used
 * in conjunction with the bli_orig array in the buf log item to
 * catch bugs in our callers' code.
 *
 * We also double check the bits set in xfs_buf_item_log using a
 * simple algorithm to check that every byte is accounted for.
 */
STATIC void
xfs_buf_item_log_debug(
        xfs_buf_log_item_t      *bip,
        uint                    first,
        uint                    last)
{
        uint    x;
        uint    byte;
        uint    nbytes;
        uint    chunk_num;
        uint    word_num;
        uint    bit_num;
        uint    bit_set;
        uint    *wordp;

        ASSERT(bip->bli_logged != NULL);
        byte = first;
        nbytes = last - first + 1;
        bfset(bip->bli_logged, first, nbytes);
        for (x = 0; x < nbytes; x++) {
                chunk_num = byte >> XFS_BLF_SHIFT;
                word_num = chunk_num >> BIT_TO_WORD_SHIFT;
                bit_num = chunk_num & (NBWORD - 1);
                wordp = &(bip->bli_format.blf_data_map[word_num]);
                bit_set = *wordp & (1 << bit_num);
                ASSERT(bit_set);
                byte++;
        }
}

/*
 * This function is called when we flush something into a buffer without
 * logging it.  This happens for things like inodes which are logged
 * separately from the buffer.
 */
void
xfs_buf_item_flush_log_debug(
        xfs_buf_t       *bp,
        uint            first,
        uint            last)
{
        xfs_buf_log_item_t      *bip = bp->b_fspriv;
        uint                    nbytes;

        if (bip == NULL || (bip->bli_item.li_type != XFS_LI_BUF))
                return;

        ASSERT(bip->bli_logged != NULL);
        nbytes = last - first + 1;
        bfset(bip->bli_logged, first, nbytes);
}

/*
 * This function is called to verify that our callers have logged
 * all the bytes that they changed.
 *
 * It does this by comparing the original copy of the buffer stored in
 * the buf log item's bli_orig array to the current copy of the buffer
 * and ensuring that all bytes which mismatch are set in the bli_logged
 * array of the buf log item.
 */
STATIC void
xfs_buf_item_log_check(
        xfs_buf_log_item_t      *bip)
{
        char            *orig;
        char            *buffer;
        int             x;
        xfs_buf_t       *bp;

        ASSERT(bip->bli_orig != NULL);
        ASSERT(bip->bli_logged != NULL);

        bp = bip->bli_buf;
        ASSERT(XFS_BUF_COUNT(bp) > 0);
        ASSERT(bp->b_addr != NULL);
        orig = bip->bli_orig;
        buffer = bp->b_addr;
        for (x = 0; x < XFS_BUF_COUNT(bp); x++) {
                if (orig[x] != buffer[x] && !btst(bip->bli_logged, x)) {
                        xfs_emerg(bp->b_mount,
                                "%s: bip %x buffer %x orig %x index %d",
                                __func__, bip, bp, orig, x);
                        ASSERT(0);
                }
        }
}
#else
#define         xfs_buf_item_log_debug(x,y,z)
#define         xfs_buf_item_log_check(x)
#endif

STATIC void     xfs_buf_do_callbacks(struct xfs_buf *bp);

/*
 * This returns the number of log iovecs needed to log the
 * given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure
 * and 1 for each stretch of non-contiguous chunks to be logged.
 * Contiguous chunks are logged in a single iovec.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing.
 */
STATIC uint
xfs_buf_item_size(
        struct xfs_log_item     *lip)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        struct xfs_buf          *bp = bip->bli_buf;
        uint                    nvecs;
        int                     next_bit;
        int                     last_bit;

        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        if (bip->bli_flags & XFS_BLI_STALE) {
                /*
                 * The buffer is stale, so all we need to log
                 * is the buf log format structure with the
                 * cancel flag in it.
                 */
                trace_xfs_buf_item_size_stale(bip);
                ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
                return 1;
        }

        ASSERT(bip->bli_flags & XFS_BLI_LOGGED);
        nvecs = 1;
        last_bit = xfs_next_bit(bip->bli_format.blf_data_map,
                                bip->bli_format.blf_map_size, 0);
        ASSERT(last_bit != -1);
        nvecs++;
        while (last_bit != -1) {
                /*
                 * This takes the bit number to start looking from and
                 * returns the next set bit from there.  It returns -1
                 * if there are no more bits set or the start bit is
                 * beyond the end of the bitmap.
                 */
                next_bit = xfs_next_bit(bip->bli_format.blf_data_map,
                                        bip->bli_format.blf_map_size,
                                        last_bit + 1);
                /*
                 * If we run out of bits, leave the loop,
                 * else if we find a new set of bits bump the number of vecs,
                 * else keep scanning the current set of bits.
                 */
                if (next_bit == -1) {
                        last_bit = -1;
                } else if (next_bit != last_bit + 1) {
                        last_bit = next_bit;
                        nvecs++;
                } else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
                           (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
                            XFS_BLF_CHUNK)) {
                        last_bit = next_bit;
                        nvecs++;
                } else {
                        last_bit++;
                }
        }

        trace_xfs_buf_item_size(bip);
        return nvecs;
}
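
/*
 * Worked example for xfs_buf_item_size(), assuming XFS_BLF_CHUNK is 128
 * (so each bit in blf_data_map covers a 128-byte chunk of the buffer):
 * with bits 0, 1, 5 and 6 set, the scan above finds two contiguous runs
 * ({0,1} and {5,6}), so the item needs one iovec for the format
 * structure plus one per run, and xfs_buf_item_size() returns nvecs == 3.
 */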

/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
STATIC void
xfs_buf_item_format(
        struct xfs_log_item     *lip,
        struct xfs_log_iovec    *vecp)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        struct xfs_buf          *bp = bip->bli_buf;
        uint                    base_size;
        uint                    nvecs;
        int                     first_bit;
        int                     last_bit;
        int                     next_bit;
        uint                    nbits;
        uint                    buffer_offset;

        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
               (bip->bli_flags & XFS_BLI_STALE));

        /*
         * The size of the base structure is the size of the
         * declared structure plus the space for the extra words
         * of the bitmap.  We subtract one from the map size, because
         * the first element of the bitmap is accounted for in the
         * size of the base structure.
         */
        base_size =
                (uint)(sizeof(xfs_buf_log_format_t) +
                       ((bip->bli_format.blf_map_size - 1) * sizeof(uint)));
        vecp->i_addr = &bip->bli_format;
        vecp->i_len = base_size;
        vecp->i_type = XLOG_REG_TYPE_BFORMAT;
        vecp++;
        nvecs = 1;

        /*
         * If it is an inode buffer, transfer the in-memory state to the
         * format flags and clear the in-memory state. We do not transfer
         * this state if the inode buffer allocation has not yet been committed
         * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
         * correct replay of the inode allocation.
         */
        if (bip->bli_flags & XFS_BLI_INODE_BUF) {
                if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
                      xfs_log_item_in_current_chkpt(lip)))
                        bip->bli_format.blf_flags |= XFS_BLF_INODE_BUF;
                bip->bli_flags &= ~XFS_BLI_INODE_BUF;
        }

        if (bip->bli_flags & XFS_BLI_STALE) {
                /*
                 * The buffer is stale, so all we need to log
                 * is the buf log format structure with the
                 * cancel flag in it.
                 */
                trace_xfs_buf_item_format_stale(bip);
                ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
                bip->bli_format.blf_size = nvecs;
                return;
        }

        /*
         * Fill in an iovec for each set of contiguous chunks.
         */
        first_bit = xfs_next_bit(bip->bli_format.blf_data_map,
                                 bip->bli_format.blf_map_size, 0);
        ASSERT(first_bit != -1);
        last_bit = first_bit;
        nbits = 1;
        for (;;) {
                /*
                 * This takes the bit number to start looking from and
                 * returns the next set bit from there.  It returns -1
                 * if there are no more bits set or the start bit is
                 * beyond the end of the bitmap.
                 */
                next_bit = xfs_next_bit(bip->bli_format.blf_data_map,
                                        bip->bli_format.blf_map_size,
                                        (uint)last_bit + 1);
                /*
                 * If we run out of bits fill in the last iovec and get
                 * out of the loop.
                 * Else if we start a new set of bits then fill in the
                 * iovec for the series we were looking at and start
                 * counting the bits in the new one.
                 * Else we're still in the same set of bits so just
                 * keep counting and scanning.
                 */
                if (next_bit == -1) {
                        buffer_offset = first_bit * XFS_BLF_CHUNK;
                        vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
                        vecp->i_len = nbits * XFS_BLF_CHUNK;
                        vecp->i_type = XLOG_REG_TYPE_BCHUNK;
                        nvecs++;
                        break;
                } else if (next_bit != last_bit + 1) {
                        buffer_offset = first_bit * XFS_BLF_CHUNK;
                        vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
                        vecp->i_len = nbits * XFS_BLF_CHUNK;
                        vecp->i_type = XLOG_REG_TYPE_BCHUNK;
                        nvecs++;
                        vecp++;
                        first_bit = next_bit;
                        last_bit = next_bit;
                        nbits = 1;
                } else if (xfs_buf_offset(bp, next_bit << XFS_BLF_SHIFT) !=
                           (xfs_buf_offset(bp, last_bit << XFS_BLF_SHIFT) +
                            XFS_BLF_CHUNK)) {
                        buffer_offset = first_bit * XFS_BLF_CHUNK;
                        vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
                        vecp->i_len = nbits * XFS_BLF_CHUNK;
                        vecp->i_type = XLOG_REG_TYPE_BCHUNK;
                        /*
                         * You would think we need to bump the nvecs here
                         * too, but we do not.  This number is used by
                         * recovery, and it gets confused by the boundary
                         * split here:
                         *              nvecs++;
                         */
                        vecp++;
                        first_bit = next_bit;
                        last_bit = next_bit;
                        nbits = 1;
                } else {
                        last_bit++;
                        nbits++;
                }
        }
        bip->bli_format.blf_size = nvecs;

        /*
         * Check to make sure everything is consistent.
         */
        trace_xfs_buf_item_format(bip);
        xfs_buf_item_log_check(bip);
}
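
/*
 * Continuing the worked example from xfs_buf_item_size(), assuming
 * XFS_BLF_CHUNK == 128: with bits 0, 1, 5 and 6 set, the code above
 * fills vecp[0] with the format structure before the loop, then
 * vecp[1] with the 256-byte region at buffer offset 0 (bits 0-1) and
 * vecp[2] with the 256-byte region at offset 640 (bits 5-6), and
 * finally sets bip->bli_format.blf_size = 3.
 */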

/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory. This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
        struct xfs_log_item     *lip)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);

        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
               (bip->bli_flags & XFS_BLI_STALE));

        trace_xfs_buf_item_pin(bip);

        atomic_inc(&bip->bli_refcount);
        atomic_inc(&bip->bli_buf->b_pin_count);
}

/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
        struct xfs_log_item     *lip,
        int                     remove)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        xfs_buf_t               *bp = bip->bli_buf;
        struct xfs_ail          *ailp = lip->li_ailp;
        int                     stale = bip->bli_flags & XFS_BLI_STALE;
        int                     freed;

        ASSERT(bp->b_fspriv == bip);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        trace_xfs_buf_item_unpin(bip);

        freed = atomic_dec_and_test(&bip->bli_refcount);

        if (atomic_dec_and_test(&bp->b_pin_count))
                wake_up_all(&bp->b_waiters);

        if (freed && stale) {
                ASSERT(bip->bli_flags & XFS_BLI_STALE);
                ASSERT(xfs_buf_islocked(bp));
                ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
                ASSERT(XFS_BUF_ISSTALE(bp));
                ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);

                trace_xfs_buf_item_unpin_stale(bip);

                if (remove) {
                        /*
                         * If we are in a transaction context, we have to
                         * remove the log item from the transaction as we are
                         * about to release our reference to the buffer.  If we
                         * don't, the unlock that occurs later in
                         * xfs_trans_uncommit() will try to reference the
                         * buffer which we no longer have a hold on.
                         */
                        if (lip->li_desc)
                                xfs_trans_del_item(lip);

                        /*
                         * Since the transaction no longer refers to the
                         * buffer, the buffer should no longer refer to the
                         * transaction.
                         */
                        bp->b_transp = NULL;
                }

                /*
                 * If we get called here because of an IO error, we may
                 * or may not have the item on the AIL. xfs_trans_ail_delete()
                 * will take care of that situation.
                 * xfs_trans_ail_delete() drops the AIL lock.
                 */
                if (bip->bli_flags & XFS_BLI_STALE_INODE) {
                        xfs_buf_do_callbacks(bp);
                        bp->b_fspriv = NULL;
                        bp->b_iodone = NULL;
                } else {
                        spin_lock(&ailp->xa_lock);
                        xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip);
                        xfs_buf_item_relse(bp);
                        ASSERT(bp->b_fspriv == NULL);
                }
                xfs_buf_relse(bp);
        }
}

/*
 * This is called to attempt to lock the buffer associated with this
 * buf log item.  Don't sleep on the buffer lock.  If we can't get
 * the lock right away, return 0.  If we can get the lock, take a
 * reference to the buffer. If this is a delayed write buffer that
 * needs AIL help to be written back, invoke the pushbuf routine
 * rather than the normal success path.
 */
STATIC uint
xfs_buf_item_trylock(
        struct xfs_log_item     *lip)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        struct xfs_buf          *bp = bip->bli_buf;

        if (xfs_buf_ispinned(bp))
                return XFS_ITEM_PINNED;
        if (!xfs_buf_trylock(bp))
                return XFS_ITEM_LOCKED;

        /* take a reference to the buffer. */
        xfs_buf_hold(bp);

        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
        trace_xfs_buf_item_trylock(bip);
        if (XFS_BUF_ISDELAYWRITE(bp))
                return XFS_ITEM_PUSHBUF;
        return XFS_ITEM_SUCCESS;
}

/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count.  It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item. If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now.  This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_unlock(
        struct xfs_log_item     *lip)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        struct xfs_buf          *bp = bip->bli_buf;
        int                     aborted;
        uint                    hold;

        /* Clear the buffer's association with this transaction. */
        bp->b_transp = NULL;

        /*
         * If this is a transaction abort, don't return early.  Instead, allow
         * the brelse to happen.  Normally it would be done for stale
         * (cancelled) buffers at unpin time, but we'll never go through the
         * pin/unpin cycle if we abort inside commit.
         */
        aborted = (lip->li_flags & XFS_LI_ABORTED) != 0;

        /*
         * Before possibly freeing the buf item, determine if we should
         * release the buffer at the end of this routine.
         */
        hold = bip->bli_flags & XFS_BLI_HOLD;

        /* Clear the per transaction state. */
        bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD);

        /*
         * If the buf item is marked stale, then don't do anything.  We'll
         * unlock the buffer and free the buf item when the buffer is unpinned
         * for the last time.
         */
        if (bip->bli_flags & XFS_BLI_STALE) {
                trace_xfs_buf_item_unlock_stale(bip);
                ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
                if (!aborted) {
                        atomic_dec(&bip->bli_refcount);
                        return;
                }
        }

        trace_xfs_buf_item_unlock(bip);

        /*
         * If the buf item isn't tracking any data, free it, otherwise drop the
         * reference we hold to it.
         */
        if (xfs_bitmap_empty(bip->bli_format.blf_data_map,
                             bip->bli_format.blf_map_size))
                xfs_buf_item_relse(bp);
        else
                atomic_dec(&bip->bli_refcount);

        if (!hold)
                xfs_buf_relse(bp);
}

/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters.  For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery.  If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log.  We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
STATIC xfs_lsn_t
xfs_buf_item_committed(
        struct xfs_log_item     *lip,
        xfs_lsn_t               lsn)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);

        trace_xfs_buf_item_committed(bip);

        if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
                return lip->li_lsn;
        return lsn;
}
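
/*
 * Illustrative case for the exception above: a buffer of newly
 * allocated inodes is first committed and inserted into the AIL at
 * LSN 100, so lip->li_lsn == 100.  If it is then relogged while
 * XFS_BLI_INODE_ALLOC_BUF is still set, xfs_buf_item_committed()
 * keeps returning 100 rather than the new commit lsn, holding the
 * original inode images inside the active portion of the log.
 */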

/*
 * The buffer is locked, but is not a delayed write buffer.  This happens
 * if we race with IO completion and hence we don't want to try to write it
 * again. Just release the buffer.
 */
STATIC void
xfs_buf_item_push(
        struct xfs_log_item     *lip)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        struct xfs_buf          *bp = bip->bli_buf;

        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
        ASSERT(!XFS_BUF_ISDELAYWRITE(bp));

        trace_xfs_buf_item_push(bip);

        xfs_buf_relse(bp);
}

/*
 * The buffer is locked and is a delayed write buffer. Promote the buffer
 * in the delayed write queue as the caller knows that they must invoke
 * the xfsbufd to get this buffer written. We have to unlock the buffer
 * to allow the xfsbufd to write it, too.
 */
STATIC void
xfs_buf_item_pushbuf(
        struct xfs_log_item     *lip)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        struct xfs_buf          *bp = bip->bli_buf;

        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
        ASSERT(XFS_BUF_ISDELAYWRITE(bp));

        trace_xfs_buf_item_pushbuf(bip);

        xfs_buf_delwri_promote(bp);
        xfs_buf_relse(bp);
}

STATIC void
xfs_buf_item_committing(
        struct xfs_log_item     *lip,
        xfs_lsn_t               commit_lsn)
{
}

/*
 * This is the ops vector shared by all buf log items.
 */
static struct xfs_item_ops xfs_buf_item_ops = {
        .iop_size       = xfs_buf_item_size,
        .iop_format     = xfs_buf_item_format,
        .iop_pin        = xfs_buf_item_pin,
        .iop_unpin      = xfs_buf_item_unpin,
        .iop_trylock    = xfs_buf_item_trylock,
        .iop_unlock     = xfs_buf_item_unlock,
        .iop_committed  = xfs_buf_item_committed,
        .iop_push       = xfs_buf_item_push,
        .iop_pushbuf    = xfs_buf_item_pushbuf,
        .iop_committing = xfs_buf_item_committing
};


/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_fsprivate field to point to the new
 * buf log item.  If there are other items attached to the
 * buffer (see xfs_buf_attach_iodone() below), then put the
 * buf log item at the front.
 */
void
xfs_buf_item_init(
        xfs_buf_t       *bp,
        xfs_mount_t     *mp)
{
        xfs_log_item_t          *lip = bp->b_fspriv;
        xfs_buf_log_item_t      *bip;
        int                     chunks;
        int                     map_size;

        /*
         * Check to see if there is already a buf log item for
         * this buffer.  If there is, it is guaranteed to be
         * the first.  If we do already have one, there is
         * nothing to do here so return.
         */
        ASSERT(bp->b_target->bt_mount == mp);
        if (lip != NULL && lip->li_type == XFS_LI_BUF)
                return;

        /*
         * chunks is the number of XFS_BLF_CHUNK size pieces
         * the buffer can be divided into. Make sure not to
         * truncate any pieces.  map_size is the size of the
         * bitmap needed to describe the chunks of the buffer.
         */
        chunks = (int)((XFS_BUF_COUNT(bp) + (XFS_BLF_CHUNK - 1)) >> XFS_BLF_SHIFT);
        map_size = (int)((chunks + NBWORD) >> BIT_TO_WORD_SHIFT);

        bip = (xfs_buf_log_item_t*)kmem_zone_zalloc(xfs_buf_item_zone,
                                                    KM_SLEEP);
        xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
        bip->bli_buf = bp;
        xfs_buf_hold(bp);
        bip->bli_format.blf_type = XFS_LI_BUF;
        bip->bli_format.blf_blkno = (__int64_t)XFS_BUF_ADDR(bp);
        bip->bli_format.blf_len = (ushort)BTOBB(XFS_BUF_COUNT(bp));
        bip->bli_format.blf_map_size = map_size;

#ifdef XFS_TRANS_DEBUG
        /*
         * Allocate the arrays for tracking what needs to be logged
         * and what our callers request to be logged.  bli_orig
         * holds a copy of the original, clean buffer for comparison
         * against, and bli_logged keeps a 1 bit flag per byte in
         * the buffer to indicate which bytes the callers have asked
         * to have logged.
         */
        bip->bli_orig = (char *)kmem_alloc(XFS_BUF_COUNT(bp), KM_SLEEP);
        memcpy(bip->bli_orig, bp->b_addr, XFS_BUF_COUNT(bp));
        bip->bli_logged = (char *)kmem_zalloc(XFS_BUF_COUNT(bp) / NBBY, KM_SLEEP);
#endif

        /*
         * Put the buf item into the list of items attached to the
         * buffer at the front.
         */
        if (bp->b_fspriv)
                bip->bli_item.li_bio_list = bp->b_fspriv;
        bp->b_fspriv = bip;
}
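
/*
 * Worked example for the sizing above, assuming XFS_BLF_CHUNK == 128,
 * XFS_BLF_SHIFT == 7, NBWORD == 32 and BIT_TO_WORD_SHIFT == 5: a
 * 4096-byte buffer gives chunks = (4096 + 127) >> 7 = 32 and
 * map_size = (32 + 32) >> 5 = 2 bitmap words, i.e. 64 bits to track
 * 32 chunks; the rounding deliberately errs on the large side so no
 * piece of the buffer is left undescribed.
 */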


/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
        xfs_buf_log_item_t      *bip,
        uint                    first,
        uint                    last)
{
        uint            first_bit;
        uint            last_bit;
        uint            bits_to_set;
        uint            bits_set;
        uint            word_num;
        uint            *wordp;
        uint            bit;
        uint            end_bit;
        uint            mask;

        /*
         * Mark the item as having some dirty data for
         * quick reference in xfs_buf_item_dirty.
         */
        bip->bli_flags |= XFS_BLI_DIRTY;

        /*
         * Convert byte offsets to bit numbers.
         */
        first_bit = first >> XFS_BLF_SHIFT;
        last_bit = last >> XFS_BLF_SHIFT;

        /*
         * Calculate the total number of bits to be set.
         */
        bits_to_set = last_bit - first_bit + 1;

        /*
         * Get a pointer to the first word in the bitmap
         * to set a bit in.
         */
        word_num = first_bit >> BIT_TO_WORD_SHIFT;
        wordp = &(bip->bli_format.blf_data_map[word_num]);

        /*
         * Calculate the starting bit in the first word.
         */
        bit = first_bit & (uint)(NBWORD - 1);

        /*
         * First set any bits in the first word of our range.
         * If it starts at bit 0 of the word, it will be
         * set below rather than here.  That is what the variable
         * bit tells us. The variable bits_set tracks the number
         * of bits that have been set so far.  End_bit is the number
         * of the last bit to be set in this word plus one.
         */
        if (bit) {
                end_bit = MIN(bit + bits_to_set, (uint)NBWORD);
                mask = ((1 << (end_bit - bit)) - 1) << bit;
                *wordp |= mask;
                wordp++;
                bits_set = end_bit - bit;
        } else {
                bits_set = 0;
        }

        /*
         * Now set bits a whole word at a time that are between
         * first_bit and last_bit.
         */
        while ((bits_to_set - bits_set) >= NBWORD) {
                *wordp |= 0xffffffff;
                bits_set += NBWORD;
                wordp++;
        }

        /*
         * Finally, set any bits left to be set in one last partial word.
         */
        end_bit = bits_to_set - bits_set;
        if (end_bit) {
                mask = (1 << end_bit) - 1;
                *wordp |= mask;
        }

        xfs_buf_item_log_debug(bip, first, last);
}
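
/*
 * Worked example for xfs_buf_item_log(), assuming XFS_BLF_SHIFT == 7
 * and NBWORD == 32: logging bytes 100 through 300 gives
 * first_bit = 100 >> 7 = 0, last_bit = 300 >> 7 = 2 and
 * bits_to_set = 3.  Because the range starts at bit 0 of word 0, the
 * leading partial-word step is skipped and the trailing mask is
 * (1 << 3) - 1 = 0x7, marking the chunks covering bytes 0-383 dirty.
 */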


/*
 * Return 1 if the buffer has some data that has been logged (at any
 * point, not just the current transaction) and 0 if not.
 */
uint
xfs_buf_item_dirty(
        xfs_buf_log_item_t      *bip)
{
        return (bip->bli_flags & XFS_BLI_DIRTY);
}

STATIC void
xfs_buf_item_free(
        xfs_buf_log_item_t      *bip)
{
#ifdef XFS_TRANS_DEBUG
        kmem_free(bip->bli_orig);
        kmem_free(bip->bli_logged);
#endif /* XFS_TRANS_DEBUG */

        kmem_zone_free(xfs_buf_item_zone, bip);
}

/*
 * This is called when the buf log item is no longer needed.  It should
 * free the buf log item associated with the given buffer and clear
 * the buffer's pointer to the buf log item.  If there are no more
 * items in the list, clear the b_iodone field of the buffer (see
 * xfs_buf_attach_iodone() below).
 */
void
xfs_buf_item_relse(
        xfs_buf_t       *bp)
{
        xfs_buf_log_item_t      *bip;

        trace_xfs_buf_item_relse(bp, _RET_IP_);

        bip = bp->b_fspriv;
        bp->b_fspriv = bip->bli_item.li_bio_list;
        if (bp->b_fspriv == NULL)
                bp->b_iodone = NULL;

        xfs_buf_rele(bp);
        xfs_buf_item_free(bip);
}


/*
 * Add the given log item with its callback to the list of callbacks
 * to be called when the buffer's I/O completes.  If it is not set
 * already, set the buffer's b_iodone() routine to be
 * xfs_buf_iodone_callbacks() and link the log item into the list of
 * items rooted at b_fsprivate.  Items are always added as the second
 * entry in the list if there is a first, because the buf item code
 * assumes that the buf log item is first.
 */
void
xfs_buf_attach_iodone(
        xfs_buf_t       *bp,
        void            (*cb)(xfs_buf_t *, xfs_log_item_t *),
        xfs_log_item_t  *lip)
{
        xfs_log_item_t  *head_lip;

        ASSERT(xfs_buf_islocked(bp));

        lip->li_cb = cb;
        head_lip = bp->b_fspriv;
        if (head_lip) {
                lip->li_bio_list = head_lip->li_bio_list;
                head_lip->li_bio_list = lip;
        } else {
                bp->b_fspriv = lip;
        }

        ASSERT(bp->b_iodone == NULL ||
               bp->b_iodone == xfs_buf_iodone_callbacks);
        bp->b_iodone = xfs_buf_iodone_callbacks;
}
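
/*
 * Illustrative list layout, assuming two log items lip1 and lip2 are
 * attached in that order to a buffer that already carries a buf log
 * item: the buf item stays at the head and each new item is spliced
 * in directly behind it, giving b_fspriv -> bli -> lip2 -> lip1,
 * which preserves the invariant that the buf log item is first.
 */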

/*
 * We can have many callbacks on a buffer.  Running the callbacks individually
 * can cause a lot of contention on the AIL lock, so we allow for a single
 * callback to be able to scan the remaining lip->li_bio_list for other items
 * of the same type and callback to be processed in the first call.
 *
 * As a result, the loop walking the callback list below will also modify the
 * list.  It removes the first item from the list and then runs the callback.
 * The loop then restarts from the new head of the list.  This allows the
 * callback to scan and modify the list attached to the buffer and we don't
 * have to care about maintaining a next item pointer.
 */
STATIC void
xfs_buf_do_callbacks(
        struct xfs_buf          *bp)
{
        struct xfs_log_item     *lip;

        while ((lip = bp->b_fspriv) != NULL) {
                bp->b_fspriv = lip->li_bio_list;
                ASSERT(lip->li_cb != NULL);
                /*
                 * Clear the next pointer so we don't have any
                 * confusion if the item is added to another buf.
                 * Don't touch the log item after calling its
                 * callback, because it could have freed itself.
                 */
                lip->li_bio_list = NULL;
                lip->li_cb(bp, lip);
        }
}

/*
 * This is the iodone() function for buffers which have had callbacks
 * attached to them by xfs_buf_attach_iodone().  It should remove each
 * log item from the buffer's list and call the callback of each in turn.
 * When done, the buffer's fsprivate field is set to NULL and the buffer
 * is unlocked with a call to iodone().
 */
void
xfs_buf_iodone_callbacks(
        struct xfs_buf          *bp)
{
        struct xfs_log_item     *lip = bp->b_fspriv;
        struct xfs_mount        *mp = lip->li_mountp;
        static ulong            lasttime;
        static xfs_buftarg_t    *lasttarg;

        if (likely(!xfs_buf_geterror(bp)))
                goto do_callbacks;

        /*
         * If we've already decided to shutdown the filesystem because of
         * I/O errors, there's no point in giving this a retry.
         */
        if (XFS_FORCED_SHUTDOWN(mp)) {
                XFS_BUF_SUPER_STALE(bp);
                trace_xfs_buf_item_iodone(bp, _RET_IP_);
                goto do_callbacks;
        }

        if (bp->b_target != lasttarg ||
            time_after(jiffies, (lasttime + 5*HZ))) {
                lasttime = jiffies;
                xfs_alert(mp, "Device %s: metadata write error block 0x%llx",
                          xfs_buf_target_name(bp->b_target),
                          (__uint64_t)XFS_BUF_ADDR(bp));
        }
        lasttarg = bp->b_target;

        /*
         * If the write was asynchronous then no one will be looking for the
         * error.  Clear the error state and write the buffer out again.
         *
         * During sync or umount we'll write all pending buffers again
         * synchronously, which will catch these errors if they keep hanging
         * around.
         */
        if (XFS_BUF_ISASYNC(bp)) {
                xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */

                if (!XFS_BUF_ISSTALE(bp)) {
                        XFS_BUF_DELAYWRITE(bp);
                        XFS_BUF_DONE(bp);
                }
                ASSERT(bp->b_iodone != NULL);
                trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
                xfs_buf_relse(bp);
                return;
        }

        /*
         * If the write of the buffer was synchronous, we want to make
         * sure to return the error to the caller of xfs_bwrite().
         */
        XFS_BUF_STALE(bp);
        XFS_BUF_DONE(bp);
        XFS_BUF_UNDELAYWRITE(bp);

        trace_xfs_buf_error_relse(bp, _RET_IP_);

do_callbacks:
        xfs_buf_do_callbacks(bp);
        bp->b_fspriv = NULL;
        bp->b_iodone = NULL;
        xfs_buf_ioend(bp, 0);
}

/*
 * This is the iodone() function for buffers which have been
 * logged.  It is called when they are eventually flushed out.
 * It should remove the buf item from the AIL, and free the buf item.
 * It is called by xfs_buf_iodone_callbacks() above which will take
 * care of cleaning up the buffer itself.
 */
void
xfs_buf_iodone(
        struct xfs_buf          *bp,
        struct xfs_log_item     *lip)
{
        struct xfs_ail          *ailp = lip->li_ailp;

        ASSERT(BUF_ITEM(lip)->bli_buf == bp);

        xfs_buf_rele(bp);

        /*
         * If we are forcibly shutting down, this may well be
         * off the AIL already.  That's because we simulate the
         * log-committed callbacks to unpin these buffers.  Or we may never
         * have put this item on the AIL because the transaction was
         * aborted forcibly.  xfs_trans_ail_delete() takes care of these.
         *
         * Either way, the AIL is useless if we're forcing a shutdown.
         */
        spin_lock(&ailp->xa_lock);
        xfs_trans_ail_delete(ailp, lip);
        xfs_buf_item_free(BUF_ITEM(lip));
}
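
/*
 * Sketch of a typical completion sequence, assuming the transaction
 * code has installed xfs_buf_iodone() as the buf item's li_cb: when a
 * logged buffer finishes its write, xfs_buf_iodone_callbacks() walks
 * the b_fspriv list and eventually invokes xfs_buf_iodone(), which
 * drops the buffer reference taken in xfs_buf_item_init(), removes
 * the item from the AIL (xfs_trans_ail_delete() drops xa_lock) and
 * frees the buf log item via xfs_buf_item_free().
 */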