fs/jbd/transaction.c, from the Linux kernel mirror at git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, at tag v3.1-rc8:

/*
 * linux/fs/jbd/transaction.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Generic filesystem transaction handling code; part of the ext2fs
 * journaling system.
 *
 * This file manages transactions (compound commits managed by the
 * journaling code) and handles (individual atomic operations by the
 * filesystem).
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/backing-dev.h>

static void __journal_temp_unlink_buffer(struct journal_head *jh);

/*
 * get_transaction: obtain a new transaction_t object.
 *
 * Simply allocate and initialise a new transaction.  Create it in
 * RUNNING state and add it to the current journal (which should not
 * have an existing running transaction: we only make a new transaction
 * once we have started to commit the old one).
 *
 * Preconditions:
 *	The journal MUST be locked.  We don't perform atomic mallocs on the
 *	new transaction and we can't block without protecting against other
 *	processes trying to touch the journal while it is in transition.
 *
 * Called under j_state_lock
 */

static transaction_t *
get_transaction(journal_t *journal, transaction_t *transaction)
{
	transaction->t_journal = journal;
	transaction->t_state = T_RUNNING;
	transaction->t_start_time = ktime_get();
	transaction->t_tid = journal->j_transaction_sequence++;
	transaction->t_expires = jiffies + journal->j_commit_interval;
	spin_lock_init(&transaction->t_handle_lock);

	/* Set up the commit timer for the new transaction. */
	journal->j_commit_timer.expires =
				round_jiffies_up(transaction->t_expires);
	add_timer(&journal->j_commit_timer);

	J_ASSERT(journal->j_running_transaction == NULL);
	journal->j_running_transaction = transaction;

	return transaction;
}

/*
 * Handle management.
 *
 * A handle_t is an object which represents a single atomic update to a
 * filesystem, and which tracks all of the modifications which form part
 * of that one update.
 */

/*
 * start_this_handle: Given a handle, deal with any locking or stalling
 * needed to make sure that there is enough journal space for the handle
 * to begin.  Attach the handle to a transaction and set up the
 * transaction's buffer credits.
 */

static int start_this_handle(journal_t *journal, handle_t *handle)
{
	transaction_t *transaction;
	int needed;
	int nblocks = handle->h_buffer_credits;
	transaction_t *new_transaction = NULL;
	int ret = 0;

	if (nblocks > journal->j_max_transaction_buffers) {
		printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n",
		       current->comm, nblocks,
		       journal->j_max_transaction_buffers);
		ret = -ENOSPC;
		goto out;
	}

alloc_transaction:
	if (!journal->j_running_transaction) {
		new_transaction = kzalloc(sizeof(*new_transaction), GFP_NOFS);
		if (!new_transaction) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto alloc_transaction;
		}
	}

	jbd_debug(3, "New handle %p going live.\n", handle);

repeat:

	/*
	 * We need to hold j_state_lock until t_updates has been incremented,
	 * for proper journal barrier handling
	 */
	spin_lock(&journal->j_state_lock);
repeat_locked:
	if (is_journal_aborted(journal) ||
	    (journal->j_errno != 0 && !(journal->j_flags & JFS_ACK_ERR))) {
		spin_unlock(&journal->j_state_lock);
		ret = -EROFS;
		goto out;
	}

	/* Wait on the journal's transaction barrier if necessary */
	if (journal->j_barrier_count) {
		spin_unlock(&journal->j_state_lock);
		wait_event(journal->j_wait_transaction_locked,
				journal->j_barrier_count == 0);
		goto repeat;
	}

	if (!journal->j_running_transaction) {
		if (!new_transaction) {
			spin_unlock(&journal->j_state_lock);
			goto alloc_transaction;
		}
		get_transaction(journal, new_transaction);
		new_transaction = NULL;
	}

	transaction = journal->j_running_transaction;

	/*
	 * If the current transaction is locked down for commit, wait for the
	 * lock to be released.
	 */
	if (transaction->t_state == T_LOCKED) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_transaction_locked,
					&wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&journal->j_state_lock);
		schedule();
		finish_wait(&journal->j_wait_transaction_locked, &wait);
		goto repeat;
	}

	/*
	 * If there is not enough space left in the log to write all potential
	 * buffers requested by this operation, we need to stall pending a log
	 * checkpoint to free some more log space.
	 */
	spin_lock(&transaction->t_handle_lock);
	needed = transaction->t_outstanding_credits + nblocks;

	if (needed > journal->j_max_transaction_buffers) {
		/*
		 * If the current transaction is already too large, then start
		 * to commit it: we can then go back and attach this handle to
		 * a new transaction.
		 */
		DEFINE_WAIT(wait);

		jbd_debug(2, "Handle %p starting new commit...\n", handle);
		spin_unlock(&transaction->t_handle_lock);
		prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
				TASK_UNINTERRUPTIBLE);
		__log_start_commit(journal, transaction->t_tid);
		spin_unlock(&journal->j_state_lock);
		schedule();
		finish_wait(&journal->j_wait_transaction_locked, &wait);
		goto repeat;
	}

	/*
	 * The commit code assumes that it can get enough log space
	 * without forcing a checkpoint.  This is *critical* for
	 * correctness: a checkpoint of a buffer which is also
	 * associated with a committing transaction creates a deadlock,
	 * so commit simply cannot force through checkpoints.
	 *
	 * We must therefore ensure the necessary space in the journal
	 * *before* starting to dirty potentially checkpointed buffers
	 * in the new transaction.
	 *
	 * The worst part is, any transaction currently committing can
	 * reduce the free space arbitrarily.  Be careful to account for
	 * those buffers when checkpointing.
	 */

	/*
	 * @@@ AKPM: This seems rather over-defensive.  We're giving commit
	 * a _lot_ of headroom: 1/4 of the journal plus the size of
	 * the committing transaction.  Really, we only need to give it
	 * committing_transaction->t_outstanding_credits plus "enough" for
	 * the log control blocks.
	 * Also, this test is inconsistent with the matching one in
	 * journal_extend().
	 */
	if (__log_space_left(journal) < jbd_space_needed(journal)) {
		jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
		spin_unlock(&transaction->t_handle_lock);
		__log_wait_for_space(journal);
		goto repeat_locked;
	}

	/* OK, account for the buffers that this operation expects to
	 * use and add the handle to the running transaction. */

	handle->h_transaction = transaction;
	transaction->t_outstanding_credits += nblocks;
	transaction->t_updates++;
	transaction->t_handle_count++;
	jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
		  handle, nblocks, transaction->t_outstanding_credits,
		  __log_space_left(journal));
	spin_unlock(&transaction->t_handle_lock);
	spin_unlock(&journal->j_state_lock);

	lock_map_acquire(&handle->h_lockdep_map);
out:
	if (unlikely(new_transaction))		/* It's usually NULL */
		kfree(new_transaction);
	return ret;
}

static struct lock_class_key jbd_handle_key;

/* Allocate a new handle.  This should probably be in a slab... */
static handle_t *new_handle(int nblocks)
{
	handle_t *handle = jbd_alloc_handle(GFP_NOFS);
	if (!handle)
		return NULL;
	memset(handle, 0, sizeof(*handle));
	handle->h_buffer_credits = nblocks;
	handle->h_ref = 1;

	lockdep_init_map(&handle->h_lockdep_map, "jbd_handle", &jbd_handle_key, 0);

	return handle;
}

/**
 * handle_t *journal_start() - Obtain a new handle.
 * @journal: Journal to start transaction on.
 * @nblocks: number of block buffers we might modify
 *
 * We make sure that the transaction can guarantee at least nblocks of
 * modified buffers in the log.  We block until the log can guarantee
 * that much space.
 *
 * This function is visible to journal users (like ext3fs), so is not
 * called with the journal already locked.
 *
 * Return a pointer to a newly allocated handle, or an ERR_PTR() value
 * on failure.
 */
handle_t *journal_start(journal_t *journal, int nblocks)
{
	handle_t *handle = journal_current_handle();
	int err;

	if (!journal)
		return ERR_PTR(-EROFS);

	if (handle) {
		J_ASSERT(handle->h_transaction->t_journal == journal);
		handle->h_ref++;
		return handle;
	}

	handle = new_handle(nblocks);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	current->journal_info = handle;

	err = start_this_handle(journal, handle);
	if (err < 0) {
		jbd_free_handle(handle);
		current->journal_info = NULL;
		handle = ERR_PTR(err);
	}
	return handle;
}
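
/*
 * Illustrative usage sketch (editorial addition, not part of the
 * original file): the canonical handle lifecycle as seen from a client
 * filesystem such as ext3.  Each modified buffer is announced with
 * journal_get_write_access() and then dirtied through the journal; the
 * handle is always closed with journal_stop(), even on error.
 * modify_block() below is a hypothetical filesystem helper.
 *
 *	handle = journal_start(journal, 1);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	err = journal_get_write_access(handle, bh);
 *	if (!err) {
 *		modify_block(bh);
 *		err = journal_dirty_metadata(handle, bh);
 *	}
 *	journal_stop(handle);
 */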

/**
 * int journal_extend() - extend buffer credits.
 * @handle:  handle to 'extend'
 * @nblocks: nr blocks to try to extend by.
 *
 * Some transactions, such as large extends and truncates, can be done
 * atomically all at once or in several stages.  The operation requests
 * a credit for a number of buffer modifications in advance, but can
 * extend its credit if it needs more.
 *
 * journal_extend tries to give the running handle more buffer credits.
 * It does not guarantee the allocation; this is best-effort only.
 * The calling process MUST be able to deal cleanly with a failure to
 * extend here.
 *
 * Return 0 on success, non-zero on failure.
 *
 * return code < 0 implies an error
 * return code > 0 implies normal transaction-full status.
 */
int journal_extend(handle_t *handle, int nblocks)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	int result;
	int wanted;

	result = -EIO;
	if (is_handle_aborted(handle))
		goto out;

	result = 1;

	spin_lock(&journal->j_state_lock);

	/* Don't extend a locked-down transaction! */
	if (handle->h_transaction->t_state != T_RUNNING) {
		jbd_debug(3, "denied handle %p %d blocks: "
			"transaction not running\n", handle, nblocks);
		goto error_out;
	}

	spin_lock(&transaction->t_handle_lock);
	wanted = transaction->t_outstanding_credits + nblocks;

	if (wanted > journal->j_max_transaction_buffers) {
		jbd_debug(3, "denied handle %p %d blocks: "
			"transaction too large\n", handle, nblocks);
		goto unlock;
	}

	if (wanted > __log_space_left(journal)) {
		jbd_debug(3, "denied handle %p %d blocks: "
			"insufficient log space\n", handle, nblocks);
		goto unlock;
	}

	handle->h_buffer_credits += nblocks;
	transaction->t_outstanding_credits += nblocks;
	result = 0;

	jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
unlock:
	spin_unlock(&transaction->t_handle_lock);
error_out:
	spin_unlock(&journal->j_state_lock);
out:
	return result;
}
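
/*
 * Illustrative sketch (editorial addition): since journal_extend() is
 * best-effort, a caller that runs out of credits mid-operation
 * typically falls back to journal_restart() below, accepting that its
 * operation is then split across two transactions:
 *
 *	if (journal_extend(handle, nblocks) != 0)
 *		err = journal_restart(handle, nblocks);
 */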

/**
 * int journal_restart() - restart a handle.
 * @handle:  handle to restart
 * @nblocks: nr credits requested
 *
 * Restart a handle for a multi-transaction filesystem
 * operation.
 *
 * If the journal_extend() call above fails to grant new buffer credits
 * to a running handle, a call to journal_restart will commit the
 * handle's transaction so far and reattach the handle to a new
 * transaction capable of guaranteeing the requested number of
 * credits.
 */

int journal_restart(handle_t *handle, int nblocks)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	int ret;

	/* If we've had an abort of any type, don't even think about
	 * actually doing the restart! */
	if (is_handle_aborted(handle))
		return 0;

	/*
	 * First unlink the handle from its current transaction, and start the
	 * commit on that.
	 */
	J_ASSERT(transaction->t_updates > 0);
	J_ASSERT(journal_current_handle() == handle);

	spin_lock(&journal->j_state_lock);
	spin_lock(&transaction->t_handle_lock);
	transaction->t_outstanding_credits -= handle->h_buffer_credits;
	transaction->t_updates--;

	if (!transaction->t_updates)
		wake_up(&journal->j_wait_updates);
	spin_unlock(&transaction->t_handle_lock);

	jbd_debug(2, "restarting handle %p\n", handle);
	__log_start_commit(journal, transaction->t_tid);
	spin_unlock(&journal->j_state_lock);

	lock_map_release(&handle->h_lockdep_map);
	handle->h_buffer_credits = nblocks;
	ret = start_this_handle(journal, handle);
	return ret;
}


/**
 * void journal_lock_updates () - establish a transaction barrier.
 * @journal:  Journal to establish a barrier on.
 *
 * This locks out any further updates from being started, and blocks
 * until all existing updates have completed, returning only once the
 * journal is in a quiescent state with no updates running.
 *
 * The journal lock should not be held on entry.
 */
void journal_lock_updates(journal_t *journal)
{
	DEFINE_WAIT(wait);

	spin_lock(&journal->j_state_lock);
	++journal->j_barrier_count;

	/* Wait until there are no running updates */
	while (1) {
		transaction_t *transaction = journal->j_running_transaction;

		if (!transaction)
			break;

		spin_lock(&transaction->t_handle_lock);
		if (!transaction->t_updates) {
			spin_unlock(&transaction->t_handle_lock);
			break;
		}
		prepare_to_wait(&journal->j_wait_updates, &wait,
				TASK_UNINTERRUPTIBLE);
		spin_unlock(&transaction->t_handle_lock);
		spin_unlock(&journal->j_state_lock);
		schedule();
		finish_wait(&journal->j_wait_updates, &wait);
		spin_lock(&journal->j_state_lock);
	}
	spin_unlock(&journal->j_state_lock);

	/*
	 * We have now established a barrier against other normal updates, but
	 * we also need to barrier against other journal_lock_updates() calls
	 * to make sure that we serialise special journal-locked operations
	 * too.
	 */
	mutex_lock(&journal->j_barrier);
}

/**
 * void journal_unlock_updates (journal_t* journal) - release barrier
 * @journal:  Journal to release the barrier on.
 *
 * Release a transaction barrier obtained with journal_lock_updates().
 *
 * Should be called without the journal lock held.
 */
void journal_unlock_updates (journal_t *journal)
{
	J_ASSERT(journal->j_barrier_count != 0);

	mutex_unlock(&journal->j_barrier);
	spin_lock(&journal->j_state_lock);
	--journal->j_barrier_count;
	spin_unlock(&journal->j_state_lock);
	wake_up(&journal->j_wait_transaction_locked);
}
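
/*
 * Illustrative sketch (editorial addition): the barrier calls are used
 * as a pair around operations that need a quiescent journal; a
 * filesystem-freeze style caller, for instance, takes the barrier
 * before flushing the journal and drops it when thawing:
 *
 *	journal_lock_updates(journal);
 *	...operate on the quiesced journal...
 *	journal_unlock_updates(journal);
 */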

static void warn_dirty_buffer(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];

	printk(KERN_WARNING
	       "JBD: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). "
	       "There's a risk of filesystem corruption in case of system "
	       "crash.\n",
	       bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
}

/*
 * If the buffer is already part of the current transaction, then there
 * is nothing we need to do.  If it is already part of a prior
 * transaction which we are still committing to disk, then we need to
 * make sure that we do not overwrite the old copy: we do copy-out to
 * preserve the copy going to disk.  We also account the buffer against
 * the handle's metadata buffer credits (unless the buffer is already
 * part of the transaction, that is).
 *
 */
static int
do_get_write_access(handle_t *handle, struct journal_head *jh,
			int force_copy)
{
	struct buffer_head *bh;
	transaction_t *transaction;
	journal_t *journal;
	int error;
	char *frozen_buffer = NULL;
	int need_copy = 0;

	if (is_handle_aborted(handle))
		return -EROFS;

	transaction = handle->h_transaction;
	journal = transaction->t_journal;

	jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);

	JBUFFER_TRACE(jh, "entry");
repeat:
	bh = jh2bh(jh);

	/* @@@ Need to check for errors here at some point. */

	lock_buffer(bh);
	jbd_lock_bh_state(bh);

	/* We now hold the buffer lock so it is safe to query the buffer
	 * state.  Is the buffer dirty?
	 *
	 * If so, there are two possibilities.  The buffer may be
	 * non-journaled, and undergoing a quite legitimate writeback.
	 * Otherwise, it is journaled, and we don't expect dirty buffers
	 * in that state (the buffers should be marked JBD_Dirty
	 * instead.)  So either the IO is being done under our own
	 * control and this is a bug, or it's a third party IO such as
	 * dump(8) (which may leave the buffer scheduled for read ---
	 * ie. locked but not dirty) or tune2fs (which may actually have
	 * the buffer dirtied, ugh.)  */

	if (buffer_dirty(bh)) {
		/*
		 * First question: is this buffer already part of the current
		 * transaction or the existing committing transaction?
		 */
		if (jh->b_transaction) {
			J_ASSERT_JH(jh,
				jh->b_transaction == transaction ||
				jh->b_transaction ==
					journal->j_committing_transaction);
			if (jh->b_next_transaction)
				J_ASSERT_JH(jh, jh->b_next_transaction ==
							transaction);
			warn_dirty_buffer(bh);
		}
		/*
		 * In any case we need to clean the dirty flag and we must
		 * do it under the buffer lock to be sure we don't race
		 * with running write-out.
		 */
		JBUFFER_TRACE(jh, "Journalling dirty buffer");
		clear_buffer_dirty(bh);
		set_buffer_jbddirty(bh);
	}

	unlock_buffer(bh);

	error = -EROFS;
	if (is_handle_aborted(handle)) {
		jbd_unlock_bh_state(bh);
		goto out;
	}
	error = 0;

	/*
	 * The buffer is already part of this transaction if b_transaction or
	 * b_next_transaction points to it
	 */
	if (jh->b_transaction == transaction ||
	    jh->b_next_transaction == transaction)
		goto done;

	/*
	 * this is the first time this transaction is touching this buffer,
	 * reset the modified flag
	 */
	jh->b_modified = 0;

	/*
	 * If there is already a copy-out version of this buffer, then we don't
	 * need to make another one
	 */
	if (jh->b_frozen_data) {
		JBUFFER_TRACE(jh, "has frozen data");
		J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
		jh->b_next_transaction = transaction;
		goto done;
	}

	/* Is there data here we need to preserve? */

	if (jh->b_transaction && jh->b_transaction != transaction) {
		JBUFFER_TRACE(jh, "owned by older transaction");
		J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
		J_ASSERT_JH(jh, jh->b_transaction ==
					journal->j_committing_transaction);

		/* There is one case we have to be very careful about.
		 * If the committing transaction is currently writing
		 * this buffer out to disk and has NOT made a copy-out,
		 * then we cannot modify the buffer contents at all
		 * right now.  The essence of copy-out is that it is the
		 * extra copy, not the primary copy, which gets
		 * journaled.  If the primary copy is already going to
		 * disk then we cannot do copy-out here. */

		if (jh->b_jlist == BJ_Shadow) {
			DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow);
			wait_queue_head_t *wqh;

			wqh = bit_waitqueue(&bh->b_state, BH_Unshadow);

			JBUFFER_TRACE(jh, "on shadow: sleep");
			jbd_unlock_bh_state(bh);
			/* commit wakes up all shadow buffers after IO */
			for ( ; ; ) {
				prepare_to_wait(wqh, &wait.wait,
						TASK_UNINTERRUPTIBLE);
				if (jh->b_jlist != BJ_Shadow)
					break;
				schedule();
			}
			finish_wait(wqh, &wait.wait);
			goto repeat;
		}

		/* Only do the copy if the currently-owning transaction
		 * still needs it.  If it is on the Forget list, the
		 * committing transaction is past that stage.  The
		 * buffer had better remain locked during the kmalloc,
		 * but that should be true --- we hold the journal lock
		 * still and the buffer is already on the BUF_JOURNAL
		 * list so won't be flushed.
		 *
		 * Subtle point, though: if this is a get_undo_access,
		 * then we will be relying on the frozen_data to contain
		 * the new value of the committed_data record after the
		 * transaction, so we HAVE to force the frozen_data copy
		 * in that case. */

		if (jh->b_jlist != BJ_Forget || force_copy) {
			JBUFFER_TRACE(jh, "generate frozen data");
			if (!frozen_buffer) {
				JBUFFER_TRACE(jh, "allocate memory for buffer");
				jbd_unlock_bh_state(bh);
				frozen_buffer =
					jbd_alloc(jh2bh(jh)->b_size,
							 GFP_NOFS);
				if (!frozen_buffer) {
					printk(KERN_EMERG
					       "%s: OOM for frozen_buffer\n",
					       __func__);
					JBUFFER_TRACE(jh, "oom!");
					error = -ENOMEM;
					jbd_lock_bh_state(bh);
					goto done;
				}
				goto repeat;
			}
			jh->b_frozen_data = frozen_buffer;
			frozen_buffer = NULL;
			need_copy = 1;
		}
		jh->b_next_transaction = transaction;
	}


	/*
	 * Finally, if the buffer is not journaled right now, we need to make
	 * sure it doesn't get written to disk before the caller actually
	 * commits the new data
	 */
	if (!jh->b_transaction) {
		JBUFFER_TRACE(jh, "no transaction");
		J_ASSERT_JH(jh, !jh->b_next_transaction);
		JBUFFER_TRACE(jh, "file as BJ_Reserved");
		spin_lock(&journal->j_list_lock);
		__journal_file_buffer(jh, transaction, BJ_Reserved);
		spin_unlock(&journal->j_list_lock);
	}

done:
	if (need_copy) {
		struct page *page;
		int offset;
		char *source;

		J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
			    "Possible IO failure.\n");
		page = jh2bh(jh)->b_page;
		offset = offset_in_page(jh2bh(jh)->b_data);
		source = kmap_atomic(page, KM_USER0);
		memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
		kunmap_atomic(source, KM_USER0);
	}
	jbd_unlock_bh_state(bh);

	/*
	 * If we are about to journal a buffer, then any revoke pending on it is
	 * no longer valid
	 */
	journal_cancel_revoke(handle, jh);

out:
	if (unlikely(frozen_buffer))	/* It's usually NULL */
		jbd_free(frozen_buffer, bh->b_size);

	JBUFFER_TRACE(jh, "exit");
	return error;
}

/**
 * int journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
 * @handle: transaction to add buffer modifications to
 * @bh:     bh to be used for metadata writes
 *
 * Returns an error code or 0 on success.
 *
 * In full data journalling mode the buffer may be of type BJ_AsyncData,
 * because we're write()ing a buffer which is also part of a shared mapping.
 */

int journal_get_write_access(handle_t *handle, struct buffer_head *bh)
{
	struct journal_head *jh = journal_add_journal_head(bh);
	int rc;

	/* We do not want to get caught playing with fields which the
	 * log thread also manipulates.  Make sure that the buffer
	 * completes any outstanding IO before proceeding. */
	rc = do_get_write_access(handle, jh, 0);
	journal_put_journal_head(jh);
	return rc;
}


/*
 * When the user wants to journal a newly created buffer_head
 * (ie. getblk() returned a new buffer and we are going to populate it
 * manually rather than reading off disk), then we need to keep the
 * buffer_head locked until it has been completely filled with new
 * data.  In this case, we should be able to make the assertion that
 * the bh is not already part of an existing transaction.
 *
 * The buffer should already be locked by the caller by this point.
 * There is no lock ranking violation: it was a newly created,
 * unlocked buffer beforehand. */

/**
 * int journal_get_create_access () - notify intent to use newly created bh
 * @handle: transaction to add the new buffer to
 * @bh: new buffer.
 *
 * Call this if you create a new bh.
 */
int journal_get_create_access(handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	struct journal_head *jh = journal_add_journal_head(bh);
	int err;

	jbd_debug(5, "journal_head %p\n", jh);
	err = -EROFS;
	if (is_handle_aborted(handle))
		goto out;
	err = 0;

	JBUFFER_TRACE(jh, "entry");
	/*
	 * The buffer may already belong to this transaction due to pre-zeroing
	 * in the filesystem's new_block code.  It may also be on the previous,
	 * committing transaction's lists, but it HAS to be in Forget state in
	 * that case: the transaction must have deleted the buffer for it to be
	 * reused here.
	 */
	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);
	J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
		jh->b_transaction == NULL ||
		(jh->b_transaction == journal->j_committing_transaction &&
			  jh->b_jlist == BJ_Forget)));

	J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
	J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));

	if (jh->b_transaction == NULL) {
		/*
		 * Previous journal_forget() could have left the buffer
		 * with jbddirty bit set because it was being committed. When
		 * the commit finished, we've filed the buffer for
		 * checkpointing and marked it dirty. Now we are reallocating
		 * the buffer so the transaction freeing it must have
		 * committed and so it's safe to clear the dirty bit.
		 */
		clear_buffer_dirty(jh2bh(jh));

		/* first access by this transaction */
		jh->b_modified = 0;

		JBUFFER_TRACE(jh, "file as BJ_Reserved");
		__journal_file_buffer(jh, transaction, BJ_Reserved);
	} else if (jh->b_transaction == journal->j_committing_transaction) {
		/* first access by this transaction */
		jh->b_modified = 0;

		JBUFFER_TRACE(jh, "set next transaction");
		jh->b_next_transaction = transaction;
	}
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);

	/*
	 * akpm: I added this.  ext3_alloc_branch can pick up new indirect
	 * blocks which contain freed but then revoked metadata.  We need
	 * to cancel the revoke in case we end up freeing it yet again
	 * and then reallocating it as data - this would cause a second
	 * revoke, which hits an assertion error.
	 */
	JBUFFER_TRACE(jh, "cancelling revoke");
	journal_cancel_revoke(handle, jh);
out:
	journal_put_journal_head(jh);
	return err;
}
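
/*
 * Illustrative sketch (editorial addition): a brand-new metadata block
 * is announced with journal_get_create_access() rather than
 * journal_get_write_access(), since there is no prior on-disk content
 * to preserve; per the comment above, the caller holds the buffer lock
 * while filling it.  fill_new_block() is a hypothetical helper.
 *
 *	lock_buffer(bh);
 *	err = journal_get_create_access(handle, bh);
 *	if (!err) {
 *		fill_new_block(bh);
 *		set_buffer_uptodate(bh);
 *	}
 *	unlock_buffer(bh);
 *	if (!err)
 *		err = journal_dirty_metadata(handle, bh);
 */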

/**
 * int journal_get_undo_access() - Notify intent to modify metadata with non-rewindable consequences
 * @handle: transaction
 * @bh: buffer to undo
 *
 * Sometimes there is a need to distinguish between metadata which has
 * been committed to disk and that which has not.  The ext3fs code uses
 * this for freeing and allocating space, we have to make sure that we
 * do not reuse freed space until the deallocation has been committed,
 * since if we overwrote that space we would make the delete
 * un-rewindable in case of a crash.
 *
 * To deal with that, journal_get_undo_access requests write access to a
 * buffer for parts of non-rewindable operations such as delete
 * operations on the bitmaps.  The journaling code must keep a copy of
 * the buffer's contents prior to the undo_access call until such time
 * as we know that the buffer has definitely been committed to disk.
 *
 * We never need to know which transaction the committed data is part
 * of, buffers touched here are guaranteed to be dirtied later and so
 * will be committed to a new transaction in due course, at which point
 * we can discard the old committed data pointer.
 *
 * Returns error number or 0 on success.
 */
int journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
{
	int err;
	struct journal_head *jh = journal_add_journal_head(bh);
	char *committed_data = NULL;

	JBUFFER_TRACE(jh, "entry");

	/*
	 * Do this first --- it can drop the journal lock, so we want to
	 * make sure that obtaining the committed_data is done
	 * atomically wrt. completion of any outstanding commits.
	 */
	err = do_get_write_access(handle, jh, 1);
	if (err)
		goto out;

repeat:
	if (!jh->b_committed_data) {
		committed_data = jbd_alloc(jh2bh(jh)->b_size, GFP_NOFS);
		if (!committed_data) {
			printk(KERN_EMERG "%s: No memory for committed data\n",
				__func__);
			err = -ENOMEM;
			goto out;
		}
	}

	jbd_lock_bh_state(bh);
	if (!jh->b_committed_data) {
		/* Copy out the current buffer contents into the
		 * preserved, committed copy. */
		JBUFFER_TRACE(jh, "generate b_committed data");
		if (!committed_data) {
			jbd_unlock_bh_state(bh);
			goto repeat;
		}

		jh->b_committed_data = committed_data;
		committed_data = NULL;
		memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
	}
	jbd_unlock_bh_state(bh);
out:
	journal_put_journal_head(jh);
	if (unlikely(committed_data))
		jbd_free(committed_data, bh->b_size);
	return err;
}
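
/*
 * Illustrative sketch (editorial addition): the classic undo-access
 * user is bitmap manipulation while freeing blocks, where the
 * committed copy of the bitmap must stay available until the
 * deallocation commits.  bitmap_bh and bit are hypothetical.
 *
 *	err = journal_get_undo_access(handle, bitmap_bh);
 *	if (!err) {
 *		clear_bit(bit, (void *)bitmap_bh->b_data);
 *		err = journal_dirty_metadata(handle, bitmap_bh);
 *	}
 */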

/**
 * int journal_dirty_data() - mark a buffer as containing dirty data to be flushed
 * @handle: transaction
 * @bh: bufferhead to mark
 *
 * Description:
 * Mark a buffer as containing dirty data which needs to be flushed before
 * we can commit the current transaction.
 *
 * The buffer is placed on the transaction's data list and is marked as
 * belonging to the transaction.
 *
 * Returns error number or 0 on success.
 *
 * journal_dirty_data() can be called via page_launder->ext3_writepage
 * by kswapd.
 */
int journal_dirty_data(handle_t *handle, struct buffer_head *bh)
{
	journal_t *journal = handle->h_transaction->t_journal;
	int need_brelse = 0;
	struct journal_head *jh;
	int ret = 0;

	if (is_handle_aborted(handle))
		return ret;

	jh = journal_add_journal_head(bh);
	JBUFFER_TRACE(jh, "entry");

	/*
	 * The buffer could *already* be dirty.  Writeout can start
	 * at any time.
	 */
	jbd_debug(4, "jh: %p, tid:%d\n", jh, handle->h_transaction->t_tid);

	/*
	 * What if the buffer is already part of a running transaction?
	 *
	 * There are two cases:
	 * 1) It is part of the current running transaction.  Refile it,
	 *    just in case we have allocated it as metadata, deallocated
	 *    it, then reallocated it as data.
	 * 2) It is part of the previous, still-committing transaction.
	 *    If all we want to do is to guarantee that the buffer will be
	 *    written to disk before this new transaction commits, then
	 *    being sure that the *previous* transaction has this same
	 *    property is sufficient for us!  Just leave it on its old
	 *    transaction.
	 *
	 * In case (2), the buffer must not already exist as metadata
	 * --- that would violate write ordering (a transaction is free
	 * to write its data at any point, even before the previous
	 * committing transaction has committed).  The caller must
	 * never, ever allow this to happen: there's nothing we can do
	 * about it in this layer.
	 */
	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);

	/* Now that we have bh_state locked, are we really still mapped? */
	if (!buffer_mapped(bh)) {
		JBUFFER_TRACE(jh, "unmapped buffer, bailing out");
		goto no_journal;
	}

	if (jh->b_transaction) {
		JBUFFER_TRACE(jh, "has transaction");
		if (jh->b_transaction != handle->h_transaction) {
			JBUFFER_TRACE(jh, "belongs to older transaction");
			J_ASSERT_JH(jh, jh->b_transaction ==
					journal->j_committing_transaction);

			/* @@@ IS THIS TRUE ? */
			/*
			 * Not any more.  Scenario: someone does a write()
			 * in data=journal mode.  The buffer's transaction has
			 * moved into commit.  Then someone does another
			 * write() to the file.  We do the frozen data copyout
			 * and set b_next_transaction to point to j_running_t.
			 * And while we're in that state, someone does a
			 * writepage() in an attempt to pageout the same area
			 * of the file via a shared mapping.  At present that
			 * calls journal_dirty_data(), and we get right here.
			 * It may be too late to journal the data.  Simply
			 * falling through to the next test will suffice: the
			 * data will be dirty and will be checkpointed.  The
			 * ordering comments in the next comment block still
			 * apply.
			 */
			//J_ASSERT_JH(jh, jh->b_next_transaction == NULL);

			/*
			 * If we're journalling data, and this buffer was
			 * subject to a write(), it could be metadata, forget
			 * or shadow against the committing transaction.  Now,
			 * someone has dirtied the same darn page via a mapping
			 * and it is being writepage()'d.
			 * We *could* just steal the page from commit, with some
			 * fancy locking there.  Instead, we just skip it -
			 * don't tie the page's buffers to the new transaction
			 * at all.
			 * Implication: if we crash before the writepage() data
			 * is written into the filesystem, recovery will replay
			 * the write() data.
			 */
			if (jh->b_jlist != BJ_None &&
					jh->b_jlist != BJ_SyncData &&
					jh->b_jlist != BJ_Locked) {
				JBUFFER_TRACE(jh, "Not stealing");
				goto no_journal;
			}

			/*
			 * This buffer may be undergoing writeout in commit.  We
			 * can't return from here and let the caller dirty it
			 * again because that can cause the write-out loop in
			 * commit to never terminate.
			 */
			if (buffer_dirty(bh)) {
				get_bh(bh);
				spin_unlock(&journal->j_list_lock);
				jbd_unlock_bh_state(bh);
				need_brelse = 1;
				sync_dirty_buffer(bh);
				jbd_lock_bh_state(bh);
				spin_lock(&journal->j_list_lock);
				/* Since we dropped the lock... */
				if (!buffer_mapped(bh)) {
					JBUFFER_TRACE(jh, "buffer got unmapped");
					goto no_journal;
				}
				/* The buffer may become locked again at any
				   time if it is redirtied */
			}

			/*
			 * We cannot remove the buffer with io error from the
			 * committing transaction, because otherwise it would
			 * miss the error and the commit would not abort.
			 */
			if (unlikely(!buffer_uptodate(bh))) {
				ret = -EIO;
				goto no_journal;
			}
			/* We might have slept so buffer could be refiled now */
			if (jh->b_transaction != NULL &&
			    jh->b_transaction != handle->h_transaction) {
				JBUFFER_TRACE(jh, "unfile from commit");
				__journal_temp_unlink_buffer(jh);
				/* It still points to the committing
				 * transaction; move it to this one so
				 * that the refile assert checks are
				 * happy. */
				jh->b_transaction = handle->h_transaction;
			}
			/* The buffer will be refiled below */

		}
		/*
		 * Special case --- the buffer might actually have been
		 * allocated and then immediately deallocated in the previous,
		 * committing transaction, so might still be left on that
		 * transaction's metadata lists.
		 */
		if (jh->b_jlist != BJ_SyncData && jh->b_jlist != BJ_Locked) {
			JBUFFER_TRACE(jh, "not on correct data list: unfile");
			J_ASSERT_JH(jh, jh->b_jlist != BJ_Shadow);
			JBUFFER_TRACE(jh, "file as data");
			__journal_file_buffer(jh, handle->h_transaction,
						BJ_SyncData);
		}
	} else {
		JBUFFER_TRACE(jh, "not on a transaction");
		__journal_file_buffer(jh, handle->h_transaction, BJ_SyncData);
	}
no_journal:
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);
	if (need_brelse) {
		BUFFER_TRACE(bh, "brelse");
		__brelse(bh);
	}
	JBUFFER_TRACE(jh, "exit");
	journal_put_journal_head(jh);
	return ret;
}
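
/*
 * Illustrative sketch (editorial addition): in ordered mode a data
 * buffer is not copied into the log at all; journal_dirty_data() above
 * merely ties it to the transaction so that it is written back before
 * commit.  Contrast journal_dirty_metadata() below, which consumes a
 * buffer credit and queues the buffer for the journal itself.  After
 * the caller has filled bh with file data:
 *
 *	err = journal_dirty_data(handle, bh);
 */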

/**
 * int journal_dirty_metadata() - mark a buffer as containing dirty metadata
 * @handle: transaction to add buffer to.
 * @bh: buffer to mark
 *
 * Mark dirty metadata which needs to be journaled as part of the current
 * transaction.
 *
 * The buffer is placed on the transaction's metadata list and is marked
 * as belonging to the transaction.
 *
 * Returns error number or 0 on success.
 *
 * Special care needs to be taken if the buffer already belongs to the
 * current committing transaction (in which case we should have frozen
 * data present for that commit).  In that case, we don't relink the
 * buffer: that only gets done when the old transaction finally
 * completes its commit.
 */
int journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	struct journal_head *jh = bh2jh(bh);

	jbd_debug(5, "journal_head %p\n", jh);
	JBUFFER_TRACE(jh, "entry");
	if (is_handle_aborted(handle))
		goto out;

	jbd_lock_bh_state(bh);

	if (jh->b_modified == 0) {
		/*
		 * This buffer has been modified and is becoming part
		 * of the transaction.  This needs to be done
		 * once per transaction -bzzz
		 */
		jh->b_modified = 1;
		J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
		handle->h_buffer_credits--;
	}

	/*
	 * fastpath, to avoid expensive locking.  If this buffer is already
	 * on the running transaction's metadata list there is nothing to do.
	 * Nobody can take it off again because there is a handle open.
	 * I _think_ we're OK here with SMP barriers - a mistaken decision will
	 * result in this test being false, so we go in and take the locks.
	 */
	if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
		JBUFFER_TRACE(jh, "fastpath");
		J_ASSERT_JH(jh, jh->b_transaction ==
					journal->j_running_transaction);
		goto out_unlock_bh;
	}

	set_buffer_jbddirty(bh);

	/*
	 * Metadata already on the current transaction list doesn't
	 * need to be filed.  Metadata on another transaction's list must
	 * be committing, and will be refiled once the commit completes:
	 * leave it alone for now.
	 */
	if (jh->b_transaction != transaction) {
		JBUFFER_TRACE(jh, "already on other transaction");
		J_ASSERT_JH(jh, jh->b_transaction ==
					journal->j_committing_transaction);
		J_ASSERT_JH(jh, jh->b_next_transaction == transaction);
		/* And this case is illegal: we can't reuse another
		 * transaction's data buffer, ever. */
		goto out_unlock_bh;
	}

	/* That test should have eliminated the following case: */
	J_ASSERT_JH(jh, jh->b_frozen_data == NULL);

	JBUFFER_TRACE(jh, "file as BJ_Metadata");
	spin_lock(&journal->j_list_lock);
	__journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
	spin_unlock(&journal->j_list_lock);
out_unlock_bh:
	jbd_unlock_bh_state(bh);
out:
	JBUFFER_TRACE(jh, "exit");
	return 0;
}

/*
 * journal_release_buffer: undo a get_write_access without any buffer
 * updates, if the update decided in the end that it didn't need access.
 *
 */
void
journal_release_buffer(handle_t *handle, struct buffer_head *bh)
{
	BUFFER_TRACE(bh, "entry");
}

/**
 * int journal_forget() - bforget() for potentially-journaled buffers.
 * @handle: transaction handle
 * @bh:     bh to 'forget'
 *
 * We can only do the bforget if there are no commits pending against the
 * buffer.  If the buffer is dirty in the current running transaction we
 * can safely unlink it.
 *
 * bh may not be a journalled buffer at all - it may be a non-JBD
 * buffer which came off the hashtable.  Check for this.
 *
 * Decrements bh->b_count by one.
 *
 * Allow this call even if the handle has aborted --- it may be part of
 * the caller's cleanup after an abort.
 */
int journal_forget (handle_t *handle, struct buffer_head *bh)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	struct journal_head *jh;
	int drop_reserve = 0;
	int err = 0;
	int was_modified = 0;

	BUFFER_TRACE(bh, "entry");

	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);

	if (!buffer_jbd(bh))
		goto not_jbd;
	jh = bh2jh(bh);

	/* Critical error: attempting to delete a bitmap buffer, maybe?
	 * Don't do any jbd operations, and return an error. */
	if (!J_EXPECT_JH(jh, !jh->b_committed_data,
			 "inconsistent data on disk")) {
		err = -EIO;
		goto not_jbd;
	}

	/* keep track of whether or not this transaction modified us */
	was_modified = jh->b_modified;

	/*
	 * The buffer's going from the transaction, we must drop
	 * all references -bzzz
	 */
	jh->b_modified = 0;

	if (jh->b_transaction == handle->h_transaction) {
		J_ASSERT_JH(jh, !jh->b_frozen_data);

		/* If we are forgetting a buffer which is already part
		 * of this transaction, then we can just drop it from
		 * the transaction immediately. */
		clear_buffer_dirty(bh);
		clear_buffer_jbddirty(bh);

		JBUFFER_TRACE(jh, "belongs to current transaction: unfile");

		/*
		 * we only want to drop a reference if this transaction
		 * modified the buffer
		 */
		if (was_modified)
			drop_reserve = 1;

		/*
		 * We are no longer going to journal this buffer.
		 * However, the commit of this transaction is still
		 * important to the buffer: the delete that we are now
		 * processing might obsolete an old log entry, so by
		 * committing, we can satisfy the buffer's checkpoint.
		 *
		 * So, if we have a checkpoint on the buffer, we should
		 * now refile the buffer on our BJ_Forget list so that
		 * we know to remove the checkpoint after we commit.
		 */

		if (jh->b_cp_transaction) {
			__journal_temp_unlink_buffer(jh);
			__journal_file_buffer(jh, transaction, BJ_Forget);
		} else {
			__journal_unfile_buffer(jh);
			if (!buffer_jbd(bh)) {
				spin_unlock(&journal->j_list_lock);
				jbd_unlock_bh_state(bh);
				__bforget(bh);
				goto drop;
			}
		}
	} else if (jh->b_transaction) {
		J_ASSERT_JH(jh, (jh->b_transaction ==
				 journal->j_committing_transaction));
		/* However, if the buffer is still owned by a prior
		 * (committing) transaction, we can't drop it yet... */
		JBUFFER_TRACE(jh, "belongs to older transaction");
		/* ... but we CAN drop it from the new transaction if we
		 * have also modified it since the original commit. */

		if (jh->b_next_transaction) {
			J_ASSERT(jh->b_next_transaction == transaction);
			jh->b_next_transaction = NULL;

			/*
			 * only drop a reference if this transaction modified
			 * the buffer
			 */
			if (was_modified)
				drop_reserve = 1;
		}
	}

not_jbd:
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);
	__brelse(bh);
drop:
	if (drop_reserve) {
		/* no need to reserve log space for this block -bzzz */
		handle->h_buffer_credits++;
	}
	return err;
}
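
/*
 * Illustrative sketch (editorial addition): when a filesystem deletes
 * a metadata block it had modified under this handle, it drops the
 * block with journal_forget() rather than bforget(); per the kerneldoc
 * above, the call consumes the caller's bh reference:
 *
 *	err = journal_forget(handle, bh);
 */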

/**
 * int journal_stop() - complete a transaction
 * @handle: transaction to complete.
 *
 * All done for a particular handle.
 *
 * There is not much action needed here.  We just return any remaining
 * buffer credits to the transaction and remove the handle.  The only
 * complication is that we need to start a commit operation if the
 * filesystem is marked for synchronous update.
 *
 * journal_stop itself will not usually return an error, but it may
 * do so in unusual circumstances.  In particular, expect it to
 * return -EIO if a journal_abort has been executed since the
 * transaction began.
 */
int journal_stop(handle_t *handle)
{
	transaction_t *transaction = handle->h_transaction;
	journal_t *journal = transaction->t_journal;
	int err;
	pid_t pid;

	J_ASSERT(journal_current_handle() == handle);

	if (is_handle_aborted(handle))
		err = -EIO;
	else {
		J_ASSERT(transaction->t_updates > 0);
		err = 0;
	}

	if (--handle->h_ref > 0) {
		jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
			  handle->h_ref);
		return err;
	}

	jbd_debug(4, "Handle %p going down\n", handle);

	/*
	 * Implement synchronous transaction batching.  If the handle
	 * was synchronous, don't force a commit immediately.  Let's
	 * yield and let another thread piggyback onto this transaction.
	 * Keep doing that while new threads continue to arrive.
	 * It doesn't cost much - we're about to run a commit and sleep
	 * on IO anyway.  Speeds up many-threaded, many-dir operations
	 * by 30x or more...
	 *
	 * We try and optimize the sleep time against what the underlying disk
	 * can do, instead of having a static sleep time.  This is useful for
	 * the case where our storage is so fast that it is more optimal to go
	 * ahead and force a flush and wait for the transaction to be committed
	 * than it is to wait for an arbitrary amount of time for new writers to
	 * join the transaction.  We achieve this by measuring how long it takes
	 * to commit a transaction, and compare it with how long this
	 * transaction has been running, and if run time < commit time then we
	 * sleep for the delta and commit.  This greatly helps super fast disks
	 * that would see slowdowns as more threads started doing fsyncs.
	 *
	 * But don't do this if this process was the most recent one to
	 * perform a synchronous write.  We do this to detect the case where a
	 * single process is doing a stream of sync writes.  No point in waiting
	 * for joiners in that case.
	 */
	pid = current->pid;
	if (handle->h_sync && journal->j_last_sync_writer != pid) {
		u64 commit_time, trans_time;

		journal->j_last_sync_writer = pid;

		spin_lock(&journal->j_state_lock);
		commit_time = journal->j_average_commit_time;
		spin_unlock(&journal->j_state_lock);

		trans_time = ktime_to_ns(ktime_sub(ktime_get(),
						   transaction->t_start_time));

		commit_time = min_t(u64, commit_time,
				    1000*jiffies_to_usecs(1));

		if (trans_time < commit_time) {
			ktime_t expires = ktime_add_ns(ktime_get(),
						       commit_time);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
		}
	}

	if (handle->h_sync)
		transaction->t_synchronous_commit = 1;
	current->journal_info = NULL;
	spin_lock(&journal->j_state_lock);
	spin_lock(&transaction->t_handle_lock);
	transaction->t_outstanding_credits -= handle->h_buffer_credits;
	transaction->t_updates--;
	if (!transaction->t_updates) {
		wake_up(&journal->j_wait_updates);
		if (journal->j_barrier_count)
			wake_up(&journal->j_wait_transaction_locked);
	}

	/*
	 * If the handle is marked SYNC, we need to set another commit
	 * going!  We also want to force a commit if the current
	 * transaction is occupying too much of the log, or if the
	 * transaction is too old now.
	 */
	if (handle->h_sync ||
			transaction->t_outstanding_credits >
				journal->j_max_transaction_buffers ||
			time_after_eq(jiffies, transaction->t_expires)) {
		/* Do this even for aborted journals: an abort still
		 * completes the commit thread, it just doesn't write
		 * anything to disk. */
		tid_t tid = transaction->t_tid;

		spin_unlock(&transaction->t_handle_lock);
		jbd_debug(2, "transaction too old, requesting commit for "
					"handle %p\n", handle);
		/* This is non-blocking */
		__log_start_commit(journal, transaction->t_tid);
		spin_unlock(&journal->j_state_lock);

		/*
		 * Special case: JFS_SYNC synchronous updates require us
		 * to wait for the commit to complete.
		 */
		if (handle->h_sync && !(current->flags & PF_MEMALLOC))
			err = log_wait_commit(journal, tid);
	} else {
		spin_unlock(&transaction->t_handle_lock);
		spin_unlock(&journal->j_state_lock);
	}

	lock_map_release(&handle->h_lockdep_map);

	jbd_free_handle(handle);
	return err;
}

/**
 * int journal_force_commit() - force any uncommitted transactions
 * @journal: journal to force
 *
 * For synchronous operations: force any uncommitted transactions
 * to disk.  May seem kludgy, but it reuses all the handle batching
 * code in a very simple manner.
 */
int journal_force_commit(journal_t *journal)
{
	handle_t *handle;
	int ret;

	handle = journal_start(journal, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
	} else {
		handle->h_sync = 1;
		ret = journal_stop(handle);
	}
	return ret;
}

/*
 *
 * List management code snippets: various functions for manipulating the
 * transaction buffer lists.
 *
 */

/*
 * Append a buffer to a transaction list, given the transaction's list head
 * pointer.
 *
 * j_list_lock is held.
 *
 * jbd_lock_bh_state(jh2bh(jh)) is held.
 */

static inline void
__blist_add_buffer(struct journal_head **list, struct journal_head *jh)
{
	if (!*list) {
		jh->b_tnext = jh->b_tprev = jh;
		*list = jh;
	} else {
		/* Insert at the tail of the list to preserve order */
		struct journal_head *first = *list, *last = first->b_tprev;
		jh->b_tprev = last;
		jh->b_tnext = first;
		last->b_tnext = first->b_tprev = jh;
	}
}

/*
 * Remove a buffer from a transaction list, given the transaction's list
 * head pointer.
 *
 * Called with j_list_lock held, and the journal may not be locked.
 *
 * jbd_lock_bh_state(jh2bh(jh)) is held.
 */

static inline void
__blist_del_buffer(struct journal_head **list, struct journal_head *jh)
{
	if (*list == jh) {
		*list = jh->b_tnext;
		if (*list == jh)
			*list = NULL;
	}
	jh->b_tprev->b_tnext = jh->b_tnext;
	jh->b_tnext->b_tprev = jh->b_tprev;
}
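
/*
 * Editorial note (not in the original file): the per-transaction lists
 * above are circular and doubly linked, with *list pointing at the
 * head.  After __blist_add_buffer() of A and then B:
 *
 *	*list == A
 *	A->b_tnext == B,  B->b_tnext == A
 *	A->b_tprev == B,  B->b_tprev == A
 *
 * which is why appending at the tail is O(1) via (*list)->b_tprev.
 */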

/*
 * Remove a buffer from the appropriate transaction list.
 *
 * Note that this function can *change* the value of
 * bh->b_transaction->t_sync_datalist, t_buffers, t_forget,
 * t_iobuf_list, t_shadow_list, t_log_list or t_reserved_list.  If the caller
 * is holding onto a copy of one of these pointers, it could go bad.
 * Generally the caller needs to re-read the pointer from the transaction_t.
 *
 * Called under j_list_lock.  The journal may not be locked.
 */
static void __journal_temp_unlink_buffer(struct journal_head *jh)
{
	struct journal_head **list = NULL;
	transaction_t *transaction;
	struct buffer_head *bh = jh2bh(jh);

	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
	transaction = jh->b_transaction;
	if (transaction)
		assert_spin_locked(&transaction->t_journal->j_list_lock);

	J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
	if (jh->b_jlist != BJ_None)
		J_ASSERT_JH(jh, transaction != NULL);

	switch (jh->b_jlist) {
	case BJ_None:
		return;
	case BJ_SyncData:
		list = &transaction->t_sync_datalist;
		break;
	case BJ_Metadata:
		transaction->t_nr_buffers--;
		J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
		list = &transaction->t_buffers;
		break;
	case BJ_Forget:
		list = &transaction->t_forget;
		break;
	case BJ_IO:
		list = &transaction->t_iobuf_list;
		break;
	case BJ_Shadow:
		list = &transaction->t_shadow_list;
		break;
	case BJ_LogCtl:
		list = &transaction->t_log_list;
		break;
	case BJ_Reserved:
		list = &transaction->t_reserved_list;
		break;
	case BJ_Locked:
		list = &transaction->t_locked_list;
		break;
	}

	__blist_del_buffer(list, jh);
	jh->b_jlist = BJ_None;
	if (test_clear_buffer_jbddirty(bh))
		mark_buffer_dirty(bh);	/* Expose it to the VM */
}

/*
 * Remove buffer from all transactions.
 *
 * Called with bh_state lock and j_list_lock
 *
 * jh and bh may be already freed when this function returns.
 */
void __journal_unfile_buffer(struct journal_head *jh)
{
	__journal_temp_unlink_buffer(jh);
	jh->b_transaction = NULL;
	journal_put_journal_head(jh);
}

void journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
{
	struct buffer_head *bh = jh2bh(jh);

	/* Get reference so that buffer cannot be freed before we unlock it */
	get_bh(bh);
	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);
	__journal_unfile_buffer(jh);
	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);
	__brelse(bh);
}

/*
 * Called from journal_try_to_free_buffers().
 *
 * Called under jbd_lock_bh_state(bh)
 */
static void
__journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
{
	struct journal_head *jh;

	jh = bh2jh(bh);

	if (buffer_locked(bh) || buffer_dirty(bh))
		goto out;

	if (jh->b_next_transaction != NULL)
		goto out;

	spin_lock(&journal->j_list_lock);
	if (jh->b_transaction != NULL && jh->b_cp_transaction == NULL) {
		if (jh->b_jlist == BJ_SyncData || jh->b_jlist == BJ_Locked) {
			/* A written-back ordered data buffer */
			JBUFFER_TRACE(jh, "release data");
			__journal_unfile_buffer(jh);
		}
	} else if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) {
		/* written-back checkpointed metadata buffer */
		if (jh->b_jlist == BJ_None) {
			JBUFFER_TRACE(jh, "remove from checkpoint list");
			__journal_remove_checkpoint(jh);
		}
	}
	spin_unlock(&journal->j_list_lock);
out:
	return;
}

/**
 * int journal_try_to_free_buffers() - try to free page buffers.
 * @journal: journal for operation
 * @page: to try and free
 * @gfp_mask: we use the mask to detect how hard we should try to release
 * buffers.
 * If __GFP_WAIT and __GFP_FS is set, we wait for commit code to
 * release the buffers.
 *
 *
 * For all the buffers on this page,
 * if they are fully written out ordered data, move them onto BUF_CLEAN
 * so try_to_free_buffers() can reap them.
 *
 * This function returns non-zero if we wish try_to_free_buffers()
 * to be called. We do this if the page is releasable by try_to_free_buffers().
 * We also do it if the page has locked or dirty buffers and the caller wants
 * us to perform sync or async writeout.
 *
 * This complicates JBD locking somewhat.  We aren't protected by the
 * BKL here.  We wish to remove the buffer from its committing or
 * running transaction's ->t_datalist via __journal_unfile_buffer.
 *
 * This may *change* the value of transaction_t->t_datalist, so anyone
 * who looks at t_datalist needs to lock against this function.
 *
 * Even worse, someone may be doing a journal_dirty_data on this
 * buffer.  So we need to lock against that.  journal_dirty_data()
 * will come out of the lock with the buffer dirty, which makes it
 * ineligible for release here.
 *
 * Who else is affected by this?  hmm...  Really the only contender
 * is do_get_write_access() - it could be looking at the buffer while
 * journal_try_to_free_buffer() is changing its state.  But that
 * cannot happen because we never reallocate freed data as metadata
 * while the data is part of a transaction.  Yes?
 *
 * Return 0 on failure, 1 on success
 */
int journal_try_to_free_buffers(journal_t *journal,
				struct page *page, gfp_t gfp_mask)
{
	struct buffer_head *head;
	struct buffer_head *bh;
	int ret = 0;

	J_ASSERT(PageLocked(page));

	head = page_buffers(page);
	bh = head;
	do {
		struct journal_head *jh;

		/*
		 * We take our own ref against the journal_head here to avoid
		 * having to add tons of locking around each instance of
		 * journal_put_journal_head().
		 */
		jh = journal_grab_journal_head(bh);
		if (!jh)
			continue;

		jbd_lock_bh_state(bh);
		__journal_try_to_free_buffer(journal, bh);
		journal_put_journal_head(jh);
		jbd_unlock_bh_state(bh);
		if (buffer_jbd(bh))
			goto busy;
	} while ((bh = bh->b_this_page) != head);

	ret = try_to_free_buffers(page);

busy:
	return ret;
}

/*
 * This buffer is no longer needed.  If it is on an older transaction's
 * checkpoint list we need to record it on this transaction's forget list
 * to pin this buffer (and hence its checkpointing transaction) down until
 * this transaction commits.  If the buffer isn't on a checkpoint list, we
 * release it.
 * Returns non-zero if JBD no longer has an interest in the buffer.
 *
 * Called under j_list_lock.
 *
 * Called under jbd_lock_bh_state(bh).

/*
 * This buffer is no longer needed. If it is on an older transaction's
 * checkpoint list we need to record it on this transaction's forget list
 * to pin this buffer (and hence its checkpointing transaction) down until
 * this transaction commits. If the buffer isn't on a checkpoint list, we
 * release it.
 * Returns non-zero if JBD no longer has an interest in the buffer.
 *
 * Called under j_list_lock.
 *
 * Called under jbd_lock_bh_state(bh).
 */
static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
{
        int may_free = 1;
        struct buffer_head *bh = jh2bh(jh);

        if (jh->b_cp_transaction) {
                JBUFFER_TRACE(jh, "on running+cp transaction");
                __journal_temp_unlink_buffer(jh);
                /*
                 * We don't want to write the buffer anymore, so clear the
                 * dirty bit so that we don't confuse the checks in
                 * __journal_file_buffer.
                 */
                clear_buffer_dirty(bh);
                __journal_file_buffer(jh, transaction, BJ_Forget);
                may_free = 0;
        } else {
                JBUFFER_TRACE(jh, "on running transaction");
                __journal_unfile_buffer(jh);
        }
        return may_free;
}

/*
 * journal_invalidatepage
 *
 * This code is tricky. It has a number of cases to deal with.
 *
 * There are two invariants which this code relies on:
 *
 * i_size must be updated on disk before we start calling invalidatepage
 * on the data.
 *
 * This is done in ext3 by defining an ext3_setattr method which
 * updates i_size before truncate gets going. By maintaining this
 * invariant, we can be sure that it is safe to throw away any buffers
 * attached to the current transaction: once the transaction commits,
 * we know that the data will not be needed.
 *
 * Note however that we can *not* throw away data belonging to the
 * previous, committing transaction!
 *
 * Any disk blocks which *are* part of the previous, committing
 * transaction (and which therefore cannot be discarded immediately) are
 * not going to be reused in the new running transaction.
 *
 * The bitmap committed_data images guarantee this: any block which is
 * allocated in one transaction and removed in the next will be marked
 * as in-use in the committed_data bitmap, so it cannot be reused until
 * the next transaction to delete the block commits. This means that
 * leaving committing buffers dirty is quite safe: the disk blocks
 * cannot be reallocated to a different file and so buffer aliasing is
 * not possible.
 *
 * The above applies mainly to ordered data mode. In writeback mode we
 * don't make guarantees about the order in which data hits disk --- in
 * particular we don't guarantee that new dirty data is flushed before
 * transaction commit --- so it is always safe just to discard data
 * immediately in that mode. --sct
 */
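
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the first invariant above means a truncate path must journal the new
 * i_size before the page cache is shrunk.  Very roughly, and with
 * entirely hypothetical helper names ("example_...", "EXAMPLE_..."), the
 * required ordering looks like this:
 */
#if 0
static int example_setattr_size(struct inode *inode, loff_t new_size)
{
        handle_t *handle;

        /* EXAMPLE_JOURNAL and EXAMPLE_CREDITS are placeholders */
        handle = journal_start(EXAMPLE_JOURNAL(inode), EXAMPLE_CREDITS);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        /* 1) journal the new i_size first ... */
        i_size_write(inode, new_size);
        example_mark_inode_dirty(handle, inode);
        journal_stop(handle);

        /* 2) ... and only then shrink the page cache, which ends up
         * calling journal_invalidatepage() on the truncated pages. */
        example_truncate_pagecache(inode, new_size);
        return 0;
}
#endif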

/*
 * The journal_unmap_buffer helper function returns zero if the buffer
 * concerned remains pinned as an anonymous buffer belonging to an older
 * transaction.
 *
 * We're outside-transaction here. Either or both of j_running_transaction
 * and j_committing_transaction may be NULL.
 */
static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
{
        transaction_t *transaction;
        struct journal_head *jh;
        int may_free = 1;
        int ret;

        BUFFER_TRACE(bh, "entry");

        /* It is safe to proceed here without the j_list_lock because the
         * buffers cannot be stolen by try_to_free_buffers as long as we are
         * holding the page lock. --sct */

        if (!buffer_jbd(bh))
                goto zap_buffer_unlocked;

        spin_lock(&journal->j_state_lock);
        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);

        jh = journal_grab_journal_head(bh);
        if (!jh)
                goto zap_buffer_no_jh;

        /*
         * We cannot remove the buffer from checkpoint lists until the
         * transaction adding the inode to the orphan list (let's call it T)
         * is committed.  Otherwise, if the transaction changing the buffer
         * were cleaned from the journal before T is committed, a crash
         * would cause the correct contents of the buffer to be lost.  On
         * the other hand, we have to clear the buffer dirty bit no later
         * than the moment when the transaction marking the buffer as freed
         * in the filesystem structures is committed, because from that
         * moment on the buffer can be reallocated and used by a different
         * page.  Since the block hasn't been freed yet but the inode has
         * already been added to the orphan list, it is safe for us to add
         * the buffer to the BJ_Forget list of the newest transaction.
         */
        transaction = jh->b_transaction;
        if (transaction == NULL) {
                /* First case: not on any transaction.  If it
                 * has no checkpoint link, then we can zap it:
                 * it's a writeback-mode buffer so we don't care
                 * if it hits disk safely. */
                if (!jh->b_cp_transaction) {
                        JBUFFER_TRACE(jh, "not on any transaction: zap");
                        goto zap_buffer;
                }

                if (!buffer_dirty(bh)) {
                        /* bdflush has written it.  We can drop it now */
                        goto zap_buffer;
                }

                /* OK, it must be in the journal but still not
                 * written fully to disk: it's metadata or
                 * journaled data... */

                if (journal->j_running_transaction) {
                        /* ... and once the current transaction has
                         * committed, the buffer won't be needed any
                         * longer. */
                        JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
                        ret = __dispose_buffer(jh,
                                        journal->j_running_transaction);
                        journal_put_journal_head(jh);
                        spin_unlock(&journal->j_list_lock);
                        jbd_unlock_bh_state(bh);
                        spin_unlock(&journal->j_state_lock);
                        return ret;
                } else {
                        /* There is no currently-running transaction. So the
                         * orphan record which we wrote for this file must have
                         * passed into commit.  We must attach this buffer to
                         * the committing transaction, if it exists. */
                        if (journal->j_committing_transaction) {
                                JBUFFER_TRACE(jh, "give to committing trans");
                                ret = __dispose_buffer(jh,
                                        journal->j_committing_transaction);
                                journal_put_journal_head(jh);
                                spin_unlock(&journal->j_list_lock);
                                jbd_unlock_bh_state(bh);
                                spin_unlock(&journal->j_state_lock);
                                return ret;
                        } else {
                                /* The orphan record's transaction has
                                 * committed.  We can cleanse this buffer */
                                clear_buffer_jbddirty(bh);
                                goto zap_buffer;
                        }
                }
        } else if (transaction == journal->j_committing_transaction) {
                JBUFFER_TRACE(jh, "on committing transaction");
                if (jh->b_jlist == BJ_Locked) {
                        /*
                         * The buffer is on the committing transaction's locked
                         * list.  We have the buffer locked, so I/O has
                         * completed.  So we can nail the buffer now.
                         */
                        may_free = __dispose_buffer(jh, transaction);
                        goto zap_buffer;
                }
                /*
                 * The buffer is committing, so we simply cannot touch it.
                 * We just set b_next_transaction to the running transaction
                 * (if there is one) and mark the buffer as freed so that the
                 * commit code knows it should clear the dirty bits when it
                 * is done with the buffer.
                 */
                set_buffer_freed(bh);
                if (journal->j_running_transaction && buffer_jbddirty(bh))
                        jh->b_next_transaction = journal->j_running_transaction;
                journal_put_journal_head(jh);
                spin_unlock(&journal->j_list_lock);
                jbd_unlock_bh_state(bh);
                spin_unlock(&journal->j_state_lock);
                return 0;
        } else {
                /* Good, the buffer belongs to the running transaction.
                 * We are writing our own transaction's data, not any
                 * previous one's, so it is safe to throw it away
                 * (remember that we expect the filesystem to have set
                 * i_size already for this truncate, so recovery will not
                 * expose the disk blocks we are discarding here.) */
                J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
                JBUFFER_TRACE(jh, "on running transaction");
                may_free = __dispose_buffer(jh, transaction);
        }

zap_buffer:
        journal_put_journal_head(jh);
zap_buffer_no_jh:
        spin_unlock(&journal->j_list_lock);
        jbd_unlock_bh_state(bh);
        spin_unlock(&journal->j_state_lock);
zap_buffer_unlocked:
        clear_buffer_dirty(bh);
        J_ASSERT_BH(bh, !buffer_jbddirty(bh));
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        bh->b_bdev = NULL;
        return may_free;
}

/**
 * void journal_invalidatepage() - invalidate a journal page
 * @journal: journal to use for flush
 * @page: page to flush
 * @offset: offset within the page from which to invalidate
 *
 * Reap page buffers containing data at or after @offset in the page.
 */
void journal_invalidatepage(journal_t *journal,
                            struct page *page,
                            unsigned long offset)
{
        struct buffer_head *head, *bh, *next;
        unsigned int curr_off = 0;
        int may_free = 1;

        if (!PageLocked(page))
                BUG();
        if (!page_has_buffers(page))
                return;

        /* We will potentially be playing with lists other than just the
         * data lists (especially for journaled data mode), so be
         * cautious in our locking. */

        head = bh = page_buffers(page);
        do {
                unsigned int next_off = curr_off + bh->b_size;
                next = bh->b_this_page;

                if (offset <= curr_off) {
                        /* This block is wholly outside the truncation point */
                        lock_buffer(bh);
                        may_free &= journal_unmap_buffer(journal, bh);
                        unlock_buffer(bh);
                }
                curr_off = next_off;
                bh = next;

        } while (bh != head);

        if (!offset) {
                if (may_free && try_to_free_buffers(page))
                        J_ASSERT(!page_has_buffers(page));
        }
}
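
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a filesystem calls journal_invalidatepage() from its ->invalidatepage
 * address_space operation during truncate, much as ext3 does.  The names
 * "example_invalidatepage" and "EXAMPLE_JOURNAL" are hypothetical.
 */
#if 0
static void example_invalidatepage(struct page *page, unsigned long offset)
{
        journal_t *journal = EXAMPLE_JOURNAL(page->mapping->host);

        /* On a full-page invalidate, forget any pending dirtying. */
        if (offset == 0)
                ClearPageChecked(page);

        journal_invalidatepage(journal, page, offset);
}
#endif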

/*
 * File a buffer on the given transaction list.
 */
void __journal_file_buffer(struct journal_head *jh,
                        transaction_t *transaction, int jlist)
{
        struct journal_head **list = NULL;
        int was_dirty = 0;
        struct buffer_head *bh = jh2bh(jh);

        J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
        assert_spin_locked(&transaction->t_journal->j_list_lock);

        J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
        J_ASSERT_JH(jh, jh->b_transaction == transaction ||
                                jh->b_transaction == NULL);

        if (jh->b_transaction && jh->b_jlist == jlist)
                return;

        if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
            jlist == BJ_Shadow || jlist == BJ_Forget) {
                /*
                 * For metadata buffers, we track the dirty bit in
                 * buffer_jbddirty instead of buffer_dirty.  We should not
                 * see a dirty bit set here because we clear it in
                 * do_get_write_access, but e.g. tune2fs can modify the sb
                 * and set the dirty bit at any time, so we try to
                 * gracefully handle that.
                 */
                if (buffer_dirty(bh))
                        warn_dirty_buffer(bh);
                if (test_clear_buffer_dirty(bh) ||
                    test_clear_buffer_jbddirty(bh))
                        was_dirty = 1;
        }

        if (jh->b_transaction)
                __journal_temp_unlink_buffer(jh);
        else
                journal_grab_journal_head(bh);
        jh->b_transaction = transaction;

        switch (jlist) {
        case BJ_None:
                J_ASSERT_JH(jh, !jh->b_committed_data);
                J_ASSERT_JH(jh, !jh->b_frozen_data);
                return;
        case BJ_SyncData:
                list = &transaction->t_sync_datalist;
                break;
        case BJ_Metadata:
                transaction->t_nr_buffers++;
                list = &transaction->t_buffers;
                break;
        case BJ_Forget:
                list = &transaction->t_forget;
                break;
        case BJ_IO:
                list = &transaction->t_iobuf_list;
                break;
        case BJ_Shadow:
                list = &transaction->t_shadow_list;
                break;
        case BJ_LogCtl:
                list = &transaction->t_log_list;
                break;
        case BJ_Reserved:
                list = &transaction->t_reserved_list;
                break;
        case BJ_Locked:
                list = &transaction->t_locked_list;
                break;
        }

        __blist_add_buffer(list, jh);
        jh->b_jlist = jlist;

        if (was_dirty)
                set_buffer_jbddirty(bh);
}

void journal_file_buffer(struct journal_head *jh,
                                transaction_t *transaction, int jlist)
{
        jbd_lock_bh_state(jh2bh(jh));
        spin_lock(&transaction->t_journal->j_list_lock);
        __journal_file_buffer(jh, transaction, jlist);
        spin_unlock(&transaction->t_journal->j_list_lock);
        jbd_unlock_bh_state(jh2bh(jh));
}
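
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the unlocked/locked pairing above follows the usual convention in this
 * file.  A caller already holding jbd_lock_bh_state() and j_list_lock
 * uses __journal_file_buffer() directly, as __dispose_buffer() does
 * earlier in this file; any other caller lets the wrapper take both
 * locks.  For example, with a journal_head jh in hand:
 */
#if 0
        /* pin a buffer on the running transaction's forget list */
        journal_file_buffer(jh, journal->j_running_transaction, BJ_Forget);
#endif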

/*
 * Remove a buffer from its current buffer list in preparation for
 * dropping it from its current transaction entirely.  If the buffer has
 * already started to be used by a subsequent transaction, refile the
 * buffer on that transaction's metadata list.
 *
 * Called under j_list_lock
 * Called under jbd_lock_bh_state(jh2bh(jh))
 *
 * jh and bh may already be freed when this function returns.
 */
void __journal_refile_buffer(struct journal_head *jh)
{
        int was_dirty, jlist;
        struct buffer_head *bh = jh2bh(jh);

        J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
        if (jh->b_transaction)
                assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);

        /* If the buffer is now unused, just drop it. */
        if (jh->b_next_transaction == NULL) {
                __journal_unfile_buffer(jh);
                return;
        }

        /*
         * It has been modified by a later transaction: add it to the new
         * transaction's metadata list.
         */

        was_dirty = test_clear_buffer_jbddirty(bh);
        __journal_temp_unlink_buffer(jh);
        /*
         * We set b_transaction here because b_next_transaction will inherit
         * our jh reference and thus __journal_file_buffer() must not take a
         * new one.
         */
        jh->b_transaction = jh->b_next_transaction;
        jh->b_next_transaction = NULL;
        if (buffer_freed(bh))
                jlist = BJ_Forget;
        else if (jh->b_modified)
                jlist = BJ_Metadata;
        else
                jlist = BJ_Reserved;
        __journal_file_buffer(jh, jh->b_transaction, jlist);
        J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);

        if (was_dirty)
                set_buffer_jbddirty(bh);
}

/*
 * __journal_refile_buffer() with necessary locking added.  We take our
 * bh reference so that we can safely unlock bh.
 *
 * The jh and bh may be freed by this call.
 */
void journal_refile_buffer(journal_t *journal, struct journal_head *jh)
{
        struct buffer_head *bh = jh2bh(jh);

        /* Get a reference so that the buffer cannot be freed before we
         * unlock it */
        get_bh(bh);
        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);
        __journal_refile_buffer(jh);
        jbd_unlock_bh_state(bh);
        spin_unlock(&journal->j_list_lock);
        __brelse(bh);
}
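
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the refile path is driven mainly by journal commit.  Simplified from
 * the logic in fs/jbd/commit.c, the final stage of commit walks the
 * committing transaction's t_forget list and refiles each buffer, which
 * either drops it or hands it on to jh->b_next_transaction:
 */
#if 0
        while (commit_transaction->t_forget) {
                struct journal_head *jh = commit_transaction->t_forget;
                struct buffer_head *bh = jh2bh(jh);

                jbd_lock_bh_state(bh);  /* bh_state nests outside j_list_lock */
                spin_lock(&journal->j_list_lock);
                /* ... checkpoint bookkeeping elided ... */
                __journal_refile_buffer(jh);    /* drop, or move onward */
                spin_unlock(&journal->j_list_lock);
                jbd_unlock_bh_state(bh);
        }
#endif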