Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 updates from Ted Ts'o:
"Pretty much all bug fixes and clean ups for 4.3, after a lot of
features and other churn going into 4.2"

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
Revert "ext4: remove block_device_ejected"
ext4: ratelimit the file system mounted message
ext4: silence a format string false positive
ext4: simplify some code in read_mmp_block()
ext4: don't manipulate recovery flag when freezing no-journal fs
jbd2: limit number of reserved credits
ext4 crypto: remove duplicate header file
ext4: update c/mtime on truncate up
jbd2: avoid infinite loop when destroying aborted journal
ext4, jbd2: add REQ_FUA flag when recording an error in the superblock
ext4 crypto: fix spelling typo in comment
ext4 crypto: exit cleanly if ext4_derive_key_aes() fails
ext4: reject journal options for ext2 mounts
ext4: implement cgroup writeback support
ext4: replace ext4_io_submit->io_op with ->io_wbc
ext4 crypto: check for too-short encrypted file names
ext4 crypto: use a jbd2 transaction when adding a crypto policy
jbd2: speedup jbd2_journal_dirty_metadata()

+208 -67
+4 -1
fs/ext4/crypto_fname.c
··· 19 19 #include <linux/gfp.h> 20 20 #include <linux/kernel.h> 21 21 #include <linux/key.h> 22 - #include <linux/key.h> 23 22 #include <linux/list.h> 24 23 #include <linux/mempool.h> 25 24 #include <linux/random.h> ··· 327 328 oname->len = iname->len; 328 329 return oname->len; 329 330 } 331 + } 332 + if (iname->len < EXT4_CRYPTO_BLOCK_SIZE) { 333 + EXT4_ERROR_INODE(inode, "encrypted inode too small"); 334 + return -EUCLEAN; 330 335 } 331 336 if (EXT4_I(inode)->i_crypt_info) 332 337 return ext4_fname_decrypt(inode, iname, oname);
+3 -1
fs/ext4/crypto_key.c
··· 30 30 31 31 /** 32 32 * ext4_derive_key_aes() - Derive a key using AES-128-ECB 33 - * @deriving_key: Encryption key used for derivatio. 33 + * @deriving_key: Encryption key used for derivation. 34 34 * @source_key: Source key to which to apply derivation. 35 35 * @derived_key: Derived key. 36 36 * ··· 220 220 BUG_ON(master_key->size != EXT4_AES_256_XTS_KEY_SIZE); 221 221 res = ext4_derive_key_aes(ctx.nonce, master_key->raw, 222 222 raw_key); 223 + if (res) 224 + goto out; 223 225 got_key: 224 226 ctfm = crypto_alloc_ablkcipher(cipher_str, 0, 0); 225 227 if (!ctfm || IS_ERR(ctfm)) {
+15 -2
fs/ext4/crypto_policy.c
··· 12 12 #include <linux/string.h> 13 13 #include <linux/types.h> 14 14 15 + #include "ext4_jbd2.h" 15 16 #include "ext4.h" 16 17 #include "xattr.h" 17 18 ··· 50 49 struct inode *inode, const struct ext4_encryption_policy *policy) 51 50 { 52 51 struct ext4_encryption_context ctx; 53 - int res = 0; 52 + handle_t *handle; 53 + int res, res2; 54 54 55 55 res = ext4_convert_inline_data(inode); 56 56 if (res) ··· 80 78 BUILD_BUG_ON(sizeof(ctx.nonce) != EXT4_KEY_DERIVATION_NONCE_SIZE); 81 79 get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE); 82 80 81 + handle = ext4_journal_start(inode, EXT4_HT_MISC, 82 + ext4_jbd2_credits_xattr(inode)); 83 + if (IS_ERR(handle)) 84 + return PTR_ERR(handle); 83 85 res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION, 84 86 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx, 85 87 sizeof(ctx), 0); 86 - if (!res) 88 + if (!res) { 87 89 ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT); 90 + res = ext4_mark_inode_dirty(handle, inode); 91 + if (res) 92 + EXT4_ERROR_INODE(inode, "Failed to mark inode dirty"); 93 + } 94 + res2 = ext4_journal_stop(handle); 95 + if (!res) 96 + res = res2; 88 97 return res; 89 98 } 90 99
+1 -1
fs/ext4/ext4.h
··· 187 187 } ext4_io_end_t; 188 188 189 189 struct ext4_io_submit { 190 - int io_op; 190 + struct writeback_control *io_wbc; 191 191 struct bio *io_bio; 192 192 ext4_io_end_t *io_end; 193 193 sector_t io_next_block;
+8
fs/ext4/inode.c
··· 4728 4728 error = ext4_orphan_add(handle, inode); 4729 4729 orphan = 1; 4730 4730 } 4731 + /* 4732 + * Update c/mtime on truncate up, ext4_truncate() will 4733 + * update c/mtime in shrink case below 4734 + */ 4735 + if (!shrink) { 4736 + inode->i_mtime = ext4_current_time(inode); 4737 + inode->i_ctime = inode->i_mtime; 4738 + } 4731 4739 down_write(&EXT4_I(inode)->i_data_sem); 4732 4740 EXT4_I(inode)->i_disksize = attr->ia_size; 4733 4741 rc = ext4_mark_inode_dirty(handle, inode);
+25 -21
fs/ext4/mmp.c
··· 69 69 ext4_fsblk_t mmp_block) 70 70 { 71 71 struct mmp_struct *mmp; 72 + int ret; 72 73 73 74 if (*bh) 74 75 clear_buffer_uptodate(*bh); ··· 77 76 /* This would be sb_bread(sb, mmp_block), except we need to be sure 78 77 * that the MD RAID device cache has been bypassed, and that the read 79 78 * is not blocked in the elevator. */ 80 - if (!*bh) 79 + if (!*bh) { 81 80 *bh = sb_getblk(sb, mmp_block); 82 - if (!*bh) 83 - return -ENOMEM; 84 - if (*bh) { 85 - get_bh(*bh); 86 - lock_buffer(*bh); 87 - (*bh)->b_end_io = end_buffer_read_sync; 88 - submit_bh(READ_SYNC | REQ_META | REQ_PRIO, *bh); 89 - wait_on_buffer(*bh); 90 - if (!buffer_uptodate(*bh)) { 91 - brelse(*bh); 92 - *bh = NULL; 81 + if (!*bh) { 82 + ret = -ENOMEM; 83 + goto warn_exit; 93 84 } 94 85 } 95 - if (unlikely(!*bh)) { 96 - ext4_warning(sb, "Error while reading MMP block %llu", 97 - mmp_block); 98 - return -EIO; 86 + 87 + get_bh(*bh); 88 + lock_buffer(*bh); 89 + (*bh)->b_end_io = end_buffer_read_sync; 90 + submit_bh(READ_SYNC | REQ_META | REQ_PRIO, *bh); 91 + wait_on_buffer(*bh); 92 + if (!buffer_uptodate(*bh)) { 93 + brelse(*bh); 94 + *bh = NULL; 95 + ret = -EIO; 96 + goto warn_exit; 99 97 } 100 98 101 99 mmp = (struct mmp_struct *)((*bh)->b_data); 102 - if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC || 103 - !ext4_mmp_csum_verify(sb, mmp)) 104 - return -EINVAL; 100 + if (le32_to_cpu(mmp->mmp_magic) == EXT4_MMP_MAGIC && 101 + ext4_mmp_csum_verify(sb, mmp)) 102 + return 0; 103 + ret = -EINVAL; 105 104 106 - return 0; 105 + warn_exit: 106 + ext4_warning(sb, "Error %d while reading MMP block %llu", 107 + ret, mmp_block); 108 + return ret; 107 109 } 108 110 109 111 /* ··· 115 111 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp, 116 112 const char *function, unsigned int line, const char *msg) 117 113 { 118 - __ext4_warning(sb, function, line, msg); 114 + __ext4_warning(sb, function, line, "%s", msg); 119 115 __ext4_warning(sb, function, line, 120 116 "MMP failure info: last update time: %llu, last update " 121 117 "node: %s, last update device: %s\n",
+6 -2
fs/ext4/page-io.c
··· 354 354 struct bio *bio = io->io_bio; 355 355 356 356 if (bio) { 357 + int io_op = io->io_wbc->sync_mode == WB_SYNC_ALL ? 358 + WRITE_SYNC : WRITE; 357 359 bio_get(io->io_bio); 358 - submit_bio(io->io_op, io->io_bio); 360 + submit_bio(io_op, io->io_bio); 359 361 bio_put(io->io_bio); 360 362 } 361 363 io->io_bio = NULL; ··· 366 364 void ext4_io_submit_init(struct ext4_io_submit *io, 367 365 struct writeback_control *wbc) 368 366 { 369 - io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE); 367 + io->io_wbc = wbc; 370 368 io->io_bio = NULL; 371 369 io->io_end = NULL; 372 370 } ··· 379 377 bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES); 380 378 if (!bio) 381 379 return -ENOMEM; 380 + wbc_init_bio(io->io_wbc, bio); 382 381 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); 383 382 bio->bi_bdev = bh->b_bdev; 384 383 bio->bi_end_io = ext4_end_bio; ··· 408 405 ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh)); 409 406 if (ret != bh->b_size) 410 407 goto submit_and_retry; 408 + wbc_account_io(io->io_wbc, page, bh->b_size); 411 409 io->io_next_block++; 412 410 return 0; 413 411 }
+38 -12
fs/ext4/super.c
··· 60 60 static struct mutex ext4_li_mtx; 61 61 static struct ext4_features *ext4_feat; 62 62 static int ext4_mballoc_ready; 63 + static struct ratelimit_state ext4_mount_msg_ratelimit; 63 64 64 65 static int ext4_load_journal(struct super_block *, struct ext4_super_block *, 65 66 unsigned long journal_devnum); ··· 320 319 { 321 320 __save_error_info(sb, func, line); 322 321 ext4_commit_super(sb, 1); 322 + } 323 + 324 + /* 325 + * The del_gendisk() function uninitializes the disk-specific data 326 + * structures, including the bdi structure, without telling anyone 327 + * else. Once this happens, any attempt to call mark_buffer_dirty() 328 + * (for example, by ext4_commit_super), will cause a kernel OOPS. 329 + * This is a kludge to prevent these oops until we can put in a proper 330 + * hook in del_gendisk() to inform the VFS and file system layers. 331 + */ 332 + static int block_device_ejected(struct super_block *sb) 333 + { 334 + struct inode *bd_inode = sb->s_bdev->bd_inode; 335 + struct backing_dev_info *bdi = inode_to_bdi(bd_inode); 336 + 337 + return bdi->dev == NULL; 323 338 } 324 339 325 340 static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn) ··· 1407 1390 {Opt_stripe, 0, MOPT_GTE0}, 1408 1391 {Opt_resuid, 0, MOPT_GTE0}, 1409 1392 {Opt_resgid, 0, MOPT_GTE0}, 1410 - {Opt_journal_dev, 0, MOPT_GTE0}, 1411 - {Opt_journal_path, 0, MOPT_STRING}, 1412 - {Opt_journal_ioprio, 0, MOPT_GTE0}, 1393 + {Opt_journal_dev, 0, MOPT_NO_EXT2 | MOPT_GTE0}, 1394 + {Opt_journal_path, 0, MOPT_NO_EXT2 | MOPT_STRING}, 1395 + {Opt_journal_ioprio, 0, MOPT_NO_EXT2 | MOPT_GTE0}, 1413 1396 {Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ}, 1414 1397 {Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ}, 1415 1398 {Opt_data_writeback, EXT4_MOUNT_WRITEBACK_DATA, ··· 3656 3639 } 3657 3640 if (test_opt(sb, DELALLOC)) 3658 3641 clear_opt(sb, DELALLOC); 3642 + } else { 3643 + sb->s_iflags |= SB_I_CGROUPWB; 3659 3644 } 3660 
3645 3661 3646 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | ··· 4290 4271 "the device does not support discard"); 4291 4272 } 4292 4273 4293 - ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. " 4294 - "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts, 4295 - *sbi->s_es->s_mount_opts ? "; " : "", orig_data); 4274 + if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount")) 4275 + ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. " 4276 + "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts, 4277 + *sbi->s_es->s_mount_opts ? "; " : "", orig_data); 4296 4278 4297 4279 if (es->s_error_count) 4298 4280 mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */ ··· 4633 4613 struct buffer_head *sbh = EXT4_SB(sb)->s_sbh; 4634 4614 int error = 0; 4635 4615 4636 - if (!sbh) 4616 + if (!sbh || block_device_ejected(sb)) 4637 4617 return error; 4638 4618 if (buffer_write_io_error(sbh)) { 4639 4619 /* ··· 4681 4661 ext4_superblock_csum_set(sb); 4682 4662 mark_buffer_dirty(sbh); 4683 4663 if (sync) { 4684 - error = sync_dirty_buffer(sbh); 4664 + error = __sync_dirty_buffer(sbh, 4665 + test_opt(sb, BARRIER) ? WRITE_FUA : WRITE_SYNC); 4685 4666 if (error) 4686 4667 return error; 4687 4668 ··· 4850 4829 error = jbd2_journal_flush(journal); 4851 4830 if (error < 0) 4852 4831 goto out; 4832 + 4833 + /* Journal blocked and flushed, clear needs_recovery flag. */ 4834 + EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); 4853 4835 } 4854 4836 4855 - /* Journal blocked and flushed, clear needs_recovery flag. */ 4856 - EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); 4857 4837 error = ext4_commit_super(sb, 1); 4858 4838 out: 4859 4839 if (journal) ··· 4872 4850 if (sb->s_flags & MS_RDONLY) 4873 4851 return 0; 4874 4852 4875 - /* Reset the needs_recovery flag before the fs is unlocked. */ 4876 - EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); 4853 + if (EXT4_SB(sb)->s_journal) { 4854 + /* Reset the needs_recovery flag before the fs is unlocked. 
*/ 4855 + EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); 4856 + } 4857 + 4877 4858 ext4_commit_super(sb, 1); 4878 4859 return 0; 4879 4860 } ··· 5625 5600 { 5626 5601 int i, err; 5627 5602 5603 + ratelimit_state_init(&ext4_mount_msg_ratelimit, 30 * HZ, 64); 5628 5604 ext4_li_info = NULL; 5629 5605 mutex_init(&ext4_li_mtx); 5630 5606
+33 -6
fs/jbd2/checkpoint.c
··· 417 417 * journal_clean_one_cp_list 418 418 * 419 419 * Find all the written-back checkpoint buffers in the given list and 420 - * release them. 420 + * release them. If 'destroy' is set, clean all buffers unconditionally. 421 421 * 422 422 * Called with j_list_lock held. 423 423 * Returns 1 if we freed the transaction, 0 otherwise. 424 424 */ 425 - static int journal_clean_one_cp_list(struct journal_head *jh) 425 + static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy) 426 426 { 427 427 struct journal_head *last_jh; 428 428 struct journal_head *next_jh = jh; ··· 436 436 do { 437 437 jh = next_jh; 438 438 next_jh = jh->b_cpnext; 439 - ret = __try_to_free_cp_buf(jh); 439 + if (!destroy) 440 + ret = __try_to_free_cp_buf(jh); 441 + else 442 + ret = __jbd2_journal_remove_checkpoint(jh) + 1; 440 443 if (!ret) 441 444 return freed; 442 445 if (ret == 2) ··· 462 459 * journal_clean_checkpoint_list 463 460 * 464 461 * Find all the written-back checkpoint buffers in the journal and release them. 462 + * If 'destroy' is set, release all buffers unconditionally. 465 463 * 466 464 * Called with j_list_lock held. 467 465 */ 468 - void __jbd2_journal_clean_checkpoint_list(journal_t *journal) 466 + void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy) 469 467 { 470 468 transaction_t *transaction, *last_transaction, *next_transaction; 471 469 int ret; ··· 480 476 do { 481 477 transaction = next_transaction; 482 478 next_transaction = transaction->t_cpnext; 483 - ret = journal_clean_one_cp_list(transaction->t_checkpoint_list); 479 + ret = journal_clean_one_cp_list(transaction->t_checkpoint_list, 480 + destroy); 484 481 /* 485 482 * This function only frees up some memory if possible so we 486 483 * dont have an obligation to finish processing. 
Bail out if ··· 497 492 * we can possibly see not yet submitted buffers on io_list 498 493 */ 499 494 ret = journal_clean_one_cp_list(transaction-> 500 - t_checkpoint_io_list); 495 + t_checkpoint_io_list, destroy); 501 496 if (need_resched()) 502 497 return; 503 498 /* ··· 508 503 if (!ret) 509 504 return; 510 505 } while (transaction != last_transaction); 506 + } 507 + 508 + /* 509 + * Remove buffers from all checkpoint lists as journal is aborted and we just 510 + * need to free memory 511 + */ 512 + void jbd2_journal_destroy_checkpoint(journal_t *journal) 513 + { 514 + /* 515 + * We loop because __jbd2_journal_clean_checkpoint_list() may abort 516 + * early due to a need of rescheduling. 517 + */ 518 + while (1) { 519 + spin_lock(&journal->j_list_lock); 520 + if (!journal->j_checkpoint_transactions) { 521 + spin_unlock(&journal->j_list_lock); 522 + break; 523 + } 524 + __jbd2_journal_clean_checkpoint_list(journal, true); 525 + spin_unlock(&journal->j_list_lock); 526 + cond_resched(); 527 + } 511 528 } 512 529 513 530 /*
+1 -1
fs/jbd2/commit.c
··· 510 510 * frees some memory 511 511 */ 512 512 spin_lock(&journal->j_list_lock); 513 - __jbd2_journal_clean_checkpoint_list(journal); 513 + __jbd2_journal_clean_checkpoint_list(journal, false); 514 514 spin_unlock(&journal->j_list_lock); 515 515 516 516 jbd_debug(3, "JBD2: commit phase 1\n");
+11 -2
fs/jbd2/journal.c
··· 1456 1456 sb->s_errno = cpu_to_be32(journal->j_errno); 1457 1457 read_unlock(&journal->j_state_lock); 1458 1458 1459 - jbd2_write_superblock(journal, WRITE_SYNC); 1459 + jbd2_write_superblock(journal, WRITE_FUA); 1460 1460 } 1461 1461 EXPORT_SYMBOL(jbd2_journal_update_sb_errno); 1462 1462 ··· 1693 1693 while (journal->j_checkpoint_transactions != NULL) { 1694 1694 spin_unlock(&journal->j_list_lock); 1695 1695 mutex_lock(&journal->j_checkpoint_mutex); 1696 - jbd2_log_do_checkpoint(journal); 1696 + err = jbd2_log_do_checkpoint(journal); 1697 1697 mutex_unlock(&journal->j_checkpoint_mutex); 1698 + /* 1699 + * If checkpointing failed, just free the buffers to avoid 1700 + * looping forever 1701 + */ 1702 + if (err) { 1703 + jbd2_journal_destroy_checkpoint(journal); 1704 + spin_lock(&journal->j_list_lock); 1705 + break; 1706 + } 1698 1707 spin_lock(&journal->j_list_lock); 1699 1708 } 1700 1709
+61 -17
fs/jbd2/transaction.c
··· 204 204 * attach this handle to a new transaction. 205 205 */ 206 206 atomic_sub(total, &t->t_outstanding_credits); 207 + 208 + /* 209 + * Is the number of reserved credits in the current transaction too 210 + * big to fit this handle? Wait until reserved credits are freed. 211 + */ 212 + if (atomic_read(&journal->j_reserved_credits) + total > 213 + journal->j_max_transaction_buffers) { 214 + read_unlock(&journal->j_state_lock); 215 + wait_event(journal->j_wait_reserved, 216 + atomic_read(&journal->j_reserved_credits) + total <= 217 + journal->j_max_transaction_buffers); 218 + return 1; 219 + } 220 + 207 221 wait_transaction_locked(journal); 208 222 return 1; 209 223 } ··· 276 262 int rsv_blocks = 0; 277 263 unsigned long ts = jiffies; 278 264 279 - /* 280 - * 1/2 of transaction can be reserved so we can practically handle 281 - * only 1/2 of maximum transaction size per operation 282 - */ 283 - if (WARN_ON(blocks > journal->j_max_transaction_buffers / 2)) { 284 - printk(KERN_ERR "JBD2: %s wants too many credits (%d > %d)\n", 285 - current->comm, blocks, 286 - journal->j_max_transaction_buffers / 2); 287 - return -ENOSPC; 288 - } 289 - 290 265 if (handle->h_rsv_handle) 291 266 rsv_blocks = handle->h_rsv_handle->h_buffer_credits; 267 + 268 + /* 269 + * Limit the number of reserved credits to 1/2 of maximum transaction 270 + * size and limit the number of total credits to not exceed maximum 271 + * transaction size per operation. 
272 + */ 273 + if ((rsv_blocks > journal->j_max_transaction_buffers / 2) || 274 + (rsv_blocks + blocks > journal->j_max_transaction_buffers)) { 275 + printk(KERN_ERR "JBD2: %s wants too many credits " 276 + "credits:%d rsv_credits:%d max:%d\n", 277 + current->comm, blocks, rsv_blocks, 278 + journal->j_max_transaction_buffers); 279 + WARN_ON(1); 280 + return -ENOSPC; 281 + } 292 282 293 283 alloc_transaction: 294 284 if (!journal->j_running_transaction) { ··· 1298 1280 triggers->t_abort(triggers, jh2bh(jh)); 1299 1281 } 1300 1282 1301 - 1302 - 1303 1283 /** 1304 1284 * int jbd2_journal_dirty_metadata() - mark a buffer as containing dirty metadata 1305 1285 * @handle: transaction to add buffer to. ··· 1330 1314 1331 1315 if (is_handle_aborted(handle)) 1332 1316 return -EROFS; 1333 - journal = transaction->t_journal; 1334 - jh = jbd2_journal_grab_journal_head(bh); 1335 - if (!jh) { 1317 + if (!buffer_jbd(bh)) { 1336 1318 ret = -EUCLEAN; 1337 1319 goto out; 1338 1320 } 1321 + /* 1322 + * We don't grab jh reference here since the buffer must be part 1323 + * of the running transaction. 1324 + */ 1325 + jh = bh2jh(bh); 1326 + /* 1327 + * This and the following assertions are unreliable since we may see jh 1328 + * in inconsistent state unless we grab bh_state lock. But this is 1329 + * crucial to catch bugs so let's do a reliable check until the 1330 + * lockless handling is fully proven. 1331 + */ 1332 + if (jh->b_transaction != transaction && 1333 + jh->b_next_transaction != transaction) { 1334 + jbd_lock_bh_state(bh); 1335 + J_ASSERT_JH(jh, jh->b_transaction == transaction || 1336 + jh->b_next_transaction == transaction); 1337 + jbd_unlock_bh_state(bh); 1338 + } 1339 + if (jh->b_modified == 1) { 1340 + /* If it's in our transaction it must be in BJ_Metadata list. 
*/ 1341 + if (jh->b_transaction == transaction && 1342 + jh->b_jlist != BJ_Metadata) { 1343 + jbd_lock_bh_state(bh); 1344 + J_ASSERT_JH(jh, jh->b_transaction != transaction || 1345 + jh->b_jlist == BJ_Metadata); 1346 + jbd_unlock_bh_state(bh); 1347 + } 1348 + goto out; 1349 + } 1350 + 1351 + journal = transaction->t_journal; 1339 1352 jbd_debug(5, "journal_head %p\n", jh); 1340 1353 JBUFFER_TRACE(jh, "entry"); 1341 1354 ··· 1455 1410 spin_unlock(&journal->j_list_lock); 1456 1411 out_unlock_bh: 1457 1412 jbd_unlock_bh_state(bh); 1458 - jbd2_journal_put_journal_head(jh); 1459 1413 out: 1460 1414 JBUFFER_TRACE(jh, "exit"); 1461 1415 return ret;
+2 -1
include/linux/jbd2.h
··· 1081 1081 extern void jbd2_journal_commit_transaction(journal_t *); 1082 1082 1083 1083 /* Checkpoint list management */ 1084 - void __jbd2_journal_clean_checkpoint_list(journal_t *journal); 1084 + void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy); 1085 1085 int __jbd2_journal_remove_checkpoint(struct journal_head *); 1086 + void jbd2_journal_destroy_checkpoint(journal_t *journal); 1086 1087 void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *); 1087 1088 1088 1089