Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ryusuke/nilfs2

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ryusuke/nilfs2:
nilfs2: add reader's lock for cno in nilfs_ioctl_sync
nilfs2: delete unnecessary condition in load_segment_summary
nilfs2: move iterator to write log into segment buffer
nilfs2: get rid of s_dirt flag use
nilfs2: get rid of nilfs_segctor_req struct
nilfs2: delete unnecessary condition in nilfs_dat_translate
nilfs2: fix potential hang in nilfs_error on errors=remount-ro
nilfs2: use mnt_want_write in ioctls where write access is needed
nilfs2: issue discard request after cleaning segments

+200 -117
+3
Documentation/filesystems/nilfs2.txt
··· 74 74 This disables every write access on the device for 75 75 read-only mounts or snapshots. This option will fail 76 76 for r/w mounts on an unclean volume. 77 + discard Issue discard/TRIM commands to the underlying block 78 + device when blocks are freed. This is useful for SSD 79 + devices and sparse/thinly-provisioned LUNs. 77 80 78 81 NILFS2 usage 79 82 ============
+1 -2
fs/nilfs2/dat.c
··· 388 388 ret = -ENOENT; 389 389 goto out; 390 390 } 391 - if (blocknrp != NULL) 392 - *blocknrp = blocknr; 391 + *blocknrp = blocknr; 393 392 394 393 out: 395 394 kunmap_atomic(kaddr, KM_USER0);
+49 -19
fs/nilfs2/ioctl.c
··· 26 26 #include <linux/capability.h> /* capable() */ 27 27 #include <linux/uaccess.h> /* copy_from_user(), copy_to_user() */ 28 28 #include <linux/vmalloc.h> 29 + #include <linux/mount.h> /* mnt_want_write(), mnt_drop_write() */ 29 30 #include <linux/nilfs2_fs.h> 30 31 #include "nilfs.h" 31 32 #include "segment.h" ··· 108 107 109 108 if (!capable(CAP_SYS_ADMIN)) 110 109 return -EPERM; 110 + 111 + ret = mnt_want_write(filp->f_path.mnt); 112 + if (ret) 113 + return ret; 114 + 115 + ret = -EFAULT; 111 116 if (copy_from_user(&cpmode, argp, sizeof(cpmode))) 112 - return -EFAULT; 117 + goto out; 113 118 114 119 mutex_lock(&nilfs->ns_mount_mutex); 120 + 115 121 nilfs_transaction_begin(inode->i_sb, &ti, 0); 116 122 ret = nilfs_cpfile_change_cpmode( 117 123 cpfile, cpmode.cm_cno, cpmode.cm_mode); 118 - if (unlikely(ret < 0)) { 124 + if (unlikely(ret < 0)) 119 125 nilfs_transaction_abort(inode->i_sb); 120 - mutex_unlock(&nilfs->ns_mount_mutex); 121 - return ret; 122 - } 123 - nilfs_transaction_commit(inode->i_sb); /* never fails */ 126 + else 127 + nilfs_transaction_commit(inode->i_sb); /* never fails */ 128 + 124 129 mutex_unlock(&nilfs->ns_mount_mutex); 130 + out: 131 + mnt_drop_write(filp->f_path.mnt); 125 132 return ret; 126 133 } 127 134 ··· 144 135 145 136 if (!capable(CAP_SYS_ADMIN)) 146 137 return -EPERM; 138 + 139 + ret = mnt_want_write(filp->f_path.mnt); 140 + if (ret) 141 + return ret; 142 + 143 + ret = -EFAULT; 147 144 if (copy_from_user(&cno, argp, sizeof(cno))) 148 - return -EFAULT; 145 + goto out; 149 146 150 147 nilfs_transaction_begin(inode->i_sb, &ti, 0); 151 148 ret = nilfs_cpfile_delete_checkpoint(cpfile, cno); 152 - if (unlikely(ret < 0)) { 149 + if (unlikely(ret < 0)) 153 150 nilfs_transaction_abort(inode->i_sb); 154 - return ret; 155 - } 156 - nilfs_transaction_commit(inode->i_sb); /* never fails */ 151 + else 152 + nilfs_transaction_commit(inode->i_sb); /* never fails */ 153 + out: 154 + mnt_drop_write(filp->f_path.mnt); 157 155 return ret; 158 156 
} 159 157 ··· 512 496 if (!capable(CAP_SYS_ADMIN)) 513 497 return -EPERM; 514 498 515 - if (copy_from_user(argv, argp, sizeof(argv))) 516 - return -EFAULT; 499 + ret = mnt_want_write(filp->f_path.mnt); 500 + if (ret) 501 + return ret; 517 502 503 + ret = -EFAULT; 504 + if (copy_from_user(argv, argp, sizeof(argv))) 505 + goto out; 506 + 507 + ret = -EINVAL; 518 508 nsegs = argv[4].v_nmembs; 519 509 if (argv[4].v_size != argsz[4]) 520 - return -EINVAL; 510 + goto out; 511 + 521 512 /* 522 513 * argv[4] points to segment numbers this ioctl cleans. We 523 514 * use kmalloc() for its buffer because memory used for the ··· 532 509 */ 533 510 kbufs[4] = memdup_user((void __user *)(unsigned long)argv[4].v_base, 534 511 nsegs * sizeof(__u64)); 535 - if (IS_ERR(kbufs[4])) 536 - return PTR_ERR(kbufs[4]); 537 - 512 + if (IS_ERR(kbufs[4])) { 513 + ret = PTR_ERR(kbufs[4]); 514 + goto out; 515 + } 538 516 nilfs = NILFS_SB(inode->i_sb)->s_nilfs; 539 517 540 518 for (n = 0; n < 4; n++) { ··· 587 563 nilfs_remove_all_gcinode(nilfs); 588 564 clear_nilfs_gc_running(nilfs); 589 565 590 - out_free: 566 + out_free: 591 567 while (--n >= 0) 592 568 vfree(kbufs[n]); 593 569 kfree(kbufs[4]); 570 + out: 571 + mnt_drop_write(filp->f_path.mnt); 594 572 return ret; 595 573 } 596 574 ··· 601 575 { 602 576 __u64 cno; 603 577 int ret; 578 + struct the_nilfs *nilfs; 604 579 605 580 ret = nilfs_construct_segment(inode->i_sb); 606 581 if (ret < 0) 607 582 return ret; 608 583 609 584 if (argp != NULL) { 610 - cno = NILFS_SB(inode->i_sb)->s_nilfs->ns_cno - 1; 585 + nilfs = NILFS_SB(inode->i_sb)->s_nilfs; 586 + down_read(&nilfs->ns_segctor_sem); 587 + cno = nilfs->ns_cno - 1; 588 + up_read(&nilfs->ns_segctor_sem); 611 589 if (copy_to_user(argp, &cno, sizeof(cno))) 612 590 return -EFAULT; 613 591 }
+11 -30
fs/nilfs2/recovery.c
··· 39 39 NILFS_SEG_FAIL_IO, 40 40 NILFS_SEG_FAIL_MAGIC, 41 41 NILFS_SEG_FAIL_SEQ, 42 - NILFS_SEG_FAIL_CHECKSUM_SEGSUM, 43 42 NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT, 44 43 NILFS_SEG_FAIL_CHECKSUM_FULL, 45 44 NILFS_SEG_FAIL_CONSISTENCY, ··· 69 70 case NILFS_SEG_FAIL_SEQ: 70 71 printk(KERN_WARNING 71 72 "NILFS warning: Sequence number mismatch\n"); 72 - break; 73 - case NILFS_SEG_FAIL_CHECKSUM_SEGSUM: 74 - printk(KERN_WARNING 75 - "NILFS warning: Checksum error in segment summary\n"); 76 73 break; 77 74 case NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT: 78 75 printk(KERN_WARNING ··· 201 206 * @pseg_start: start disk block number of partial segment 202 207 * @seg_seq: sequence number requested 203 208 * @ssi: pointer to nilfs_segsum_info struct to store information 204 - * @full_check: full check flag 205 - * (0: only checks segment summary CRC, 1: data CRC) 206 209 */ 207 210 static int 208 211 load_segment_summary(struct nilfs_sb_info *sbi, sector_t pseg_start, 209 - u64 seg_seq, struct nilfs_segsum_info *ssi, 210 - int full_check) 212 + u64 seg_seq, struct nilfs_segsum_info *ssi) 211 213 { 212 214 struct buffer_head *bh_sum; 213 215 struct nilfs_segment_summary *sum; 214 - unsigned long offset, nblock; 215 - u64 check_bytes; 216 - u32 crc, crc_sum; 216 + unsigned long nblock; 217 + u32 crc; 217 218 int ret = NILFS_SEG_FAIL_IO; 218 219 219 220 bh_sum = sb_bread(sbi->s_super, pseg_start); ··· 228 237 ret = NILFS_SEG_FAIL_SEQ; 229 238 goto failed; 230 239 } 231 - if (full_check) { 232 - offset = sizeof(sum->ss_datasum); 233 - check_bytes = 234 - ((u64)ssi->nblocks << sbi->s_super->s_blocksize_bits); 235 - nblock = ssi->nblocks; 236 - crc_sum = le32_to_cpu(sum->ss_datasum); 237 - ret = NILFS_SEG_FAIL_CHECKSUM_FULL; 238 - } else { /* only checks segment summary */ 239 - offset = sizeof(sum->ss_datasum) + sizeof(sum->ss_sumsum); 240 - check_bytes = ssi->sumbytes; 241 - nblock = ssi->nsumblk; 242 - crc_sum = le32_to_cpu(sum->ss_sumsum); 243 - ret = NILFS_SEG_FAIL_CHECKSUM_SEGSUM; 244 - 
} 245 240 241 + nblock = ssi->nblocks; 246 242 if (unlikely(nblock == 0 || 247 243 nblock > sbi->s_nilfs->ns_blocks_per_segment)) { 248 244 /* This limits the number of blocks read in the CRC check */ 249 245 ret = NILFS_SEG_FAIL_CONSISTENCY; 250 246 goto failed; 251 247 } 252 - if (calc_crc_cont(sbi, bh_sum, &crc, offset, check_bytes, 248 + if (calc_crc_cont(sbi, bh_sum, &crc, sizeof(sum->ss_datasum), 249 + ((u64)nblock << sbi->s_super->s_blocksize_bits), 253 250 pseg_start, nblock)) { 254 251 ret = NILFS_SEG_FAIL_IO; 255 252 goto failed; 256 253 } 257 - if (crc == crc_sum) 254 + if (crc == le32_to_cpu(sum->ss_datasum)) 258 255 ret = 0; 256 + else 257 + ret = NILFS_SEG_FAIL_CHECKSUM_FULL; 259 258 failed: 260 259 brelse(bh_sum); 261 260 out: ··· 579 598 580 599 while (segnum != ri->ri_segnum || pseg_start <= ri->ri_pseg_start) { 581 600 582 - ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi, 1); 601 + ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi); 583 602 if (ret) { 584 603 if (ret == NILFS_SEG_FAIL_IO) { 585 604 err = -EIO; ··· 802 821 803 822 for (;;) { 804 823 /* Load segment summary */ 805 - ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi, 1); 824 + ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi); 806 825 if (ret) { 807 826 if (ret == NILFS_SEG_FAIL_IO) 808 827 goto failed;
+18
fs/nilfs2/segbuf.c
··· 40 40 }; 41 41 42 42 43 + static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf, 44 + struct the_nilfs *nilfs); 45 + static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf); 46 + 47 + 43 48 static struct kmem_cache *nilfs_segbuf_cachep; 44 49 45 50 static void nilfs_segbuf_init_once(void *obj) ··· 305 300 nilfs_segbuf_clear(segbuf); 306 301 nilfs_segbuf_free(segbuf); 307 302 } 303 + } 304 + 305 + int nilfs_write_logs(struct list_head *logs, struct the_nilfs *nilfs) 306 + { 307 + struct nilfs_segment_buffer *segbuf; 308 + int ret = 0; 309 + 310 + list_for_each_entry(segbuf, logs, sb_list) { 311 + ret = nilfs_segbuf_write(segbuf, nilfs); 312 + if (ret) 313 + break; 314 + } 315 + return ret; 308 316 } 309 317 310 318 int nilfs_wait_on_logs(struct list_head *logs)
+1 -4
fs/nilfs2/segbuf.h
··· 166 166 segbuf->sb_sum.nfileblk++; 167 167 } 168 168 169 - int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf, 170 - struct the_nilfs *nilfs); 171 - int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf); 172 - 173 169 void nilfs_clear_logs(struct list_head *logs); 174 170 void nilfs_truncate_logs(struct list_head *logs, 175 171 struct nilfs_segment_buffer *last); 172 + int nilfs_write_logs(struct list_head *logs, struct the_nilfs *nilfs); 176 173 int nilfs_wait_on_logs(struct list_head *logs); 177 174 178 175 static inline void nilfs_destroy_logs(struct list_head *logs)
+64 -56
fs/nilfs2/segment.c
··· 1764 1764 static int nilfs_segctor_write(struct nilfs_sc_info *sci, 1765 1765 struct the_nilfs *nilfs) 1766 1766 { 1767 - struct nilfs_segment_buffer *segbuf; 1768 - int ret = 0; 1767 + int ret; 1769 1768 1770 - list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) { 1771 - ret = nilfs_segbuf_write(segbuf, nilfs); 1772 - if (ret) 1773 - break; 1774 - } 1769 + ret = nilfs_write_logs(&sci->sc_segbufs, nilfs); 1775 1770 list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs); 1776 1771 return ret; 1777 1772 } ··· 1932 1937 { 1933 1938 struct nilfs_segment_buffer *segbuf; 1934 1939 struct page *bd_page = NULL, *fs_page = NULL; 1935 - struct nilfs_sb_info *sbi = sci->sc_sbi; 1936 - struct the_nilfs *nilfs = sbi->s_nilfs; 1940 + struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs; 1937 1941 int update_sr = (sci->sc_super_root != NULL); 1938 1942 1939 1943 list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) { ··· 2014 2020 if (update_sr) { 2015 2021 nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start, 2016 2022 segbuf->sb_sum.seg_seq, nilfs->ns_cno++); 2017 - sbi->s_super->s_dirt = 1; 2023 + set_nilfs_sb_dirty(nilfs); 2018 2024 2019 2025 clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags); 2020 2026 clear_bit(NILFS_SC_DIRTY, &sci->sc_flags); ··· 2419 2425 return err; 2420 2426 } 2421 2427 2422 - struct nilfs_segctor_req { 2423 - int mode; 2424 - __u32 seq_accepted; 2425 - int sc_err; /* construction failure */ 2426 - int sb_err; /* super block writeback failure */ 2427 - }; 2428 - 2429 2428 #define FLUSH_FILE_BIT (0x1) /* data file only */ 2430 2429 #define FLUSH_DAT_BIT (1 << NILFS_DAT_INO) /* DAT only */ 2431 2430 2432 - static void nilfs_segctor_accept(struct nilfs_sc_info *sci, 2433 - struct nilfs_segctor_req *req) 2431 + /** 2432 + * nilfs_segctor_accept - record accepted sequence count of log-write requests 2433 + * @sci: segment constructor object 2434 + */ 2435 + static void nilfs_segctor_accept(struct nilfs_sc_info *sci) 2434 2436 { 2435 - req->sc_err = 
req->sb_err = 0; 2436 2437 spin_lock(&sci->sc_state_lock); 2437 - req->seq_accepted = sci->sc_seq_request; 2438 + sci->sc_seq_accepted = sci->sc_seq_request; 2438 2439 spin_unlock(&sci->sc_state_lock); 2439 2440 2440 2441 if (sci->sc_timer) 2441 2442 del_timer_sync(sci->sc_timer); 2442 2443 } 2443 2444 2444 - static void nilfs_segctor_notify(struct nilfs_sc_info *sci, 2445 - struct nilfs_segctor_req *req) 2445 + /** 2446 + * nilfs_segctor_notify - notify the result of request to caller threads 2447 + * @sci: segment constructor object 2448 + * @mode: mode of log forming 2449 + * @err: error code to be notified 2450 + */ 2451 + static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err) 2446 2452 { 2447 2453 /* Clear requests (even when the construction failed) */ 2448 2454 spin_lock(&sci->sc_state_lock); 2449 2455 2450 - if (req->mode == SC_LSEG_SR) { 2456 + if (mode == SC_LSEG_SR) { 2451 2457 sci->sc_state &= ~NILFS_SEGCTOR_COMMIT; 2452 - sci->sc_seq_done = req->seq_accepted; 2453 - nilfs_segctor_wakeup(sci, req->sc_err ? 
: req->sb_err); 2458 + sci->sc_seq_done = sci->sc_seq_accepted; 2459 + nilfs_segctor_wakeup(sci, err); 2454 2460 sci->sc_flush_request = 0; 2455 2461 } else { 2456 - if (req->mode == SC_FLUSH_FILE) 2462 + if (mode == SC_FLUSH_FILE) 2457 2463 sci->sc_flush_request &= ~FLUSH_FILE_BIT; 2458 - else if (req->mode == SC_FLUSH_DAT) 2464 + else if (mode == SC_FLUSH_DAT) 2459 2465 sci->sc_flush_request &= ~FLUSH_DAT_BIT; 2460 2466 2461 2467 /* re-enable timer if checkpoint creation was not done */ ··· 2466 2472 spin_unlock(&sci->sc_state_lock); 2467 2473 } 2468 2474 2469 - static int nilfs_segctor_construct(struct nilfs_sc_info *sci, 2470 - struct nilfs_segctor_req *req) 2475 + /** 2476 + * nilfs_segctor_construct - form logs and write them to disk 2477 + * @sci: segment constructor object 2478 + * @mode: mode of log forming 2479 + */ 2480 + static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode) 2471 2481 { 2472 2482 struct nilfs_sb_info *sbi = sci->sc_sbi; 2473 2483 struct the_nilfs *nilfs = sbi->s_nilfs; 2474 2484 int err = 0; 2475 2485 2486 + nilfs_segctor_accept(sci); 2487 + 2476 2488 if (nilfs_discontinued(nilfs)) 2477 - req->mode = SC_LSEG_SR; 2478 - if (!nilfs_segctor_confirm(sci)) { 2479 - err = nilfs_segctor_do_construct(sci, req->mode); 2480 - req->sc_err = err; 2481 - } 2489 + mode = SC_LSEG_SR; 2490 + if (!nilfs_segctor_confirm(sci)) 2491 + err = nilfs_segctor_do_construct(sci, mode); 2492 + 2482 2493 if (likely(!err)) { 2483 - if (req->mode != SC_FLUSH_DAT) 2494 + if (mode != SC_FLUSH_DAT) 2484 2495 atomic_set(&nilfs->ns_ndirtyblks, 0); 2485 2496 if (test_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags) && 2486 2497 nilfs_discontinued(nilfs)) { 2487 2498 down_write(&nilfs->ns_sem); 2488 - req->sb_err = nilfs_commit_super(sbi, 2489 - nilfs_altsb_need_update(nilfs)); 2499 + err = nilfs_commit_super( 2500 + sbi, nilfs_altsb_need_update(nilfs)); 2490 2501 up_write(&nilfs->ns_sem); 2491 2502 } 2492 2503 } 2504 + 2505 + nilfs_segctor_notify(sci, mode, err); 
2493 2506 return err; 2494 2507 } 2495 2508 ··· 2527 2526 struct nilfs_sc_info *sci = NILFS_SC(sbi); 2528 2527 struct the_nilfs *nilfs = sbi->s_nilfs; 2529 2528 struct nilfs_transaction_info ti; 2530 - struct nilfs_segctor_req req = { .mode = SC_LSEG_SR }; 2531 2529 int err; 2532 2530 2533 2531 if (unlikely(!sci)) ··· 2547 2547 list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes); 2548 2548 2549 2549 for (;;) { 2550 - nilfs_segctor_accept(sci, &req); 2551 - err = nilfs_segctor_construct(sci, &req); 2550 + err = nilfs_segctor_construct(sci, SC_LSEG_SR); 2552 2551 nilfs_remove_written_gcinodes(nilfs, &sci->sc_gc_inodes); 2553 - nilfs_segctor_notify(sci, &req); 2554 2552 2555 2553 if (likely(!err)) 2556 2554 break; ··· 2557 2559 "segment construction failed. (err=%d)", err); 2558 2560 set_current_state(TASK_INTERRUPTIBLE); 2559 2561 schedule_timeout(sci->sc_interval); 2562 + } 2563 + if (nilfs_test_opt(sbi, DISCARD)) { 2564 + int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs, 2565 + sci->sc_nfreesegs); 2566 + if (ret) { 2567 + printk(KERN_WARNING 2568 + "NILFS warning: error %d on discard request, " 2569 + "turning discards off for the device\n", ret); 2570 + nilfs_clear_opt(sbi, DISCARD); 2571 + } 2560 2572 } 2561 2573 2562 2574 out_unlock: ··· 2581 2573 { 2582 2574 struct nilfs_sb_info *sbi = sci->sc_sbi; 2583 2575 struct nilfs_transaction_info ti; 2584 - struct nilfs_segctor_req req = { .mode = mode }; 2585 2576 2586 2577 nilfs_transaction_lock(sbi, &ti, 0); 2587 - 2588 - nilfs_segctor_accept(sci, &req); 2589 - nilfs_segctor_construct(sci, &req); 2590 - nilfs_segctor_notify(sci, &req); 2578 + nilfs_segctor_construct(sci, mode); 2591 2579 2592 2580 /* 2593 2581 * Unclosed segment should be retried. We do this using sc_timer. 
··· 2639 2635 static int nilfs_segctor_thread(void *arg) 2640 2636 { 2641 2637 struct nilfs_sc_info *sci = (struct nilfs_sc_info *)arg; 2638 + struct the_nilfs *nilfs = sci->sc_sbi->s_nilfs; 2642 2639 struct timer_list timer; 2643 2640 int timeout = 0; 2644 2641 ··· 2685 2680 } else { 2686 2681 DEFINE_WAIT(wait); 2687 2682 int should_sleep = 1; 2688 - struct the_nilfs *nilfs; 2689 2683 2690 2684 prepare_to_wait(&sci->sc_wait_daemon, &wait, 2691 2685 TASK_INTERRUPTIBLE); ··· 2705 2701 finish_wait(&sci->sc_wait_daemon, &wait); 2706 2702 timeout = ((sci->sc_state & NILFS_SEGCTOR_COMMIT) && 2707 2703 time_after_eq(jiffies, sci->sc_timer->expires)); 2708 - nilfs = sci->sc_sbi->s_nilfs; 2709 - if (sci->sc_super->s_dirt && nilfs_sb_need_update(nilfs)) 2704 + 2705 + if (nilfs_sb_dirty(nilfs) && nilfs_sb_need_update(nilfs)) 2710 2706 set_nilfs_discontinued(nilfs); 2711 2707 } 2712 2708 goto loop; ··· 2801 2797 do { 2802 2798 struct nilfs_sb_info *sbi = sci->sc_sbi; 2803 2799 struct nilfs_transaction_info ti; 2804 - struct nilfs_segctor_req req = { .mode = SC_LSEG_SR }; 2805 2800 2806 2801 nilfs_transaction_lock(sbi, &ti, 0); 2807 - nilfs_segctor_accept(sci, &req); 2808 - ret = nilfs_segctor_construct(sci, &req); 2809 - nilfs_segctor_notify(sci, &req); 2802 + ret = nilfs_segctor_construct(sci, SC_LSEG_SR); 2810 2803 nilfs_transaction_unlock(sbi); 2811 2804 2812 2805 } while (ret && retrycount-- > 0); ··· 2866 2865 struct the_nilfs *nilfs = sbi->s_nilfs; 2867 2866 int err; 2868 2867 2869 - /* Each field of nilfs_segctor is cleared through the initialization 2870 - of super-block info */ 2868 + if (NILFS_SC(sbi)) { 2869 + /* 2870 + * This happens if the filesystem was remounted 2871 + * read/write after nilfs_error degenerated it into a 2872 + * read-only mount. 2873 + */ 2874 + nilfs_detach_segment_constructor(sbi); 2875 + } 2876 + 2871 2877 sbi->s_sc_info = nilfs_segctor_new(sbi); 2872 2878 if (!sbi->s_sc_info) 2873 2879 return -ENOMEM;
+2
fs/nilfs2/segment.h
··· 116 116 * @sc_wait_daemon: Daemon wait queue 117 117 * @sc_wait_task: Start/end wait queue to control segctord task 118 + * @sc_seq_request: Request counter 119 + * @sc_seq_accepted: Accepted request count 119 120 * @sc_seq_done: Completion counter 120 121 * @sc_sync: Request of explicit sync operation 121 122 * @sc_interval: Timeout value of background construction ··· 170 169 wait_queue_head_t sc_wait_task; 171 170 172 171 __u32 sc_seq_request; 172 + __u32 sc_seq_accepted; 173 173 __u32 sc_seq_done; 174 174 175 175 int sc_sync;
+9 -6
fs/nilfs2/super.c
··· 96 96 if (!(sb->s_flags & MS_RDONLY)) { 97 97 struct the_nilfs *nilfs = sbi->s_nilfs; 98 98 99 - if (!nilfs_test_opt(sbi, ERRORS_CONT)) 100 - nilfs_detach_segment_constructor(sbi); 101 - 102 99 down_write(&nilfs->ns_sem); 103 100 if (!(nilfs->ns_mount_state & NILFS_ERROR_FS)) { 104 101 nilfs->ns_mount_state |= NILFS_ERROR_FS; ··· 298 301 memcpy(sbp[1], sbp[0], nilfs->ns_sbsize); 299 302 nilfs->ns_sbwtime[1] = t; 300 303 } 301 - sbi->s_super->s_dirt = 0; 304 + clear_nilfs_sb_dirty(nilfs); 302 305 return nilfs_sync_super(sbi, dupsb); 303 306 } 304 307 ··· 342 345 err = nilfs_construct_segment(sb); 343 346 344 347 down_write(&nilfs->ns_sem); 345 - if (sb->s_dirt) 348 + if (nilfs_sb_dirty(nilfs)) 346 349 nilfs_commit_super(sbi, 1); 347 350 up_write(&nilfs->ns_sem); 348 351 ··· 478 481 seq_printf(seq, ",order=strict"); 479 482 if (nilfs_test_opt(sbi, NORECOVERY)) 480 483 seq_printf(seq, ",norecovery"); 484 + if (nilfs_test_opt(sbi, DISCARD)) 485 + seq_printf(seq, ",discard"); 481 486 482 487 return 0; 483 488 } ··· 549 550 enum { 550 551 Opt_err_cont, Opt_err_panic, Opt_err_ro, 551 552 Opt_nobarrier, Opt_snapshot, Opt_order, Opt_norecovery, 552 - Opt_err, 553 + Opt_discard, Opt_err, 553 554 }; 554 555 555 556 static match_table_t tokens = { ··· 560 561 {Opt_snapshot, "cp=%u"}, 561 562 {Opt_order, "order=%s"}, 562 563 {Opt_norecovery, "norecovery"}, 564 + {Opt_discard, "discard"}, 563 565 {Opt_err, NULL} 564 566 }; 565 567 ··· 613 613 break; 614 614 case Opt_norecovery: 615 615 nilfs_set_opt(sbi, NORECOVERY); 616 + break; 617 + case Opt_discard: 618 + nilfs_set_opt(sbi, DISCARD); 616 619 break; 617 620 default: 618 621 printk(KERN_ERR
+38
fs/nilfs2/the_nilfs.c
··· 646 646 goto out; 647 647 } 648 648 649 + int nilfs_discard_segments(struct the_nilfs *nilfs, __u64 *segnump, 650 + size_t nsegs) 651 + { 652 + sector_t seg_start, seg_end; 653 + sector_t start = 0, nblocks = 0; 654 + unsigned int sects_per_block; 655 + __u64 *sn; 656 + int ret = 0; 657 + 658 + sects_per_block = (1 << nilfs->ns_blocksize_bits) / 659 + bdev_logical_block_size(nilfs->ns_bdev); 660 + for (sn = segnump; sn < segnump + nsegs; sn++) { 661 + nilfs_get_segment_range(nilfs, *sn, &seg_start, &seg_end); 662 + 663 + if (!nblocks) { 664 + start = seg_start; 665 + nblocks = seg_end - seg_start + 1; 666 + } else if (start + nblocks == seg_start) { 667 + nblocks += seg_end - seg_start + 1; 668 + } else { 669 + ret = blkdev_issue_discard(nilfs->ns_bdev, 670 + start * sects_per_block, 671 + nblocks * sects_per_block, 672 + GFP_NOFS, 673 + DISCARD_FL_BARRIER); 674 + if (ret < 0) 675 + return ret; 676 + nblocks = 0; 677 + } 678 + } 679 + if (nblocks) 680 + ret = blkdev_issue_discard(nilfs->ns_bdev, 681 + start * sects_per_block, 682 + nblocks * sects_per_block, 683 + GFP_NOFS, DISCARD_FL_BARRIER); 684 + return ret; 685 + } 686 + 649 687 int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks) 650 688 { 651 689 struct inode *dat = nilfs_dat_inode(nilfs);
+3
fs/nilfs2/the_nilfs.h
··· 38 38 the latest checkpoint was loaded */ 39 39 THE_NILFS_DISCONTINUED, /* 'next' pointer chain has broken */ 40 40 THE_NILFS_GC_RUNNING, /* gc process is running */ 41 + THE_NILFS_SB_DIRTY, /* super block is dirty */ 41 42 }; 42 43 43 44 /** ··· 198 197 THE_NILFS_FNS(LOADED, loaded) 199 198 THE_NILFS_FNS(DISCONTINUED, discontinued) 200 199 THE_NILFS_FNS(GC_RUNNING, gc_running) 200 + THE_NILFS_FNS(SB_DIRTY, sb_dirty) 201 201 202 202 /* Minimum interval of periodical update of superblocks (in seconds) */ 203 203 #define NILFS_SB_FREQ 10 ··· 223 221 void put_nilfs(struct the_nilfs *); 224 222 int init_nilfs(struct the_nilfs *, struct nilfs_sb_info *, char *); 225 223 int load_nilfs(struct the_nilfs *, struct nilfs_sb_info *); 224 + int nilfs_discard_segments(struct the_nilfs *, __u64 *, size_t); 226 225 int nilfs_count_free_blocks(struct the_nilfs *, sector_t *); 227 226 struct nilfs_sb_info *nilfs_find_sbinfo(struct the_nilfs *, int, __u64); 228 227 int nilfs_checkpoint_is_mounted(struct the_nilfs *, __u64, int);
+1
include/linux/nilfs2_fs.h
··· 153 153 semantics also for data */ 154 154 #define NILFS_MOUNT_NORECOVERY 0x4000 /* Disable write access during 155 155 mount-time recovery */ 156 + #define NILFS_MOUNT_DISCARD 0x8000 /* Issue DISCARD requests */ 156 157 157 158 158 159 /**