Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

f2fs: change to use rwsem for gc_mutex

A mutex lock won't serialize callers fairly; to avoid starving an unlucky
caller, let's use an rwsem lock instead.

Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>

Authored by Chao Yu and committed by Jaegeuk Kim
fb24fea7 d7b0a23d

+27 -24
+4 -1
fs/f2fs/f2fs.h
··· 1391 1391 struct f2fs_mount_info mount_opt; /* mount options */ 1392 1392 1393 1393 /* for cleaning operations */ 1394 - struct mutex gc_mutex; /* mutex for GC */ 1394 + struct rw_semaphore gc_lock; /* 1395 + * semaphore for GC, avoid 1396 + * race between GC and GC or CP 1397 + */ 1395 1398 struct f2fs_gc_kthread *gc_thread; /* GC thread */ 1396 1399 unsigned int cur_victim_sec; /* current victim section num */ 1397 1400 unsigned int gc_mode; /* current GC state */
+6 -6
fs/f2fs/file.c
··· 1642 1642 next_alloc: 1643 1643 if (has_not_enough_free_secs(sbi, 0, 1644 1644 GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) { 1645 - mutex_lock(&sbi->gc_mutex); 1645 + down_write(&sbi->gc_lock); 1646 1646 err = f2fs_gc(sbi, true, false, NULL_SEGNO); 1647 1647 if (err && err != -ENODATA && err != -EAGAIN) 1648 1648 goto out_err; ··· 2450 2450 return ret; 2451 2451 2452 2452 if (!sync) { 2453 - if (!mutex_trylock(&sbi->gc_mutex)) { 2453 + if (!down_write_trylock(&sbi->gc_lock)) { 2454 2454 ret = -EBUSY; 2455 2455 goto out; 2456 2456 } 2457 2457 } else { 2458 - mutex_lock(&sbi->gc_mutex); 2458 + down_write(&sbi->gc_lock); 2459 2459 } 2460 2460 2461 2461 ret = f2fs_gc(sbi, sync, true, NULL_SEGNO); ··· 2493 2493 2494 2494 do_more: 2495 2495 if (!range.sync) { 2496 - if (!mutex_trylock(&sbi->gc_mutex)) { 2496 + if (!down_write_trylock(&sbi->gc_lock)) { 2497 2497 ret = -EBUSY; 2498 2498 goto out; 2499 2499 } 2500 2500 } else { 2501 - mutex_lock(&sbi->gc_mutex); 2501 + down_write(&sbi->gc_lock); 2502 2502 } 2503 2503 2504 2504 ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start)); ··· 2929 2929 end_segno = min(start_segno + range.segments, dev_end_segno); 2930 2930 2931 2931 while (start_segno < end_segno) { 2932 - if (!mutex_trylock(&sbi->gc_mutex)) { 2932 + if (!down_write_trylock(&sbi->gc_lock)) { 2933 2933 ret = -EBUSY; 2934 2934 goto out; 2935 2935 }
+6 -6
fs/f2fs/gc.c
··· 78 78 */ 79 79 if (sbi->gc_mode == GC_URGENT) { 80 80 wait_ms = gc_th->urgent_sleep_time; 81 - mutex_lock(&sbi->gc_mutex); 81 + down_write(&sbi->gc_lock); 82 82 goto do_gc; 83 83 } 84 84 85 - if (!mutex_trylock(&sbi->gc_mutex)) { 85 + if (!down_write_trylock(&sbi->gc_lock)) { 86 86 stat_other_skip_bggc_count(sbi); 87 87 goto next; 88 88 } 89 89 90 90 if (!is_idle(sbi, GC_TIME)) { 91 91 increase_sleep_time(gc_th, &wait_ms); 92 - mutex_unlock(&sbi->gc_mutex); 92 + up_write(&sbi->gc_lock); 93 93 stat_io_skip_bggc_count(sbi); 94 94 goto next; 95 95 } ··· 1370 1370 reserved_segments(sbi), 1371 1371 prefree_segments(sbi)); 1372 1372 1373 - mutex_unlock(&sbi->gc_mutex); 1373 + up_write(&sbi->gc_lock); 1374 1374 1375 1375 put_gc_inode(&gc_list); 1376 1376 ··· 1409 1409 .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS), 1410 1410 }; 1411 1411 1412 - mutex_lock(&sbi->gc_mutex); 1412 + down_write(&sbi->gc_lock); 1413 1413 do_garbage_collect(sbi, segno, &gc_list, FG_GC); 1414 - mutex_unlock(&sbi->gc_mutex); 1414 + up_write(&sbi->gc_lock); 1415 1415 put_gc_inode(&gc_list); 1416 1416 1417 1417 if (get_valid_blocks(sbi, segno, true))
+3 -3
fs/f2fs/segment.c
··· 504 504 * dir/node pages without enough free segments. 505 505 */ 506 506 if (has_not_enough_free_secs(sbi, 0, 0)) { 507 - mutex_lock(&sbi->gc_mutex); 507 + down_write(&sbi->gc_lock); 508 508 f2fs_gc(sbi, false, false, NULL_SEGNO); 509 509 } 510 510 } ··· 2860 2860 if (sbi->discard_blks == 0) 2861 2861 goto out; 2862 2862 2863 - mutex_lock(&sbi->gc_mutex); 2863 + down_write(&sbi->gc_lock); 2864 2864 err = f2fs_write_checkpoint(sbi, &cpc); 2865 - mutex_unlock(&sbi->gc_mutex); 2865 + up_write(&sbi->gc_lock); 2866 2866 if (err) 2867 2867 goto out; 2868 2868
+8 -8
fs/f2fs/super.c
··· 1238 1238 1239 1239 cpc.reason = __get_cp_reason(sbi); 1240 1240 1241 - mutex_lock(&sbi->gc_mutex); 1241 + down_write(&sbi->gc_lock); 1242 1242 err = f2fs_write_checkpoint(sbi, &cpc); 1243 - mutex_unlock(&sbi->gc_mutex); 1243 + up_write(&sbi->gc_lock); 1244 1244 } 1245 1245 f2fs_trace_ios(NULL, 1); 1246 1246 ··· 1621 1621 f2fs_update_time(sbi, DISABLE_TIME); 1622 1622 1623 1623 while (!f2fs_time_over(sbi, DISABLE_TIME)) { 1624 - mutex_lock(&sbi->gc_mutex); 1624 + down_write(&sbi->gc_lock); 1625 1625 err = f2fs_gc(sbi, true, false, NULL_SEGNO); 1626 1626 if (err == -ENODATA) { 1627 1627 err = 0; ··· 1643 1643 goto restore_flag; 1644 1644 } 1645 1645 1646 - mutex_lock(&sbi->gc_mutex); 1646 + down_write(&sbi->gc_lock); 1647 1647 cpc.reason = CP_PAUSE; 1648 1648 set_sbi_flag(sbi, SBI_CP_DISABLED); 1649 1649 err = f2fs_write_checkpoint(sbi, &cpc); ··· 1655 1655 spin_unlock(&sbi->stat_lock); 1656 1656 1657 1657 out_unlock: 1658 - mutex_unlock(&sbi->gc_mutex); 1658 + up_write(&sbi->gc_lock); 1659 1659 restore_flag: 1660 1660 sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */ 1661 1661 return err; ··· 1663 1663 1664 1664 static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi) 1665 1665 { 1666 - mutex_lock(&sbi->gc_mutex); 1666 + down_write(&sbi->gc_lock); 1667 1667 f2fs_dirty_to_prefree(sbi); 1668 1668 1669 1669 clear_sbi_flag(sbi, SBI_CP_DISABLED); 1670 1670 set_sbi_flag(sbi, SBI_IS_DIRTY); 1671 - mutex_unlock(&sbi->gc_mutex); 1671 + up_write(&sbi->gc_lock); 1672 1672 1673 1673 f2fs_sync_fs(sbi->sb, 1); 1674 1674 } ··· 3398 3398 3399 3399 /* init f2fs-specific super block info */ 3400 3400 sbi->valid_super_block = valid_super_block; 3401 - mutex_init(&sbi->gc_mutex); 3401 + init_rwsem(&sbi->gc_lock); 3402 3402 mutex_init(&sbi->writepages); 3403 3403 mutex_init(&sbi->cp_mutex); 3404 3404 mutex_init(&sbi->resize_mutex);