Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

erofs: decouple basic mount options from fs_context

Previously, all EROFS mount options were of basic types, so
erofs_fs_context could be copied directly by assignment. However,
with the introduction of the multiple-device feature, multiple-device
information is hard to handle in the same way as the other basic
mount options.

Let's separate basic mount option usage from fs_context, so that
multiple-device information can then be handled gracefully.

No logic changes.

Link: https://lore.kernel.org/r/20211007070224.12833-1-hsiangkao@linux.alibaba.com
Reviewed-by: Chao Yu <chao@kernel.org>
Reviewed-by: Liu Bo <bo.liu@linux.alibaba.com>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>

+45 -43
+1 -1
fs/erofs/inode.c
··· 192 192 inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec; 193 193 194 194 inode->i_flags &= ~S_DAX; 195 - if (test_opt(&sbi->ctx, DAX_ALWAYS) && S_ISREG(inode->i_mode) && 195 + if (test_opt(&sbi->opt, DAX_ALWAYS) && S_ISREG(inode->i_mode) && 196 196 vi->datalayout == EROFS_INODE_FLAT_PLAIN) 197 197 inode->i_flags |= S_DAX; 198 198 if (!nblks)
+10 -6
fs/erofs/internal.h
··· 47 47 /* data type for filesystem-wide blocks number */ 48 48 typedef u32 erofs_blk_t; 49 49 50 - struct erofs_fs_context { 50 + struct erofs_mount_opts { 51 51 #ifdef CONFIG_EROFS_FS_ZIP 52 52 /* current strategy of how to use managed cache */ 53 53 unsigned char cache_strategy; ··· 60 60 unsigned int mount_opt; 61 61 }; 62 62 63 + struct erofs_fs_context { 64 + struct erofs_mount_opts opt; 65 + }; 66 + 63 67 /* all filesystem-wide lz4 configurations */ 64 68 struct erofs_sb_lz4_info { 65 69 /* # of pages needed for EROFS lz4 rolling decompression */ ··· 73 69 }; 74 70 75 71 struct erofs_sb_info { 72 + struct erofs_mount_opts opt; /* options */ 73 + 76 74 #ifdef CONFIG_EROFS_FS_ZIP 77 75 /* list for all registered superblocks, mainly for shrinker */ 78 76 struct list_head list; ··· 114 108 u8 volume_name[16]; /* volume name */ 115 109 u32 feature_compat; 116 110 u32 feature_incompat; 117 - 118 - struct erofs_fs_context ctx; /* options */ 119 111 }; 120 112 121 113 #define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info) ··· 125 121 #define EROFS_MOUNT_DAX_ALWAYS 0x00000040 126 122 #define EROFS_MOUNT_DAX_NEVER 0x00000080 127 123 128 - #define clear_opt(ctx, option) ((ctx)->mount_opt &= ~EROFS_MOUNT_##option) 129 - #define set_opt(ctx, option) ((ctx)->mount_opt |= EROFS_MOUNT_##option) 130 - #define test_opt(ctx, option) ((ctx)->mount_opt & EROFS_MOUNT_##option) 124 + #define clear_opt(opt, option) ((opt)->mount_opt &= ~EROFS_MOUNT_##option) 125 + #define set_opt(opt, option) ((opt)->mount_opt |= EROFS_MOUNT_##option) 126 + #define test_opt(opt, option) ((opt)->mount_opt & EROFS_MOUNT_##option) 131 127 132 128 enum { 133 129 EROFS_ZIP_CACHE_DISABLED,
+28 -30
fs/erofs/super.c
··· 340 340 static void erofs_default_options(struct erofs_fs_context *ctx) 341 341 { 342 342 #ifdef CONFIG_EROFS_FS_ZIP 343 - ctx->cache_strategy = EROFS_ZIP_CACHE_READAROUND; 344 - ctx->max_sync_decompress_pages = 3; 345 - ctx->readahead_sync_decompress = false; 343 + ctx->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND; 344 + ctx->opt.max_sync_decompress_pages = 3; 345 + ctx->opt.readahead_sync_decompress = false; 346 346 #endif 347 347 #ifdef CONFIG_EROFS_FS_XATTR 348 - set_opt(ctx, XATTR_USER); 348 + set_opt(&ctx->opt, XATTR_USER); 349 349 #endif 350 350 #ifdef CONFIG_EROFS_FS_POSIX_ACL 351 - set_opt(ctx, POSIX_ACL); 351 + set_opt(&ctx->opt, POSIX_ACL); 352 352 #endif 353 353 } 354 354 ··· 392 392 switch (mode) { 393 393 case EROFS_MOUNT_DAX_ALWAYS: 394 394 warnfc(fc, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk"); 395 - set_opt(ctx, DAX_ALWAYS); 396 - clear_opt(ctx, DAX_NEVER); 395 + set_opt(&ctx->opt, DAX_ALWAYS); 396 + clear_opt(&ctx->opt, DAX_NEVER); 397 397 return true; 398 398 case EROFS_MOUNT_DAX_NEVER: 399 - set_opt(ctx, DAX_NEVER); 400 - clear_opt(ctx, DAX_ALWAYS); 399 + set_opt(&ctx->opt, DAX_NEVER); 400 + clear_opt(&ctx->opt, DAX_ALWAYS); 401 401 return true; 402 402 default: 403 403 DBG_BUGON(1); ··· 424 424 case Opt_user_xattr: 425 425 #ifdef CONFIG_EROFS_FS_XATTR 426 426 if (result.boolean) 427 - set_opt(ctx, XATTR_USER); 427 + set_opt(&ctx->opt, XATTR_USER); 428 428 else 429 - clear_opt(ctx, XATTR_USER); 429 + clear_opt(&ctx->opt, XATTR_USER); 430 430 #else 431 431 errorfc(fc, "{,no}user_xattr options not supported"); 432 432 #endif ··· 434 434 case Opt_acl: 435 435 #ifdef CONFIG_EROFS_FS_POSIX_ACL 436 436 if (result.boolean) 437 - set_opt(ctx, POSIX_ACL); 437 + set_opt(&ctx->opt, POSIX_ACL); 438 438 else 439 - clear_opt(ctx, POSIX_ACL); 439 + clear_opt(&ctx->opt, POSIX_ACL); 440 440 #else 441 441 errorfc(fc, "{,no}acl options not supported"); 442 442 #endif 443 443 break; 444 444 case Opt_cache_strategy: 445 445 #ifdef 
CONFIG_EROFS_FS_ZIP 446 - ctx->cache_strategy = result.uint_32; 446 + ctx->opt.cache_strategy = result.uint_32; 447 447 #else 448 448 errorfc(fc, "compression not supported, cache_strategy ignored"); 449 449 #endif ··· 540 540 return -ENOMEM; 541 541 542 542 sb->s_fs_info = sbi; 543 + sbi->opt = ctx->opt; 543 544 sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev); 544 545 err = erofs_read_superblock(sb); 545 546 if (err) 546 547 return err; 547 548 548 - if (test_opt(ctx, DAX_ALWAYS) && 549 + if (test_opt(&sbi->opt, DAX_ALWAYS) && 549 550 !dax_supported(sbi->dax_dev, sb->s_bdev, EROFS_BLKSIZ, 0, bdev_nr_sectors(sb->s_bdev))) { 550 551 errorfc(fc, "DAX unsupported by block device. Turning off DAX."); 551 - clear_opt(ctx, DAX_ALWAYS); 552 + clear_opt(&sbi->opt, DAX_ALWAYS); 552 553 } 553 554 sb->s_flags |= SB_RDONLY | SB_NOATIME; 554 555 sb->s_maxbytes = MAX_LFS_FILESIZE; ··· 558 557 sb->s_op = &erofs_sops; 559 558 sb->s_xattr = erofs_xattr_handlers; 560 559 561 - if (test_opt(ctx, POSIX_ACL)) 560 + if (test_opt(&sbi->opt, POSIX_ACL)) 562 561 sb->s_flags |= SB_POSIXACL; 563 562 else 564 563 sb->s_flags &= ~SB_POSIXACL; 565 - 566 - sbi->ctx = *ctx; 567 564 568 565 #ifdef CONFIG_EROFS_FS_ZIP 569 566 xa_init(&sbi->managed_pslots); ··· 606 607 607 608 DBG_BUGON(!sb_rdonly(sb)); 608 609 609 - if (test_opt(ctx, POSIX_ACL)) 610 + if (test_opt(&ctx->opt, POSIX_ACL)) 610 611 fc->sb_flags |= SB_POSIXACL; 611 612 else 612 613 fc->sb_flags &= ~SB_POSIXACL; 613 614 614 - sbi->ctx = *ctx; 615 + sbi->opt = ctx->opt; 615 616 616 617 fc->sb_flags |= SB_RDONLY; 617 618 return 0; ··· 639 640 erofs_default_options(fc->fs_private); 640 641 641 642 fc->ops = &erofs_context_ops; 642 - 643 643 return 0; 644 644 } 645 645 ··· 761 763 static int erofs_show_options(struct seq_file *seq, struct dentry *root) 762 764 { 763 765 struct erofs_sb_info *sbi = EROFS_SB(root->d_sb); 764 - struct erofs_fs_context *ctx = &sbi->ctx; 766 + struct erofs_mount_opts *opt = &sbi->opt; 765 767 766 768 #ifdef 
CONFIG_EROFS_FS_XATTR 767 - if (test_opt(ctx, XATTR_USER)) 769 + if (test_opt(opt, XATTR_USER)) 768 770 seq_puts(seq, ",user_xattr"); 769 771 else 770 772 seq_puts(seq, ",nouser_xattr"); 771 773 #endif 772 774 #ifdef CONFIG_EROFS_FS_POSIX_ACL 773 - if (test_opt(ctx, POSIX_ACL)) 775 + if (test_opt(opt, POSIX_ACL)) 774 776 seq_puts(seq, ",acl"); 775 777 else 776 778 seq_puts(seq, ",noacl"); 777 779 #endif 778 780 #ifdef CONFIG_EROFS_FS_ZIP 779 - if (ctx->cache_strategy == EROFS_ZIP_CACHE_DISABLED) 781 + if (opt->cache_strategy == EROFS_ZIP_CACHE_DISABLED) 780 782 seq_puts(seq, ",cache_strategy=disabled"); 781 - else if (ctx->cache_strategy == EROFS_ZIP_CACHE_READAHEAD) 783 + else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAHEAD) 782 784 seq_puts(seq, ",cache_strategy=readahead"); 783 - else if (ctx->cache_strategy == EROFS_ZIP_CACHE_READAROUND) 785 + else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAROUND) 784 786 seq_puts(seq, ",cache_strategy=readaround"); 785 787 #endif 786 - if (test_opt(ctx, DAX_ALWAYS)) 788 + if (test_opt(opt, DAX_ALWAYS)) 787 789 seq_puts(seq, ",dax=always"); 788 - if (test_opt(ctx, DAX_NEVER)) 790 + if (test_opt(opt, DAX_NEVER)) 789 791 seq_puts(seq, ",dax=never"); 790 792 return 0; 791 793 }
+2 -2
fs/erofs/xattr.c
··· 429 429 430 430 static bool erofs_xattr_user_list(struct dentry *dentry) 431 431 { 432 - return test_opt(&EROFS_SB(dentry->d_sb)->ctx, XATTR_USER); 432 + return test_opt(&EROFS_SB(dentry->d_sb)->opt, XATTR_USER); 433 433 } 434 434 435 435 static bool erofs_xattr_trusted_list(struct dentry *dentry) ··· 476 476 477 477 switch (handler->flags) { 478 478 case EROFS_XATTR_INDEX_USER: 479 - if (!test_opt(&sbi->ctx, XATTR_USER)) 479 + if (!test_opt(&sbi->opt, XATTR_USER)) 480 480 return -EOPNOTSUPP; 481 481 break; 482 482 case EROFS_XATTR_INDEX_TRUSTED:
+4 -4
fs/erofs/zdata.c
··· 695 695 goto err_out; 696 696 697 697 /* preload all compressed pages (maybe downgrade role if necessary) */ 698 - if (should_alloc_managed_pages(fe, sbi->ctx.cache_strategy, map->m_la)) 698 + if (should_alloc_managed_pages(fe, sbi->opt.cache_strategy, map->m_la)) 699 699 cache_strategy = TRYALLOC; 700 700 else 701 701 cache_strategy = DONTALLOC; ··· 796 796 /* Use workqueue and sync decompression for atomic contexts only */ 797 797 if (in_atomic() || irqs_disabled()) { 798 798 queue_work(z_erofs_workqueue, &io->u.work); 799 - sbi->ctx.readahead_sync_decompress = true; 799 + sbi->opt.readahead_sync_decompress = true; 800 800 return; 801 801 } 802 802 z_erofs_decompressqueue_work(&io->u.work); ··· 1411 1411 struct erofs_sb_info *const sbi = EROFS_I_SB(inode); 1412 1412 1413 1413 unsigned int nr_pages = readahead_count(rac); 1414 - bool sync = (sbi->ctx.readahead_sync_decompress && 1415 - nr_pages <= sbi->ctx.max_sync_decompress_pages); 1414 + bool sync = (sbi->opt.readahead_sync_decompress && 1415 + nr_pages <= sbi->opt.max_sync_decompress_pages); 1416 1416 struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); 1417 1417 struct page *page, *head = NULL; 1418 1418 LIST_HEAD(pagepool);