Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'f2fs-for-5.4' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs

Pull f2fs updates from Jaegeuk Kim:
"In this round, we introduced casefolding support in f2fs, and fixed
various bugs in individual features such as IO alignment,
checkpoint=disable, quota, and swapfile.

Enhancement:
- support casefolding w/ enhancement in ext4
- support fiemap for directory
- support FS_IOC_GET|SET_FSLABEL

Bug fix:
- fix IO stuck during checkpoint=disable
- avoid infinite GC loop
- fix panic/overflow related to IO alignment feature
- fix livelock in swap file
- fix discard command leak
- disallow dio for atomic_write"

* tag 'f2fs-for-5.4' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (51 commits)
f2fs: add a condition to detect overflow in f2fs_ioc_gc_range()
f2fs: fix to add missing F2FS_IO_ALIGNED() condition
f2fs: fix to fallback to buffered IO in IO aligned mode
f2fs: fix to handle error path correctly in f2fs_map_blocks
f2fs: fix extent corruption during directIO in LFS mode
f2fs: check all the data segments against all node ones
f2fs: Add a small clarification to CONFIG_FS_F2FS_FS_SECURITY
f2fs: fix inode rwsem regression
f2fs: fix to avoid accessing uninitialized field of inode page in is_alive()
f2fs: avoid infinite GC loop due to stale atomic files
f2fs: Fix indefinite loop in f2fs_gc()
f2fs: convert inline_data in prior to i_size_write
f2fs: fix error path of f2fs_convert_inline_page()
f2fs: add missing documents of reserve_root/resuid/resgid
f2fs: fix flushing node pages when checkpoint is disabled
f2fs: enhance f2fs_is_checkpoint_ready()'s readability
f2fs: clean up __bio_alloc()'s parameter
f2fs: fix wrong error injection path in inc_valid_block_count()
f2fs: fix to writeout dirty inode during node flush
f2fs: optimize case-insensitive lookups
...

+865 -204
+7
Documentation/ABI/testing/sysfs-fs-f2fs
··· 251 251 If checkpoint=disable, it displays the number of blocks that are unusable. 252 252 If checkpoint=enable it displays the number of blocks that would be unusable 253 253 if checkpoint=disable were to be set. 254 + 255 + What: /sys/fs/f2fs/<disk>/encoding 256 + Date: July 2019 257 + Contact: "Daniel Rosenberg" <drosen@google.com> 258 + Description: 259 + Displays name and version of the encoding set for the filesystem. 260 + If no encoding is set, displays (none)
+8
Documentation/filesystems/f2fs.txt
··· 157 157 enabled by default. 158 158 data_flush Enable data flushing before checkpoint in order to 159 159 persist data of regular and symlink. 160 + reserve_root=%d Support configuring reserved space which is used for 161 + allocation from a privileged user with specified uid or 162 + gid, unit: 4KB, the default limit is 0.2% of user blocks. 163 + resuid=%d The user ID which may use the reserved blocks. 164 + resgid=%d The group ID which may use the reserved blocks. 160 165 fault_injection=%d Enable fault injection in all supported types with 161 166 specified injection rate. 162 167 fault_type=%d Support configuring fault injection type, should be ··· 417 412 If checkpoint=enable it shows the number of blocks 418 413 that would be unusable if checkpoint=disable were 419 414 to be set. 415 + 416 + encoding This shows the encoding used for casefolding. 417 + If casefolding is not enabled, returns (none) 420 418 421 419 ================================================================================ 422 420 USAGE
+4 -1
fs/f2fs/Kconfig
··· 2 2 config F2FS_FS 3 3 tristate "F2FS filesystem support" 4 4 depends on BLOCK 5 + select NLS 5 6 select CRYPTO 6 7 select CRYPTO_CRC32 7 8 select F2FS_FS_XATTR if FS_ENCRYPTION ··· 61 60 Security Models (LSMs) accepted by AppArmor, SELinux, Smack and TOMOYO 62 61 Linux. This option enables an extended attribute handler for file 63 62 security labels in the f2fs filesystem, so that it requires enabling 64 - the extended attribute support in advance. 63 + the extended attribute support in advance. In particular you need this 64 + option if you use the setcap command to assign initial process capabi- 65 + lities to executables (the security.* extended attributes). 65 66 66 67 If you are not using a security module, say N. 67 68
+70 -34
fs/f2fs/data.c
··· 283 283 /* 284 284 * Low-level block read/write IO operations. 285 285 */ 286 - static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr, 287 - struct writeback_control *wbc, 288 - int npages, bool is_read, 289 - enum page_type type, enum temp_type temp) 286 + static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages) 290 287 { 288 + struct f2fs_sb_info *sbi = fio->sbi; 291 289 struct bio *bio; 292 290 293 291 bio = f2fs_bio_alloc(sbi, npages, true); 294 292 295 - f2fs_target_device(sbi, blk_addr, bio); 296 - if (is_read) { 293 + f2fs_target_device(sbi, fio->new_blkaddr, bio); 294 + if (is_read_io(fio->op)) { 297 295 bio->bi_end_io = f2fs_read_end_io; 298 296 bio->bi_private = NULL; 299 297 } else { 300 298 bio->bi_end_io = f2fs_write_end_io; 301 299 bio->bi_private = sbi; 302 - bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, type, temp); 300 + bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi, 301 + fio->type, fio->temp); 303 302 } 304 - if (wbc) 305 - wbc_init_bio(wbc, bio); 303 + if (fio->io_wbc) 304 + wbc_init_bio(fio->io_wbc, bio); 306 305 307 306 return bio; 308 307 } ··· 317 318 318 319 if (test_opt(sbi, LFS) && current->plug) 319 320 blk_finish_plug(current->plug); 321 + 322 + if (F2FS_IO_ALIGNED(sbi)) 323 + goto submit_io; 320 324 321 325 start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS; 322 326 start %= F2FS_IO_SIZE(sbi); ··· 487 485 f2fs_trace_ios(fio, 0); 488 486 489 487 /* Allocate a new bio */ 490 - bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc, 491 - 1, is_read_io(fio->op), fio->type, fio->temp); 488 + bio = __bio_alloc(fio, 1); 492 489 493 490 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { 494 491 bio_put(bio); ··· 506 505 return 0; 507 506 } 508 507 508 + static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio, 509 + block_t last_blkaddr, block_t cur_blkaddr) 510 + { 511 + if (last_blkaddr + 1 != cur_blkaddr) 512 + return false; 513 + return __same_bdev(sbi, cur_blkaddr, bio); 
514 + } 515 + 516 + static bool io_type_is_mergeable(struct f2fs_bio_info *io, 517 + struct f2fs_io_info *fio) 518 + { 519 + if (io->fio.op != fio->op) 520 + return false; 521 + return io->fio.op_flags == fio->op_flags; 522 + } 523 + 524 + static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio, 525 + struct f2fs_bio_info *io, 526 + struct f2fs_io_info *fio, 527 + block_t last_blkaddr, 528 + block_t cur_blkaddr) 529 + { 530 + if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) { 531 + unsigned int filled_blocks = 532 + F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size); 533 + unsigned int io_size = F2FS_IO_SIZE(sbi); 534 + unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt; 535 + 536 + /* IOs in bio is aligned and left space of vectors is not enough */ 537 + if (!(filled_blocks % io_size) && left_vecs < io_size) 538 + return false; 539 + } 540 + if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr)) 541 + return false; 542 + return io_type_is_mergeable(io, fio); 543 + } 544 + 509 545 int f2fs_merge_page_bio(struct f2fs_io_info *fio) 510 546 { 511 547 struct bio *bio = *fio->bio; ··· 556 518 trace_f2fs_submit_page_bio(page, fio); 557 519 f2fs_trace_ios(fio, 0); 558 520 559 - if (bio && (*fio->last_block + 1 != fio->new_blkaddr || 560 - !__same_bdev(fio->sbi, fio->new_blkaddr, bio))) { 521 + if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block, 522 + fio->new_blkaddr)) { 561 523 __submit_bio(fio->sbi, bio, fio->type); 562 524 bio = NULL; 563 525 } 564 526 alloc_new: 565 527 if (!bio) { 566 - bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc, 567 - BIO_MAX_PAGES, false, fio->type, fio->temp); 528 + bio = __bio_alloc(fio, BIO_MAX_PAGES); 568 529 bio_set_op_attrs(bio, fio->op, fio->op_flags); 569 530 } 570 531 ··· 629 592 630 593 inc_page_count(sbi, WB_DATA_TYPE(bio_page)); 631 594 632 - if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 || 633 - (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) || 
634 - !__same_bdev(sbi, fio->new_blkaddr, io->bio))) 595 + if (io->bio && !io_is_mergeable(sbi, io->bio, io, fio, 596 + io->last_block_in_bio, fio->new_blkaddr)) 635 597 __submit_merged_bio(io); 636 598 alloc_new: 637 599 if (io->bio == NULL) { 638 - if ((fio->type == DATA || fio->type == NODE) && 600 + if (F2FS_IO_ALIGNED(sbi) && 601 + (fio->type == DATA || fio->type == NODE) && 639 602 fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) { 640 603 dec_page_count(sbi, WB_DATA_TYPE(bio_page)); 641 604 fio->retry = true; 642 605 goto skip; 643 606 } 644 - io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc, 645 - BIO_MAX_PAGES, false, 646 - fio->type, fio->temp); 607 + io->bio = __bio_alloc(fio, BIO_MAX_PAGES); 647 608 io->fio = *fio; 648 609 } 649 610 ··· 662 627 goto next; 663 628 out: 664 629 if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) || 665 - f2fs_is_checkpoint_ready(sbi)) 630 + !f2fs_is_checkpoint_ready(sbi)) 666 631 __submit_merged_bio(io); 667 632 up_write(&io->io_rwsem); 668 633 } ··· 1057 1022 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) 1058 1023 invalidate_mapping_pages(META_MAPPING(sbi), 1059 1024 old_blkaddr, old_blkaddr); 1060 - f2fs_set_data_blkaddr(dn); 1025 + f2fs_update_data_blkaddr(dn, dn->data_blkaddr); 1061 1026 1062 1027 /* 1063 1028 * i_size will be updated by direct_IO. Otherwise, we'll get stale ··· 1234 1199 if (test_opt(sbi, LFS) && flag == F2FS_GET_BLOCK_DIO && 1235 1200 map->m_may_create) { 1236 1201 err = __allocate_data_block(&dn, map->m_seg_type); 1237 - if (!err) { 1238 - blkaddr = dn.data_blkaddr; 1239 - set_inode_flag(inode, FI_APPEND_WRITE); 1240 - } 1202 + if (err) 1203 + goto sync_out; 1204 + blkaddr = dn.data_blkaddr; 1205 + set_inode_flag(inode, FI_APPEND_WRITE); 1241 1206 } 1242 1207 } else { 1243 1208 if (create) { ··· 1442 1407 return __get_data_block(inode, iblock, bh_result, create, 1443 1408 F2FS_GET_BLOCK_DIO, NULL, 1444 1409 f2fs_rw_hint_to_seg_type(inode->i_write_hint), 1445 - true); 1410 + IS_SWAPFILE(inode) ? 
false : true); 1446 1411 } 1447 1412 1448 1413 static int get_data_block_dio(struct inode *inode, sector_t iblock, ··· 1573 1538 goto out; 1574 1539 } 1575 1540 1576 - if (f2fs_has_inline_data(inode)) { 1541 + if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) { 1577 1542 ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len); 1578 1543 if (ret != -EAGAIN) 1579 1544 goto out; ··· 1726 1691 * This page will go to BIO. Do we need to send this 1727 1692 * BIO off first? 1728 1693 */ 1729 - if (bio && (*last_block_in_bio != block_nr - 1 || 1730 - !__same_bdev(F2FS_I_SB(inode), block_nr, bio))) { 1694 + if (bio && !page_is_mergeable(F2FS_I_SB(inode), bio, 1695 + *last_block_in_bio, block_nr)) { 1731 1696 submit_and_realloc: 1732 1697 __submit_bio(F2FS_I_SB(inode), bio, DATA); 1733 1698 bio = NULL; ··· 2625 2590 2626 2591 trace_f2fs_write_begin(inode, pos, len, flags); 2627 2592 2628 - err = f2fs_is_checkpoint_ready(sbi); 2629 - if (err) 2593 + if (!f2fs_is_checkpoint_ready(sbi)) { 2594 + err = -ENOSPC; 2630 2595 goto fail; 2596 + } 2631 2597 2632 2598 if ((f2fs_is_atomic_file(inode) && 2633 2599 !f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
+2 -2
fs/f2fs/debug.c
··· 67 67 si->nr_rd_data = get_pages(sbi, F2FS_RD_DATA); 68 68 si->nr_rd_node = get_pages(sbi, F2FS_RD_NODE); 69 69 si->nr_rd_meta = get_pages(sbi, F2FS_RD_META); 70 - if (SM_I(sbi) && SM_I(sbi)->fcc_info) { 70 + if (SM_I(sbi)->fcc_info) { 71 71 si->nr_flushed = 72 72 atomic_read(&SM_I(sbi)->fcc_info->issued_flush); 73 73 si->nr_flushing = ··· 75 75 si->flush_list_empty = 76 76 llist_empty(&SM_I(sbi)->fcc_info->issue_list); 77 77 } 78 - if (SM_I(sbi) && SM_I(sbi)->dcc_info) { 78 + if (SM_I(sbi)->dcc_info) { 79 79 si->nr_discarded = 80 80 atomic_read(&SM_I(sbi)->dcc_info->issued_discard); 81 81 si->nr_discarding =
+175 -9
fs/f2fs/dir.c
··· 8 8 #include <linux/fs.h> 9 9 #include <linux/f2fs_fs.h> 10 10 #include <linux/sched/signal.h> 11 + #include <linux/unicode.h> 11 12 #include "f2fs.h" 12 13 #include "node.h" 13 14 #include "acl.h" ··· 82 81 return bidx; 83 82 } 84 83 85 - static struct f2fs_dir_entry *find_in_block(struct page *dentry_page, 84 + static struct f2fs_dir_entry *find_in_block(struct inode *dir, 85 + struct page *dentry_page, 86 86 struct fscrypt_name *fname, 87 87 f2fs_hash_t namehash, 88 88 int *max_slots, ··· 95 93 96 94 dentry_blk = (struct f2fs_dentry_block *)page_address(dentry_page); 97 95 98 - make_dentry_ptr_block(NULL, &d, dentry_blk); 96 + make_dentry_ptr_block(dir, &d, dentry_blk); 99 97 de = f2fs_find_target_dentry(fname, namehash, max_slots, &d); 100 98 if (de) 101 99 *res_page = dentry_page; ··· 103 101 return de; 104 102 } 105 103 104 + #ifdef CONFIG_UNICODE 105 + /* 106 + * Test whether a case-insensitive directory entry matches the filename 107 + * being searched for. 108 + * 109 + * Returns: 0 if the directory entry matches, more than 0 if it 110 + * doesn't match or less than zero on error. 111 + */ 112 + int f2fs_ci_compare(const struct inode *parent, const struct qstr *name, 113 + const struct qstr *entry, bool quick) 114 + { 115 + const struct f2fs_sb_info *sbi = F2FS_SB(parent->i_sb); 116 + const struct unicode_map *um = sbi->s_encoding; 117 + int ret; 118 + 119 + if (quick) 120 + ret = utf8_strncasecmp_folded(um, name, entry); 121 + else 122 + ret = utf8_strncasecmp(um, name, entry); 123 + 124 + if (ret < 0) { 125 + /* Handle invalid character sequence as either an error 126 + * or as an opaque byte sequence. 
127 + */ 128 + if (f2fs_has_strict_mode(sbi)) 129 + return -EINVAL; 130 + 131 + if (name->len != entry->len) 132 + return 1; 133 + 134 + return !!memcmp(name->name, entry->name, name->len); 135 + } 136 + 137 + return ret; 138 + } 139 + 140 + static void f2fs_fname_setup_ci_filename(struct inode *dir, 141 + const struct qstr *iname, 142 + struct fscrypt_str *cf_name) 143 + { 144 + struct f2fs_sb_info *sbi = F2FS_I_SB(dir); 145 + 146 + if (!IS_CASEFOLDED(dir)) { 147 + cf_name->name = NULL; 148 + return; 149 + } 150 + 151 + cf_name->name = f2fs_kmalloc(sbi, F2FS_NAME_LEN, GFP_NOFS); 152 + if (!cf_name->name) 153 + return; 154 + 155 + cf_name->len = utf8_casefold(sbi->s_encoding, 156 + iname, cf_name->name, 157 + F2FS_NAME_LEN); 158 + if ((int)cf_name->len <= 0) { 159 + kvfree(cf_name->name); 160 + cf_name->name = NULL; 161 + } 162 + } 163 + #endif 164 + 165 + static inline bool f2fs_match_name(struct f2fs_dentry_ptr *d, 166 + struct f2fs_dir_entry *de, 167 + struct fscrypt_name *fname, 168 + struct fscrypt_str *cf_str, 169 + unsigned long bit_pos, 170 + f2fs_hash_t namehash) 171 + { 172 + #ifdef CONFIG_UNICODE 173 + struct inode *parent = d->inode; 174 + struct f2fs_sb_info *sbi = F2FS_I_SB(parent); 175 + struct qstr entry; 176 + #endif 177 + 178 + if (de->hash_code != namehash) 179 + return false; 180 + 181 + #ifdef CONFIG_UNICODE 182 + entry.name = d->filename[bit_pos]; 183 + entry.len = de->name_len; 184 + 185 + if (sbi->s_encoding && IS_CASEFOLDED(parent)) { 186 + if (cf_str->name) { 187 + struct qstr cf = {.name = cf_str->name, 188 + .len = cf_str->len}; 189 + return !f2fs_ci_compare(parent, &cf, &entry, true); 190 + } 191 + return !f2fs_ci_compare(parent, fname->usr_fname, &entry, 192 + false); 193 + } 194 + #endif 195 + if (fscrypt_match_name(fname, d->filename[bit_pos], 196 + le16_to_cpu(de->name_len))) 197 + return true; 198 + return false; 199 + } 200 + 106 201 struct f2fs_dir_entry *f2fs_find_target_dentry(struct fscrypt_name *fname, 107 202 f2fs_hash_t 
namehash, int *max_slots, 108 203 struct f2fs_dentry_ptr *d) 109 204 { 110 205 struct f2fs_dir_entry *de; 206 + struct fscrypt_str cf_str = { .name = NULL, .len = 0 }; 111 207 unsigned long bit_pos = 0; 112 208 int max_len = 0; 209 + 210 + #ifdef CONFIG_UNICODE 211 + f2fs_fname_setup_ci_filename(d->inode, fname->usr_fname, &cf_str); 212 + #endif 113 213 114 214 if (max_slots) 115 215 *max_slots = 0; ··· 229 125 continue; 230 126 } 231 127 232 - if (de->hash_code == namehash && 233 - fscrypt_match_name(fname, d->filename[bit_pos], 234 - le16_to_cpu(de->name_len))) 128 + if (f2fs_match_name(d, de, fname, &cf_str, bit_pos, namehash)) 235 129 goto found; 236 130 237 131 if (max_slots && max_len > *max_slots) ··· 243 141 found: 244 142 if (max_slots && max_len > *max_slots) 245 143 *max_slots = max_len; 144 + 145 + #ifdef CONFIG_UNICODE 146 + kvfree(cf_str.name); 147 + #endif 246 148 return de; 247 149 } 248 150 ··· 263 157 struct f2fs_dir_entry *de = NULL; 264 158 bool room = false; 265 159 int max_slots; 266 - f2fs_hash_t namehash = f2fs_dentry_hash(&name, fname); 160 + f2fs_hash_t namehash = f2fs_dentry_hash(dir, &name, fname); 267 161 268 162 nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level); 269 163 nblock = bucket_blocks(level); ··· 285 179 } 286 180 } 287 181 288 - de = find_in_block(dentry_page, fname, namehash, &max_slots, 289 - res_page); 182 + de = find_in_block(dir, dentry_page, fname, namehash, 183 + &max_slots, res_page); 290 184 if (de) 291 185 break; 292 186 ··· 355 249 struct f2fs_dir_entry *de = NULL; 356 250 struct fscrypt_name fname; 357 251 int err; 252 + 253 + #ifdef CONFIG_UNICODE 254 + if (f2fs_has_strict_mode(F2FS_I_SB(dir)) && IS_CASEFOLDED(dir) && 255 + utf8_validate(F2FS_I_SB(dir)->s_encoding, child)) { 256 + *res_page = ERR_PTR(-EINVAL); 257 + return NULL; 258 + } 259 + #endif 358 260 359 261 err = fscrypt_setup_filename(dir, child, 1, &fname); 360 262 if (err) { ··· 618 504 619 505 level = 0; 620 506 slots = 
GET_DENTRY_SLOTS(new_name->len); 621 - dentry_hash = f2fs_dentry_hash(new_name, NULL); 507 + dentry_hash = f2fs_dentry_hash(dir, new_name, NULL); 622 508 623 509 current_depth = F2FS_I(dir)->i_current_depth; 624 510 if (F2FS_I(dir)->chash == dentry_hash) { ··· 682 568 683 569 if (inode) { 684 570 f2fs_i_pino_write(inode, dir->i_ino); 571 + 572 + /* synchronize inode page's data from inode cache */ 573 + if (is_inode_flag_set(inode, FI_NEW_INODE)) 574 + f2fs_update_inode(inode, page); 575 + 685 576 f2fs_put_page(page, 1); 686 577 } 687 578 ··· 1062 943 .compat_ioctl = f2fs_compat_ioctl, 1063 944 #endif 1064 945 }; 946 + 947 + #ifdef CONFIG_UNICODE 948 + static int f2fs_d_compare(const struct dentry *dentry, unsigned int len, 949 + const char *str, const struct qstr *name) 950 + { 951 + struct qstr qstr = {.name = str, .len = len }; 952 + 953 + if (!IS_CASEFOLDED(dentry->d_parent->d_inode)) { 954 + if (len != name->len) 955 + return -1; 956 + return memcmp(str, name, len); 957 + } 958 + 959 + return f2fs_ci_compare(dentry->d_parent->d_inode, name, &qstr, false); 960 + } 961 + 962 + static int f2fs_d_hash(const struct dentry *dentry, struct qstr *str) 963 + { 964 + struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb); 965 + const struct unicode_map *um = sbi->s_encoding; 966 + unsigned char *norm; 967 + int len, ret = 0; 968 + 969 + if (!IS_CASEFOLDED(dentry->d_inode)) 970 + return 0; 971 + 972 + norm = f2fs_kmalloc(sbi, PATH_MAX, GFP_ATOMIC); 973 + if (!norm) 974 + return -ENOMEM; 975 + 976 + len = utf8_casefold(um, str, norm, PATH_MAX); 977 + if (len < 0) { 978 + if (f2fs_has_strict_mode(sbi)) 979 + ret = -EINVAL; 980 + goto out; 981 + } 982 + str->hash = full_name_hash(dentry, norm, len); 983 + out: 984 + kvfree(norm); 985 + return ret; 986 + } 987 + 988 + const struct dentry_operations f2fs_dentry_ops = { 989 + .d_hash = f2fs_d_hash, 990 + .d_compare = f2fs_d_compare, 991 + }; 992 + #endif
+37 -10
fs/f2fs/f2fs.h
··· 154 154 #define F2FS_FEATURE_LOST_FOUND 0x0200 155 155 #define F2FS_FEATURE_VERITY 0x0400 156 156 #define F2FS_FEATURE_SB_CHKSUM 0x0800 157 + #define F2FS_FEATURE_CASEFOLD 0x1000 157 158 158 159 #define __F2FS_HAS_FEATURE(raw_super, mask) \ 159 160 ((raw_super->feature & cpu_to_le32(mask)) != 0) ··· 418 417 #define F2FS_IOC_GET_PIN_FILE _IOR(F2FS_IOCTL_MAGIC, 14, __u32) 419 418 #define F2FS_IOC_PRECACHE_EXTENTS _IO(F2FS_IOCTL_MAGIC, 15) 420 419 #define F2FS_IOC_RESIZE_FS _IOW(F2FS_IOCTL_MAGIC, 16, __u64) 420 + 421 + #define F2FS_IOC_GET_VOLUME_NAME FS_IOC_GETFSLABEL 422 + #define F2FS_IOC_SET_VOLUME_NAME FS_IOC_SETFSLABEL 421 423 422 424 #define F2FS_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY 423 425 #define F2FS_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY ··· 1176 1172 int valid_super_block; /* valid super block no */ 1177 1173 unsigned long s_flag; /* flags for sbi */ 1178 1174 struct mutex writepages; /* mutex for writepages() */ 1175 + #ifdef CONFIG_UNICODE 1176 + struct unicode_map *s_encoding; 1177 + __u16 s_encoding_flags; 1178 + #endif 1179 1179 1180 1180 #ifdef CONFIG_BLK_DEV_ZONED 1181 1181 unsigned int blocks_per_blkz; /* F2FS blocks per zone */ ··· 1651 1643 static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock) 1652 1644 { 1653 1645 unsigned long flags; 1646 + unsigned char *nat_bits; 1654 1647 1655 1648 /* 1656 1649 * In order to re-enable nat_bits we need to call fsck.f2fs by ··· 1662 1653 if (lock) 1663 1654 spin_lock_irqsave(&sbi->cp_lock, flags); 1664 1655 __clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG); 1665 - kvfree(NM_I(sbi)->nat_bits); 1656 + nat_bits = NM_I(sbi)->nat_bits; 1666 1657 NM_I(sbi)->nat_bits = NULL; 1667 1658 if (lock) 1668 1659 spin_unlock_irqrestore(&sbi->cp_lock, flags); 1660 + 1661 + kvfree(nat_bits); 1669 1662 } 1670 1663 1671 1664 static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi, ··· 1774 1763 if (time_to_inject(sbi, FAULT_BLOCK)) { 1775 1764 
f2fs_show_injection_info(FAULT_BLOCK); 1776 1765 release = *count; 1777 - goto enospc; 1766 + goto release_quota; 1778 1767 } 1779 1768 1780 1769 /* ··· 1819 1808 1820 1809 enospc: 1821 1810 percpu_counter_sub(&sbi->alloc_valid_block_count, release); 1811 + release_quota: 1822 1812 dquot_release_reservation_block(inode, release); 1823 1813 return -ENOSPC; 1824 1814 } ··· 2374 2362 #define F2FS_INDEX_FL 0x00001000 /* hash-indexed directory */ 2375 2363 #define F2FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */ 2376 2364 #define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ 2365 + #define F2FS_CASEFOLD_FL 0x40000000 /* Casefolded file */ 2377 2366 2378 2367 /* Flags that should be inherited by new inodes from their parent. */ 2379 2368 #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \ 2380 - F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL) 2369 + F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \ 2370 + F2FS_CASEFOLD_FL) 2381 2371 2382 2372 /* Flags that are appropriate for regular files (all but dir-specific ones). */ 2383 - #define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL)) 2373 + #define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \ 2374 + F2FS_CASEFOLD_FL)) 2384 2375 2385 2376 /* Flags that are appropriate for non-directories/regular files. 
*/ 2386 2377 #define F2FS_OTHER_FLMASK (F2FS_NODUMP_FL | F2FS_NOATIME_FL) ··· 2950 2935 bool hot, bool set); 2951 2936 struct dentry *f2fs_get_parent(struct dentry *child); 2952 2937 2938 + extern int f2fs_ci_compare(const struct inode *parent, 2939 + const struct qstr *name, 2940 + const struct qstr *entry, 2941 + bool quick); 2942 + 2953 2943 /* 2954 2944 * dir.c 2955 2945 */ ··· 3018 2998 /* 3019 2999 * hash.c 3020 3000 */ 3021 - f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info, 3022 - struct fscrypt_name *fname); 3001 + f2fs_hash_t f2fs_dentry_hash(const struct inode *dir, 3002 + const struct qstr *name_info, struct fscrypt_name *fname); 3023 3003 3024 3004 /* 3025 3005 * node.c ··· 3462 3442 #endif 3463 3443 3464 3444 extern const struct file_operations f2fs_dir_operations; 3445 + #ifdef CONFIG_UNICODE 3446 + extern const struct dentry_operations f2fs_dentry_ops; 3447 + #endif 3465 3448 extern const struct file_operations f2fs_file_operations; 3466 3449 extern const struct inode_operations f2fs_file_inode_operations; 3467 3450 extern const struct address_space_operations f2fs_dblock_aops; ··· 3599 3576 F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND); 3600 3577 F2FS_FEATURE_FUNCS(verity, VERITY); 3601 3578 F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM); 3579 + F2FS_FEATURE_FUNCS(casefold, CASEFOLD); 3602 3580 3603 3581 #ifdef CONFIG_BLK_DEV_ZONED 3604 3582 static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi, ··· 3718 3694 */ 3719 3695 if (f2fs_sb_has_blkzoned(sbi)) 3720 3696 return true; 3721 - if (test_opt(sbi, LFS) && (rw == WRITE) && 3722 - block_unaligned_IO(inode, iocb, iter)) 3723 - return true; 3697 + if (test_opt(sbi, LFS) && (rw == WRITE)) { 3698 + if (block_unaligned_IO(inode, iocb, iter)) 3699 + return true; 3700 + if (F2FS_IO_ALIGNED(sbi)) 3701 + return true; 3702 + } 3724 3703 if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED) && 3725 - !(inode->i_flags & S_SWAPFILE)) 3704 + !IS_SWAPFILE(inode)) 3726 3705 return true; 3727 3706 
3728 3707 return false;
+128 -34
fs/f2fs/file.c
··· 20 20 #include <linux/uio.h> 21 21 #include <linux/uuid.h> 22 22 #include <linux/file.h> 23 + #include <linux/nls.h> 23 24 24 25 #include "f2fs.h" 25 26 #include "node.h" ··· 55 54 56 55 if (unlikely(f2fs_cp_error(sbi))) { 57 56 err = -EIO; 57 + goto err; 58 + } 59 + 60 + if (!f2fs_is_checkpoint_ready(sbi)) { 61 + err = -ENOSPC; 58 62 goto err; 59 63 } 60 64 ··· 825 819 } 826 820 827 821 if (attr->ia_valid & ATTR_SIZE) { 828 - bool to_smaller = (attr->ia_size <= i_size_read(inode)); 822 + loff_t old_size = i_size_read(inode); 823 + 824 + if (attr->ia_size > MAX_INLINE_DATA(inode)) { 825 + /* 826 + * should convert inline inode before i_size_write to 827 + * keep smaller than inline_data size with inline flag. 828 + */ 829 + err = f2fs_convert_inline_inode(inode); 830 + if (err) 831 + return err; 832 + } 829 833 830 834 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 831 835 down_write(&F2FS_I(inode)->i_mmap_sem); 832 836 833 837 truncate_setsize(inode, attr->ia_size); 834 838 835 - if (to_smaller) 839 + if (attr->ia_size <= old_size) 836 840 err = f2fs_truncate(inode); 837 841 /* 838 842 * do not trim all blocks after i_size if target size is ··· 850 834 */ 851 835 up_write(&F2FS_I(inode)->i_mmap_sem); 852 836 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 853 - 854 837 if (err) 855 838 return err; 856 839 857 - if (!to_smaller) { 858 - /* should convert inline inode here */ 859 - if (!f2fs_may_inline_data(inode)) { 860 - err = f2fs_convert_inline_inode(inode); 861 - if (err) 862 - return err; 863 - } 864 - inode->i_mtime = inode->i_ctime = current_time(inode); 865 - } 866 - 867 840 down_write(&F2FS_I(inode)->i_sem); 841 + inode->i_mtime = inode->i_ctime = current_time(inode); 868 842 F2FS_I(inode)->last_disk_size = i_size_read(inode); 869 843 up_write(&F2FS_I(inode)->i_sem); 870 844 } ··· 1047 1041 1048 1042 if (test_opt(sbi, LFS)) { 1049 1043 f2fs_put_dnode(&dn); 1050 - return -ENOTSUPP; 1044 + return -EOPNOTSUPP; 1051 1045 } 1052 1046 1053 1047 /* do not 
invalidate this block address */ ··· 1584 1578 1585 1579 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) 1586 1580 return -EIO; 1581 + if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode))) 1582 + return -ENOSPC; 1587 1583 1588 1584 /* f2fs only support ->fallocate for regular file */ 1589 1585 if (!S_ISREG(inode->i_mode)) ··· 1677 1669 if (IS_NOQUOTA(inode)) 1678 1670 return -EPERM; 1679 1671 1672 + if ((iflags ^ fi->i_flags) & F2FS_CASEFOLD_FL) { 1673 + if (!f2fs_sb_has_casefold(F2FS_I_SB(inode))) 1674 + return -EOPNOTSUPP; 1675 + if (!f2fs_empty_dir(inode)) 1676 + return -ENOTEMPTY; 1677 + } 1678 + 1680 1679 fi->i_flags = iflags | (fi->i_flags & ~mask); 1681 1680 1682 1681 if (fi->i_flags & F2FS_PROJINHERIT_FL) ··· 1718 1703 { F2FS_INDEX_FL, FS_INDEX_FL }, 1719 1704 { F2FS_DIRSYNC_FL, FS_DIRSYNC_FL }, 1720 1705 { F2FS_PROJINHERIT_FL, FS_PROJINHERIT_FL }, 1706 + { F2FS_CASEFOLD_FL, FS_CASEFOLD_FL }, 1721 1707 }; 1722 1708 1723 1709 #define F2FS_GETTABLE_FS_FL ( \ ··· 1733 1717 FS_ENCRYPT_FL | \ 1734 1718 FS_INLINE_DATA_FL | \ 1735 1719 FS_NOCOW_FL | \ 1736 - FS_VERITY_FL) 1720 + FS_VERITY_FL | \ 1721 + FS_CASEFOLD_FL) 1737 1722 1738 1723 #define F2FS_SETTABLE_FS_FL ( \ 1739 1724 FS_SYNC_FL | \ ··· 1743 1726 FS_NODUMP_FL | \ 1744 1727 FS_NOATIME_FL | \ 1745 1728 FS_DIRSYNC_FL | \ 1746 - FS_PROJINHERIT_FL) 1729 + FS_PROJINHERIT_FL | \ 1730 + FS_CASEFOLD_FL) 1747 1731 1748 1732 /* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */ 1749 1733 static inline u32 f2fs_iflags_to_fsflags(u32 iflags) ··· 1843 1825 static int f2fs_ioc_start_atomic_write(struct file *filp) 1844 1826 { 1845 1827 struct inode *inode = file_inode(filp); 1828 + struct f2fs_inode_info *fi = F2FS_I(inode); 1829 + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1846 1830 int ret; 1847 1831 1848 1832 if (!inode_owner_or_capable(inode)) 1849 1833 return -EACCES; 1850 1834 1851 1835 if (!S_ISREG(inode->i_mode)) 1836 + return -EINVAL; 1837 + 1838 + if (filp->f_flags & O_DIRECT) 1852 1839 return 
-EINVAL; 1853 1840 1854 1841 ret = mnt_want_write_file(filp); ··· 1887 1864 goto out; 1888 1865 } 1889 1866 1867 + spin_lock(&sbi->inode_lock[ATOMIC_FILE]); 1868 + if (list_empty(&fi->inmem_ilist)) 1869 + list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]); 1870 + spin_unlock(&sbi->inode_lock[ATOMIC_FILE]); 1871 + 1872 + /* add inode in inmem_list first and set atomic_file */ 1890 1873 set_inode_flag(inode, FI_ATOMIC_FILE); 1891 1874 clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST); 1892 1875 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); ··· 1934 1905 goto err_out; 1935 1906 1936 1907 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true); 1937 - if (!ret) { 1938 - clear_inode_flag(inode, FI_ATOMIC_FILE); 1939 - F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC] = 0; 1940 - stat_dec_atomic_write(inode); 1941 - } 1908 + if (!ret) 1909 + f2fs_drop_inmem_pages(inode); 1942 1910 } else { 1943 1911 ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false); 1944 1912 } ··· 2321 2295 return -EROFS; 2322 2296 2323 2297 end = range.start + range.len; 2324 - if (range.start < MAIN_BLKADDR(sbi) || end >= MAX_BLKADDR(sbi)) { 2298 + if (end < range.start || range.start < MAIN_BLKADDR(sbi) || 2299 + end >= MAX_BLKADDR(sbi)) 2325 2300 return -EINVAL; 2326 - } 2327 2301 2328 2302 ret = mnt_want_write_file(filp); 2329 2303 if (ret) ··· 2447 2421 map.m_lblk += map.m_len; 2448 2422 } 2449 2423 2450 - if (!fragmented) 2424 + if (!fragmented) { 2425 + total = 0; 2451 2426 goto out; 2427 + } 2452 2428 2453 2429 sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi)); 2454 2430 ··· 2480 2452 2481 2453 if (!(map.m_flags & F2FS_MAP_FLAGS)) { 2482 2454 map.m_lblk = next_pgofs; 2483 - continue; 2455 + goto check; 2484 2456 } 2485 2457 2486 2458 set_inode_flag(inode, FI_DO_DEFRAG); ··· 2504 2476 } 2505 2477 2506 2478 map.m_lblk = idx; 2507 - 2508 - if (idx < pg_end && cnt < blk_per_seg) 2479 + check: 2480 + if (map.m_lblk < pg_end && cnt < blk_per_seg) 2509 2481 goto do_map; 2510 2482 2511 2483 
clear_inode_flag(inode, FI_DO_DEFRAG); ··· 3169 3141 return fsverity_ioctl_measure(filp, (void __user *)arg); 3170 3142 } 3171 3143 3144 + static int f2fs_get_volume_name(struct file *filp, unsigned long arg) 3145 + { 3146 + struct inode *inode = file_inode(filp); 3147 + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3148 + char *vbuf; 3149 + int count; 3150 + int err = 0; 3151 + 3152 + vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL); 3153 + if (!vbuf) 3154 + return -ENOMEM; 3155 + 3156 + down_read(&sbi->sb_lock); 3157 + count = utf16s_to_utf8s(sbi->raw_super->volume_name, 3158 + ARRAY_SIZE(sbi->raw_super->volume_name), 3159 + UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME); 3160 + up_read(&sbi->sb_lock); 3161 + 3162 + if (copy_to_user((char __user *)arg, vbuf, 3163 + min(FSLABEL_MAX, count))) 3164 + err = -EFAULT; 3165 + 3166 + kvfree(vbuf); 3167 + return err; 3168 + } 3169 + 3170 + static int f2fs_set_volume_name(struct file *filp, unsigned long arg) 3171 + { 3172 + struct inode *inode = file_inode(filp); 3173 + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 3174 + char *vbuf; 3175 + int err = 0; 3176 + 3177 + if (!capable(CAP_SYS_ADMIN)) 3178 + return -EPERM; 3179 + 3180 + vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX); 3181 + if (IS_ERR(vbuf)) 3182 + return PTR_ERR(vbuf); 3183 + 3184 + err = mnt_want_write_file(filp); 3185 + if (err) 3186 + goto out; 3187 + 3188 + down_write(&sbi->sb_lock); 3189 + 3190 + memset(sbi->raw_super->volume_name, 0, 3191 + sizeof(sbi->raw_super->volume_name)); 3192 + utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN, 3193 + sbi->raw_super->volume_name, 3194 + ARRAY_SIZE(sbi->raw_super->volume_name)); 3195 + 3196 + err = f2fs_commit_super(sbi, false); 3197 + 3198 + up_write(&sbi->sb_lock); 3199 + 3200 + mnt_drop_write_file(filp); 3201 + out: 3202 + kfree(vbuf); 3203 + return err; 3204 + } 3205 + 3172 3206 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 3173 3207 { 3174 3208 if 
(unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp))))) 3175 3209 return -EIO; 3210 + if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp)))) 3211 + return -ENOSPC; 3176 3212 3177 3213 switch (cmd) { 3178 3214 case F2FS_IOC_GETFLAGS: ··· 3305 3213 return f2fs_ioc_enable_verity(filp, arg); 3306 3214 case FS_IOC_MEASURE_VERITY: 3307 3215 return f2fs_ioc_measure_verity(filp, arg); 3216 + case F2FS_IOC_GET_VOLUME_NAME: 3217 + return f2fs_get_volume_name(filp, arg); 3218 + case F2FS_IOC_SET_VOLUME_NAME: 3219 + return f2fs_set_volume_name(filp, arg); 3308 3220 default: 3309 3221 return -ENOTTY; 3310 3222 } ··· 3325 3229 goto out; 3326 3230 } 3327 3231 3328 - if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT)) { 3329 - ret = -EINVAL; 3330 - goto out; 3331 - } 3332 - 3333 - if (!inode_trylock(inode)) { 3334 - if (iocb->ki_flags & IOCB_NOWAIT) { 3232 + if (iocb->ki_flags & IOCB_NOWAIT) { 3233 + if (!inode_trylock(inode)) { 3335 3234 ret = -EAGAIN; 3336 3235 goto out; 3337 3236 } 3237 + } else { 3338 3238 inode_lock(inode); 3339 3239 } 3340 3240 ··· 3426 3334 case F2FS_IOC_RESIZE_FS: 3427 3335 case FS_IOC_ENABLE_VERITY: 3428 3336 case FS_IOC_MEASURE_VERITY: 3337 + case F2FS_IOC_GET_VOLUME_NAME: 3338 + case F2FS_IOC_SET_VOLUME_NAME: 3429 3339 break; 3430 3340 default: 3431 3341 return -ENOIOCTLCMD;
+25 -2
fs/f2fs/gc.c
··· 382 382 nsearched++; 383 383 } 384 384 385 + #ifdef CONFIG_F2FS_CHECK_FS 386 + /* 387 + * skip selecting the invalid segno (that is failed due to block 388 + * validity check failure during GC) to avoid endless GC loop in 389 + * such cases. 390 + */ 391 + if (test_bit(segno, sm->invalid_segmap)) 392 + goto next; 393 + #endif 394 + 385 395 secno = GET_SEC_FROM_SEG(sbi, segno); 386 396 387 397 if (sec_usage_check(sbi, secno)) ··· 637 627 source_blkaddr = datablock_addr(NULL, node_page, ofs_in_node); 638 628 f2fs_put_page(node_page, 1); 639 629 640 - if (source_blkaddr != blkaddr) 630 + if (source_blkaddr != blkaddr) { 631 + #ifdef CONFIG_F2FS_CHECK_FS 632 + unsigned int segno = GET_SEGNO(sbi, blkaddr); 633 + unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); 634 + 635 + if (unlikely(check_valid_map(sbi, segno, offset))) { 636 + if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) { 637 + f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u\n", 638 + blkaddr, source_blkaddr, segno); 639 + f2fs_bug_on(sbi, 1); 640 + } 641 + } 642 + #endif 641 643 return false; 644 + } 642 645 return true; 643 646 } 644 647 ··· 1326 1303 round++; 1327 1304 } 1328 1305 1329 - if (gc_type == FG_GC) 1306 + if (gc_type == FG_GC && seg_freed) 1330 1307 sbi->cur_victim_sec = NULL_SEGNO; 1331 1308 1332 1309 if (sync)
+36 -1
fs/f2fs/hash.c
··· 14 14 #include <linux/f2fs_fs.h> 15 15 #include <linux/cryptohash.h> 16 16 #include <linux/pagemap.h> 17 + #include <linux/unicode.h> 17 18 18 19 #include "f2fs.h" 19 20 ··· 68 67 *buf++ = pad; 69 68 } 70 69 71 - f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info, 70 + static f2fs_hash_t __f2fs_dentry_hash(const struct qstr *name_info, 72 71 struct fscrypt_name *fname) 73 72 { 74 73 __u32 hash; ··· 103 102 hash = buf[0]; 104 103 f2fs_hash = cpu_to_le32(hash & ~F2FS_HASH_COL_BIT); 105 104 return f2fs_hash; 105 + } 106 + 107 + f2fs_hash_t f2fs_dentry_hash(const struct inode *dir, 108 + const struct qstr *name_info, struct fscrypt_name *fname) 109 + { 110 + #ifdef CONFIG_UNICODE 111 + struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb); 112 + const struct unicode_map *um = sbi->s_encoding; 113 + int r, dlen; 114 + unsigned char *buff; 115 + struct qstr folded; 116 + 117 + if (!name_info->len || !IS_CASEFOLDED(dir)) 118 + goto opaque_seq; 119 + 120 + buff = f2fs_kzalloc(sbi, sizeof(char) * PATH_MAX, GFP_KERNEL); 121 + if (!buff) 122 + return -ENOMEM; 123 + 124 + dlen = utf8_casefold(um, name_info, buff, PATH_MAX); 125 + if (dlen < 0) { 126 + kvfree(buff); 127 + goto opaque_seq; 128 + } 129 + folded.name = buff; 130 + folded.len = dlen; 131 + r = __f2fs_dentry_hash(&folded, fname); 132 + 133 + kvfree(buff); 134 + return r; 135 + 136 + opaque_seq: 137 + #endif 138 + return __f2fs_dentry_hash(name_info, fname); 106 139 }
+15 -3
fs/f2fs/inline.c
··· 131 131 132 132 err = f2fs_get_node_info(fio.sbi, dn->nid, &ni); 133 133 if (err) { 134 + f2fs_truncate_data_blocks_range(dn, 1); 134 135 f2fs_put_dnode(dn); 135 136 return err; 136 137 } ··· 321 320 return NULL; 322 321 } 323 322 324 - namehash = f2fs_dentry_hash(&name, fname); 323 + namehash = f2fs_dentry_hash(dir, &name, fname); 325 324 326 325 inline_dentry = inline_data_addr(dir, ipage); 327 326 ··· 581 580 582 581 f2fs_wait_on_page_writeback(ipage, NODE, true, true); 583 582 584 - name_hash = f2fs_dentry_hash(new_name, NULL); 583 + name_hash = f2fs_dentry_hash(dir, new_name, NULL); 585 584 f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos); 586 585 587 586 set_page_dirty(ipage); ··· 589 588 /* we don't need to mark_inode_dirty now */ 590 589 if (inode) { 591 590 f2fs_i_pino_write(inode, dir->i_ino); 591 + 592 + /* synchronize inode page's data from inode cache */ 593 + if (is_inode_flag_set(inode, FI_NEW_INODE)) 594 + f2fs_update_inode(inode, page); 595 + 592 596 f2fs_put_page(page, 1); 593 597 } 594 598 ··· 710 704 if (IS_ERR(ipage)) 711 705 return PTR_ERR(ipage); 712 706 713 - if (!f2fs_has_inline_data(inode)) { 707 + if ((S_ISREG(inode->i_mode) || S_ISLNK(inode->i_mode)) && 708 + !f2fs_has_inline_data(inode)) { 709 + err = -EAGAIN; 710 + goto out; 711 + } 712 + 713 + if (S_ISDIR(inode->i_mode) && !f2fs_has_inline_dentry(inode)) { 714 714 err = -EAGAIN; 715 715 goto out; 716 716 }
+7 -4
fs/f2fs/inode.c
··· 48 48 new_fl |= S_ENCRYPTED; 49 49 if (file_is_verity(inode)) 50 50 new_fl |= S_VERITY; 51 + if (flags & F2FS_CASEFOLD_FL) 52 + new_fl |= S_CASEFOLD; 51 53 inode_set_flags(inode, new_fl, 52 54 S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC| 53 - S_ENCRYPTED|S_VERITY); 55 + S_ENCRYPTED|S_VERITY|S_CASEFOLD); 54 56 } 55 57 56 58 static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri) ··· 618 616 if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) 619 617 return 0; 620 618 621 - if (f2fs_is_checkpoint_ready(sbi)) 619 + if (!f2fs_is_checkpoint_ready(sbi)) 622 620 return -ENOSPC; 623 621 624 622 /* ··· 697 695 698 696 if (err) { 699 697 f2fs_update_inode_page(inode); 700 - set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); 698 + if (dquot_initialize_needed(inode)) 699 + set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); 701 700 } 702 701 sb_end_intwrite(inode->i_sb); 703 702 no_delete: ··· 708 705 stat_dec_inline_dir(inode); 709 706 stat_dec_inline_inode(inode); 710 707 711 - if (likely(!is_set_ckpt_flags(sbi, CP_ERROR_FLAG) && 708 + if (likely(!f2fs_cp_error(sbi) && 712 709 !is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 713 710 f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE)); 714 711 else
+36 -18
fs/f2fs/namei.c
··· 272 272 273 273 if (unlikely(f2fs_cp_error(sbi))) 274 274 return -EIO; 275 - err = f2fs_is_checkpoint_ready(sbi); 276 - if (err) 277 - return err; 275 + if (!f2fs_is_checkpoint_ready(sbi)) 276 + return -ENOSPC; 278 277 279 278 err = dquot_initialize(dir); 280 279 if (err) ··· 320 321 321 322 if (unlikely(f2fs_cp_error(sbi))) 322 323 return -EIO; 323 - err = f2fs_is_checkpoint_ready(sbi); 324 - if (err) 325 - return err; 324 + if (!f2fs_is_checkpoint_ready(sbi)) 325 + return -ENOSPC; 326 326 327 327 err = fscrypt_prepare_link(old_dentry, dir, dentry); 328 328 if (err) ··· 487 489 goto out_iput; 488 490 } 489 491 out_splice: 492 + #ifdef CONFIG_UNICODE 493 + if (!inode && IS_CASEFOLDED(dir)) { 494 + /* Eventually we want to call d_add_ci(dentry, NULL) 495 + * for negative dentries in the encoding case as 496 + * well. For now, prevent the negative dentry 497 + * from being cached. 498 + */ 499 + trace_f2fs_lookup_end(dir, dentry, ino, err); 500 + return NULL; 501 + } 502 + #endif 490 503 new = d_splice_alias(inode, dentry); 491 504 err = PTR_ERR_OR_ZERO(new); 492 505 trace_f2fs_lookup_end(dir, dentry, ino, err); ··· 546 537 goto fail; 547 538 } 548 539 f2fs_delete_entry(de, page, dir, inode); 540 + #ifdef CONFIG_UNICODE 541 + /* VFS negative dentries are incompatible with Encoding and 542 + * Case-insensitiveness. Eventually we'll want avoid 543 + * invalidating the dentries here, alongside with returning the 544 + * negative dentries at f2fs_lookup(), when it is better 545 + * supported by the VFS for the CI case. 
546 + */ 547 + if (IS_CASEFOLDED(dir)) 548 + d_invalidate(dentry); 549 + #endif 549 550 f2fs_unlock_op(sbi); 550 551 551 552 if (IS_DIRSYNC(dir)) ··· 590 571 591 572 if (unlikely(f2fs_cp_error(sbi))) 592 573 return -EIO; 593 - err = f2fs_is_checkpoint_ready(sbi); 594 - if (err) 595 - return err; 574 + if (!f2fs_is_checkpoint_ready(sbi)) 575 + return -ENOSPC; 596 576 597 577 err = fscrypt_prepare_symlink(dir, symname, len, dir->i_sb->s_blocksize, 598 578 &disk_link); ··· 721 703 722 704 if (unlikely(f2fs_cp_error(sbi))) 723 705 return -EIO; 724 - err = f2fs_is_checkpoint_ready(sbi); 725 - if (err) 726 - return err; 706 + if (!f2fs_is_checkpoint_ready(sbi)) 707 + return -ENOSPC; 727 708 728 709 err = dquot_initialize(dir); 729 710 if (err) ··· 821 804 822 805 if (unlikely(f2fs_cp_error(sbi))) 823 806 return -EIO; 807 + if (!f2fs_is_checkpoint_ready(sbi)) 808 + return -ENOSPC; 824 809 825 810 if (IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi)) { 826 811 int err = fscrypt_get_encryption_info(dir); ··· 859 840 860 841 if (unlikely(f2fs_cp_error(sbi))) 861 842 return -EIO; 862 - err = f2fs_is_checkpoint_ready(sbi); 863 - if (err) 864 - return err; 843 + if (!f2fs_is_checkpoint_ready(sbi)) 844 + return -ENOSPC; 865 845 866 846 if (is_inode_flag_set(new_dir, FI_PROJ_INHERIT) && 867 847 (!projid_eq(F2FS_I(new_dir)->i_projid, ··· 1053 1035 1054 1036 if (unlikely(f2fs_cp_error(sbi))) 1055 1037 return -EIO; 1056 - err = f2fs_is_checkpoint_ready(sbi); 1057 - if (err) 1058 - return err; 1038 + if (!f2fs_is_checkpoint_ready(sbi)) 1039 + return -ENOSPC; 1059 1040 1060 1041 if ((is_inode_flag_set(new_dir, FI_PROJ_INHERIT) && 1061 1042 !projid_eq(F2FS_I(new_dir)->i_projid, ··· 1267 1250 #ifdef CONFIG_F2FS_FS_XATTR 1268 1251 .listxattr = f2fs_listxattr, 1269 1252 #endif 1253 + .fiemap = f2fs_fiemap, 1270 1254 }; 1271 1255 1272 1256 const struct inode_operations f2fs_symlink_inode_operations = {
+54 -3
fs/f2fs/node.c
··· 1524 1524 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) 1525 1525 goto redirty_out; 1526 1526 1527 - if (wbc->sync_mode == WB_SYNC_NONE && 1527 + if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) && 1528 + wbc->sync_mode == WB_SYNC_NONE && 1528 1529 IS_DNODE(page) && is_cold_node(page)) 1529 1530 goto redirty_out; 1530 1531 ··· 1763 1762 return ret ? -EIO: 0; 1764 1763 } 1765 1764 1765 + static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data) 1766 + { 1767 + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1768 + bool clean; 1769 + 1770 + if (inode->i_ino != ino) 1771 + return 0; 1772 + 1773 + if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) 1774 + return 0; 1775 + 1776 + spin_lock(&sbi->inode_lock[DIRTY_META]); 1777 + clean = list_empty(&F2FS_I(inode)->gdirty_list); 1778 + spin_unlock(&sbi->inode_lock[DIRTY_META]); 1779 + 1780 + if (clean) 1781 + return 0; 1782 + 1783 + inode = igrab(inode); 1784 + if (!inode) 1785 + return 0; 1786 + return 1; 1787 + } 1788 + 1789 + static bool flush_dirty_inode(struct page *page) 1790 + { 1791 + struct f2fs_sb_info *sbi = F2FS_P_SB(page); 1792 + struct inode *inode; 1793 + nid_t ino = ino_of_node(page); 1794 + 1795 + inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL); 1796 + if (!inode) 1797 + return false; 1798 + 1799 + f2fs_update_inode(inode, page); 1800 + unlock_page(page); 1801 + 1802 + iput(inode); 1803 + return true; 1804 + } 1805 + 1766 1806 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi, 1767 1807 struct writeback_control *wbc, 1768 1808 bool do_balance, enum iostat_type io_type) ··· 1827 1785 for (i = 0; i < nr_pages; i++) { 1828 1786 struct page *page = pvec.pages[i]; 1829 1787 bool submitted = false; 1788 + bool may_dirty = true; 1830 1789 1831 1790 /* give a priority to WB_SYNC threads */ 1832 1791 if (atomic_read(&sbi->wb_sync_req[NODE]) && ··· 1875 1832 goto lock_node; 1876 1833 } 1877 1834 1835 + /* flush dirty inode */ 1836 + if (IS_INODE(page) && may_dirty) { 1837 + may_dirty = 
false; 1838 + if (flush_dirty_inode(page)) 1839 + goto lock_node; 1840 + } 1841 + 1878 1842 f2fs_wait_on_page_writeback(page, NODE, true, true); 1879 1843 1880 1844 if (!clear_page_dirty_for_io(page)) ··· 1910 1860 } 1911 1861 1912 1862 if (step < 2) { 1913 - if (wbc->sync_mode == WB_SYNC_NONE && step == 1) 1863 + if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) && 1864 + wbc->sync_mode == WB_SYNC_NONE && step == 1) 1914 1865 goto out; 1915 1866 step++; 1916 1867 goto next_step; ··· 3015 2964 3016 2965 /* not used nids: 0, node, meta, (and root counted as valid node) */ 3017 2966 nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count - 3018 - sbi->nquota_files - F2FS_RESERVED_NODE_NUM; 2967 + F2FS_RESERVED_NODE_NUM; 3019 2968 nm_i->nid_cnt[FREE_NID] = 0; 3020 2969 nm_i->nid_cnt[PREALLOC_NID] = 0; 3021 2970 nm_i->nat_cnt = 0;
+66 -69
fs/f2fs/segment.c
··· 185 185 186 186 void f2fs_register_inmem_page(struct inode *inode, struct page *page) 187 187 { 188 - struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 189 - struct f2fs_inode_info *fi = F2FS_I(inode); 190 188 struct inmem_pages *new; 191 189 192 190 f2fs_trace_pid(page); ··· 198 200 INIT_LIST_HEAD(&new->list); 199 201 200 202 /* increase reference count with clean state */ 201 - mutex_lock(&fi->inmem_lock); 202 203 get_page(page); 203 - list_add_tail(&new->list, &fi->inmem_pages); 204 - spin_lock(&sbi->inode_lock[ATOMIC_FILE]); 205 - if (list_empty(&fi->inmem_ilist)) 206 - list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]); 207 - spin_unlock(&sbi->inode_lock[ATOMIC_FILE]); 204 + mutex_lock(&F2FS_I(inode)->inmem_lock); 205 + list_add_tail(&new->list, &F2FS_I(inode)->inmem_pages); 208 206 inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES); 209 - mutex_unlock(&fi->inmem_lock); 207 + mutex_unlock(&F2FS_I(inode)->inmem_lock); 210 208 211 209 trace_f2fs_register_inmem_page(page, INMEM); 212 210 } ··· 324 330 mutex_lock(&fi->inmem_lock); 325 331 __revoke_inmem_pages(inode, &fi->inmem_pages, 326 332 true, false, true); 327 - 328 - if (list_empty(&fi->inmem_pages)) { 329 - spin_lock(&sbi->inode_lock[ATOMIC_FILE]); 330 - if (!list_empty(&fi->inmem_ilist)) 331 - list_del_init(&fi->inmem_ilist); 332 - spin_unlock(&sbi->inode_lock[ATOMIC_FILE]); 333 - } 334 333 mutex_unlock(&fi->inmem_lock); 335 334 } 336 335 337 336 clear_inode_flag(inode, FI_ATOMIC_FILE); 338 337 fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0; 339 338 stat_dec_atomic_write(inode); 339 + 340 + spin_lock(&sbi->inode_lock[ATOMIC_FILE]); 341 + if (!list_empty(&fi->inmem_ilist)) 342 + list_del_init(&fi->inmem_ilist); 343 + spin_unlock(&sbi->inode_lock[ATOMIC_FILE]); 340 344 } 341 345 342 346 void f2fs_drop_inmem_page(struct inode *inode, struct page *page) ··· 463 471 464 472 mutex_lock(&fi->inmem_lock); 465 473 err = __f2fs_commit_inmem_pages(inode); 466 - 467 - spin_lock(&sbi->inode_lock[ATOMIC_FILE]); 468 
- if (!list_empty(&fi->inmem_ilist)) 469 - list_del_init(&fi->inmem_ilist); 470 - spin_unlock(&sbi->inode_lock[ATOMIC_FILE]); 471 474 mutex_unlock(&fi->inmem_lock); 472 475 473 476 clear_inode_flag(inode, FI_ATOMIC_COMMIT); ··· 488 501 if (need && excess_cached_nats(sbi)) 489 502 f2fs_balance_fs_bg(sbi); 490 503 491 - if (f2fs_is_checkpoint_ready(sbi)) 504 + if (!f2fs_is_checkpoint_ready(sbi)) 492 505 return; 493 506 494 507 /* ··· 804 817 if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t])) 805 818 dirty_i->nr_dirty[t]--; 806 819 807 - if (get_valid_blocks(sbi, segno, true) == 0) 820 + if (get_valid_blocks(sbi, segno, true) == 0) { 808 821 clear_bit(GET_SEC_FROM_SEG(sbi, segno), 809 822 dirty_i->victim_secmap); 823 + #ifdef CONFIG_F2FS_CHECK_FS 824 + clear_bit(segno, SIT_I(sbi)->invalid_segmap); 825 + #endif 826 + } 810 827 } 811 828 } 812 829 ··· 2075 2084 2076 2085 f2fs_stop_discard_thread(sbi); 2077 2086 2087 + /* 2088 + * Recovery can cache discard commands, so in error path of 2089 + * fill_super(), it needs to give a chance to handle them. 2090 + */ 2091 + if (unlikely(atomic_read(&dcc->discard_cmd_cnt))) 2092 + f2fs_issue_discard_timeout(sbi); 2093 + 2078 2094 kvfree(dcc); 2079 2095 SM_I(sbi)->dcc_info = NULL; 2080 2096 } ··· 2154 2156 if (!f2fs_test_and_set_bit(offset, se->discard_map)) 2155 2157 sbi->discard_blks--; 2156 2158 2157 - /* don't overwrite by SSR to keep node chain */ 2158 - if (IS_NODESEG(se->type) && 2159 - !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) { 2159 + /* 2160 + * SSR should never reuse block which is checkpointed 2161 + * or newly invalidated. 
2162 + */ 2163 + if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) { 2160 2164 if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map)) 2161 2165 se->ckpt_valid_blocks++; 2162 2166 } ··· 3116 3116 f2fs_inode_chksum_set(sbi, page); 3117 3117 } 3118 3118 3119 + if (F2FS_IO_ALIGNED(sbi)) 3120 + fio->retry = false; 3121 + 3119 3122 if (add_list) { 3120 3123 struct f2fs_bio_info *io; 3121 3124 3122 3125 INIT_LIST_HEAD(&fio->list); 3123 3126 fio->in_list = true; 3124 - fio->retry = false; 3125 3127 io = sbi->write_io[fio->type] + fio->temp; 3126 3128 spin_lock(&io->io_lock); 3127 3129 list_add_tail(&fio->list, &io->io_list); ··· 3449 3447 seg_i = CURSEG_I(sbi, i); 3450 3448 segno = le32_to_cpu(ckpt->cur_data_segno[i]); 3451 3449 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]); 3452 - if (blk_off > ENTRIES_IN_SUM) { 3453 - f2fs_bug_on(sbi, 1); 3454 - f2fs_put_page(page, 1); 3455 - return -EFAULT; 3456 - } 3457 3450 seg_i->next_segno = segno; 3458 3451 reset_curseg(sbi, i, 0); 3459 3452 seg_i->alloc_type = ckpt->alloc_type[i]; ··· 3938 3941 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); 3939 3942 struct sit_info *sit_i; 3940 3943 unsigned int sit_segs, start; 3941 - char *src_bitmap; 3942 - unsigned int bitmap_size; 3944 + char *src_bitmap, *bitmap; 3945 + unsigned int bitmap_size, main_bitmap_size, sit_bitmap_size; 3943 3946 3944 3947 /* allocate memory for SIT information */ 3945 3948 sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL); ··· 3955 3958 if (!sit_i->sentries) 3956 3959 return -ENOMEM; 3957 3960 3958 - bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); 3959 - sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, bitmap_size, 3961 + main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); 3962 + sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size, 3960 3963 GFP_KERNEL); 3961 3964 if (!sit_i->dirty_sentries_bitmap) 3962 3965 return -ENOMEM; 3963 3966 3967 + #ifdef CONFIG_F2FS_CHECK_FS 3968 + bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE 
* 4; 3969 + #else 3970 + bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * 3; 3971 + #endif 3972 + sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL); 3973 + if (!sit_i->bitmap) 3974 + return -ENOMEM; 3975 + 3976 + bitmap = sit_i->bitmap; 3977 + 3964 3978 for (start = 0; start < MAIN_SEGS(sbi); start++) { 3965 - sit_i->sentries[start].cur_valid_map 3966 - = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); 3967 - sit_i->sentries[start].ckpt_valid_map 3968 - = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); 3969 - if (!sit_i->sentries[start].cur_valid_map || 3970 - !sit_i->sentries[start].ckpt_valid_map) 3971 - return -ENOMEM; 3979 + sit_i->sentries[start].cur_valid_map = bitmap; 3980 + bitmap += SIT_VBLOCK_MAP_SIZE; 3981 + 3982 + sit_i->sentries[start].ckpt_valid_map = bitmap; 3983 + bitmap += SIT_VBLOCK_MAP_SIZE; 3972 3984 3973 3985 #ifdef CONFIG_F2FS_CHECK_FS 3974 - sit_i->sentries[start].cur_valid_map_mir 3975 - = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); 3976 - if (!sit_i->sentries[start].cur_valid_map_mir) 3977 - return -ENOMEM; 3986 + sit_i->sentries[start].cur_valid_map_mir = bitmap; 3987 + bitmap += SIT_VBLOCK_MAP_SIZE; 3978 3988 #endif 3979 3989 3980 - sit_i->sentries[start].discard_map 3981 - = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, 3982 - GFP_KERNEL); 3983 - if (!sit_i->sentries[start].discard_map) 3984 - return -ENOMEM; 3990 + sit_i->sentries[start].discard_map = bitmap; 3991 + bitmap += SIT_VBLOCK_MAP_SIZE; 3985 3992 } 3986 3993 3987 3994 sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); ··· 4005 4004 sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1; 4006 4005 4007 4006 /* setup SIT bitmap from ckeckpoint pack */ 4008 - bitmap_size = __bitmap_size(sbi, SIT_BITMAP); 4007 + sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP); 4009 4008 src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP); 4010 4009 4011 - sit_i->sit_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL); 4010 + sit_i->sit_bitmap = 
kmemdup(src_bitmap, sit_bitmap_size, GFP_KERNEL); 4012 4011 if (!sit_i->sit_bitmap) 4013 4012 return -ENOMEM; 4014 4013 4015 4014 #ifdef CONFIG_F2FS_CHECK_FS 4016 - sit_i->sit_bitmap_mir = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL); 4015 + sit_i->sit_bitmap_mir = kmemdup(src_bitmap, 4016 + sit_bitmap_size, GFP_KERNEL); 4017 4017 if (!sit_i->sit_bitmap_mir) 4018 + return -ENOMEM; 4019 + 4020 + sit_i->invalid_segmap = f2fs_kvzalloc(sbi, 4021 + main_bitmap_size, GFP_KERNEL); 4022 + if (!sit_i->invalid_segmap) 4018 4023 return -ENOMEM; 4019 4024 #endif 4020 4025 ··· 4030 4023 sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr); 4031 4024 sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg; 4032 4025 sit_i->written_valid_blocks = 0; 4033 - sit_i->bitmap_size = bitmap_size; 4026 + sit_i->bitmap_size = sit_bitmap_size; 4034 4027 sit_i->dirty_sentries = 0; 4035 4028 sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK; 4036 4029 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time); ··· 4168 4161 if (start >= MAIN_SEGS(sbi)) { 4169 4162 f2fs_err(sbi, "Wrong journal entry on segno %u", 4170 4163 start); 4171 - set_sbi_flag(sbi, SBI_NEED_FSCK); 4172 4164 err = -EFSCORRUPTED; 4173 4165 break; 4174 4166 } ··· 4207 4201 if (!err && total_node_blocks != valid_node_count(sbi)) { 4208 4202 f2fs_err(sbi, "SIT is corrupted node# %u vs %u", 4209 4203 total_node_blocks, valid_node_count(sbi)); 4210 - set_sbi_flag(sbi, SBI_NEED_FSCK); 4211 4204 err = -EFSCORRUPTED; 4212 4205 } 4213 4206 ··· 4497 4492 static void destroy_sit_info(struct f2fs_sb_info *sbi) 4498 4493 { 4499 4494 struct sit_info *sit_i = SIT_I(sbi); 4500 - unsigned int start; 4501 4495 4502 4496 if (!sit_i) 4503 4497 return; 4504 4498 4505 - if (sit_i->sentries) { 4506 - for (start = 0; start < MAIN_SEGS(sbi); start++) { 4507 - kvfree(sit_i->sentries[start].cur_valid_map); 4508 - #ifdef CONFIG_F2FS_CHECK_FS 4509 - kvfree(sit_i->sentries[start].cur_valid_map_mir); 4510 - #endif 4511 - 
kvfree(sit_i->sentries[start].ckpt_valid_map); 4512 - kvfree(sit_i->sentries[start].discard_map); 4513 - } 4514 - } 4499 + if (sit_i->sentries) 4500 + kvfree(sit_i->bitmap); 4515 4501 kvfree(sit_i->tmp_map); 4516 4502 4517 4503 kvfree(sit_i->sentries); ··· 4513 4517 kvfree(sit_i->sit_bitmap); 4514 4518 #ifdef CONFIG_F2FS_CHECK_FS 4515 4519 kvfree(sit_i->sit_bitmap_mir); 4520 + kvfree(sit_i->invalid_segmap); 4516 4521 #endif 4517 4522 kvfree(sit_i); 4518 4523 }
+8 -4
fs/f2fs/segment.h
··· 226 226 block_t sit_base_addr; /* start block address of SIT area */ 227 227 block_t sit_blocks; /* # of blocks used by SIT area */ 228 228 block_t written_valid_blocks; /* # of valid blocks in main area */ 229 + char *bitmap; /* all bitmaps pointer */ 229 230 char *sit_bitmap; /* SIT bitmap pointer */ 230 231 #ifdef CONFIG_F2FS_CHECK_FS 231 232 char *sit_bitmap_mir; /* SIT bitmap mirror */ 233 + 234 + /* bitmap of segments to be ignored by GC in case of errors */ 235 + unsigned long *invalid_segmap; 232 236 #endif 233 237 unsigned int bitmap_size; /* SIT bitmap size */ 234 238 ··· 586 582 reserved_sections(sbi) + needed); 587 583 } 588 584 589 - static inline int f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi) 585 + static inline bool f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi) 590 586 { 591 587 if (likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 592 - return 0; 588 + return true; 593 589 if (likely(!has_not_enough_free_secs(sbi, 0, 0))) 594 - return 0; 595 - return -ENOSPC; 590 + return true; 591 + return false; 596 592 } 597 593 598 594 static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
+147 -9
fs/f2fs/super.c
··· 23 23 #include <linux/f2fs_fs.h> 24 24 #include <linux/sysfs.h> 25 25 #include <linux/quota.h> 26 + #include <linux/unicode.h> 26 27 27 28 #include "f2fs.h" 28 29 #include "node.h" ··· 222 221 223 222 va_end(args); 224 223 } 224 + 225 + #ifdef CONFIG_UNICODE 226 + static const struct f2fs_sb_encodings { 227 + __u16 magic; 228 + char *name; 229 + char *version; 230 + } f2fs_sb_encoding_map[] = { 231 + {F2FS_ENC_UTF8_12_1, "utf8", "12.1.0"}, 232 + }; 233 + 234 + static int f2fs_sb_read_encoding(const struct f2fs_super_block *sb, 235 + const struct f2fs_sb_encodings **encoding, 236 + __u16 *flags) 237 + { 238 + __u16 magic = le16_to_cpu(sb->s_encoding); 239 + int i; 240 + 241 + for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++) 242 + if (magic == f2fs_sb_encoding_map[i].magic) 243 + break; 244 + 245 + if (i >= ARRAY_SIZE(f2fs_sb_encoding_map)) 246 + return -EINVAL; 247 + 248 + *encoding = &f2fs_sb_encoding_map[i]; 249 + *flags = le16_to_cpu(sb->s_encoding_flags); 250 + 251 + return 0; 252 + } 253 + #endif 225 254 226 255 static inline void limit_reserve_root(struct f2fs_sb_info *sbi) 227 256 { ··· 829 798 return -EINVAL; 830 799 } 831 800 #endif 801 + #ifndef CONFIG_UNICODE 802 + if (f2fs_sb_has_casefold(sbi)) { 803 + f2fs_err(sbi, 804 + "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE"); 805 + return -EINVAL; 806 + } 807 + #endif 832 808 833 809 if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) { 834 810 f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO", ··· 911 873 912 874 static int f2fs_drop_inode(struct inode *inode) 913 875 { 876 + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 914 877 int ret; 878 + 879 + /* 880 + * during filesystem shutdown, if checkpoint is disabled, 881 + * drop useless meta/node dirty pages. 
882 + */ 883 + if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { 884 + if (inode->i_ino == F2FS_NODE_INO(sbi) || 885 + inode->i_ino == F2FS_META_INO(sbi)) { 886 + trace_f2fs_drop_inode(inode, 1); 887 + return 1; 888 + } 889 + } 890 + 915 891 /* 916 892 * This is to avoid a deadlock condition like below. 917 893 * writeback_single_inode(inode) ··· 1143 1091 destroy_percpu_info(sbi); 1144 1092 for (i = 0; i < NR_PAGE_TYPE; i++) 1145 1093 kvfree(sbi->write_io[i]); 1094 + #ifdef CONFIG_UNICODE 1095 + utf8_unload(sbi->s_encoding); 1096 + #endif 1146 1097 kvfree(sbi); 1147 1098 } 1148 1099 ··· 1271 1216 else 1272 1217 buf->f_bavail = 0; 1273 1218 1274 - avail_node_count = sbi->total_node_count - sbi->nquota_files - 1275 - F2FS_RESERVED_NODE_NUM; 1219 + avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM; 1276 1220 1277 1221 if (avail_node_count > user_block_count) { 1278 1222 buf->f_files = user_block_count; ··· 1578 1524 bool need_stop_gc = false; 1579 1525 bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE); 1580 1526 bool disable_checkpoint = test_opt(sbi, DISABLE_CHECKPOINT); 1527 + bool no_io_align = !F2FS_IO_ALIGNED(sbi); 1581 1528 bool checkpoint_changed; 1582 1529 #ifdef CONFIG_QUOTA 1583 1530 int i, j; ··· 1655 1600 if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) { 1656 1601 err = -EINVAL; 1657 1602 f2fs_warn(sbi, "switch extent_cache option is not allowed"); 1603 + goto restore_opts; 1604 + } 1605 + 1606 + if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) { 1607 + err = -EINVAL; 1608 + f2fs_warn(sbi, "switch io_bits option is not allowed"); 1658 1609 goto restore_opts; 1659 1610 } 1660 1611 ··· 2042 1981 struct inode *inode; 2043 1982 int err; 2044 1983 1984 + /* if quota sysfile exists, deny enabling quota with specific file */ 1985 + if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) { 1986 + f2fs_err(F2FS_SB(sb), "quota sysfile already exists"); 1987 + return -EBUSY; 1988 + } 1989 + 2045 1990 err = f2fs_quota_sync(sb, type); 2046 1991 if (err) 2047 
1992 return err; ··· 2067 2000 return 0; 2068 2001 } 2069 2002 2070 - static int f2fs_quota_off(struct super_block *sb, int type) 2003 + static int __f2fs_quota_off(struct super_block *sb, int type) 2071 2004 { 2072 2005 struct inode *inode = sb_dqopt(sb)->files[type]; 2073 2006 int err; ··· 2093 2026 return err; 2094 2027 } 2095 2028 2029 + static int f2fs_quota_off(struct super_block *sb, int type) 2030 + { 2031 + struct f2fs_sb_info *sbi = F2FS_SB(sb); 2032 + int err; 2033 + 2034 + err = __f2fs_quota_off(sb, type); 2035 + 2036 + /* 2037 + * quotactl can shutdown journalled quota, result in inconsistence 2038 + * between quota record and fs data by following updates, tag the 2039 + * flag to let fsck be aware of it. 2040 + */ 2041 + if (is_journalled_quota(sbi)) 2042 + set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); 2043 + return err; 2044 + } 2045 + 2096 2046 void f2fs_quota_off_umount(struct super_block *sb) 2097 2047 { 2098 2048 int type; 2099 2049 int err; 2100 2050 2101 2051 for (type = 0; type < MAXQUOTAS; type++) { 2102 - err = f2fs_quota_off(sb, type); 2052 + err = __f2fs_quota_off(sb, type); 2103 2053 if (err) { 2104 2054 int ret = dquot_quota_off(sb, type); 2105 2055 ··· 2701 2617 } 2702 2618 2703 2619 valid_node_count = le32_to_cpu(ckpt->valid_node_count); 2704 - avail_node_count = sbi->total_node_count - sbi->nquota_files - 2705 - F2FS_RESERVED_NODE_NUM; 2620 + avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM; 2706 2621 if (valid_node_count > avail_node_count) { 2707 2622 f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u", 2708 2623 valid_node_count, avail_node_count); ··· 2740 2657 } 2741 2658 } 2742 2659 for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) { 2743 - for (j = i; j < NR_CURSEG_DATA_TYPE; j++) { 2660 + for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) { 2744 2661 if (le32_to_cpu(ckpt->cur_node_segno[i]) == 2745 2662 le32_to_cpu(ckpt->cur_data_segno[j])) { 2746 - f2fs_err(sbi, "Data segment (%u) and Data segment (%u) has the same 
segno: %u", 2663 + f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u", 2747 2664 i, j, 2748 2665 le32_to_cpu(ckpt->cur_node_segno[i])); 2749 2666 return 1; ··· 3116 3033 return 0; 3117 3034 } 3118 3035 3036 + static int f2fs_setup_casefold(struct f2fs_sb_info *sbi) 3037 + { 3038 + #ifdef CONFIG_UNICODE 3039 + if (f2fs_sb_has_casefold(sbi) && !sbi->s_encoding) { 3040 + const struct f2fs_sb_encodings *encoding_info; 3041 + struct unicode_map *encoding; 3042 + __u16 encoding_flags; 3043 + 3044 + if (f2fs_sb_has_encrypt(sbi)) { 3045 + f2fs_err(sbi, 3046 + "Can't mount with encoding and encryption"); 3047 + return -EINVAL; 3048 + } 3049 + 3050 + if (f2fs_sb_read_encoding(sbi->raw_super, &encoding_info, 3051 + &encoding_flags)) { 3052 + f2fs_err(sbi, 3053 + "Encoding requested by superblock is unknown"); 3054 + return -EINVAL; 3055 + } 3056 + 3057 + encoding = utf8_load(encoding_info->version); 3058 + if (IS_ERR(encoding)) { 3059 + f2fs_err(sbi, 3060 + "can't mount with superblock charset: %s-%s " 3061 + "not supported by the kernel. 
flags: 0x%x.", 3062 + encoding_info->name, encoding_info->version, 3063 + encoding_flags); 3064 + return PTR_ERR(encoding); 3065 + } 3066 + f2fs_info(sbi, "Using encoding defined by superblock: " 3067 + "%s-%s with flags 0x%hx", encoding_info->name, 3068 + encoding_info->version?:"\b", encoding_flags); 3069 + 3070 + sbi->s_encoding = encoding; 3071 + sbi->s_encoding_flags = encoding_flags; 3072 + sbi->sb->s_d_op = &f2fs_dentry_ops; 3073 + } 3074 + #else 3075 + if (f2fs_sb_has_casefold(sbi)) { 3076 + f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE"); 3077 + return -EINVAL; 3078 + } 3079 + #endif 3080 + return 0; 3081 + } 3082 + 3119 3083 static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi) 3120 3084 { 3121 3085 struct f2fs_sm_info *sm_i = SM_I(sbi); ··· 3259 3129 le32_to_cpu(raw_super->log_blocksize); 3260 3130 sb->s_max_links = F2FS_LINK_MAX; 3261 3131 3132 + err = f2fs_setup_casefold(sbi); 3133 + if (err) 3134 + goto free_options; 3135 + 3262 3136 #ifdef CONFIG_QUOTA 3263 3137 sb->dq_op = &f2fs_quota_operations; 3264 3138 sb->s_qcop = &f2fs_quotactl_ops; ··· 3341 3207 if (err) 3342 3208 goto free_bio_info; 3343 3209 3344 - if (F2FS_IO_SIZE(sbi) > 1) { 3210 + if (F2FS_IO_ALIGNED(sbi)) { 3345 3211 sbi->write_io_dummy = 3346 3212 mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0); 3347 3213 if (!sbi->write_io_dummy) { ··· 3616 3482 free_bio_info: 3617 3483 for (i = 0; i < NR_PAGE_TYPE; i++) 3618 3484 kvfree(sbi->write_io[i]); 3485 + 3486 + #ifdef CONFIG_UNICODE 3487 + utf8_unload(sbi->s_encoding); 3488 + #endif 3619 3489 free_options: 3620 3490 #ifdef CONFIG_QUOTA 3621 3491 for (i = 0; i < MAXQUOTAS; i++)
+23
fs/f2fs/sysfs.c
··· 10 10 #include <linux/proc_fs.h> 11 11 #include <linux/f2fs_fs.h> 12 12 #include <linux/seq_file.h> 13 + #include <linux/unicode.h> 13 14 14 15 #include "f2fs.h" 15 16 #include "segment.h" ··· 82 81 (unsigned long long)unusable); 83 82 } 84 83 84 + static ssize_t encoding_show(struct f2fs_attr *a, 85 + struct f2fs_sb_info *sbi, char *buf) 86 + { 87 + #ifdef CONFIG_UNICODE 88 + if (f2fs_sb_has_casefold(sbi)) 89 + return snprintf(buf, PAGE_SIZE, "%s (%d.%d.%d)\n", 90 + sbi->s_encoding->charset, 91 + (sbi->s_encoding->version >> 16) & 0xff, 92 + (sbi->s_encoding->version >> 8) & 0xff, 93 + sbi->s_encoding->version & 0xff); 94 + #endif 95 + return snprintf(buf, PAGE_SIZE, "(none)"); 96 + } 85 97 86 98 static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a, 87 99 struct f2fs_sb_info *sbi, char *buf) ··· 151 137 if (f2fs_sb_has_sb_chksum(sbi)) 152 138 len += snprintf(buf + len, PAGE_SIZE - len, "%s%s", 153 139 len ? ", " : "", "sb_checksum"); 140 + if (f2fs_sb_has_casefold(sbi)) 141 + len += snprintf(buf + len, PAGE_SIZE - len, "%s%s", 142 + len ? 
", " : "", "casefold"); 154 143 len += snprintf(buf + len, PAGE_SIZE - len, "\n"); 155 144 return len; 156 145 } ··· 386 369 FEAT_LOST_FOUND, 387 370 FEAT_VERITY, 388 371 FEAT_SB_CHECKSUM, 372 + FEAT_CASEFOLD, 389 373 }; 390 374 391 375 static ssize_t f2fs_feature_show(struct f2fs_attr *a, ··· 405 387 case FEAT_LOST_FOUND: 406 388 case FEAT_VERITY: 407 389 case FEAT_SB_CHECKSUM: 390 + case FEAT_CASEFOLD: 408 391 return snprintf(buf, PAGE_SIZE, "supported\n"); 409 392 } 410 393 return 0; ··· 479 460 F2FS_GENERAL_RO_ATTR(features); 480 461 F2FS_GENERAL_RO_ATTR(current_reserved_blocks); 481 462 F2FS_GENERAL_RO_ATTR(unusable); 463 + F2FS_GENERAL_RO_ATTR(encoding); 482 464 483 465 #ifdef CONFIG_FS_ENCRYPTION 484 466 F2FS_FEATURE_RO_ATTR(encryption, FEAT_CRYPTO); ··· 499 479 F2FS_FEATURE_RO_ATTR(verity, FEAT_VERITY); 500 480 #endif 501 481 F2FS_FEATURE_RO_ATTR(sb_checksum, FEAT_SB_CHECKSUM); 482 + F2FS_FEATURE_RO_ATTR(casefold, FEAT_CASEFOLD); 502 483 503 484 #define ATTR_LIST(name) (&f2fs_attr_##name.attr) 504 485 static struct attribute *f2fs_attrs[] = { ··· 544 523 ATTR_LIST(features), 545 524 ATTR_LIST(reserved_blocks), 546 525 ATTR_LIST(current_reserved_blocks), 526 + ATTR_LIST(encoding), 547 527 NULL, 548 528 }; 549 529 ATTRIBUTE_GROUPS(f2fs); ··· 568 546 ATTR_LIST(verity), 569 547 #endif 570 548 ATTR_LIST(sb_checksum), 549 + ATTR_LIST(casefold), 571 550 NULL, 572 551 }; 573 552 ATTRIBUTE_GROUPS(f2fs_feat);
+6
fs/f2fs/xattr.c
··· 21 21 #include <linux/posix_acl_xattr.h> 22 22 #include "f2fs.h" 23 23 #include "xattr.h" 24 + #include "segment.h" 24 25 25 26 static int f2fs_xattr_generic_get(const struct xattr_handler *handler, 26 27 struct dentry *unused, struct inode *inode, ··· 729 728 { 730 729 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 731 730 int err; 731 + 732 + if (unlikely(f2fs_cp_error(sbi))) 733 + return -EIO; 734 + if (!f2fs_is_checkpoint_ready(sbi)) 735 + return -ENOSPC; 732 736 733 737 err = dquot_initialize(inode); 734 738 if (err)
+9 -1
include/linux/f2fs_fs.h
··· 36 36 37 37 #define F2FS_MAX_QUOTAS 3 38 38 39 + #define F2FS_ENC_UTF8_12_1 1 40 + #define F2FS_ENC_STRICT_MODE_FL (1 << 0) 41 + #define f2fs_has_strict_mode(sbi) \ 42 + (sbi->s_encoding_flags & F2FS_ENC_STRICT_MODE_FL) 43 + 39 44 #define F2FS_IO_SIZE(sbi) (1 << F2FS_OPTION(sbi).write_io_size_bits) /* Blocks */ 40 45 #define F2FS_IO_SIZE_KB(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 2)) /* KB */ 41 46 #define F2FS_IO_SIZE_BYTES(sbi) (1 << (F2FS_OPTION(sbi).write_io_size_bits + 12)) /* B */ 42 47 #define F2FS_IO_SIZE_BITS(sbi) (F2FS_OPTION(sbi).write_io_size_bits) /* power of 2 */ 43 48 #define F2FS_IO_SIZE_MASK(sbi) (F2FS_IO_SIZE(sbi) - 1) 49 + #define F2FS_IO_ALIGNED(sbi) (F2FS_IO_SIZE(sbi) > 1) 44 50 45 51 /* This flag is used by node and meta inodes, and by recovery */ 46 52 #define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO) ··· 115 109 struct f2fs_device devs[MAX_DEVICES]; /* device list */ 116 110 __le32 qf_ino[F2FS_MAX_QUOTAS]; /* quota inode numbers */ 117 111 __u8 hot_ext_count; /* # of hot file extension */ 118 - __u8 reserved[310]; /* valid reserved region */ 112 + __le16 s_encoding; /* Filename charset encoding */ 113 + __le16 s_encoding_flags; /* Filename charset encoding flags */ 114 + __u8 reserved[306]; /* valid reserved region */ 119 115 __le32 crc; /* checksum of superblock */ 120 116 } __packed; 121 117
+1
include/uapi/linux/fs.h
··· 264 264 #define FS_NOCOW_FL 0x00800000 /* Do not cow file */ 265 265 #define FS_INLINE_DATA_FL 0x10000000 /* Reserved for ext4 */ 266 266 #define FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ 267 + #define FS_CASEFOLD_FL 0x40000000 /* Folder is case insensitive */ 267 268 #define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ 268 269 269 270 #define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
+1
tools/include/uapi/linux/fs.h
··· 311 311 #define FS_NOCOW_FL 0x00800000 /* Do not cow file */ 312 312 #define FS_INLINE_DATA_FL 0x10000000 /* Reserved for ext4 */ 313 313 #define FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ 314 + #define FS_CASEFOLD_FL 0x40000000 /* Folder is case insensitive */ 314 315 #define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ 315 316 316 317 #define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */