Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'vfs-6.19-rc1.misc' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull misc vfs updates from Christian Brauner:
"Features:

- Cheaper MAY_EXEC handling for path lookup. This elides MAY_EXEC
permission checks during path lookup and adds the
IOP_FASTPERM_MAY_EXEC flag so filesystems like btrfs can avoid
expensive permission work.

- Hide dentry_cache behind runtime const machinery.

- Add German Maglione as virtiofs co-maintainer.

Cleanups:

- Tidy up and inline step_into() and walk_component() for improved
code generation.

- Re-enable IOCB_NOWAIT writes to files. This refactors file
timestamp update logic, fixing a layering bypass in btrfs when
updating timestamps on device files and improving FMODE_NOCMTIME
handling in VFS now that nfsd has started using it.

- Path lookup optimizations extracting slowpaths into dedicated
routines and adding branch prediction hints for mntput_no_expire(),
fd_install(), lookup_slow(), and various other hot paths.

- Enable clang's -fms-extensions flag, requiring a JFS rename to
avoid conflicts.

- Remove spurious exports in fs/file_attr.c.

- Stop duplicating union pipe_index declaration. This depends on the
shared kbuild branch that brings in -fms-extensions support which
is merged into this branch.

- Use MD5 library instead of crypto_shash in ecryptfs.

- Use largest_zero_folio() in iomap_dio_zero().

- Replace simple_strtol/strtoul with kstrtoint/kstrtouint in init and
initrd code.

- Various typo fixes.

Fixes:

- Fix emergency sync for btrfs. Btrfs requires an explicit sync_fs()
call with wait == 1 to commit super blocks. The emergency sync path
never passed this, leaving btrfs data uncommitted during emergency
sync.

- Use local kmap in watch_queue's post_one_notification().

- Add hint prints in sb_set_blocksize() for LBS dependency on THP"

* tag 'vfs-6.19-rc1.misc' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs: (35 commits)
MAINTAINERS: add German Maglione as virtiofs co-maintainer
fs: inline step_into() and walk_component()
fs: tidy up step_into() & friends before inlining
orangefs: use inode_update_timestamps directly
btrfs: fix the comment on btrfs_update_time
btrfs: use vfs_utimes to update file timestamps
fs: export vfs_utimes
fs: lift the FMODE_NOCMTIME check into file_update_time_flags
fs: refactor file timestamp update logic
include/linux/fs.h: trivial fix: regualr -> regular
fs/splice.c: trivial fix: pipes -> pipe's
fs: mark lookup_slow() as noinline
fs: add predicts based on nd->depth
fs: move mntput_no_expire() slowpath into a dedicated routine
fs: remove spurious exports in fs/file_attr.c
watch_queue: Use local kmap in post_one_notification()
fs: touch up predicts in path lookup
fs: move fd_install() slowpath into a dedicated routine and provide commentary
fs: hide dentry_cache behind runtime const machinery
fs: touch predicts in do_dentry_open()
...

+330 -344
+1
MAINTAINERS
··· 27166 27166 F: drivers/s390/virtio/ 27167 27167 27168 27168 VIRTIO FILE SYSTEM 27169 + M: German Maglione <gmaglione@redhat.com> 27169 27170 M: Vivek Goyal <vgoyal@redhat.com> 27170 27171 M: Stefan Hajnoczi <stefanha@redhat.com> 27171 27172 M: Miklos Szeredi <miklos@szeredi.hu>
+3
Makefile
··· 1061 1061 # perform bounds checking. 1062 1062 KBUILD_CFLAGS += $(call cc-option, -fstrict-flex-arrays=3) 1063 1063 1064 + # Allow including a tagged struct or union anonymously in another struct/union. 1065 + KBUILD_CFLAGS += -fms-extensions 1066 + 1064 1067 # disable invalid "can't wrap" optimizations for signed / pointers 1065 1068 KBUILD_CFLAGS += -fno-strict-overflow 1066 1069
+2 -1
arch/arm64/kernel/vdso32/Makefile
··· 63 63 $(filter -Werror,$(KBUILD_CPPFLAGS)) \ 64 64 -Werror-implicit-function-declaration \ 65 65 -Wno-format-security \ 66 - -std=gnu11 66 + -std=gnu11 -fms-extensions 67 67 VDSO_CFLAGS += -O2 68 68 # Some useful compiler-dependent flags from top-level Makefile 69 69 VDSO_CFLAGS += $(call cc32-option,-Wno-pointer-sign) ··· 71 71 VDSO_CFLAGS += $(call cc32-option,-Werror=strict-prototypes) 72 72 VDSO_CFLAGS += -Werror=date-time 73 73 VDSO_CFLAGS += $(call cc32-option,-Werror=incompatible-pointer-types) 74 + VDSO_CFLAGS += $(if $(CONFIG_CC_IS_CLANG),-Wno-microsoft-anon-tag) 74 75 75 76 # Compile as THUMB2 or ARM. Unwinding via frame-pointers in THUMB2 is 76 77 # unreliable.
+1 -1
arch/loongarch/vdso/Makefile
··· 19 19 cflags-vdso := $(ccflags-vdso) \ 20 20 -isystem $(shell $(CC) -print-file-name=include) \ 21 21 $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \ 22 - -std=gnu11 -O2 -g -fno-strict-aliasing -fno-common -fno-builtin \ 22 + -std=gnu11 -fms-extensions -O2 -g -fno-strict-aliasing -fno-common -fno-builtin \ 23 23 -fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \ 24 24 $(call cc-option, -fno-asynchronous-unwind-tables) \ 25 25 $(call cc-option, -fno-stack-protector)
+1 -1
arch/parisc/boot/compressed/Makefile
··· 18 18 ifndef CONFIG_64BIT 19 19 KBUILD_CFLAGS += -mfast-indirect-calls 20 20 endif 21 - KBUILD_CFLAGS += -std=gnu11 21 + KBUILD_CFLAGS += -std=gnu11 -fms-extensions 22 22 23 23 LDFLAGS_vmlinux := -X -e startup --as-needed -T 24 24 $(obj)/vmlinux: $(obj)/vmlinux.lds $(addprefix $(obj)/, $(OBJECTS)) $(LIBGCC) FORCE
+2 -1
arch/powerpc/boot/Makefile
··· 70 70 BOOTCPPFLAGS += -isystem $(shell $(BOOTCC) -print-file-name=include) 71 71 72 72 BOOTCFLAGS := $(BOOTTARGETFLAGS) \ 73 - -std=gnu11 \ 73 + -std=gnu11 -fms-extensions \ 74 74 -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ 75 75 -fno-strict-aliasing -O2 \ 76 76 -msoft-float -mno-altivec -mno-vsx \ ··· 86 86 87 87 ifdef CONFIG_CC_IS_CLANG 88 88 BOOTCFLAGS += $(CLANG_FLAGS) 89 + BOOTCFLAGS += -Wno-microsoft-anon-tag 89 90 BOOTAFLAGS += $(CLANG_FLAGS) 90 91 endif 91 92
+2 -1
arch/s390/Makefile
··· 22 22 ifndef CONFIG_AS_IS_LLVM 23 23 KBUILD_AFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),$(aflags_dwarf)) 24 24 endif 25 - KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack -std=gnu11 25 + KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack -std=gnu11 -fms-extensions 26 26 KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY 27 27 KBUILD_CFLAGS_DECOMPRESSOR += -D__DECOMPRESSOR 28 28 KBUILD_CFLAGS_DECOMPRESSOR += -Wno-pointer-sign ··· 35 35 KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g) 36 36 KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,)) 37 37 KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_CC_NO_ARRAY_BOUNDS),-Wno-array-bounds) 38 + KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_CC_IS_CLANG),-Wno-microsoft-anon-tag) 38 39 39 40 UTS_MACHINE := s390x 40 41 STACK_SIZE := $(if $(CONFIG_KASAN),65536,$(if $(CONFIG_KMSAN),65536,16384))
+2 -1
arch/s390/purgatory/Makefile
··· 13 13 $(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE 14 14 $(call if_changed_rule,as_o_S) 15 15 16 - KBUILD_CFLAGS := -std=gnu11 -fno-strict-aliasing -Wall -Wstrict-prototypes 16 + KBUILD_CFLAGS := -std=gnu11 -fms-extensions -fno-strict-aliasing -Wall -Wstrict-prototypes 17 17 KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare 18 18 KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding 19 19 KBUILD_CFLAGS += -Os -m64 -msoft-float -fno-common ··· 21 21 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING 22 22 KBUILD_CFLAGS += -D__DISABLE_EXPORTS 23 23 KBUILD_CFLAGS += $(CLANG_FLAGS) 24 + KBUILD_CFLAGS += $(if $(CONFIG_CC_IS_CLANG),-Wno-microsoft-anon-tag) 24 25 KBUILD_CFLAGS += $(call cc-option,-fno-PIE) 25 26 KBUILD_AFLAGS := $(filter-out -DCC_USING_EXPOLINE,$(KBUILD_AFLAGS)) 26 27 KBUILD_AFLAGS += -D__DISABLE_EXPORTS
+3 -1
arch/x86/Makefile
··· 48 48 49 49 # How to compile the 16-bit code. Note we always compile for -march=i386; 50 50 # that way we can complain to the user if the CPU is insufficient. 51 - REALMODE_CFLAGS := -std=gnu11 -m16 -g -Os -DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \ 51 + REALMODE_CFLAGS := -std=gnu11 -fms-extensions -m16 -g -Os \ 52 + -DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \ 52 53 -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \ 53 54 -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ 54 55 -mno-mmx -mno-sse $(call cc-option,-fcf-protection=none) ··· 61 60 REALMODE_CFLAGS += $(CLANG_FLAGS) 62 61 ifdef CONFIG_CC_IS_CLANG 63 62 REALMODE_CFLAGS += -Wno-gnu 63 + REALMODE_CFLAGS += -Wno-microsoft-anon-tag 64 64 endif 65 65 export REALMODE_CFLAGS 66 66
+5 -2
arch/x86/boot/compressed/Makefile
··· 25 25 # avoid errors with '-march=i386', and future flags may depend on the target to 26 26 # be valid. 27 27 KBUILD_CFLAGS := -m$(BITS) -O2 $(CLANG_FLAGS) 28 - KBUILD_CFLAGS += -std=gnu11 28 + KBUILD_CFLAGS += -std=gnu11 -fms-extensions 29 29 KBUILD_CFLAGS += -fno-strict-aliasing -fPIE 30 30 KBUILD_CFLAGS += -Wundef 31 31 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING ··· 36 36 KBUILD_CFLAGS += -ffreestanding -fshort-wchar 37 37 KBUILD_CFLAGS += -fno-stack-protector 38 38 KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) 39 - KBUILD_CFLAGS += $(call cc-disable-warning, gnu) 39 + ifdef CONFIG_CC_IS_CLANG 40 + KBUILD_CFLAGS += -Wno-gnu 41 + KBUILD_CFLAGS += -Wno-microsoft-anon-tag 42 + endif 40 43 KBUILD_CFLAGS += -Wno-pointer-sign 41 44 KBUILD_CFLAGS += -fno-asynchronous-unwind-tables 42 45 KBUILD_CFLAGS += -D__DISABLE_EXPORTS
+18 -1
block/bdev.c
··· 217 217 218 218 EXPORT_SYMBOL(set_blocksize); 219 219 220 + static int sb_validate_large_blocksize(struct super_block *sb, int size) 221 + { 222 + const char *err_str = NULL; 223 + 224 + if (!(sb->s_type->fs_flags & FS_LBS)) 225 + err_str = "not supported by filesystem"; 226 + else if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 227 + err_str = "is only supported with CONFIG_TRANSPARENT_HUGEPAGE"; 228 + 229 + if (!err_str) 230 + return 0; 231 + 232 + pr_warn_ratelimited("%s: block size(%d) > page size(%lu) %s\n", 233 + sb->s_type->name, size, PAGE_SIZE, err_str); 234 + return -EINVAL; 235 + } 236 + 220 237 int sb_set_blocksize(struct super_block *sb, int size) 221 238 { 222 - if (!(sb->s_type->fs_flags & FS_LBS) && size > PAGE_SIZE) 239 + if (size > PAGE_SIZE && sb_validate_large_blocksize(sb, size)) 223 240 return 0; 224 241 if (set_blocksize(sb->s_bdev_file, size)) 225 242 return 0;
+2 -2
drivers/firmware/efi/libstub/Makefile
··· 11 11 12 12 cflags-$(CONFIG_X86_32) := -march=i386 13 13 cflags-$(CONFIG_X86_64) := -mcmodel=small 14 - cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -std=gnu11 \ 14 + cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -std=gnu11 -fms-extensions \ 15 15 -fPIC -fno-strict-aliasing -mno-red-zone \ 16 16 -mno-mmx -mno-sse -fshort-wchar \ 17 17 -Wno-pointer-sign \ 18 18 $(call cc-disable-warning, address-of-packed-member) \ 19 - $(call cc-disable-warning, gnu) \ 19 + $(if $(CONFIG_CC_IS_CLANG),-Wno-gnu -Wno-microsoft-anon-tag) \ 20 20 -fno-asynchronous-unwind-tables \ 21 21 $(CLANG_FLAGS) 22 22
+13 -3
fs/btrfs/inode.c
··· 5839 5839 if (ret) 5840 5840 return ERR_PTR(ret); 5841 5841 5842 + if (S_ISDIR(inode->vfs_inode.i_mode)) 5843 + inode->vfs_inode.i_opflags |= IOP_FASTPERM_MAY_EXEC; 5842 5844 unlock_new_inode(&inode->vfs_inode); 5843 5845 return inode; 5844 5846 } ··· 6293 6291 } 6294 6292 6295 6293 /* 6296 - * This is a copy of file_update_time. We need this so we can return error on 6297 - * ENOSPC for updating the inode in the case of file write and mmap writes. 6294 + * We need our own ->update_time so that we can return error on ENOSPC for 6295 + * updating the inode in the case of file write and mmap writes. 6298 6296 */ 6299 6297 static int btrfs_update_time(struct inode *inode, int flags) 6300 6298 { ··· 6792 6790 } 6793 6791 6794 6792 ret = btrfs_create_new_inode(trans, &new_inode_args); 6795 - if (!ret) 6793 + if (!ret) { 6794 + if (S_ISDIR(inode->i_mode)) 6795 + inode->i_opflags |= IOP_FASTPERM_MAY_EXEC; 6796 6796 d_instantiate_new(dentry, inode); 6797 + } 6797 6798 6798 6799 btrfs_end_transaction(trans); 6799 6800 btrfs_btree_balance_dirty(fs_info); ··· 9175 9170 min_size, actual_len, alloc_hint, trans); 9176 9171 } 9177 9172 9173 + /* 9174 + * NOTE: in case you are adding MAY_EXEC check for directories: 9175 + * we are marking them with IOP_FASTPERM_MAY_EXEC, allowing path lookup to 9176 + * elide calls here. 9177 + */ 9178 9178 static int btrfs_permission(struct mnt_idmap *idmap, 9179 9179 struct inode *inode, int mask) 9180 9180 {
+4 -7
fs/btrfs/volumes.c
··· 2002 2002 static void update_dev_time(const char *device_path) 2003 2003 { 2004 2004 struct path path; 2005 - int ret; 2006 2005 2007 - ret = kern_path(device_path, LOOKUP_FOLLOW, &path); 2008 - if (ret) 2009 - return; 2010 - 2011 - inode_update_time(d_inode(path.dentry), S_MTIME | S_CTIME | S_VERSION); 2012 - path_put(&path); 2006 + if (!kern_path(device_path, LOOKUP_FOLLOW, &path)) { 2007 + vfs_utimes(&path, NULL); 2008 + path_put(&path); 2009 + } 2013 2010 } 2014 2011 2015 2012 static int btrfs_rm_dev_item(struct btrfs_trans_handle *trans,
+4 -2
fs/dcache.c
··· 86 86 87 87 EXPORT_SYMBOL(rename_lock); 88 88 89 - static struct kmem_cache *dentry_cache __ro_after_init; 89 + static struct kmem_cache *__dentry_cache __ro_after_init; 90 + #define dentry_cache runtime_const_ptr(__dentry_cache) 90 91 91 92 const struct qstr empty_name = QSTR_INIT("", 0); 92 93 EXPORT_SYMBOL(empty_name); ··· 3223 3222 * but it is probably not worth it because of the cache nature 3224 3223 * of the dcache. 3225 3224 */ 3226 - dentry_cache = KMEM_CACHE_USERCOPY(dentry, 3225 + __dentry_cache = KMEM_CACHE_USERCOPY(dentry, 3227 3226 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_ACCOUNT, 3228 3227 d_shortname.string); 3228 + runtime_const_init(ptr, __dentry_cache); 3229 3229 3230 3230 /* Hash may have been set up in dcache_init_early */ 3231 3231 if (!hashdist)
+1 -1
fs/ecryptfs/Kconfig
··· 4 4 depends on KEYS && CRYPTO && (ENCRYPTED_KEYS || ENCRYPTED_KEYS=n) 5 5 select CRYPTO_ECB 6 6 select CRYPTO_CBC 7 - select CRYPTO_MD5 7 + select CRYPTO_LIB_MD5 8 8 help 9 9 Encrypted filesystem that operates on the VFS layer. See 10 10 <file:Documentation/filesystems/ecryptfs.rst> to learn more about
+9 -81
fs/ecryptfs/crypto.c
··· 9 9 * Michael C. Thompson <mcthomps@us.ibm.com> 10 10 */ 11 11 12 - #include <crypto/hash.h> 13 12 #include <crypto/skcipher.h> 14 13 #include <linux/fs.h> 15 14 #include <linux/mount.h> ··· 47 48 } 48 49 } 49 50 50 - /** 51 - * ecryptfs_calculate_md5 - calculates the md5 of @src 52 - * @dst: Pointer to 16 bytes of allocated memory 53 - * @crypt_stat: Pointer to crypt_stat struct for the current inode 54 - * @src: Data to be md5'd 55 - * @len: Length of @src 56 - * 57 - * Uses the allocated crypto context that crypt_stat references to 58 - * generate the MD5 sum of the contents of src. 59 - */ 60 - static int ecryptfs_calculate_md5(char *dst, 61 - struct ecryptfs_crypt_stat *crypt_stat, 62 - char *src, int len) 63 - { 64 - int rc = crypto_shash_tfm_digest(crypt_stat->hash_tfm, src, len, dst); 65 - 66 - if (rc) { 67 - printk(KERN_ERR 68 - "%s: Error computing crypto hash; rc = [%d]\n", 69 - __func__, rc); 70 - goto out; 71 - } 72 - out: 73 - return rc; 74 - } 75 - 76 51 static int ecryptfs_crypto_api_algify_cipher_name(char **algified_name, 77 52 char *cipher_name, 78 53 char *chaining_modifier) ··· 77 104 * 78 105 * Generate the initialization vector from the given root IV and page 79 106 * offset. 80 - * 81 - * Returns zero on success; non-zero on error. 
82 107 */ 83 - int ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat, 84 - loff_t offset) 108 + void ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat, 109 + loff_t offset) 85 110 { 86 - int rc = 0; 87 111 char dst[MD5_DIGEST_SIZE]; 88 112 char src[ECRYPTFS_MAX_IV_BYTES + 16]; 89 113 ··· 99 129 ecryptfs_printk(KERN_DEBUG, "source:\n"); 100 130 ecryptfs_dump_hex(src, (crypt_stat->iv_bytes + 16)); 101 131 } 102 - rc = ecryptfs_calculate_md5(dst, crypt_stat, src, 103 - (crypt_stat->iv_bytes + 16)); 104 - if (rc) { 105 - ecryptfs_printk(KERN_WARNING, "Error attempting to compute " 106 - "MD5 while generating IV for a page\n"); 107 - goto out; 108 - } 132 + md5(src, crypt_stat->iv_bytes + 16, dst); 109 133 memcpy(iv, dst, crypt_stat->iv_bytes); 110 134 if (unlikely(ecryptfs_verbosity > 0)) { 111 135 ecryptfs_printk(KERN_DEBUG, "derived iv:\n"); 112 136 ecryptfs_dump_hex(iv, crypt_stat->iv_bytes); 113 137 } 114 - out: 115 - return rc; 116 138 } 117 139 118 140 /** ··· 113 151 * 114 152 * Initialize the crypt_stat structure. 
115 153 */ 116 - int ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat) 154 + void ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat) 117 155 { 118 - struct crypto_shash *tfm; 119 - int rc; 120 - 121 - tfm = crypto_alloc_shash(ECRYPTFS_DEFAULT_HASH, 0, 0); 122 - if (IS_ERR(tfm)) { 123 - rc = PTR_ERR(tfm); 124 - ecryptfs_printk(KERN_ERR, "Error attempting to " 125 - "allocate crypto context; rc = [%d]\n", 126 - rc); 127 - return rc; 128 - } 129 - 130 156 memset((void *)crypt_stat, 0, sizeof(struct ecryptfs_crypt_stat)); 131 157 INIT_LIST_HEAD(&crypt_stat->keysig_list); 132 158 mutex_init(&crypt_stat->keysig_list_mutex); 133 159 mutex_init(&crypt_stat->cs_mutex); 134 160 mutex_init(&crypt_stat->cs_tfm_mutex); 135 - crypt_stat->hash_tfm = tfm; 136 161 crypt_stat->flags |= ECRYPTFS_STRUCT_INITIALIZED; 137 - 138 - return 0; 139 162 } 140 163 141 164 /** ··· 134 187 struct ecryptfs_key_sig *key_sig, *key_sig_tmp; 135 188 136 189 crypto_free_skcipher(crypt_stat->tfm); 137 - crypto_free_shash(crypt_stat->hash_tfm); 138 190 list_for_each_entry_safe(key_sig, key_sig_tmp, 139 191 &crypt_stat->keysig_list, crypt_stat_list) { 140 192 list_del(&key_sig->crypt_stat_list); ··· 307 361 int rc; 308 362 309 363 extent_base = (((loff_t)page_index) * (PAGE_SIZE / extent_size)); 310 - rc = ecryptfs_derive_iv(extent_iv, crypt_stat, 311 - (extent_base + extent_offset)); 312 - if (rc) { 313 - ecryptfs_printk(KERN_ERR, "Error attempting to derive IV for " 314 - "extent [0x%.16llx]; rc = [%d]\n", 315 - (unsigned long long)(extent_base + extent_offset), rc); 316 - goto out; 317 - } 364 + ecryptfs_derive_iv(extent_iv, crypt_stat, extent_base + extent_offset); 318 365 319 366 sg_init_table(&src_sg, 1); 320 367 sg_init_table(&dst_sg, 1); ··· 548 609 */ 549 610 int ecryptfs_compute_root_iv(struct ecryptfs_crypt_stat *crypt_stat) 550 611 { 551 - int rc = 0; 552 612 char dst[MD5_DIGEST_SIZE]; 553 613 554 614 BUG_ON(crypt_stat->iv_bytes > MD5_DIGEST_SIZE); 555 615 
BUG_ON(crypt_stat->iv_bytes <= 0); 556 616 if (!(crypt_stat->flags & ECRYPTFS_KEY_VALID)) { 557 - rc = -EINVAL; 558 617 ecryptfs_printk(KERN_WARNING, "Session key not valid; " 559 618 "cannot generate root IV\n"); 560 - goto out; 561 - } 562 - rc = ecryptfs_calculate_md5(dst, crypt_stat, crypt_stat->key, 563 - crypt_stat->key_size); 564 - if (rc) { 565 - ecryptfs_printk(KERN_WARNING, "Error attempting to compute " 566 - "MD5 while generating root IV\n"); 567 - goto out; 568 - } 569 - memcpy(crypt_stat->root_iv, dst, crypt_stat->iv_bytes); 570 - out: 571 - if (rc) { 572 619 memset(crypt_stat->root_iv, 0, crypt_stat->iv_bytes); 573 620 crypt_stat->flags |= ECRYPTFS_SECURITY_WARNING; 621 + return -EINVAL; 574 622 } 575 - return rc; 623 + md5(crypt_stat->key, crypt_stat->key_size, dst); 624 + memcpy(crypt_stat->root_iv, dst, crypt_stat->iv_bytes); 625 + return 0; 576 626 } 577 627 578 628 static void ecryptfs_generate_new_key(struct ecryptfs_crypt_stat *crypt_stat)
+4 -9
fs/ecryptfs/ecryptfs_kernel.h
··· 14 14 #ifndef ECRYPTFS_KERNEL_H 15 15 #define ECRYPTFS_KERNEL_H 16 16 17 + #include <crypto/md5.h> 17 18 #include <crypto/skcipher.h> 18 19 #include <keys/user-type.h> 19 20 #include <keys/encrypted-type.h> ··· 138 137 + MAGIC_ECRYPTFS_MARKER_SIZE_BYTES) 139 138 #define ECRYPTFS_DEFAULT_CIPHER "aes" 140 139 #define ECRYPTFS_DEFAULT_KEY_BYTES 16 141 - #define ECRYPTFS_DEFAULT_HASH "md5" 142 - #define ECRYPTFS_TAG_70_DIGEST ECRYPTFS_DEFAULT_HASH 143 140 #define ECRYPTFS_TAG_1_PACKET_TYPE 0x01 144 141 #define ECRYPTFS_TAG_3_PACKET_TYPE 0x8C 145 142 #define ECRYPTFS_TAG_11_PACKET_TYPE 0xED ··· 162 163 * ECRYPTFS_MAX_IV_BYTES */ 163 164 #define ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES 16 164 165 #define ECRYPTFS_NON_NULL 0x42 /* A reasonable substitute for NULL */ 165 - #define MD5_DIGEST_SIZE 16 166 - #define ECRYPTFS_TAG_70_DIGEST_SIZE MD5_DIGEST_SIZE 167 166 #define ECRYPTFS_TAG_70_MIN_METADATA_SIZE (1 + ECRYPTFS_MIN_PKT_LEN_SIZE \ 168 167 + ECRYPTFS_SIG_SIZE + 1 + 1) 169 168 #define ECRYPTFS_TAG_70_MAX_METADATA_SIZE (1 + ECRYPTFS_MAX_PKT_LEN_SIZE \ ··· 234 237 unsigned int extent_mask; 235 238 struct ecryptfs_mount_crypt_stat *mount_crypt_stat; 236 239 struct crypto_skcipher *tfm; 237 - struct crypto_shash *hash_tfm; /* Crypto context for generating 238 - * the initialization vectors */ 239 240 unsigned char cipher[ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1]; 240 241 unsigned char key[ECRYPTFS_MAX_KEY_BYTES]; 241 242 unsigned char root_iv[ECRYPTFS_MAX_IV_BYTES]; ··· 553 558 int sg_size); 554 559 int ecryptfs_compute_root_iv(struct ecryptfs_crypt_stat *crypt_stat); 555 560 void ecryptfs_rotate_iv(unsigned char *iv); 556 - int ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat); 561 + void ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat); 557 562 void ecryptfs_destroy_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat); 558 563 void ecryptfs_destroy_mount_crypt_stat( 559 564 struct ecryptfs_mount_crypt_stat *mount_crypt_stat); ··· 688 693 char 
*data, size_t max_packet_size); 689 694 int ecryptfs_set_f_namelen(long *namelen, long lower_namelen, 690 695 struct ecryptfs_mount_crypt_stat *mount_crypt_stat); 691 - int ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat, 692 - loff_t offset); 696 + void ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat, 697 + loff_t offset); 693 698 694 699 extern const struct xattr_handler * const ecryptfs_xattr_handlers[]; 695 700
+2 -5
fs/ecryptfs/inode.c
··· 903 903 struct ecryptfs_crypt_stat *crypt_stat; 904 904 905 905 crypt_stat = &ecryptfs_inode_to_private(d_inode(dentry))->crypt_stat; 906 - if (!(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED)) { 907 - rc = ecryptfs_init_crypt_stat(crypt_stat); 908 - if (rc) 909 - return rc; 910 - } 906 + if (!(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED)) 907 + ecryptfs_init_crypt_stat(crypt_stat); 911 908 inode = d_inode(dentry); 912 909 lower_inode = ecryptfs_inode_to_lower(inode); 913 910 lower_dentry = ecryptfs_dentry_to_lower(dentry);
+11 -54
fs/ecryptfs/keystore.c
··· 11 11 * Trevor S. Highland <trevor.highland@gmail.com> 12 12 */ 13 13 14 - #include <crypto/hash.h> 15 14 #include <crypto/skcipher.h> 16 15 #include <linux/string.h> 17 16 #include <linux/pagemap.h> ··· 600 601 struct crypto_skcipher *skcipher_tfm; 601 602 struct skcipher_request *skcipher_req; 602 603 char iv[ECRYPTFS_MAX_IV_BYTES]; 603 - char hash[ECRYPTFS_TAG_70_DIGEST_SIZE]; 604 - char tmp_hash[ECRYPTFS_TAG_70_DIGEST_SIZE]; 605 - struct crypto_shash *hash_tfm; 606 - struct shash_desc *hash_desc; 604 + char hash[MD5_DIGEST_SIZE]; 607 605 }; 608 606 609 607 /* ··· 737 741 "password tokens\n", __func__); 738 742 goto out_free_unlock; 739 743 } 740 - s->hash_tfm = crypto_alloc_shash(ECRYPTFS_TAG_70_DIGEST, 0, 0); 741 - if (IS_ERR(s->hash_tfm)) { 742 - rc = PTR_ERR(s->hash_tfm); 743 - printk(KERN_ERR "%s: Error attempting to " 744 - "allocate hash crypto context; rc = [%d]\n", 745 - __func__, rc); 746 - goto out_free_unlock; 747 - } 748 744 749 - s->hash_desc = kmalloc(sizeof(*s->hash_desc) + 750 - crypto_shash_descsize(s->hash_tfm), GFP_KERNEL); 751 - if (!s->hash_desc) { 752 - rc = -ENOMEM; 753 - goto out_release_free_unlock; 754 - } 755 - 756 - s->hash_desc->tfm = s->hash_tfm; 757 - 758 - rc = crypto_shash_digest(s->hash_desc, 759 - (u8 *)s->auth_tok->token.password.session_key_encryption_key, 760 - s->auth_tok->token.password.session_key_encryption_key_bytes, 761 - s->hash); 762 - if (rc) { 763 - printk(KERN_ERR 764 - "%s: Error computing crypto hash; rc = [%d]\n", 765 - __func__, rc); 766 - goto out_release_free_unlock; 767 - } 745 + md5(s->auth_tok->token.password.session_key_encryption_key, 746 + s->auth_tok->token.password.session_key_encryption_key_bytes, 747 + s->hash); 768 748 for (s->j = 0; s->j < (s->num_rand_bytes - 1); s->j++) { 769 749 s->block_aligned_filename[s->j] = 770 - s->hash[(s->j % ECRYPTFS_TAG_70_DIGEST_SIZE)]; 771 - if ((s->j % ECRYPTFS_TAG_70_DIGEST_SIZE) 772 - == (ECRYPTFS_TAG_70_DIGEST_SIZE - 1)) { 773 - rc = 
crypto_shash_digest(s->hash_desc, (u8 *)s->hash, 774 - ECRYPTFS_TAG_70_DIGEST_SIZE, 775 - s->tmp_hash); 776 - if (rc) { 777 - printk(KERN_ERR 778 - "%s: Error computing crypto hash; " 779 - "rc = [%d]\n", __func__, rc); 780 - goto out_release_free_unlock; 781 - } 782 - memcpy(s->hash, s->tmp_hash, 783 - ECRYPTFS_TAG_70_DIGEST_SIZE); 784 - } 750 + s->hash[s->j % MD5_DIGEST_SIZE]; 751 + if ((s->j % MD5_DIGEST_SIZE) == (MD5_DIGEST_SIZE - 1)) 752 + md5(s->hash, MD5_DIGEST_SIZE, s->hash); 785 753 if (s->block_aligned_filename[s->j] == '\0') 786 754 s->block_aligned_filename[s->j] = ECRYPTFS_NON_NULL; 787 755 } ··· 758 798 "convert filename memory to scatterlist; rc = [%d]. " 759 799 "block_aligned_filename_size = [%zd]\n", __func__, rc, 760 800 s->block_aligned_filename_size); 761 - goto out_release_free_unlock; 801 + goto out_free_unlock; 762 802 } 763 803 rc = virt_to_scatterlist(&dest[s->i], s->block_aligned_filename_size, 764 804 s->dst_sg, 2); ··· 767 807 "convert encrypted filename memory to scatterlist; " 768 808 "rc = [%d]. block_aligned_filename_size = [%zd]\n", 769 809 __func__, rc, s->block_aligned_filename_size); 770 - goto out_release_free_unlock; 810 + goto out_free_unlock; 771 811 } 772 812 /* The characters in the first block effectively do the job 773 813 * of the IV here, so we just use 0's for the IV. 
Note the ··· 785 825 rc, 786 826 s->auth_tok->token.password.session_key_encryption_key, 787 827 mount_crypt_stat->global_default_fn_cipher_key_bytes); 788 - goto out_release_free_unlock; 828 + goto out_free_unlock; 789 829 } 790 830 skcipher_request_set_crypt(s->skcipher_req, s->src_sg, s->dst_sg, 791 831 s->block_aligned_filename_size, s->iv); ··· 793 833 if (rc) { 794 834 printk(KERN_ERR "%s: Error attempting to encrypt filename; " 795 835 "rc = [%d]\n", __func__, rc); 796 - goto out_release_free_unlock; 836 + goto out_free_unlock; 797 837 } 798 838 s->i += s->block_aligned_filename_size; 799 839 (*packet_size) = s->i; 800 840 (*remaining_bytes) -= (*packet_size); 801 - out_release_free_unlock: 802 - crypto_free_shash(s->hash_tfm); 803 841 out_free_unlock: 804 842 kfree_sensitive(s->block_aligned_filename); 805 843 out_unlock: ··· 808 850 key_put(auth_tok_key); 809 851 } 810 852 skcipher_request_free(s->skcipher_req); 811 - kfree_sensitive(s->hash_desc); 812 853 kfree(s); 813 854 return rc; 814 855 }
+7
fs/ecryptfs/main.c
··· 12 12 13 13 #include <linux/dcache.h> 14 14 #include <linux/file.h> 15 + #include <linux/fips.h> 15 16 #include <linux/module.h> 16 17 #include <linux/namei.h> 17 18 #include <linux/skbuff.h> ··· 452 451 rc = ecryptfs_validate_options(fc); 453 452 if (rc) { 454 453 err = "Error validating options"; 454 + goto out; 455 + } 456 + 457 + if (fips_enabled) { 458 + rc = -EINVAL; 459 + err = "eCryptfs support is disabled due to FIPS"; 455 460 goto out; 456 461 } 457 462
+1 -4
fs/ecryptfs/super.c
··· 41 41 inode_info = alloc_inode_sb(sb, ecryptfs_inode_info_cache, GFP_KERNEL); 42 42 if (unlikely(!inode_info)) 43 43 goto out; 44 - if (ecryptfs_init_crypt_stat(&inode_info->crypt_stat)) { 45 - kmem_cache_free(ecryptfs_inode_info_cache, inode_info); 46 - goto out; 47 - } 44 + ecryptfs_init_crypt_stat(&inode_info->crypt_stat); 48 45 mutex_init(&inode_info->lower_file_mutex); 49 46 atomic_set(&inode_info->lower_file_count, 0); 50 47 inode_info->lower_file = NULL;
+29 -6
fs/file.c
··· 641 641 642 642 EXPORT_SYMBOL(put_unused_fd); 643 643 644 + /* 645 + * Install a file pointer in the fd array while it is being resized. 646 + * 647 + * We need to make sure our update to the array does not get lost as the resizing 648 + * thread can be copying the content as we modify it. 649 + * 650 + * We have two ways to do it: 651 + * - go off CPU waiting for resize_in_progress to clear 652 + * - take the spin lock 653 + * 654 + * The latter is trivial to implement and saves us from having to might_sleep() 655 + * for debugging purposes. 656 + * 657 + * This is moved out of line from fd_install() to convince gcc to optimize that 658 + * routine better. 659 + */ 660 + static void noinline fd_install_slowpath(unsigned int fd, struct file *file) 661 + { 662 + struct files_struct *files = current->files; 663 + struct fdtable *fdt; 664 + 665 + spin_lock(&files->file_lock); 666 + fdt = files_fdtable(files); 667 + VFS_BUG_ON(rcu_access_pointer(fdt->fd[fd]) != NULL); 668 + rcu_assign_pointer(fdt->fd[fd], file); 669 + spin_unlock(&files->file_lock); 670 + } 671 + 644 672 /** 645 673 * fd_install - install a file pointer in the fd array 646 674 * @fd: file descriptor to install the file in ··· 686 658 return; 687 659 688 660 rcu_read_lock_sched(); 689 - 690 661 if (unlikely(files->resize_in_progress)) { 691 662 rcu_read_unlock_sched(); 692 - spin_lock(&files->file_lock); 693 - fdt = files_fdtable(files); 694 - VFS_BUG_ON(rcu_access_pointer(fdt->fd[fd]) != NULL); 695 - rcu_assign_pointer(fdt->fd[fd], file); 696 - spin_unlock(&files->file_lock); 663 + fd_install_slowpath(fd, file); 697 664 return; 698 665 } 699 666 /* coupled with smp_wmb() in expand_fdtable() */
-4
fs/file_attr.c
··· 316 316 err = put_user(fa.flags, argp); 317 317 return err; 318 318 } 319 - EXPORT_SYMBOL(ioctl_getflags); 320 319 321 320 int ioctl_setflags(struct file *file, unsigned int __user *argp) 322 321 { ··· 336 337 } 337 338 return err; 338 339 } 339 - EXPORT_SYMBOL(ioctl_setflags); 340 340 341 341 int ioctl_fsgetxattr(struct file *file, void __user *argp) 342 342 { ··· 348 350 349 351 return err; 350 352 } 351 - EXPORT_SYMBOL(ioctl_fsgetxattr); 352 353 353 354 int ioctl_fssetxattr(struct file *file, void __user *argp) 354 355 { ··· 366 369 } 367 370 return err; 368 371 } 369 - EXPORT_SYMBOL(ioctl_fssetxattr); 370 372 371 373 SYSCALL_DEFINE5(file_getattr, int, dfd, const char __user *, filename, 372 374 struct file_attr __user *, ufattr, size_t, usize,
+19 -39
fs/inode.c
··· 2322 2322 } 2323 2323 EXPORT_SYMBOL(current_time); 2324 2324 2325 - static int inode_needs_update_time(struct inode *inode) 2325 + static int file_update_time_flags(struct file *file, unsigned int flags) 2326 2326 { 2327 + struct inode *inode = file_inode(file); 2327 2328 struct timespec64 now, ts; 2328 - int sync_it = 0; 2329 + int sync_mode = 0; 2330 + int ret = 0; 2329 2331 2330 2332 /* First try to exhaust all avenues to not sync */ 2331 2333 if (IS_NOCMTIME(inode)) 2334 + return 0; 2335 + if (unlikely(file->f_mode & FMODE_NOCMTIME)) 2332 2336 return 0; 2333 2337 2334 2338 now = current_time(inode); 2335 2339 2336 2340 ts = inode_get_mtime(inode); 2337 2341 if (!timespec64_equal(&ts, &now)) 2338 - sync_it |= S_MTIME; 2339 - 2342 + sync_mode |= S_MTIME; 2340 2343 ts = inode_get_ctime(inode); 2341 2344 if (!timespec64_equal(&ts, &now)) 2342 - sync_it |= S_CTIME; 2343 - 2345 + sync_mode |= S_CTIME; 2344 2346 if (IS_I_VERSION(inode) && inode_iversion_need_inc(inode)) 2345 - sync_it |= S_VERSION; 2347 + sync_mode |= S_VERSION; 2346 2348 2347 - return sync_it; 2348 - } 2349 + if (!sync_mode) 2350 + return 0; 2349 2351 2350 - static int __file_update_time(struct file *file, int sync_mode) 2351 - { 2352 - int ret = 0; 2353 - struct inode *inode = file_inode(file); 2352 + if (flags & IOCB_NOWAIT) 2353 + return -EAGAIN; 2354 2354 2355 - /* try to update time settings */ 2356 - if (!mnt_get_write_access_file(file)) { 2357 - ret = inode_update_time(inode, sync_mode); 2358 - mnt_put_write_access_file(file); 2359 - } 2360 - 2355 + if (mnt_get_write_access_file(file)) 2356 + return 0; 2357 + ret = inode_update_time(inode, sync_mode); 2358 + mnt_put_write_access_file(file); 2361 2359 return ret; 2362 2360 } 2363 2361 ··· 2375 2377 */ 2376 2378 int file_update_time(struct file *file) 2377 2379 { 2378 - int ret; 2379 - struct inode *inode = file_inode(file); 2380 - 2381 - ret = inode_needs_update_time(inode); 2382 - if (ret <= 0) 2383 - return ret; 2384 - 2385 - return 
__file_update_time(file, ret); 2380 + return file_update_time_flags(file, 0); 2386 2381 } 2387 2382 EXPORT_SYMBOL(file_update_time); 2388 2383 ··· 2397 2406 static int file_modified_flags(struct file *file, int flags) 2398 2407 { 2399 2408 int ret; 2400 - struct inode *inode = file_inode(file); 2401 2409 2402 2410 /* 2403 2411 * Clear the security bits if the process is not being run by root. ··· 2405 2415 ret = file_remove_privs_flags(file, flags); 2406 2416 if (ret) 2407 2417 return ret; 2408 - 2409 - if (unlikely(file->f_mode & FMODE_NOCMTIME)) 2410 - return 0; 2411 - 2412 - ret = inode_needs_update_time(inode); 2413 - if (ret <= 0) 2414 - return ret; 2415 - if (flags & IOCB_NOWAIT) 2416 - return -EAGAIN; 2417 - 2418 - return __file_update_time(file, ret); 2418 + return file_update_time_flags(file, flags); 2419 2419 } 2420 2420 2421 2421 /**
+15 -23
fs/iomap/direct-io.c
··· 23 23 #define IOMAP_DIO_WRITE (1U << 30) 24 24 #define IOMAP_DIO_DIRTY (1U << 31) 25 25 26 - /* 27 - * Used for sub block zeroing in iomap_dio_zero() 28 - */ 29 - #define IOMAP_ZERO_PAGE_SIZE (SZ_64K) 30 - #define IOMAP_ZERO_PAGE_ORDER (get_order(IOMAP_ZERO_PAGE_SIZE)) 31 - static struct page *zero_page; 32 - 33 26 struct iomap_dio { 34 27 struct kiocb *iocb; 35 28 const struct iomap_dio_ops *dops; ··· 269 276 { 270 277 struct inode *inode = file_inode(dio->iocb->ki_filp); 271 278 struct bio *bio; 279 + struct folio *zero_folio = largest_zero_folio(); 280 + int nr_vecs = max(1, i_blocksize(inode) / folio_size(zero_folio)); 272 281 273 282 if (!len) 274 283 return 0; 284 + 275 285 /* 276 - * Max block size supported is 64k 286 + * This limit shall never be reached as most filesystems have a 287 + * maximum blocksize of 64k. 277 288 */ 278 - if (WARN_ON_ONCE(len > IOMAP_ZERO_PAGE_SIZE)) 289 + if (WARN_ON_ONCE(nr_vecs > BIO_MAX_VECS)) 279 290 return -EINVAL; 280 291 281 - bio = iomap_dio_alloc_bio(iter, dio, 1, REQ_OP_WRITE | REQ_SYNC | REQ_IDLE); 292 + bio = iomap_dio_alloc_bio(iter, dio, nr_vecs, 293 + REQ_OP_WRITE | REQ_SYNC | REQ_IDLE); 282 294 fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits, 283 295 GFP_KERNEL); 284 296 bio->bi_iter.bi_sector = iomap_sector(&iter->iomap, pos); 285 297 bio->bi_private = dio; 286 298 bio->bi_end_io = iomap_dio_bio_end_io; 287 299 288 - __bio_add_page(bio, zero_page, len, 0); 300 + while (len > 0) { 301 + unsigned int io_len = min(len, folio_size(zero_folio)); 302 + 303 + bio_add_folio_nofail(bio, zero_folio, io_len, 0); 304 + len -= io_len; 305 + } 289 306 iomap_dio_submit_bio(iter, dio, bio, pos); 307 + 290 308 return 0; 291 309 } 292 310 ··· 851 847 return iomap_dio_complete(dio); 852 848 } 853 849 EXPORT_SYMBOL_GPL(iomap_dio_rw); 854 - 855 - static int __init iomap_dio_init(void) 856 - { 857 - zero_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 858 - IOMAP_ZERO_PAGE_ORDER); 859 - 860 - if (!zero_page) 861 - return 
-ENOMEM; 862 - 863 - return 0; 864 - } 865 - fs_initcall(iomap_dio_init);
+3 -3
fs/jfs/jfs_incore.h
··· 76 76 struct { 77 77 unchar _unused[16]; /* 16: */ 78 78 dxd_t _dxd; /* 16: */ 79 - /* _inline may overflow into _inline_ea when needed */ 79 + /* _inline_sym may overflow into _inline_ea when needed */ 80 80 /* _inline_ea may overlay the last part of 81 81 * file._xtroot if maxentry = XTROOTINITSLOT 82 82 */ 83 83 union { 84 84 struct { 85 85 /* 128: inline symlink */ 86 - unchar _inline[128]; 86 + unchar _inline_sym[128]; 87 87 /* 128: inline extended attr */ 88 88 unchar _inline_ea[128]; 89 89 }; ··· 101 101 #define i_imap u.file._imap 102 102 #define i_dirtable u.dir._table 103 103 #define i_dtroot u.dir._dtroot 104 - #define i_inline u.link._inline 104 + #define i_inline u.link._inline_sym 105 105 #define i_inline_ea u.link._inline_ea 106 106 #define i_inline_all u.link._inline_all 107 107
+107 -37
fs/namei.c
··· 282 282 return; 283 283 284 284 refcnt = atomic_read(&name->refcnt); 285 - if (refcnt != 1) { 285 + if (unlikely(refcnt != 1)) { 286 286 if (WARN_ON_ONCE(!refcnt)) 287 287 return; 288 288 ··· 290 290 return; 291 291 } 292 292 293 - if (name->name != name->iname) { 293 + if (unlikely(name->name != name->iname)) { 294 294 __putname(name->name); 295 295 kfree(name); 296 296 } else ··· 540 540 * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) 541 541 * 542 542 * Separate out file-system wide checks from inode-specific permission checks. 543 + * 544 + * Note: lookup_inode_permission_may_exec() does not call here. If you add 545 + * MAY_EXEC checks, adjust it. 543 546 */ 544 547 static int sb_permission(struct super_block *sb, struct inode *inode, int mask) 545 548 { 546 - if (unlikely(mask & MAY_WRITE)) { 549 + if (mask & MAY_WRITE) { 547 550 umode_t mode = inode->i_mode; 548 551 549 552 /* Nobody gets write access to a read-only fs. */ ··· 577 574 if (unlikely(retval)) 578 575 return retval; 579 576 580 - if (unlikely(mask & MAY_WRITE)) { 577 + if (mask & MAY_WRITE) { 581 578 /* 582 579 * Nobody gets write access to an immutable file. 583 580 */ ··· 604 601 return security_inode_permission(inode, mask); 605 602 } 606 603 EXPORT_SYMBOL(inode_permission); 604 + 605 + /* 606 + * lookup_inode_permission_may_exec - Check traversal right for given inode 607 + * 608 + * This is a special case routine for may_lookup() making assumptions specific 609 + * to path traversal. Use inode_permission() if you are doing something else. 610 + * 611 + * Work is shaved off compared to inode_permission() as follows: 612 + * - we know for a fact there is no MAY_WRITE to worry about 613 + * - it is an invariant the inode is a directory 614 + * 615 + * Since majority of real-world traversal happens on inodes which grant it for 616 + * everyone, we check it upfront and only resort to more expensive work if it 617 + * fails. 
618 + * 619 + * Filesystems which have their own ->permission hook and consequently miss out 620 + * on IOP_FASTPERM can still get the optimization if they set IOP_FASTPERM_MAY_EXEC 621 + * on their directory inodes. 622 + */ 623 + static __always_inline int lookup_inode_permission_may_exec(struct mnt_idmap *idmap, 624 + struct inode *inode, int mask) 625 + { 626 + /* Lookup already checked this to return -ENOTDIR */ 627 + VFS_BUG_ON_INODE(!S_ISDIR(inode->i_mode), inode); 628 + VFS_BUG_ON((mask & ~MAY_NOT_BLOCK) != 0); 629 + 630 + mask |= MAY_EXEC; 631 + 632 + if (unlikely(!(inode->i_opflags & (IOP_FASTPERM | IOP_FASTPERM_MAY_EXEC)))) 633 + return inode_permission(idmap, inode, mask); 634 + 635 + if (unlikely(((inode->i_mode & 0111) != 0111) || !no_acl_inode(inode))) 636 + return inode_permission(idmap, inode, mask); 637 + 638 + return security_inode_permission(inode, mask); 639 + } 607 640 608 641 /** 609 642 * path_get - get a reference to a path ··· 785 746 786 747 static void terminate_walk(struct nameidata *nd) 787 748 { 788 - drop_links(nd); 749 + if (unlikely(nd->depth)) 750 + drop_links(nd); 789 751 if (!(nd->flags & LOOKUP_RCU)) { 790 752 int i; 791 753 path_put(&nd->path); ··· 883 843 884 844 BUG_ON(!(nd->flags & LOOKUP_RCU)); 885 845 886 - if (unlikely(!legitimize_links(nd))) 846 + if (unlikely(nd->depth && !legitimize_links(nd))) 887 847 goto out1; 888 848 if (unlikely(!legitimize_path(nd, &nd->path, nd->seq))) 889 849 goto out; ··· 918 878 int res; 919 879 BUG_ON(!(nd->flags & LOOKUP_RCU)); 920 880 921 - if (unlikely(!legitimize_links(nd))) 881 + if (unlikely(nd->depth && !legitimize_links(nd))) 922 882 goto out2; 923 883 res = __legitimize_mnt(nd->path.mnt, nd->m_seq); 924 884 if (unlikely(res)) { ··· 991 951 * We don't want to zero nd->root for scoped-lookups or 992 952 * externally-managed nd->root. 
993 953 */ 994 - if (!(nd->state & ND_ROOT_PRESET)) 995 - if (!(nd->flags & LOOKUP_IS_SCOPED)) 954 + if (likely(!(nd->state & ND_ROOT_PRESET))) 955 + if (likely(!(nd->flags & LOOKUP_IS_SCOPED))) 996 956 nd->root.mnt = NULL; 997 957 nd->flags &= ~LOOKUP_CACHED; 998 958 if (!try_to_unlazy(nd)) ··· 1074 1034 } 1075 1035 if (!nd->root.mnt) { 1076 1036 int error = set_root(nd); 1077 - if (error) 1037 + if (unlikely(error)) 1078 1038 return error; 1079 1039 } 1080 1040 if (nd->flags & LOOKUP_RCU) { ··· 1672 1632 path->dentry = dentry; 1673 1633 if (nd->flags & LOOKUP_RCU) { 1674 1634 unsigned int seq = nd->next_seq; 1635 + if (likely(!d_managed(dentry))) 1636 + return 0; 1675 1637 if (likely(__follow_mount_rcu(nd, path))) 1676 1638 return 0; 1677 1639 // *path and nd->next_seq might've been clobbered 1678 1640 path->mnt = nd->path.mnt; 1679 1641 path->dentry = dentry; 1680 1642 nd->next_seq = seq; 1681 - if (!try_to_unlazy_next(nd, dentry)) 1643 + if (unlikely(!try_to_unlazy_next(nd, dentry))) 1682 1644 return -ECHILD; 1683 1645 } 1684 1646 ret = traverse_mounts(path, &jumped, &nd->total_link_count, nd->flags); ··· 1865 1823 return dentry; 1866 1824 } 1867 1825 1868 - static struct dentry *lookup_slow(const struct qstr *name, 1826 + static noinline struct dentry *lookup_slow(const struct qstr *name, 1869 1827 struct dentry *dir, 1870 1828 unsigned int flags) 1871 1829 { ··· 1897 1855 int err, mask; 1898 1856 1899 1857 mask = nd->flags & LOOKUP_RCU ? 
MAY_NOT_BLOCK : 0; 1900 - err = inode_permission(idmap, nd->inode, mask | MAY_EXEC); 1858 + err = lookup_inode_permission_may_exec(idmap, nd->inode, mask); 1901 1859 if (likely(!err)) 1902 1860 return 0; 1903 1861 ··· 1912 1870 if (err != -ECHILD) // hard error 1913 1871 return err; 1914 1872 1915 - return inode_permission(idmap, nd->inode, MAY_EXEC); 1873 + return lookup_inode_permission_may_exec(idmap, nd->inode, 0); 1916 1874 } 1917 1875 1918 1876 static int reserve_stack(struct nameidata *nd, struct path *link) ··· 1943 1901 1944 1902 enum {WALK_TRAILING = 1, WALK_MORE = 2, WALK_NOFOLLOW = 4}; 1945 1903 1946 - static const char *pick_link(struct nameidata *nd, struct path *link, 1904 + static noinline const char *pick_link(struct nameidata *nd, struct path *link, 1947 1905 struct inode *inode, int flags) 1948 1906 { 1949 1907 struct saved *last; 1950 1908 const char *res; 1951 - int error = reserve_stack(nd, link); 1909 + int error; 1952 1910 1911 + if (nd->flags & LOOKUP_RCU) { 1912 + /* make sure that d_is_symlink from step_into_slowpath() matches the inode */ 1913 + if (read_seqcount_retry(&link->dentry->d_seq, nd->next_seq)) 1914 + return ERR_PTR(-ECHILD); 1915 + } else { 1916 + if (link->mnt == nd->path.mnt) 1917 + mntget(link->mnt); 1918 + } 1919 + 1920 + error = reserve_stack(nd, link); 1953 1921 if (unlikely(error)) { 1954 1922 if (!(nd->flags & LOOKUP_RCU)) 1955 1923 path_put(link); ··· 2033 1981 * 2034 1982 * NOTE: dentry must be what nd->next_seq had been sampled from. 
2035 1983 */ 2036 - static const char *step_into(struct nameidata *nd, int flags, 1984 + static noinline const char *step_into_slowpath(struct nameidata *nd, int flags, 2037 1985 struct dentry *dentry) 2038 1986 { 2039 1987 struct path path; 2040 1988 struct inode *inode; 2041 - int err = handle_mounts(nd, dentry, &path); 1989 + int err; 2042 1990 2043 - if (err < 0) 1991 + err = handle_mounts(nd, dentry, &path); 1992 + if (unlikely(err < 0)) 2044 1993 return ERR_PTR(err); 2045 1994 inode = path.dentry->d_inode; 2046 1995 if (likely(!d_is_symlink(path.dentry)) || ··· 2063 2010 nd->seq = nd->next_seq; 2064 2011 return NULL; 2065 2012 } 2066 - if (nd->flags & LOOKUP_RCU) { 2067 - /* make sure that d_is_symlink above matches inode */ 2068 - if (read_seqcount_retry(&path.dentry->d_seq, nd->next_seq)) 2069 - return ERR_PTR(-ECHILD); 2070 - } else { 2071 - if (path.mnt == nd->path.mnt) 2072 - mntget(path.mnt); 2073 - } 2074 2013 return pick_link(nd, &path, inode, flags); 2014 + } 2015 + 2016 + static __always_inline const char *step_into(struct nameidata *nd, int flags, 2017 + struct dentry *dentry) 2018 + { 2019 + /* 2020 + * In the common case we are in rcu-walk and traversing over a non-mounted on 2021 + * directory (as opposed to e.g., a symlink). 2022 + * 2023 + * We can handle that and negative entries with the checks below. 
2024 + */ 2025 + if (likely((nd->flags & LOOKUP_RCU) && 2026 + !d_managed(dentry) && !d_is_symlink(dentry))) { 2027 + struct inode *inode = dentry->d_inode; 2028 + if (read_seqcount_retry(&dentry->d_seq, nd->next_seq)) 2029 + return ERR_PTR(-ECHILD); 2030 + if (unlikely(!inode)) 2031 + return ERR_PTR(-ENOENT); 2032 + nd->path.dentry = dentry; 2033 + /* nd->path.mnt is retained on purpose */ 2034 + nd->inode = inode; 2035 + nd->seq = nd->next_seq; 2036 + return NULL; 2037 + } 2038 + return step_into_slowpath(nd, flags, dentry); 2075 2039 } 2076 2040 2077 2041 static struct dentry *follow_dotdot_rcu(struct nameidata *nd) ··· 2171 2101 2172 2102 if (!nd->root.mnt) { 2173 2103 error = ERR_PTR(set_root(nd)); 2174 - if (error) 2104 + if (unlikely(error)) 2175 2105 return error; 2176 2106 } 2177 2107 if (nd->flags & LOOKUP_RCU) ··· 2201 2131 return NULL; 2202 2132 } 2203 2133 2204 - static const char *walk_component(struct nameidata *nd, int flags) 2134 + static __always_inline const char *walk_component(struct nameidata *nd, int flags) 2205 2135 { 2206 2136 struct dentry *dentry; 2207 2137 /* ··· 2210 2140 * parent relationships. 
2211 2141 */ 2212 2142 if (unlikely(nd->last_type != LAST_NORM)) { 2213 - if (!(flags & WALK_MORE) && nd->depth) 2143 + if (unlikely(nd->depth) && !(flags & WALK_MORE)) 2214 2144 put_link(nd); 2215 2145 return handle_dots(nd, nd->last_type); 2216 2146 } ··· 2222 2152 if (IS_ERR(dentry)) 2223 2153 return ERR_CAST(dentry); 2224 2154 } 2225 - if (!(flags & WALK_MORE) && nd->depth) 2155 + if (unlikely(nd->depth) && !(flags & WALK_MORE)) 2226 2156 put_link(nd); 2227 2157 return step_into(nd, flags, dentry); 2228 2158 } ··· 2575 2505 if (unlikely(!*name)) { 2576 2506 OK: 2577 2507 /* pathname or trailing symlink, done */ 2578 - if (!depth) { 2508 + if (likely(!depth)) { 2579 2509 nd->dir_vfsuid = i_uid_into_vfsuid(idmap, nd->inode); 2580 2510 nd->dir_mode = nd->inode->i_mode; 2581 2511 nd->flags &= ~LOOKUP_PARENT; ··· 2613 2543 const char *s = nd->pathname; 2614 2544 2615 2545 /* LOOKUP_CACHED requires RCU, ask caller to retry */ 2616 - if ((flags & (LOOKUP_RCU | LOOKUP_CACHED)) == LOOKUP_CACHED) 2546 + if (unlikely((flags & (LOOKUP_RCU | LOOKUP_CACHED)) == LOOKUP_CACHED)) 2617 2547 return ERR_PTR(-EAGAIN); 2618 2548 2619 - if (!*s) 2549 + if (unlikely(!*s)) 2620 2550 flags &= ~LOOKUP_RCU; 2621 2551 if (flags & LOOKUP_RCU) 2622 2552 rcu_read_lock(); ··· 2630 2560 nd->r_seq = __read_seqcount_begin(&rename_lock.seqcount); 2631 2561 smp_rmb(); 2632 2562 2633 - if (nd->state & ND_ROOT_PRESET) { 2563 + if (unlikely(nd->state & ND_ROOT_PRESET)) { 2634 2564 struct dentry *root = nd->root.dentry; 2635 2565 struct inode *inode = root->d_inode; 2636 2566 if (*s && unlikely(!d_can_lookup(root))) ··· 2649 2579 nd->root.mnt = NULL; 2650 2580 2651 2581 /* Absolute pathname -- fetch the root (LOOKUP_IN_ROOT uses nd->dfd). 
*/ 2652 - if (*s == '/' && !(flags & LOOKUP_IN_ROOT)) { 2582 + if (*s == '/' && likely(!(flags & LOOKUP_IN_ROOT))) { 2653 2583 error = nd_jump_root(nd); 2654 2584 if (unlikely(error)) 2655 2585 return ERR_PTR(error); ··· 2702 2632 } 2703 2633 2704 2634 /* For scoped-lookups we need to set the root to the dirfd as well. */ 2705 - if (flags & LOOKUP_IS_SCOPED) { 2635 + if (unlikely(flags & LOOKUP_IS_SCOPED)) { 2706 2636 nd->root = nd->path; 2707 2637 if (flags & LOOKUP_RCU) { 2708 2638 nd->root_seq = nd->seq;
+22 -16
fs/namespace.c
··· 1336 1336 } 1337 1337 static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput); 1338 1338 1339 - static void mntput_no_expire(struct mount *mnt) 1339 + static void noinline mntput_no_expire_slowpath(struct mount *mnt) 1340 1340 { 1341 1341 LIST_HEAD(list); 1342 1342 int count; 1343 1343 1344 - rcu_read_lock(); 1345 - if (likely(READ_ONCE(mnt->mnt_ns))) { 1346 - /* 1347 - * Since we don't do lock_mount_hash() here, 1348 - * ->mnt_ns can change under us. However, if it's 1349 - * non-NULL, then there's a reference that won't 1350 - * be dropped until after an RCU delay done after 1351 - * turning ->mnt_ns NULL. So if we observe it 1352 - * non-NULL under rcu_read_lock(), the reference 1353 - * we are dropping is not the final one. 1354 - */ 1355 - mnt_add_count(mnt, -1); 1356 - rcu_read_unlock(); 1357 - return; 1358 - } 1344 + VFS_BUG_ON(mnt->mnt_ns); 1359 1345 lock_mount_hash(); 1360 1346 /* 1361 1347 * make sure that if __legitimize_mnt() has not seen us grab ··· 1390 1404 return; 1391 1405 } 1392 1406 cleanup_mnt(mnt); 1407 + } 1408 + 1409 + static void mntput_no_expire(struct mount *mnt) 1410 + { 1411 + rcu_read_lock(); 1412 + if (likely(READ_ONCE(mnt->mnt_ns))) { 1413 + /* 1414 + * Since we don't do lock_mount_hash() here, 1415 + * ->mnt_ns can change under us. However, if it's 1416 + * non-NULL, then there's a reference that won't 1417 + * be dropped until after an RCU delay done after 1418 + * turning ->mnt_ns NULL. So if we observe it 1419 + * non-NULL under rcu_read_lock(), the reference 1420 + * we are dropping is not the final one. 1421 + */ 1422 + mnt_add_count(mnt, -1); 1423 + rcu_read_unlock(); 1424 + return; 1425 + } 1426 + mntput_no_expire_slowpath(mnt); 1393 1427 } 1394 1428 1395 1429 void mntput(struct vfsmount *mnt)
+3 -3
fs/open.c
··· 940 940 } 941 941 942 942 error = security_file_open(f); 943 - if (error) 943 + if (unlikely(error)) 944 944 goto cleanup_all; 945 945 946 946 /* ··· 950 950 * pseudo file, this call will not change the mode. 951 951 */ 952 952 error = fsnotify_open_perm_and_set_mode(f); 953 - if (error) 953 + if (unlikely(error)) 954 954 goto cleanup_all; 955 955 956 956 error = break_lease(file_inode(f), f->f_flags); 957 - if (error) 957 + if (unlikely(error)) 958 958 goto cleanup_all; 959 959 960 960 /* normally all 3 are set; ->open() can clear them if needed */
+3 -1
fs/orangefs/inode.c
··· 878 878 879 879 gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_update_time: %pU\n", 880 880 get_khandle_from_ino(inode)); 881 - flags = generic_update_time(inode, flags); 881 + 882 + flags = inode_update_timestamps(inode, flags); 883 + 882 884 memset(&iattr, 0, sizeof iattr); 883 885 if (flags & S_ATIME) 884 886 iattr.ia_valid |= ATTR_ATIME;
+1 -1
fs/splice.c
··· 1498 1498 1499 1499 /* 1500 1500 * For lack of a better implementation, implement vmsplice() to userspace 1501 - * as a simple copy of the pipes pages to the user iov. 1501 + * as a simple copy of the pipe's pages to the user iov. 1502 1502 */ 1503 1503 static ssize_t vmsplice_to_user(struct file *file, struct iov_iter *iter, 1504 1504 unsigned int flags)
+4 -3
fs/sync.c
··· 117 117 static void do_sync_work(struct work_struct *work) 118 118 { 119 119 int nowait = 0; 120 + int wait = 1; 120 121 121 122 /* 122 123 * Sync twice to reduce the possibility we skipped some inodes / pages 123 124 * because they were temporarily locked 124 125 */ 125 - iterate_supers(sync_inodes_one_sb, &nowait); 126 + iterate_supers(sync_inodes_one_sb, NULL); 126 127 iterate_supers(sync_fs_one_sb, &nowait); 127 128 sync_bdevs(false); 128 - iterate_supers(sync_inodes_one_sb, &nowait); 129 - iterate_supers(sync_fs_one_sb, &nowait); 129 + iterate_supers(sync_inodes_one_sb, NULL); 130 + iterate_supers(sync_fs_one_sb, &wait); 130 131 sync_bdevs(false); 131 132 printk("Emergency Sync complete\n"); 132 133 kfree(work);
+1
fs/utimes.c
··· 76 76 out: 77 77 return error; 78 78 } 79 + EXPORT_SYMBOL_GPL(vfs_utimes); 79 80 80 81 static int do_utimes_path(int dfd, const char __user *filename, 81 82 struct timespec64 *times, int flags)
+2 -1
include/asm-generic/vmlinux.lds.h
··· 955 955 956 956 #define RUNTIME_CONST_VARIABLES \ 957 957 RUNTIME_CONST(shift, d_hash_shift) \ 958 - RUNTIME_CONST(ptr, dentry_hashtable) 958 + RUNTIME_CONST(ptr, dentry_hashtable) \ 959 + RUNTIME_CONST(ptr, __dentry_cache) 959 960 960 961 /* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */ 961 962 #define KUNIT_TABLE() \
+8 -7
include/linux/fs.h
··· 634 634 return (long)acl & 1; 635 635 } 636 636 637 - #define IOP_FASTPERM 0x0001 638 - #define IOP_LOOKUP 0x0002 639 - #define IOP_NOFOLLOW 0x0004 640 - #define IOP_XATTR 0x0008 637 + #define IOP_FASTPERM 0x0001 638 + #define IOP_LOOKUP 0x0002 639 + #define IOP_NOFOLLOW 0x0004 640 + #define IOP_XATTR 0x0008 641 641 #define IOP_DEFAULT_READLINK 0x0010 642 - #define IOP_MGTIME 0x0020 643 - #define IOP_CACHED_LINK 0x0040 642 + #define IOP_MGTIME 0x0020 643 + #define IOP_CACHED_LINK 0x0040 644 + #define IOP_FASTPERM_MAY_EXEC 0x0080 644 645 645 646 /* 646 647 * Inode state bits. Protected by inode->i_lock ··· 3079 3078 * file_start_write - get write access to a superblock for regular file io 3080 3079 * @file: the file we want to write to 3081 3080 * 3082 - * This is a variant of sb_start_write() which is a noop on non-regualr file. 3081 + * This is a variant of sb_start_write() which is a noop on non-regular file. 3083 3082 * Should be matched with a call to file_end_write(). 3084 3083 */ 3085 3084 static inline void file_start_write(struct file *file)
+7 -16
include/linux/pipe_fs_i.h
··· 44 44 typedef unsigned short pipe_index_t; 45 45 #endif 46 46 47 - /* 48 - * We have to declare this outside 'struct pipe_inode_info', 49 - * but then we can't use 'union pipe_index' for an anonymous 50 - * union, so we end up having to duplicate this declaration 51 - * below. Annoying. 47 + /** 48 + * struct pipe_index - pipe indeces 49 + * @head: The point of buffer production 50 + * @tail: The point of buffer consumption 51 + * @head_tail: unsigned long union of @head and @tail 52 52 */ 53 53 union pipe_index { 54 54 unsigned long head_tail; ··· 63 63 * @mutex: mutex protecting the whole thing 64 64 * @rd_wait: reader wait point in case of empty pipe 65 65 * @wr_wait: writer wait point in case of full pipe 66 - * @head: The point of buffer production 67 - * @tail: The point of buffer consumption 68 - * @head_tail: unsigned long union of @head and @tail 66 + * @pipe_index: the pipe indeces 69 67 * @note_loss: The next read() should insert a data-lost message 70 68 * @max_usage: The maximum number of slots that may be used in the ring 71 69 * @ring_size: total number of buffers (should be a power of 2) ··· 85 87 struct mutex mutex; 86 88 wait_queue_head_t rd_wait, wr_wait; 87 89 88 - /* This has to match the 'union pipe_index' above */ 89 - union { 90 - unsigned long head_tail; 91 - struct { 92 - pipe_index_t head; 93 - pipe_index_t tail; 94 - }; 95 - }; 90 + union pipe_index; 96 91 97 92 unsigned int max_usage; 98 93 unsigned int ring_size;
+2 -1
init/do_mounts.c
··· 120 120 static unsigned int __initdata root_delay; 121 121 static int __init root_delay_setup(char *str) 122 122 { 123 - root_delay = simple_strtoul(str, NULL, 0); 123 + if (kstrtouint(str, 0, &root_delay)) 124 + return 0; 124 125 return 1; 125 126 } 126 127
+1 -2
init/do_mounts_rd.c
··· 29 29 30 30 static int __init ramdisk_start_setup(char *str) 31 31 { 32 - rd_image_start = simple_strtol(str,NULL,0); 33 - return 1; 32 + return kstrtoint(str, 0, &rd_image_start) == 0; 34 33 } 35 34 __setup("ramdisk_start=", ramdisk_start_setup); 36 35
+2 -2
kernel/watch_queue.c
··· 119 119 offset = note % WATCH_QUEUE_NOTES_PER_PAGE * WATCH_QUEUE_NOTE_SIZE; 120 120 get_page(page); 121 121 len = n->info & WATCH_INFO_LENGTH; 122 - p = kmap_atomic(page); 122 + p = kmap_local_page(page); 123 123 memcpy(p + offset, n, len); 124 - kunmap_atomic(p); 124 + kunmap_local(p); 125 125 126 126 buf = pipe_buf(pipe, head); 127 127 buf->page = page;
+3 -1
scripts/Makefile.extrawarn
··· 28 28 KBUILD_CFLAGS-$(CONFIG_CC_NO_ARRAY_BOUNDS) += -Wno-array-bounds 29 29 30 30 ifdef CONFIG_CC_IS_CLANG 31 - # The kernel builds with '-std=gnu11' so use of GNU extensions is acceptable. 31 + # The kernel builds with '-std=gnu11' and '-fms-extensions' so use of GNU and 32 + # Microsoft extensions is acceptable. 32 33 KBUILD_CFLAGS += -Wno-gnu 34 + KBUILD_CFLAGS += -Wno-microsoft-anon-tag 33 35 34 36 # Clang checks for overflow/truncation with '%p', while GCC does not: 35 37 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=111219