Merge tag 'mm-hotfixes-stable-2023-03-04-13-12' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
"17 hotfixes.

Eight are for MM and seven are for other parts of the kernel. Seven
are cc:stable and eight address post-6.2 issues or were judged
unsuitable for -stable backporting"

* tag 'mm-hotfixes-stable-2023-03-04-13-12' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
mailmap: map Dikshita Agarwal's old address to his current one
mailmap: map Vikash Garodia's old address to his current one
fs/cramfs/inode.c: initialize file_ra_state
fs: hfsplus: fix UAF issue in hfsplus_put_super
panic: fix the panic_print NMI backtrace setting
lib: parser: update documentation for match_NUMBER functions
kasan, x86: don't rename memintrinsics in uninstrumented files
kasan: test: fix test for new meminstrinsic instrumentation
kasan: treat meminstrinsic as builtins in uninstrumented files
kasan: emit different calls for instrumentable memintrinsics
ocfs2: fix non-auto defrag path not working issue
ocfs2: fix defrag path triggering jbd2 ASSERT
mailmap: map Georgi Djakov's old Linaro address to his current one
mm/hwpoison: convert TTU_IGNORE_HWPOISON to TTU_HWPOISON
lib/zlib: DFLTCC deflate does not write all available bits for Z_NO_FLUSH
mm/damon/paddr: fix missing folio_put()
mm/mremap: fix dup_anon_vma() in vma_merge() case 4

+147 -82
+3
.mailmap
···
 Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com>
 Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@mips.com>
 <dev.kurt@vandijck-laurijssen.be> <kurt.van.dijck@eia.be>
+Dikshita Agarwal <dikshita@qti.qualcomm.com> <dikshita@codeaurora.org>
 Dmitry Baryshkov <dbaryshkov@gmail.com>
 Dmitry Baryshkov <dbaryshkov@gmail.com> <[dbaryshkov@gmail.com]>
 Dmitry Baryshkov <dbaryshkov@gmail.com> <dmitry_baryshkov@mentor.com>
···
 Gao Xiang <xiang@kernel.org> <hsiangkao@aol.com>
 Gao Xiang <xiang@kernel.org> <hsiangkao@linux.alibaba.com>
 Gao Xiang <xiang@kernel.org> <hsiangkao@redhat.com>
+Georgi Djakov <djakov@kernel.org> <georgi.djakov@linaro.org>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>
 Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@linux.vnet.ibm.com>
···
 Vasily Averin <vasily.averin@linux.dev> <vvs@parallels.com>
 Vasily Averin <vasily.averin@linux.dev> <vvs@sw.ru>
 Valentin Schneider <vschneid@redhat.com> <valentin.schneider@arm.com>
+Vikash Garodia <quic_vgarodia@quicinc.com> <vgarodia@codeaurora.org>
 Vinod Koul <vkoul@kernel.org> <vinod.koul@intel.com>
 Vinod Koul <vkoul@kernel.org> <vinod.koul@linux.intel.com>
 Vinod Koul <vkoul@kernel.org> <vkoul@infradead.org>
···
-19
arch/x86/include/asm/string_64.h
···
 char *strcat(char *dest, const char *src);
 int strcmp(const char *cs, const char *ct);
 
-#if (defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__))
-/*
- * For files that not instrumented (e.g. mm/slub.c) we
- * should use not instrumented version of mem* functions.
- */
-
-#undef memcpy
-#define memcpy(dst, src, len) __memcpy(dst, src, len)
-#undef memmove
-#define memmove(dst, src, len) __memmove(dst, src, len)
-#undef memset
-#define memset(s, c, n) __memset(s, c, n)
-
-#ifndef __NO_FORTIFY
-#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
-#endif
-
-#endif
-
 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
 #define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
 void __memcpy_flushcache(void *dst, const void *src, size_t cnt);
···
+1 -1
fs/cramfs/inode.c
···
 			unsigned int len)
 {
 	struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
-	struct file_ra_state ra;
+	struct file_ra_state ra = {};
 	struct page *pages[BLKS_PER_BUF];
 	unsigned i, blocknr, buffer;
 	unsigned long devsize;
···
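
The cramfs fix is the classic uninitialized-stack-struct bug: a file_ra_state was declared on the stack but never set up before readahead code read its fields. A minimal userspace sketch of the failure mode and the empty-initializer cure follows; the struct and field names are invented for illustration, and the kernel's "= {}" is the GNU C / C23 empty initializer, equivalent to the portable "= { 0 }" used here in that both zero every member:

    #include <stdio.h>

    struct reader_state {
        unsigned long start;    /* fields stand in for the idea of */
        unsigned long size;     /* file_ra_state; names are made up */
    };

    static void use_state(const struct reader_state *st)
    {
        /* Reading st->size is undefined if st was never initialized. */
        printf("window: start=%lu size=%lu\n", st->start, st->size);
    }

    int main(void)
    {
        struct reader_state bad;          /* indeterminate contents */
        struct reader_state good = { 0 }; /* every member zeroed */

        use_state(&good);   /* safe: prints start=0 size=0 */
        (void)bad;          /* passing &bad instead would be the bug */
        return 0;
    }
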
+2 -2
fs/hfsplus/super.c
···
 		hfsplus_sync_fs(sb, 1);
 	}
 
+	iput(sbi->alloc_file);
+	iput(sbi->hidden_dir);
 	hfs_btree_close(sbi->attr_tree);
 	hfs_btree_close(sbi->cat_tree);
 	hfs_btree_close(sbi->ext_tree);
-	iput(sbi->alloc_file);
-	iput(sbi->hidden_dir);
 	kfree(sbi->s_vhdr_buf);
 	kfree(sbi->s_backup_vhdr_buf);
 	unload_nls(sbi->nls);
···
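
The hfsplus fix is purely an ordering change: the inode references must be dropped before the btrees they can still dereference during the final release are torn down. A generic userspace sketch of the teardown-ordering pattern (all names hypothetical, not the hfsplus API):

    #include <stdlib.h>

    struct index  { int dummy; };
    struct handle { struct index *idx; };

    static void put_handle(struct handle *h)
    {
        h->idx->dummy++;   /* final release still touches the index */
        free(h);
    }

    int main(void)
    {
        struct index  *idx = malloc(sizeof(*idx));
        struct handle *h   = malloc(sizeof(*h));

        h->idx = idx;

        put_handle(h);  /* 1: drop the users of idx first... */
        free(idx);      /* 2: ...then tear down idx itself */
        /* The reversed order lets put_handle() touch freed memory (UAF). */
        return 0;
    }
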
+13 -21
fs/ocfs2/move_extents.c
···
 	 */
 	replace_rec.e_flags = ext_flags & ~OCFS2_EXT_REFCOUNTED;
 
-	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
-				      context->et.et_root_bh,
-				      OCFS2_JOURNAL_ACCESS_WRITE);
-	if (ret) {
-		mlog_errno(ret);
-		goto out;
-	}
-
 	ret = ocfs2_split_extent(handle, &context->et, path, index,
 				 &replace_rec, context->meta_ac,
 				 &context->dealloc);
···
 		mlog_errno(ret);
 		goto out;
 	}
-
-	ocfs2_journal_dirty(handle, context->et.et_root_bh);
 
 	context->new_phys_cpos = new_p_cpos;
···
 	bg = (struct ocfs2_group_desc *)gd_bh->b_data;
 
 	if (vict_blkno < (le64_to_cpu(bg->bg_blkno) +
-				le16_to_cpu(bg->bg_bits))) {
+			(le16_to_cpu(bg->bg_bits) << bits_per_unit))) {
 
 		*ret_bh = gd_bh;
 		*vict_bit = (vict_blkno - blkno) >>
···
 			last_free_bits++;
 
 		if (last_free_bits == move_len) {
+			i -= move_len;
 			*goal_bit = i;
 			*phys_cpos = base_cpos + i;
 			break;
···
 	context->range = &range;
 
+	/*
+	 * ok, the default theshold for the defragmentation
+	 * is 1M, since our maximum clustersize was 1M also.
+	 * any thought?
+	 */
+	if (!range.me_threshold)
+		range.me_threshold = 1024 * 1024;
+
+	if (range.me_threshold > i_size_read(inode))
+		range.me_threshold = i_size_read(inode);
+
 	if (range.me_flags & OCFS2_MOVE_EXT_FL_AUTO_DEFRAG) {
 		context->auto_defrag = 1;
-		/*
-		 * ok, the default theshold for the defragmentation
-		 * is 1M, since our maximum clustersize was 1M also.
-		 * any thought?
-		 */
-		if (!range.me_threshold)
-			range.me_threshold = 1024 * 1024;
-
-		if (range.me_threshold > i_size_read(inode))
-			range.me_threshold = i_size_read(inode);
 
 		if (range.me_flags & OCFS2_MOVE_EXT_FL_PART_DEFRAG)
 			context->partial = 1;
···
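
One of the ocfs2 hunks is a unit-conversion bug worth calling out: the group-descriptor bounds check compared a block number against a count of allocation bits without converting bits to blocks. A standalone sketch of that class of bug, with made-up values (bits_per_unit here plays the role of the shift the fix introduces):

    #include <assert.h>

    int main(void)
    {
        unsigned long long group_blkno = 4096; /* first block of the group */
        unsigned int group_bits = 512;         /* group size in bits */
        unsigned int bits_per_unit = 2;        /* 1 bit covers 2^2 = 4 blocks */
        unsigned long long victim = 4096 + 1024;

        /* Buggy: treats a bit count as a block count, so the group
         * appears to end at 4608 and the victim block is missed. */
        assert(!(victim < group_blkno + group_bits));

        /* Fixed: convert bits to blocks first; the group really ends
         * at 4096 + (512 << 2) = 6144, which covers the victim. */
        assert(victim < group_blkno +
               ((unsigned long long)group_bits << bits_per_unit));
        return 0;
    }
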
+1 -1
include/linux/rmap.h
···
 	TTU_SPLIT_HUGE_PMD	= 0x4,	/* split huge PMD if any */
 	TTU_IGNORE_MLOCK	= 0x8,	/* ignore mlock */
 	TTU_SYNC		= 0x10,	/* avoid racy checks with PVMW_SYNC */
-	TTU_IGNORE_HWPOISON	= 0x20,	/* corrupted page is recoverable */
+	TTU_HWPOISON		= 0x20,	/* do convert pte to hwpoison entry */
 	TTU_BATCH_FLUSH		= 0x40,	/* Batch TLB flushes where possible
 					 * and caller guarantees they will
 					 * do a final flush if necessary */
···
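
This rename inverts the flag's polarity: TTU_IGNORE_HWPOISON was opt-out (set it to suppress the hwpoison conversion), while TTU_HWPOISON is opt-in (set it to request the conversion), which makes the default a plain unmap. A small sketch of the inverted call-site logic, using only the flag value from this diff (the functions are stand-ins, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    #define TTU_HWPOISON 0x20u  /* opt-in: convert PTE to hwpoison entry */

    static void unmap_one(unsigned int flags, bool page_poisoned)
    {
        /* Old opt-out test was: poisoned && !(flags & TTU_IGNORE_HWPOISON). */
        if (page_poisoned && (flags & TTU_HWPOISON))
            printf("install hwpoison entry\n");
        else
            printf("plain unmap\n");
    }

    int main(void)
    {
        unsigned int ttu = TTU_HWPOISON;  /* caller asks for poisoning */

        unmap_one(ttu, true);             /* install hwpoison entry */
        ttu &= ~TTU_HWPOISON;             /* e.g. page kept in swap cache */
        unmap_one(ttu, true);             /* plain unmap */
        return 0;
    }
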
+26 -18
kernel/panic.c
···
 		return;
 	}
 
-	if (panic_print & PANIC_PRINT_ALL_CPU_BT)
-		trigger_all_cpu_backtrace();
-
 	if (panic_print & PANIC_PRINT_TASK_INFO)
 		show_state();
···
 	if (atomic_inc_return(&warn_count) >= limit && limit)
 		panic("%s: system warned too often (kernel.warn_limit is %d)",
 		      origin, limit);
 }
 
+/*
+ * Helper that triggers the NMI backtrace (if set in panic_print)
+ * and then performs the secondary CPUs shutdown - we cannot have
+ * the NMI backtrace after the CPUs are off!
+ */
+static void panic_other_cpus_shutdown(bool crash_kexec)
+{
+	if (panic_print & PANIC_PRINT_ALL_CPU_BT)
+		trigger_all_cpu_backtrace();
+
+	/*
+	 * Note that smp_send_stop() is the usual SMP shutdown function,
+	 * which unfortunately may not be hardened to work in a panic
+	 * situation. If we want to do crash dump after notifier calls
+	 * and kmsg_dump, we will need architecture dependent extra
+	 * bits in addition to stopping other CPUs, hence we rely on
+	 * crash_smp_send_stop() for that.
+	 */
+	if (!crash_kexec)
+		smp_send_stop();
+	else
+		crash_smp_send_stop();
+}
+
 /**
···
 	 *
 	 * Bypass the panic_cpu check and call __crash_kexec directly.
 	 */
-	if (!_crash_kexec_post_notifiers) {
+	if (!_crash_kexec_post_notifiers)
 		__crash_kexec(NULL);
 
-		/*
-		 * Note smp_send_stop is the usual smp shutdown function, which
-		 * unfortunately means it may not be hardened to work in a
-		 * panic situation.
-		 */
-		smp_send_stop();
-	} else {
-		/*
-		 * If we want to do crash dump after notifier calls and
-		 * kmsg_dump, we will need architecture dependent extra
-		 * works in addition to stopping other CPUs.
-		 */
-		crash_smp_send_stop();
-	}
+	panic_other_cpus_shutdown(_crash_kexec_post_notifiers);
 
 	/*
 	 * Run any panic handlers, including those that might need to
···
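
The panic fix does two things at once: it deduplicates the two CPU-stop branches into one helper, and it pins the ordering so the all-CPU backtrace fires while the other CPUs can still respond to the NMI. A minimal shape of that refactor, with stub functions standing in for the kernel calls:

    #include <stdbool.h>
    #include <stdio.h>

    static void trigger_all_cpu_backtrace_stub(void) { puts("NMI backtrace"); }
    static void smp_send_stop_stub(void)             { puts("stop CPUs"); }
    static void crash_smp_send_stop_stub(void)       { puts("stop CPUs (crash)"); }

    /* Backtrace first: once the other CPUs are stopped, they cannot answer. */
    static void other_cpus_shutdown(bool crash_kexec, bool want_backtrace)
    {
        if (want_backtrace)
            trigger_all_cpu_backtrace_stub();

        if (!crash_kexec)
            smp_send_stop_stub();
        else
            crash_smp_send_stop_stub();
    }

    int main(void)
    {
        other_cpus_shutdown(false, true);  /* backtrace, then normal stop */
        other_cpus_shutdown(true, false);  /* crash-dump stop path */
        return 0;
    }
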
+9
lib/Kconfig.kasan
···
 
 if KASAN
 
+config CC_HAS_KASAN_MEMINTRINSIC_PREFIX
+	def_bool (CC_IS_CLANG && $(cc-option,-fsanitize=kernel-address -mllvm -asan-kernel-mem-intrinsic-prefix=1)) || \
+		 (CC_IS_GCC && $(cc-option,-fsanitize=kernel-address --param asan-kernel-mem-intrinsic-prefix=1))
+	# Don't define it if we don't need it: compilation of the test uses
+	# this variable to decide how the compiler should treat builtins.
+	depends on !KASAN_HW_TAGS
+	help
+	  The compiler is able to prefix memintrinsics with __asan or __hwasan.
+
 choice
 	prompt "KASAN mode"
 	default KASAN_GENERIC
···
+7 -7
lib/parser.c
···
  * as a number in that base.
  *
  * Return: On success, sets @result to the integer represented by the
- * string and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+ * string and returns 0. Returns -EINVAL or -ERANGE on failure.
  */
 static int match_number(substring_t *s, int *result, int base)
 {
···
  * as a number in that base.
  *
  * Return: On success, sets @result to the integer represented by the
- * string and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+ * string and returns 0. Returns -EINVAL or -ERANGE on failure.
  */
 static int match_u64int(substring_t *s, u64 *result, int base)
 {
···
  * Description: Attempts to parse the &substring_t @s as a decimal integer.
  *
  * Return: On success, sets @result to the integer represented by the string
- * and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+ * and returns 0. Returns -EINVAL or -ERANGE on failure.
  */
 int match_int(substring_t *s, int *result)
 {
···
  * Description: Attempts to parse the &substring_t @s as a decimal integer.
  *
  * Return: On success, sets @result to the integer represented by the string
- * and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+ * and returns 0. Returns -EINVAL or -ERANGE on failure.
  */
 int match_uint(substring_t *s, unsigned int *result)
 {
···
  * integer.
  *
  * Return: On success, sets @result to the integer represented by the string
- * and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+ * and returns 0. Returns -EINVAL or -ERANGE on failure.
  */
 int match_u64(substring_t *s, u64 *result)
 {
···
  * Description: Attempts to parse the &substring_t @s as an octal integer.
  *
  * Return: On success, sets @result to the integer represented by the string
- * and returns 0. Returns -ENOMEM, -EINVAL, or -ERANGE on failure.
+ * and returns 0. Returns -EINVAL or -ERANGE on failure.
  */
 int match_octal(substring_t *s, int *result)
 {
···
  * Description: Attempts to parse the &substring_t @s as a hexadecimal integer.
  *
  * Return: On success, sets @result to the integer represented by the string
- * and returns 0. Returns -EINVAL or -ERANGE on failure.
+ * and returns 0. Returns -EINVAL or -ERANGE on failure.
  */
 int match_hex(substring_t *s, int *result)
 {
···
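
The parser change is documentation-only: the match_NUMBER helpers no longer allocate, so -ENOMEM is dropped from the documented error set, leaving -EINVAL (not a number in the given base) and -ERANGE (out of range for the result type). A userspace analogue of that exact contract, built on strtol rather than the kernel API:

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* 0 on success, -EINVAL or -ERANGE on failure, like the kernel docs. */
    static int parse_int(const char *s, int base, int *result)
    {
        char *end;
        long val;

        errno = 0;
        val = strtol(s, &end, base);
        if (end == s || *end != '\0')
            return -EINVAL;                 /* not a number in this base */
        if (errno == ERANGE || val < INT_MIN || val > INT_MAX)
            return -ERANGE;                 /* does not fit in an int */
        *result = (int)val;
        return 0;
    }

    int main(void)
    {
        int v;

        printf("%d\n", parse_int("42", 10, &v));   /* 0, v == 42 */
        printf("%d\n", parse_int("4x2", 10, &v));  /* -EINVAL */
        printf("%d\n", parse_int("99999999999999999999", 10, &v)); /* -ERANGE */
        return 0;
    }
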
+3 -1
lib/zlib_deflate/defutil.h
···
     z_streamp strm
 )
 {
+    unsigned len;
     deflate_state *s = (deflate_state *) strm->state;
-    unsigned len = s->pending;
 
+    bi_flush(s);
+    len = s->pending;
     if (len > strm->avail_out) len = strm->avail_out;
     if (len == 0) return;
 
···
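
The zlib bug is an ordering problem between a bit accumulator and a byte buffer: the pending byte count was sampled before bi_flush() pushed the partial byte out, so up to a byte's worth of buffered bits never reached the output on Z_NO_FLUSH. A toy model of the two-stage buffer (not zlib's real structures, just the shape of the bug):

    #include <stdio.h>

    struct bitbuf {
        unsigned int bits;       /* bit accumulator (< 8 valid bits) */
        unsigned int nbits;
        unsigned char pending[64];
        unsigned int npending;   /* bytes ready to be copied out */
    };

    /* Pad the partial byte into pending, like zlib's bi_flush(). */
    static void flush_bits(struct bitbuf *b)
    {
        if (b->nbits > 0) {
            b->pending[b->npending++] = (unsigned char)b->bits;
            b->bits = b->nbits = 0;
        }
    }

    int main(void)
    {
        struct bitbuf b = { .bits = 0x5, .nbits = 3, .npending = 2 };
        unsigned int len;

        /* Buggy order: len = b.npending here (2) loses the 3 bits. */
        flush_bits(&b);     /* fixed order: flush first... */
        len = b.npending;   /* ...then count the bytes to copy out */

        printf("copy out %u bytes\n", len);  /* 3, not 2 */
        return 0;
    }
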
+3 -4
mm/damon/paddr.c
···
 			folio_put(folio);
 			continue;
 		}
-		if (folio_test_unevictable(folio)) {
+		if (folio_test_unevictable(folio))
 			folio_putback_lru(folio);
-		} else {
+		else
 			list_add(&folio->lru, &folio_list);
-			folio_put(folio);
-		}
+		folio_put(folio);
 	}
 	applied = reclaim_pages(&folio_list);
 	cond_resched();
···
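
The DAMON bug is a reference-count pairing error: the reference taken when the folio was looked up was dropped on one branch but not on the unevictable one, leaking a reference per iteration. The invariant is that every get is matched by exactly one put on every path, which the fix enforces by moving the put after the branch. A generic sketch with illustrative types:

    #include <stdio.h>
    #include <stdlib.h>

    struct obj { int refcount; };

    static struct obj *obj_get(struct obj *o) { o->refcount++; return o; }

    static void obj_put(struct obj *o)
    {
        if (--o->refcount == 0)
            free(o);
    }

    static void process(struct obj *o, int special)
    {
        obj_get(o);             /* lookup takes a reference */

        if (special)
            puts("special path");
        else
            puts("common path");

        obj_put(o);             /* dropped exactly once, on every path */
    }

    int main(void)
    {
        struct obj *o = calloc(1, sizeof(*o));

        o->refcount = 1;
        process(o, 0);
        process(o, 1);
        obj_put(o);             /* drops the last reference and frees */
        return 0;
    }
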
+8 -1
mm/kasan/Makefile
···
 CFLAGS_hw_tags.o := $(CC_FLAGS_KASAN_RUNTIME)
 CFLAGS_sw_tags.o := $(CC_FLAGS_KASAN_RUNTIME)
 
-CFLAGS_KASAN_TEST := $(CFLAGS_KASAN) -fno-builtin $(call cc-disable-warning, vla)
+CFLAGS_KASAN_TEST := $(CFLAGS_KASAN) $(call cc-disable-warning, vla)
+ifndef CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX
+# If compiler instruments memintrinsics by prefixing them with __asan/__hwasan,
+# we need to treat them normally (as builtins), otherwise the compiler won't
+# recognize them as instrumentable. If it doesn't instrument them, we need to
+# pass -fno-builtin, so the compiler doesn't inline them.
+CFLAGS_KASAN_TEST += -fno-builtin
+endif
 
 CFLAGS_kasan_test.o := $(CFLAGS_KASAN_TEST)
 CFLAGS_kasan_test_module.o := $(CFLAGS_KASAN_TEST)
···
+4
mm/kasan/kasan.h
···
 
 void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size);
 
+void *__hwasan_memset(void *addr, int c, size_t len);
+void *__hwasan_memmove(void *dest, const void *src, size_t len);
+void *__hwasan_memcpy(void *dest, const void *src, size_t len);
+
 #endif /* __MM_KASAN_KASAN_H */
···
+29
mm/kasan/kasan_test.c
···
 	kunit_skip((test), "Test requires " #config "=n");	\
 } while (0)
 
+#define KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test) do {		\
+	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS))				\
+		break;  /* No compiler instrumentation. */		\
+	if (IS_ENABLED(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX))	\
+		break;  /* Should always be instrumented! */		\
+	if (IS_ENABLED(CONFIG_GENERIC_ENTRY))				\
+		kunit_skip((test), "Test requires checked mem*()");	\
+} while (0)
+
 static void kmalloc_oob_right(struct kunit *test)
 {
 	char *ptr;
···
 		u64 words[2];
 	} *ptr1, *ptr2;
 
+	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
+
 	/* This test is specifically crafted for the generic mode. */
 	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
···
 	struct {
 		u64 words[2];
 	} *ptr1, *ptr2;
+
+	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
 
 	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
···
 	char *ptr;
 	size_t size = 128 - KASAN_GRANULE_SIZE;
 
+	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
+
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
···
 {
 	char *ptr;
 	size_t size = 128 - KASAN_GRANULE_SIZE;
+
+	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
 
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
···
 	char *ptr;
 	size_t size = 128 - KASAN_GRANULE_SIZE;
 
+	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
+
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
···
 	char *ptr;
 	size_t size = 128 - KASAN_GRANULE_SIZE;
 
+	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
+
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
···
 {
 	char *ptr;
 	size_t size = 128 - KASAN_GRANULE_SIZE;
+
+	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
 
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
···
 	char *ptr;
 	size_t size = 64;
 	size_t invalid_size = -2;
+
+	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
 
 	/*
 	 * Hardware tag-based mode doesn't check memmove for negative size.
···
 	char *ptr;
 	size_t size = 64;
 	size_t invalid_size = size;
+
+	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
 
 	ptr = kmalloc(size, GFP_KERNEL);
 	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
···
 {
 	char *ptr;
 	size_t size = 33;
+
+	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);
 
 	/*
 	 * Only generic KASAN uses quarantine, which is required to avoid a
···
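
The new skip helper leans on the do { ... } while (0) macro idiom, where break serves as an early-out between the compile-time checks without a goto. A standalone sketch of that pattern (names invented; in the real macro, kunit_skip() rather than return performs the exit):

    #include <stdio.h>

    /* break leaves the do/while, giving the macro an early exit. */
    #define SKIP_UNLESS_SUPPORTED(feature_a, feature_b) do { \
        if (feature_a) \
            break;  /* first condition satisfies us */ \
        if (feature_b) \
            break;  /* so does the second */ \
        printf("skipping: no supported feature\n"); \
        return; \
    } while (0)

    static void run_test(int a, int b)
    {
        SKIP_UNLESS_SUPPORTED(a, b);
        printf("running test (a=%d b=%d)\n", a, b);
    }

    int main(void)
    {
        run_test(1, 0);  /* runs */
        run_test(0, 0);  /* skipped */
        return 0;
    }
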
+15 -1
mm/kasan/shadow.c
···
 }
 EXPORT_SYMBOL(__kasan_check_write);
 
-#ifndef CONFIG_GENERIC_ENTRY
+#if !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX) && !defined(CONFIG_GENERIC_ENTRY)
 /*
  * CONFIG_GENERIC_ENTRY relies on compiler emitted mem*() calls to not be
  * instrumented. KASAN enabled toolchains should emit __asan_mem*() functions
  * for the sites they want to instrument.
+ *
+ * If we have a compiler that can instrument meminstrinsics, never override
+ * these, so that non-instrumented files can safely consider them as builtins.
  */
 #undef memset
 void *memset(void *addr, int c, size_t len)
···
 	return __memcpy(dest, src, len);
 }
 EXPORT_SYMBOL(__asan_memcpy);
+
+#ifdef CONFIG_KASAN_SW_TAGS
+void *__hwasan_memset(void *addr, int c, size_t len) __alias(__asan_memset);
+EXPORT_SYMBOL(__hwasan_memset);
+#ifdef __HAVE_ARCH_MEMMOVE
+void *__hwasan_memmove(void *dest, const void *src, size_t len) __alias(__asan_memmove);
+EXPORT_SYMBOL(__hwasan_memmove);
+#endif
+void *__hwasan_memcpy(void *dest, const void *src, size_t len) __alias(__asan_memcpy);
+EXPORT_SYMBOL(__hwasan_memcpy);
+#endif
 
 void kasan_poison(const void *addr, size_t size, u8 value, bool init)
 {
···
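
Note that the __hwasan_mem*() entry points added here are not separate implementations: __alias() is the kernel's wrapper around the compiler's alias attribute, which binds a second symbol name to the same code. A userspace sketch of the mechanism (GCC/Clang on ELF targets; the function names are illustrative):

    #include <stdio.h>
    #include <string.h>

    /* The real implementation, playing the role of __asan_memset. */
    void *checked_memset(void *addr, int c, size_t len)
    {
        /* a real version would validate [addr, addr + len) first */
        return memset(addr, c, len);
    }

    /* Second symbol, same code: what __alias(__asan_memset) expands to. */
    void *alias_memset(void *addr, int c, size_t len)
        __attribute__((alias("checked_memset")));

    int main(void)
    {
        char buf[8];

        alias_memset(buf, 0, sizeof(buf));  /* runs checked_memset */
        printf("buf[0] = %d\n", buf[0]);
        return 0;
    }
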
+4 -4
mm/memory-failure.c
···
  * cache and swap cache(ie. page is freshly swapped in). So it could be
  * referenced concurrently by 2 types of PTEs:
  * normal PTEs and swap PTEs. We try to handle them consistently by calling
- * try_to_unmap(TTU_IGNORE_HWPOISON) to convert the normal PTEs to swap PTEs,
+ * try_to_unmap(!TTU_HWPOISON) to convert the normal PTEs to swap PTEs,
  * and then
  * - clear dirty bit to prevent IO
  * - remove from LRU
···
 				  int flags, struct page *hpage)
 {
 	struct folio *folio = page_folio(hpage);
-	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC;
+	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON;
 	struct address_space *mapping;
 	LIST_HEAD(tokill);
 	bool unmap_success;
···
 
 	if (PageSwapCache(p)) {
 		pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
-		ttu |= TTU_IGNORE_HWPOISON;
+		ttu &= ~TTU_HWPOISON;
 	}
 
 	/*
···
 	if (page_mkclean(hpage)) {
 		SetPageDirty(hpage);
 	} else {
-		ttu |= TTU_IGNORE_HWPOISON;
+		ttu &= ~TTU_HWPOISON;
 		pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
 			pfn);
 	}
···
+1 -1
mm/mmap.c
···
 			vma_end = addr;
 			adjust = mid;
 			adj_next = -(vma->vm_end - addr);
-			err = dup_anon_vma(res, adjust);
+			err = dup_anon_vma(adjust, prev);
 		} else {
 			vma = next;			/* case 3 */
 			vma_start = addr;
···
+1 -1
mm/rmap.c
···
 		/* Update high watermark before we lower rss */
 		update_hiwater_rss(mm);
 
-		if (PageHWPoison(subpage) && !(flags & TTU_IGNORE_HWPOISON)) {
+		if (PageHWPoison(subpage) && (flags & TTU_HWPOISON)) {
 			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
 			if (folio_test_hugetlb(folio)) {
 				hugetlb_count_sub(folio_nr_pages(folio), mm);
···
+17
scripts/Makefile.kasan
···
 # SPDX-License-Identifier: GPL-2.0
+
+ifdef CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX
+# Safe for compiler to generate meminstrinsic calls in uninstrumented files.
+CFLAGS_KASAN_NOSANITIZE :=
+else
+# Don't let compiler generate memintrinsic calls in uninstrumented files
+# because they are instrumented.
 CFLAGS_KASAN_NOSANITIZE := -fno-builtin
+endif
+
 KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)
 
 cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
···
 
 CFLAGS_KASAN += $(call cc-param,asan-stack=$(stack_enable))
 
+# Instrument memcpy/memset/memmove calls by using instrumented __asan_mem*()
+# instead. With compilers that don't support this option, compiler-inserted
+# memintrinsics won't be checked by KASAN on GENERIC_ENTRY architectures.
+CFLAGS_KASAN += $(call cc-param,asan-kernel-mem-intrinsic-prefix=1)
+
 endif # CONFIG_KASAN_GENERIC
 
 ifdef CONFIG_KASAN_SW_TAGS
···
 	$(call cc-param,hwasan-use-short-granules=0) \
 	$(call cc-param,hwasan-inline-all-checks=0) \
 	$(instrumentation_flags)
+
+# Instrument memcpy/memset/memmove calls by using instrumented __hwasan_mem*().
+CFLAGS_KASAN += $(call cc-param,hwasan-kernel-mem-intrinsic-prefix=1)
 
 endif # CONFIG_KASAN_SW_TAGS
···