Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
"16 patches.

Subsystems affected by this patch series: xtensa, sh, ocfs2, scripts,
lib, and mm (memory-failure, kasan, damon, shmem, tools, pagecache,
debug, and pagemap)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
mm: fix uninitialized use in overcommit_policy_handler
mm/memory_failure: fix the missing pte_unmap() call
kasan: always respect CONFIG_KASAN_STACK
sh: pgtable-3level: fix cast to pointer from integer of different size
mm/debug: sync up latest migrate_reason to migrate_reason_names
mm/debug: sync up MR_CONTIG_RANGE and MR_LONGTERM_PIN
mm: fs: invalidate bh_lrus for only cold path
lib/zlib_inflate/inffast: check config in C to avoid unused function warning
tools/vm/page-types: remove dependency on opt_file for idle page tracking
scripts/sorttable: riscv: fix undeclared identifier 'EM_RISCV' error
ocfs2: drop acl cache for directories too
mm/shmem.c: fix judgment error in shmem_is_huge()
xtensa: increase size of gcc stack frame check
mm/damon: don't use strnlen() with known-bogus source length
kasan: fix Kconfig check of CC_HAS_WORKING_NOSANITIZE_ADDRESS
mm, hwpoison: add is_free_buddy_page() in HWPoisonHandlable()

+69 -39
+1 -1
arch/sh/include/asm/pgtable-3level.h
···
 static inline pmd_t *pud_pgtable(pud_t pud)
 {
-    return (pmd_t *)pud_val(pud);
+    return (pmd_t *)(unsigned long)pud_val(pud);
 }

 /* only used by the stubbed out hugetlb gup code, should never be called */
+6 -2
fs/buffer.c
···
 }
 EXPORT_SYMBOL_GPL(invalidate_bh_lrus);

-void invalidate_bh_lrus_cpu(int cpu)
+/*
+ * It's called from workqueue context so we need a bh_lru_lock to close
+ * the race with preemption/irq.
+ */
+void invalidate_bh_lrus_cpu(void)
 {
     struct bh_lru *b;

     bh_lru_lock();
-    b = per_cpu_ptr(&bh_lrus, cpu);
+    b = this_cpu_ptr(&bh_lrus);
     __invalidate_bh_lrus(b);
     bh_lru_unlock();
 }
+2 -1
fs/ocfs2/dlmglue.c
···
         oi = OCFS2_I(inode);
         oi->ip_dir_lock_gen++;
         mlog(0, "generation: %u\n", oi->ip_dir_lock_gen);
-        goto out;
+        goto out_forget;
     }

     if (!S_ISREG(inode->i_mode))
···
         filemap_fdatawait(mapping);
     }

+out_forget:
     forget_all_cached_acls(inode);

 out:
+2 -2
include/linux/buffer_head.h
···
 struct buffer_head *__bread_gfp(struct block_device *,
                 sector_t block, unsigned size, gfp_t gfp);
 void invalidate_bh_lrus(void);
-void invalidate_bh_lrus_cpu(int cpu);
+void invalidate_bh_lrus_cpu(void);
 bool has_bh_in_lru(int cpu, void *dummy);
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
 void free_buffer_head(struct buffer_head * bh);
···
 static inline void invalidate_inode_buffers(struct inode *inode) {}
 static inline int remove_inode_buffers(struct inode *inode) { return 1; }
 static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
-static inline void invalidate_bh_lrus_cpu(int cpu) {}
+static inline void invalidate_bh_lrus_cpu(void) {}
 static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
 #define buffer_heads_over_limit 0
+5 -1
include/linux/migrate.h
···
  */
 #define MIGRATEPAGE_SUCCESS 0

+/*
+ * Keep sync with:
+ *   - macro MIGRATE_REASON in include/trace/events/migrate.h
+ *   - migrate_reason_names[MR_TYPES] in mm/debug.c
+ */
 enum migrate_reason {
     MR_COMPACTION,
     MR_MEMORY_FAILURE,
···
     MR_TYPES
 };

-/* In mm/debug.c; also keep sync with include/trace/events/migrate.h */
 extern const char *migrate_reason_names[MR_TYPES];

 #ifdef CONFIG_MIGRATION
+1 -1
lib/Kconfig.debug
···
     int "Warn for stack frames larger than"
     range 0 8192
     default 2048 if GCC_PLUGIN_LATENT_ENTROPY
-    default 1536 if (!64BIT && PARISC)
+    default 1536 if (!64BIT && (PARISC || XTENSA))
     default 1024 if (!64BIT && !PARISC)
     default 2048 if 64BIT
     help
+2
lib/Kconfig.kasan
···
 config KASAN_GENERIC
     bool "Generic mode"
     depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC
+    depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
     select SLUB_DEBUG if SLUB
     select CONSTRUCTORS
     help
···
 config KASAN_SW_TAGS
     bool "Software tag-based mode"
     depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS
+    depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
     select SLUB_DEBUG if SLUB
     select CONSTRUCTORS
     help
+6 -7
lib/zlib_inflate/inffast.c
···

             sfrom = (unsigned short *)(from);
             loops = len >> 1;
-            do
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-                *sout++ = *sfrom++;
-#else
-                *sout++ = get_unaligned16(sfrom++);
-#endif
-            while (--loops);
+            do {
+                if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+                    *sout++ = *sfrom++;
+                else
+                    *sout++ = get_unaligned16(sfrom++);
+            } while (--loops);
             out = (unsigned char *)sout;
             from = (unsigned char *)sfrom;
         } else { /* dist == 1 or dist == 2 */
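For reference, a stand-alone sketch of the compile-time "if" idiom the hunk above switches to, modelled on the kernel's IS_ENABLED(); HAVE_FAST_UNALIGNED and load16_bytewise() are made-up stand-ins, not kernel symbols. Because both arms are parsed, the byte-wise helper always counts as used (so -Wunused-function stays quiet), while the constant condition lets the compiler drop the dead arm:

/*
 * Illustrative only: compile-time 'if' instead of #ifdef.
 * HAVE_FAST_UNALIGNED models CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS;
 * load16_bytewise() stands in for get_unaligned16().
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HAVE_FAST_UNALIGNED 0

static uint16_t load16_bytewise(const void *p)
{
    uint16_t v;

    memcpy(&v, p, sizeof(v));  /* safe for unaligned addresses */
    return v;
}

static uint16_t load16(const uint16_t *p)
{
    if (HAVE_FAST_UNALIGNED)   /* constant condition: one arm is folded away */
        return *p;
    else
        return load16_bytewise(p);
}

int main(void)
{
    uint16_t buf[2] = { 0x1234, 0x5678 };

    printf("0x%x 0x%x\n", load16(&buf[0]), load16(&buf[1]));
    return 0;
}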
+8 -8
mm/damon/dbgfs-test.h
···
     ssize_t nr_integers = 0, i;

     question = "123";
-    answers = str_to_target_ids(question, strnlen(question, 128),
+    answers = str_to_target_ids(question, strlen(question),
             &nr_integers);
     KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers);
     KUNIT_EXPECT_EQ(test, 123ul, answers[0]);
     kfree(answers);

     question = "123abc";
-    answers = str_to_target_ids(question, strnlen(question, 128),
+    answers = str_to_target_ids(question, strlen(question),
             &nr_integers);
     KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers);
     KUNIT_EXPECT_EQ(test, 123ul, answers[0]);
     kfree(answers);

     question = "a123";
-    answers = str_to_target_ids(question, strnlen(question, 128),
+    answers = str_to_target_ids(question, strlen(question),
             &nr_integers);
     KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
     kfree(answers);

     question = "12 35";
-    answers = str_to_target_ids(question, strnlen(question, 128),
+    answers = str_to_target_ids(question, strlen(question),
             &nr_integers);
     KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers);
     for (i = 0; i < nr_integers; i++)
···
     kfree(answers);

     question = "12 35 46";
-    answers = str_to_target_ids(question, strnlen(question, 128),
+    answers = str_to_target_ids(question, strlen(question),
             &nr_integers);
     KUNIT_EXPECT_EQ(test, (ssize_t)3, nr_integers);
     for (i = 0; i < nr_integers; i++)
···
     kfree(answers);

     question = "12 35 abc 46";
-    answers = str_to_target_ids(question, strnlen(question, 128),
+    answers = str_to_target_ids(question, strlen(question),
             &nr_integers);
     KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers);
     for (i = 0; i < 2; i++)
···
     kfree(answers);

     question = "";
-    answers = str_to_target_ids(question, strnlen(question, 128),
+    answers = str_to_target_ids(question, strlen(question),
             &nr_integers);
     KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
     kfree(answers);

     question = "\n";
-    answers = str_to_target_ids(question, strnlen(question, 128),
+    answers = str_to_target_ids(question, strlen(question),
             &nr_integers);
     KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
     kfree(answers);
+3 -1
mm/debug.c
··· 24 "syscall_or_cpuset", 25 "mempolicy_mbind", 26 "numa_misplaced", 27 - "cma", 28 }; 29 30 const struct trace_print_flags pageflag_names[] = {
··· 24 "syscall_or_cpuset", 25 "mempolicy_mbind", 26 "numa_misplaced", 27 + "contig_range", 28 + "longterm_pin", 29 + "demotion", 30 }; 31 32 const struct trace_print_flags pageflag_names[] = {
+6 -6
mm/memory-failure.c
···
                     struct vm_area_struct *vma)
 {
     unsigned long address = vma_address(page, vma);
+    unsigned long ret = 0;
     pgd_t *pgd;
     p4d_t *p4d;
     pud_t *pud;
···
     if (pmd_devmap(*pmd))
         return PMD_SHIFT;
     pte = pte_offset_map(pmd, address);
-    if (!pte_present(*pte))
-        return 0;
-    if (pte_devmap(*pte))
-        return PAGE_SHIFT;
-    return 0;
+    if (pte_present(*pte) && pte_devmap(*pte))
+        ret = PAGE_SHIFT;
+    pte_unmap(pte);
+    return ret;
 }

 /*
···
  */
 static inline bool HWPoisonHandlable(struct page *page)
 {
-    return PageLRU(page) || __PageMovable(page);
+    return PageLRU(page) || __PageMovable(page) || is_free_buddy_page(page);
 }

 static int __get_hwpoison_page(struct page *page)
+2 -2
mm/shmem.c
···
     case SHMEM_HUGE_ALWAYS:
         return true;
     case SHMEM_HUGE_WITHIN_SIZE:
-        index = round_up(index, HPAGE_PMD_NR);
+        index = round_up(index + 1, HPAGE_PMD_NR);
         i_size = round_up(i_size_read(inode), PAGE_SIZE);
-        if (i_size >= HPAGE_PMD_SIZE && (i_size >> PAGE_SHIFT) >= index)
+        if (i_size >> PAGE_SHIFT >= index)
             return true;
         fallthrough;
     case SHMEM_HUGE_ADVISE:
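A user-space sketch of the boundary case the hunk above fixes (illustrative numbers only; round_up_ul() stands in for the kernel's round_up(), assuming 4K pages so HPAGE_PMD_NR == 512): for an index sitting exactly on a huge-page boundary just past EOF, the old rounding lets the size check pass, the new rounding does not:

#include <stdio.h>

#define HPAGE_PMD_NR 512UL  /* pages per 2M huge page with 4K base pages (assumption) */

/* stand-in for the kernel's round_up() */
static unsigned long round_up_ul(unsigned long x, unsigned long align)
{
    return ((x + align - 1) / align) * align;
}

int main(void)
{
    unsigned long i_size_pages = 512;  /* file covers exactly one 2M extent */
    unsigned long index = 512;         /* first page of the next extent, past EOF */

    /* old check: round_up(512, 512) == 512, so 512 >= 512 wrongly passes */
    printf("old: %d\n", i_size_pages >= round_up_ul(index, HPAGE_PMD_NR));
    /* new check: round_up(513, 512) == 1024, so 512 >= 1024 correctly fails */
    printf("new: %d\n", i_size_pages >= round_up_ul(index + 1, HPAGE_PMD_NR));
    return 0;
}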
+16 -3
mm/swap.c
···
         pagevec_lru_move_fn(pvec, lru_lazyfree_fn);

     activate_page_drain(cpu);
-    invalidate_bh_lrus_cpu(cpu);
 }

 /**
···
     local_unlock(&lru_pvecs.lock);
 }

+/*
+ * It's called from per-cpu workqueue context in SMP case so
+ * lru_add_drain_cpu and invalidate_bh_lrus_cpu should run on
+ * the same cpu. It shouldn't be a problem in !SMP case since
+ * the core is only one and the locks will disable preemption.
+ */
+static void lru_add_and_bh_lrus_drain(void)
+{
+    local_lock(&lru_pvecs.lock);
+    lru_add_drain_cpu(smp_processor_id());
+    local_unlock(&lru_pvecs.lock);
+    invalidate_bh_lrus_cpu();
+}
+
 void lru_add_drain_cpu_zone(struct zone *zone)
 {
     local_lock(&lru_pvecs.lock);
···

 static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
-    lru_add_drain();
+    lru_add_and_bh_lrus_drain();
 }

 /*
···
      */
     __lru_add_drain_all(true);
 #else
-    lru_add_drain();
+    lru_add_and_bh_lrus_drain();
 #endif
 }
+2 -2
mm/util.c
···
         size_t *lenp, loff_t *ppos)
 {
     struct ctl_table t;
-    int new_policy;
+    int new_policy = -1;
     int ret;

     /*
···
     t = *table;
     t.data = &new_policy;
     ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
-    if (ret)
+    if (ret || new_policy == -1)
         return ret;

     mm_compute_batch(new_policy);
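The sentinel idiom above can be shown with a small user-space sketch (hypothetical helpers, not the sysctl API): parse_policy() models proc_dointvec_minmax(), which may return 0 without storing a value, e.g. for a zero-length write; initializing to -1 and re-checking catches that case before the value is consumed:

#include <stdio.h>

/* models proc_dointvec_minmax(): may "succeed" without storing anything */
static int parse_policy(const char *buf, int *out)
{
    if (buf[0] == '\0')
        return 0;               /* success, but *out left untouched */
    return sscanf(buf, "%d", out) == 1 ? 0 : -1;
}

static int set_policy(const char *buf)
{
    int new_policy = -1;        /* sentinel: no valid policy is negative */
    int ret = parse_policy(buf, &new_policy);

    if (ret || new_policy == -1)  /* error, or parser stored nothing */
        return ret;

    printf("applying policy %d\n", new_policy);
    return 0;
}

int main(void)
{
    set_policy("");   /* without the sentinel this would use an uninitialized value */
    set_policy("2");  /* applies policy 2 */
    return 0;
}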
+2 -1
scripts/Makefile.kasan
···
 CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \
     $(call cc-param,asan-globals=1) \
     $(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
-    $(call cc-param,asan-stack=$(stack_enable)) \
     $(call cc-param,asan-instrument-allocas=1)
 endif
+
+CFLAGS_KASAN += $(call cc-param,asan-stack=$(stack_enable))

 endif # CONFIG_KASAN_GENERIC
+4
scripts/sorttable.c
···
 #define EM_ARCV2 195
 #endif

+#ifndef EM_RISCV
+#define EM_RISCV 243
+#endif
+
 static uint32_t (*r)(const uint32_t *);
 static uint16_t (*r2)(const uint16_t *);
 static uint64_t (*r8)(const uint64_t *);
+1 -1
tools/vm/page-types.c
···
     if (opt_list && opt_list_mapcnt)
         kpagecount_fd = checked_open(PROC_KPAGECOUNT, O_RDONLY);

-    if (opt_mark_idle && opt_file)
+    if (opt_mark_idle)
         page_idle_fd = checked_open(SYS_KERNEL_MM_PAGE_IDLE, O_RDWR);

     if (opt_list && opt_pid)