Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mm-hotfixes-stable-2024-03-07-16-17' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
"6 hotfixes. 4 are cc:stable and the remainder pertain to post-6.7
issues or aren't considered to be needed in earlier kernel versions"

* tag 'mm-hotfixes-stable-2024-03-07-16-17' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
scripts/gdb/symbols: fix invalid escape sequence warning
mailmap: fix Kishon's email
init/Kconfig: lower GCC version check for -Warray-bounds
mm, mmap: fix vma_merge() case 7 with vma_ops->close
mm: userfaultfd: fix unexpected change to src_folio when UFFDIO_MOVE fails
mm, vmscan: prevent infinite loop for costly GFP_NOIO | __GFP_RETRY_MAYFAIL allocations

+37 -19
+1
.mailmap
@@ -325,6 +325,7 @@
 Kenneth Westfield <quic_kwestfie@quicinc.com> <kwestfie@codeaurora.org>
 Kiran Gunda <quic_kgunda@quicinc.com> <kgunda@codeaurora.org>
 Kirill Tkhai <tkhai@ya.ru> <ktkhai@virtuozzo.com>
+Kishon Vijay Abraham I <kishon@kernel.org> <kishon@ti.com>
 Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
 Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
 Koushik <raghavendra.koushik@neterion.com>
+9
include/linux/gfp.h
@@ -353,6 +353,15 @@
 	return (gfp & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS);
 }
 
+/*
+ * Check if the gfp flags allow compaction - GFP_NOIO is a really
+ * tricky context because the migration might require IO.
+ */
+static inline bool gfp_compaction_allowed(gfp_t gfp_mask)
+{
+	return IS_ENABLED(CONFIG_COMPACTION) && (gfp_mask & __GFP_IO);
+}
+
 extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
 
 #ifdef CONFIG_CONTIG_ALLOC
+3 -3
init/Kconfig
@@ -876,14 +876,14 @@
 	default "-Wimplicit-fallthrough=5" if CC_IS_GCC && $(cc-option,-Wimplicit-fallthrough=5)
 	default "-Wimplicit-fallthrough" if CC_IS_CLANG && $(cc-option,-Wunreachable-code-fallthrough)
 
-# Currently, disable gcc-11+ array-bounds globally.
+# Currently, disable gcc-10+ array-bounds globally.
 # It's still broken in gcc-13, so no upper bound yet.
-config GCC11_NO_ARRAY_BOUNDS
+config GCC10_NO_ARRAY_BOUNDS
 	def_bool y
 
 config CC_NO_ARRAY_BOUNDS
 	bool
-	default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC11_NO_ARRAY_BOUNDS
+	default y if CC_IS_GCC && GCC_VERSION >= 100000 && GCC10_NO_ARRAY_BOUNDS
 
 # Currently, disable -Wstringop-overflow for GCC globally.
 config GCC_NO_STRINGOP_OVERFLOW
+1 -6
mm/compaction.c
@@ -2723,16 +2723,11 @@
 		unsigned int alloc_flags, const struct alloc_context *ac,
 		enum compact_priority prio, struct page **capture)
 {
-	int may_perform_io = (__force int)(gfp_mask & __GFP_IO);
 	struct zoneref *z;
 	struct zone *zone;
 	enum compact_result rc = COMPACT_SKIPPED;
 
-	/*
-	 * Check if the GFP flags allow compaction - GFP_NOIO is really
-	 * tricky context because the migration might require IO
-	 */
-	if (!may_perform_io)
+	if (!gfp_compaction_allowed(gfp_mask))
 		return COMPACT_SKIPPED;
 
 	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
+9 -1
mm/mmap.c
@@ -954,13 +954,21 @@
 	} else if (merge_prev) {		/* case 2 */
 		if (curr) {
 			vma_start_write(curr);
-			err = dup_anon_vma(prev, curr, &anon_dup);
 			if (end == curr->vm_end) {	/* case 7 */
+				/*
+				 * can_vma_merge_after() assumed we would not be
+				 * removing prev vma, so it skipped the check
+				 * for vm_ops->close, but we are removing curr
+				 */
+				if (curr->vm_ops && curr->vm_ops->close)
+					err = -EINVAL;
 				remove = curr;
 			} else {			/* case 5 */
 				adjust = curr;
 				adj_start = (end - curr->vm_start);
 			}
+			if (!err)
+				err = dup_anon_vma(prev, curr, &anon_dup);
 		}
 	} else { /* merge_next */
 		vma_start_write(next);
+6 -4
mm/page_alloc.c
@@ -4041,6 +4041,7 @@
 					struct alloc_context *ac)
 {
 	bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
+	bool can_compact = gfp_compaction_allowed(gfp_mask);
 	const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
 	struct page *page = NULL;
 	unsigned int alloc_flags;
@@ -4112,7 +4111,7 @@
 	 * Don't try this for allocations that are allowed to ignore
 	 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
 	 */
-	if (can_direct_reclaim &&
+	if (can_direct_reclaim && can_compact &&
 			(costly_order ||
 			   (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
 			&& !gfp_pfmemalloc_allowed(gfp_mask)) {
@@ -4210,9 +4209,10 @@
 
 	/*
 	 * Do not retry costly high order allocations unless they are
-	 * __GFP_RETRY_MAYFAIL
+	 * __GFP_RETRY_MAYFAIL and we can compact
 	 */
-	if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
+	if (costly_order && (!can_compact ||
+			     !(gfp_mask & __GFP_RETRY_MAYFAIL)))
 		goto nopage;
 
 	if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
@@ -4226,7 +4224,7 @@
 	 * implementation of the compaction depends on the sufficient amount
 	 * of free memory (see __compaction_suitable)
 	 */
-	if (did_some_progress > 0 &&
+	if (did_some_progress > 0 && can_compact &&
 			should_compact_retry(ac, order, alloc_flags,
 				compact_result, &compact_priority,
 				&compaction_retries))
+3 -3
mm/userfaultfd.c
@@ -914,9 +914,6 @@
 		goto out;
 	}
 
-	folio_move_anon_rmap(src_folio, dst_vma);
-	WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
-
 	orig_src_pte = ptep_clear_flush(src_vma, src_addr, src_pte);
 	/* Folio got pinned from under us. Put it back and fail the move. */
 	if (folio_maybe_dma_pinned(src_folio)) {
@@ -921,6 +924,9 @@
 		err = -EBUSY;
 		goto out;
 	}
+
+	folio_move_anon_rmap(src_folio, dst_vma);
+	WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
 
 	orig_dst_pte = mk_pte(&src_folio->page, dst_vma->vm_page_prot);
 	/* Follow mremap() behavior and treat the entry dirty after the move */
+4 -1
mm/vmscan.c
@@ -5753,7 +5753,7 @@
 /* Use reclaim/compaction for costly allocs or under memory pressure */
 static bool in_reclaim_compaction(struct scan_control *sc)
 {
-	if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
+	if (gfp_compaction_allowed(sc->gfp_mask) && sc->order &&
 			(sc->order > PAGE_ALLOC_COSTLY_ORDER ||
 			 sc->priority < DEF_PRIORITY - 2))
 		return true;
@@ -5997,6 +5997,9 @@
 static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
 {
 	unsigned long watermark;
+
+	if (!gfp_compaction_allowed(sc->gfp_mask))
+		return false;
 
 	/* Allocation can already succeed, nothing to do */
 	if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
+1 -1
scripts/gdb/linux/symbols.py
@@ -82,7 +82,7 @@
         self.module_files_updated = True
 
     def _get_module_file(self, module_name):
-        module_pattern = ".*/{0}\.ko(?:.debug)?$".format(
+        module_pattern = r".*/{0}\.ko(?:.debug)?$".format(
             module_name.replace("_", r"[_\-]"))
         for name in self.module_files:
             if re.match(module_pattern, name) and os.path.exists(name):