Merge tag 'mm-hotfixes-stable-2024-01-05-11-35' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc mm fixes from Andrew Morton:
"12 hotfixes.

Two are cc:stable and the remainder either address post-6.7 issues or
aren't considered necessary for earlier kernel versions"

* tag 'mm-hotfixes-stable-2024-01-05-11-35' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
mm: shrinker: use kvzalloc_node() from expand_one_shrinker_info()
mailmap: add entries for Mathieu Othacehe
MAINTAINERS: change vmware.com addresses to broadcom.com
arch/mm/fault: fix major fault accounting when retrying under per-VMA lock
mm/mglru: skip special VMAs in lru_gen_look_around()
MAINTAINERS: hand over hwpoison maintainership to Miaohe Lin
MAINTAINERS: remove hugetlb maintainer Mike Kravetz
mm: fix unmap_mapping_range high bits shift bug
mm: memcg: fix split queue list crash when large folio migration
mm: fix arithmetic for max_prop_frac when setting max_ratio
mm: fix arithmetic for bdi min_ratio
mm: align larger anonymous mappings on THP boundaries

+2 -1
.mailmap
@@ -377,7 +377,7 @@
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@puri.sm>
 Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
 Martyna Szapar-Mudlaw <martyna.szapar-mudlaw@linux.intel.com> <martyna.szapar-mudlaw@intel.com>
-Mathieu Othacehe <m.othacehe@gmail.com>
+Mathieu Othacehe <m.othacehe@gmail.com> <othacehe@gnu.org>
 Mat Martineau <martineau@kernel.org> <mathew.j.martineau@linux.intel.com>
 Mat Martineau <martineau@kernel.org> <mathewm@codeaurora.org>
 Matthew Wilcox <willy@infradead.org> <matthew.r.wilcox@intel.com>
@@ -638,4 +638,5 @@
 Wolfram Sang <wsa@kernel.org> <wsa@the-dreams.de>
 Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
 Yusuke Goda <goda.yusuke@renesas.com>
+Zack Rusin <zack.rusin@broadcom.com> <zackr@vmware.com>
 Zhu Yanjun <zyjzyj2000@gmail.com> <yanjunz@nvidia.com>
+4
CREDITS
@@ -2130,6 +2130,10 @@
 S: San Jose, CA 95123
 S: USA
 
+N: Mike Kravetz
+E: mike.kravetz@oracle.com
+D: Maintenance and development of the hugetlb subsystem
+
 N: Andreas S. Krebs
 E: akrebs@altavista.net
 D: CYPRESS CY82C693 chipset IDE, Digital's PC-Alpha 164SX boards
+6 -8
MAINTAINERS
@@ -6901,8 +6901,8 @@
 F: drivers/gpu/drm/vboxvideo/
 
 DRM DRIVER FOR VMWARE VIRTUAL GPU
-M: Zack Rusin <zackr@vmware.com>
-R: VMware Graphics Reviewers <linux-graphics-maintainer@vmware.com>
+M: Zack Rusin <zack.rusin@broadcom.com>
+R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
 L: dri-devel@lists.freedesktop.org
 S: Supported
 T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -9767,7 +9767,6 @@
 F: drivers/net/ethernet/huawei/hinic/
 
 HUGETLB SUBSYSTEM
-M: Mike Kravetz <mike.kravetz@oracle.com>
 M: Muchun Song <muchun.song@linux.dev>
 L: linux-mm@kvack.org
 S: Maintained
@@ -9790,8 +9791,8 @@
 F: drivers/media/platform/st/sti/hva
 
 HWPOISON MEMORY FAILURE HANDLING
-M: Naoya Horiguchi <naoya.horiguchi@nec.com>
-R: Miaohe Lin <linmiaohe@huawei.com>
+M: Miaohe Lin <linmiaohe@huawei.com>
+R: Naoya Horiguchi <naoya.horiguchi@nec.com>
 L: linux-mm@kvack.org
 S: Maintained
 F: mm/hwpoison-inject.c
@@ -23214,9 +23215,8 @@
 F: include/linux/vmw_vmci*
 
 VMWARE VMMOUSE SUBDRIVER
-M: Zack Rusin <zackr@vmware.com>
-R: VMware Graphics Reviewers <linux-graphics-maintainer@vmware.com>
-R: VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+M: Zack Rusin <zack.rusin@broadcom.com>
+R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
 L: linux-input@vger.kernel.org
 S: Supported
 F: drivers/input/mouse/vmmouse.c
+2
arch/arm64/mm/fault.c
@@ -607,6 +607,8 @@
 		goto done;
 	}
 	count_vm_vma_lock_event(VMA_LOCK_RETRY);
+	if (fault & VM_FAULT_MAJOR)
+		mm_flags |= FAULT_FLAG_TRIED;
 
 	/* Quick path to respond to signals */
 	if (fault_signal_pending(fault, regs)) {
+2
arch/powerpc/mm/fault.c
@@ -497,6 +497,8 @@
 		goto done;
 	}
 	count_vm_vma_lock_event(VMA_LOCK_RETRY);
+	if (fault & VM_FAULT_MAJOR)
+		flags |= FAULT_FLAG_TRIED;
 
 	if (fault_signal_pending(fault, regs))
 		return user_mode(regs) ? 0 : SIGBUS;
+2
arch/riscv/mm/fault.c
@@ -304,6 +304,8 @@
 		goto done;
 	}
 	count_vm_vma_lock_event(VMA_LOCK_RETRY);
+	if (fault & VM_FAULT_MAJOR)
+		flags |= FAULT_FLAG_TRIED;
 
 	if (fault_signal_pending(fault, regs)) {
 		if (!user_mode(regs))
+3
arch/s390/mm/fault.c
@@ -337,6 +337,9 @@
 		return;
 	}
 	count_vm_vma_lock_event(VMA_LOCK_RETRY);
+	if (fault & VM_FAULT_MAJOR)
+		flags |= FAULT_FLAG_TRIED;
+
 	/* Quick path to respond to signals */
 	if (fault_signal_pending(fault, regs)) {
 		if (!user_mode(regs))
+2
arch/x86/mm/fault.c
@@ -1370,6 +1370,8 @@
 		goto done;
 	}
 	count_vm_vma_lock_event(VMA_LOCK_RETRY);
+	if (fault & VM_FAULT_MAJOR)
+		flags |= FAULT_FLAG_TRIED;
 
 	/* Quick path to respond to signals */
 	if (fault_signal_pending(fault, regs)) {
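
The five hunks above are the same two-line change in each architecture's per-VMA-lock fault path. As I read mm_account_fault() in mm/memory.c, a completed fault is accounted as major when the handler returned VM_FAULT_MAJOR or when the caller passed FAULT_FLAG_TRIED, while a fault that returns VM_FAULT_RETRY is not accounted at all and is expected to be charged when the retry completes. Without FAULT_FLAG_TRIED on the mmap_lock retry, a major fault started under the VMA lock would therefore end up reported as minor. A standalone paraphrase of that decision (the flag values below are stand-ins for illustration, not the kernel's definitions):

#include <stdbool.h>
#include <stdio.h>

#define VM_FAULT_MAJOR   0x1u	/* stand-in values, not the kernel's */
#define VM_FAULT_RETRY   0x2u
#define FAULT_FLAG_TRIED 0x4u

/* paraphrase of the decision mm_account_fault() appears to make */
static bool accounted_as_major(unsigned int ret, unsigned int flags)
{
	if (ret & VM_FAULT_RETRY)
		return false;	/* incomplete; accounted when the retry finishes */
	return (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
}

int main(void)
{
	/* first attempt under the VMA lock: starts I/O, asks for a retry */
	printf("%d\n", accounted_as_major(VM_FAULT_MAJOR | VM_FAULT_RETRY, 0));
	/* retry under mmap_lock without the fix: looks like a minor fault */
	printf("%d\n", accounted_as_major(0, 0));
	/* retry with FAULT_FLAG_TRIED set, as in the hunks above: major */
	printf("%d\n", accounted_as_major(0, FAULT_FLAG_TRIED));
	return 0;
}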
+1 -1
mm/huge_memory.c
@@ -2823,7 +2823,7 @@
 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
 	if (!list_empty(&folio->_deferred_list)) {
 		ds_queue->split_queue_len--;
-		list_del(&folio->_deferred_list);
+		list_del_init(&folio->_deferred_list);
 	}
 	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
 }
+11
mm/memcontrol.c
@@ -7543,6 +7543,17 @@
 
 	/* Transfer the charge and the css ref */
 	commit_charge(new, memcg);
+	/*
+	 * If the old folio is a large folio and is in the split queue, it needs
+	 * to be removed from the split queue now, in case getting an incorrect
+	 * split queue in destroy_large_folio() after the memcg of the old folio
+	 * is cleared.
+	 *
+	 * In addition, the old folio is about to be freed after migration, so
+	 * removing from the split queue a bit earlier seems reasonable.
+	 */
+	if (folio_test_large(old) && folio_test_large_rmappable(old))
+		folio_undo_large_rmappable(old);
 	old->memcg_data = 0;
 }
 
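
The memcontrol.c hunk above and the mm/huge_memory.c hunk before it work as a pair: mem_cgroup_migrate() now pulls the old large folio off its deferred split queue before clearing old->memcg_data, and folio_undo_large_rmappable() switches to list_del_init() so that a later call on the same folio (for example from destroy_large_folio(), as the added comment notes) sees an empty entry and does nothing instead of touching poisoned pointers. A plain list_del() leaves the entry poisoned, and list_empty() on a poisoned entry does not report it as empty. A minimal userspace sketch of that distinction (the list helpers are modeled on include/linux/list.h; the poison values are illustrative):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_POISON1 ((struct list_head *)0x100)	/* illustrative */
#define LIST_POISON2 ((struct list_head *)0x122)	/* illustrative */

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *e, struct list_head *head)
{
	e->next = head->next;
	e->prev = head;
	head->next->prev = e;
	head->next = e;
}

static void __list_del_entry(struct list_head *e)
{
	e->next->prev = e->prev;
	e->prev->next = e->next;
}

static void list_del(struct list_head *e)
{
	__list_del_entry(e);
	e->next = LIST_POISON1;	/* entry can no longer be tested safely */
	e->prev = LIST_POISON2;
}

static void list_del_init(struct list_head *e)
{
	__list_del_entry(e);
	INIT_LIST_HEAD(e);	/* list_empty(e) is now reliably true */
}

static int list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
	struct list_head queue, a, b;

	INIT_LIST_HEAD(&queue);
	list_add(&a, &queue);
	list_add(&b, &queue);

	list_del_init(&a);	/* early removal, as in the hunks above */
	list_del(&b);		/* old behaviour */

	/* a reads as "off the list"; b still looks queued even though it is not */
	printf("a empty: %d, b empty: %d\n", list_empty(&a), list_empty(&b));
	return 0;
}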
+2 -2
mm/memory.c
@@ -3624,8 +3624,8 @@
 void unmap_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen, int even_cows)
 {
-	pgoff_t hba = holebegin >> PAGE_SHIFT;
-	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	pgoff_t hba = (pgoff_t)(holebegin) >> PAGE_SHIFT;
+	pgoff_t hlen = ((pgoff_t)(holelen) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	/* Check for overflow. */
 	if (sizeof(holelen) > sizeof(hlen)) {
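
The cast turns a sign-extending shift into a logical one: holebegin and holelen are signed loff_t, so when the top bit is set the plain `holebegin >> PAGE_SHIFT` smears the sign bit across the high bits of the page offset; casting to the unsigned pgoff_t first avoids that. A standalone illustration (PAGE_SHIFT value assumed; right-shifting a negative value is formally implementation-defined in C, but sign-extends on the toolchains the kernel targets):

#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

int main(void)
{
	int64_t holebegin = (int64_t)0x8000000111111000ULL;	/* top bit set */

	uint64_t arithmetic = (uint64_t)(holebegin >> PAGE_SHIFT);	/* signed shift */
	uint64_t logical = (uint64_t)holebegin >> PAGE_SHIFT;		/* unsigned shift */

	printf("signed shift:   %#" PRIx64 "\n", arithmetic);	/* 0xfff8000000111111 */
	printf("unsigned shift: %#" PRIx64 "\n", logical);	/* 0x8000000111111 */
	return 0;
}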
+3
mm/mmap.c
@@ -1829,6 +1829,9 @@
 		 */
 		pgoff = 0;
 		get_area = shmem_get_unmapped_area;
+	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+		/* Ensures that larger anonymous mappings are THP aligned. */
+		get_area = thp_get_unmapped_area;
 	}
 
 	addr = get_area(file, addr, len, pgoff, flags);
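
With this hunk applied and transparent hugepages enabled, sufficiently large anonymous mappings are placed by thp_get_unmapped_area() on huge-page-aligned addresses, so they can actually be backed by PMD-sized THPs. A quick userspace probe of the placement (a sketch only; the 2 MiB PMD size is an x86-64 assumption and should be queried at runtime rather than hard-coded):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	const size_t pmd_size = 2UL << 20;	/* assumed PMD size */
	void *p = mmap(NULL, 8 * pmd_size, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	printf("addr %p, PMD-aligned: %s\n", p,
	       ((unsigned long)p % pmd_size) ? "no" : "yes");
	munmap(p, 8 * pmd_size);
	return 0;
}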
+2 -2
mm/page-writeback.c
@@ -692,7 +692,6 @@
 
 	if (min_ratio > 100 * BDI_RATIO_SCALE)
 		return -EINVAL;
-	min_ratio *= BDI_RATIO_SCALE;
 
 	spin_lock_bh(&bdi_lock);
 	if (min_ratio > bdi->max_ratio) {
@@ -728,7 +729,8 @@
 		ret = -EINVAL;
 	} else {
 		bdi->max_ratio = max_ratio;
-		bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100;
+		bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) /
+					(100 * BDI_RATIO_SCALE);
 	}
 	spin_unlock_bh(&bdi_lock);
 
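
Both hunks follow from min_ratio and max_ratio arriving at these helpers already multiplied by BDI_RATIO_SCALE, which the bound checks against 100 * BDI_RATIO_SCALE confirm: scaling min_ratio a second time inflated it, and dividing by 100 alone left max_prop_frac a factor of BDI_RATIO_SCALE too large. A standalone arithmetic check (the BDI_RATIO_SCALE and FPROP_FRAC_BASE values below are assumptions for illustration, not the kernel's definitions):

#include <stdio.h>

#define BDI_RATIO_SCALE 10000UL		/* assumed scale factor */
#define FPROP_FRAC_BASE (1UL << 10)	/* assumed fraction base */

int main(void)
{
	/* caller asked for 50%; the helper receives the value pre-scaled */
	unsigned long max_ratio = 50 * BDI_RATIO_SCALE;

	unsigned long before = FPROP_FRAC_BASE * max_ratio / 100;
	unsigned long after = FPROP_FRAC_BASE * max_ratio / (100 * BDI_RATIO_SCALE);

	/* intended result is half of FPROP_FRAC_BASE */
	printf("before: %lu, after: %lu, expected: %lu\n",
	       before, after, FPROP_FRAC_BASE / 2);
	return 0;
}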
+1 -1
mm/shrinker.c
@@ -126,7 +126,7 @@
 		if (new_nr_max <= old->map_nr_max)
 			continue;
 
-		new = kvmalloc_node(sizeof(*new) + new_size, GFP_KERNEL, nid);
+		new = kvzalloc_node(sizeof(*new) + new_size, GFP_KERNEL, nid);
 		if (!new)
 			return -ENOMEM;
 
+9 -4
mm/vmscan.c
@@ -3955,6 +3955,7 @@
 	int young = 0;
 	pte_t *pte = pvmw->pte;
 	unsigned long addr = pvmw->address;
+	struct vm_area_struct *vma = pvmw->vma;
 	struct folio *folio = pfn_folio(pvmw->pfn);
 	bool can_swap = !folio_is_file_lru(folio);
 	struct mem_cgroup *memcg = folio_memcg(folio);
@@ -3970,11 +3969,15 @@
 	if (spin_is_contended(pvmw->ptl))
 		return;
 
+	/* exclude special VMAs containing anon pages from COW */
+	if (vma->vm_flags & VM_SPECIAL)
+		return;
+
 	/* avoid taking the LRU lock under the PTL when possible */
 	walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL;
 
-	start = max(addr & PMD_MASK, pvmw->vma->vm_start);
-	end = min(addr | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1;
+	start = max(addr & PMD_MASK, vma->vm_start);
+	end = min(addr | ~PMD_MASK, vma->vm_end - 1) + 1;
 
 	if (end - start > MIN_LRU_BATCH * PAGE_SIZE) {
 		if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
@@ -4003,6 +3998,6 @@
 		unsigned long pfn;
 		pte_t ptent = ptep_get(pte + i);
 
-		pfn = get_pte_pfn(ptent, pvmw->vma, addr);
+		pfn = get_pte_pfn(ptent, vma, addr);
 		if (pfn == -1)
 			continue;
@@ -4014,7 +4009,7 @@
 		if (!folio)
 			continue;
 
-		if (!ptep_test_and_clear_young(pvmw->vma, addr, pte + i))
+		if (!ptep_test_and_clear_young(vma, addr, pte + i))
 			VM_WARN_ON_ONCE(true);
 
 		young++;