Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: kill mm_wr_locked from unmap_vmas() and unmap_single_vma()

Kill mm_wr_locked since commit f8e97613fed2 ("mm: convert VM_PFNMAP
tracking to pfnmap_track() + pfnmap_untrack()") removed its last user.

Link: https://lkml.kernel.org/r/20251104085709.2688433-1-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Acked-by: David Hildenbrand (Red Hat) <david@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Kefeng Wang and committed by Andrew Morton.
340b5981 3b12a53b

+9 -15
+1 -1
include/linux/mm.h
··· 2480 2480 } 2481 2481 void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas, 2482 2482 struct vm_area_struct *start_vma, unsigned long start, 2483 - unsigned long end, unsigned long tree_end, bool mm_wr_locked); 2483 + unsigned long end, unsigned long tree_end); 2484 2484 2485 2485 struct mmu_notifier_range; 2486 2486
+4 -8
mm/memory.c
··· 2023 2023 2024 2024 static void unmap_single_vma(struct mmu_gather *tlb, 2025 2025 struct vm_area_struct *vma, unsigned long start_addr, 2026 - unsigned long end_addr, 2027 - struct zap_details *details, bool mm_wr_locked) 2026 + unsigned long end_addr, struct zap_details *details) 2028 2027 { 2029 2028 unsigned long start = max(vma->vm_start, start_addr); 2030 2029 unsigned long end; ··· 2069 2070 * @start_addr: virtual address at which to start unmapping 2070 2071 * @end_addr: virtual address at which to end unmapping 2071 2072 * @tree_end: The maximum index to check 2072 - * @mm_wr_locked: lock flag 2073 2073 * 2074 2074 * Unmap all pages in the vma list. 2075 2075 * ··· 2083 2085 */ 2084 2086 void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas, 2085 2087 struct vm_area_struct *vma, unsigned long start_addr, 2086 - unsigned long end_addr, unsigned long tree_end, 2087 - bool mm_wr_locked) 2088 + unsigned long end_addr, unsigned long tree_end) 2088 2089 { 2089 2090 struct mmu_notifier_range range; 2090 2091 struct zap_details details = { ··· 2099 2102 unsigned long start = start_addr; 2100 2103 unsigned long end = end_addr; 2101 2104 hugetlb_zap_begin(vma, &start, &end); 2102 - unmap_single_vma(tlb, vma, start, end, &details, 2103 - mm_wr_locked); 2105 + unmap_single_vma(tlb, vma, start, end, &details); 2104 2106 hugetlb_zap_end(vma, &details); 2105 2107 vma = mas_find(mas, tree_end - 1); 2106 2108 } while (vma && likely(!xa_is_zero(vma))); ··· 2135 2139 * unmap 'address-end' not 'range.start-range.end' as range 2136 2140 * could have been expanded for hugetlb pmd sharing. 2137 2141 */ 2138 - unmap_single_vma(tlb, vma, address, end, details, false); 2142 + unmap_single_vma(tlb, vma, address, end, details); 2139 2143 mmu_notifier_invalidate_range_end(&range); 2140 2144 if (is_vm_hugetlb_page(vma)) { 2141 2145 /*
+1 -1
mm/mmap.c
··· 1274 1274 tlb_gather_mmu_fullmm(&tlb, mm); 1275 1275 /* update_hiwater_rss(mm) here? but nobody should be looking */ 1276 1276 /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */ 1277 - unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX, false); 1277 + unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX); 1278 1278 mmap_read_unlock(mm); 1279 1279 1280 1280 /*
+2 -3
mm/vma.c
··· 483 483 484 484 tlb_gather_mmu(&tlb, mm); 485 485 update_hiwater_rss(mm); 486 - unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end, 487 - /* mm_wr_locked = */ true); 486 + unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end); 488 487 mas_set(mas, vma->vm_end); 489 488 free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, 490 489 next ? next->vm_start : USER_PGTABLES_CEILING, ··· 1227 1228 tlb_gather_mmu(&tlb, vms->vma->vm_mm); 1228 1229 update_hiwater_rss(vms->vma->vm_mm); 1229 1230 unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end, 1230 - vms->vma_count, mm_wr_locked); 1231 + vms->vma_count); 1231 1232 1232 1233 mas_set(mas_detach, 1); 1233 1234 /* start and end may be different if there is no prev or next vma. */
+1 -2
tools/testing/vma/vma_internal.h
··· 848 848 849 849 static inline void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas, 850 850 struct vm_area_struct *vma, unsigned long start_addr, 851 - unsigned long end_addr, unsigned long tree_end, 852 - bool mm_wr_locked) 851 + unsigned long end_addr, unsigned long tree_end) 853 852 { 854 853 } 855 854