Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mmap: change zeroing of maple tree in __vma_adjust()

Only write to the maple tree if we are not inserting or the insert isn't
going to overwrite the area to clear. This avoids spanning writes and
node coalescing when unnecessary.

The change requires a custom search for the linked list addition to find
the correct VMA for the prev link.

Link: https://lkml.kernel.org/r/20220906194824.2110408-19-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Liam R. Howlett and committed by Andrew Morton.
3b0e81a1 524e00b3

+22 -8
+22 -8
mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -567,11 +567,11 @@
  * mm's list and the mm tree. It has already been inserted into the interval tree.
  */
 static void __insert_vm_struct(struct mm_struct *mm, struct ma_state *mas,
-		struct vm_area_struct *vma)
+		struct vm_area_struct *vma, unsigned long location)
 {
 	struct vm_area_struct *prev;
 
-	mas_set(mas, vma->vm_start);
+	mas_set(mas, location);
 	prev = mas_prev(mas, 0);
 	vma_mas_store(vma, mas);
 	__vma_link_list(mm, vma, prev);
@@ -601,6 +601,7 @@
 	int remove_next = 0;
 	MA_STATE(mas, &mm->mm_mt, 0, 0);
 	struct vm_area_struct *exporter = NULL, *importer = NULL;
+	unsigned long ll_prev = vma->vm_start;	/* linked list prev. */
 
 	if (next && !insert) {
 		if (end >= next->vm_end) {
@@ -729,16 +728,27 @@
 	}
 
 	if (start != vma->vm_start) {
-		if (vma->vm_start < start)
+		if ((vma->vm_start < start) &&
+		    (!insert || (insert->vm_end != start))) {
 			vma_mas_szero(&mas, vma->vm_start, start);
-		vma_changed = true;
+			VM_WARN_ON(insert && insert->vm_start > vma->vm_start);
+		} else {
+			vma_changed = true;
+		}
 		vma->vm_start = start;
 	}
 	if (end != vma->vm_end) {
-		if (vma->vm_end > end)
-			vma_mas_szero(&mas, end, vma->vm_end);
-		vma_changed = true;
+		if (vma->vm_end > end) {
+			if (!insert || (insert->vm_start != end)) {
+				vma_mas_szero(&mas, end, vma->vm_end);
+				VM_WARN_ON(insert &&
+					   insert->vm_end < vma->vm_end);
+			} else if (insert->vm_start == end) {
+				ll_prev = vma->vm_end;
+			}
+		} else {
+			vma_changed = true;
+		}
 		vma->vm_end = end;
 		if (!next)
 			mm->highest_vm_end = vm_end_gap(vma);
@@ -796,7 +784,7 @@
 		 * us to insert it before dropping the locks
 		 * (it may either follow vma or precede it).
 		 */
-		__insert_vm_struct(mm, &mas, insert);
+		__insert_vm_struct(mm, &mas, insert, ll_prev);
 	}
 
 	if (anon_vma) {
@@ -883,6 +870,7 @@
 	if (insert && file)
 		uprobe_mmap(insert);
 
+	mas_destroy(&mas);
 	validate_mm(mm);
 	return 0;
 }