[PATCH] freepgt: remove MM_VM_SIZE(mm)

There's only one usage of MM_VM_SIZE(mm) left, and it's a troublesome macro
because mm doesn't contain the (32-bit emulation?) info needed. But it too is
only needed because we ignore the end address available from the vma list.

We could make flush_pgtables return that end, or unmap_vmas. Choose the
latter, since it's a natural fit with unmap_mapping_range_vma needing to know
its restart addr.  This does make a more than minimal change: but if unmap_vmas
had returned the end address before, this is how we'd have done it, rather than
storing the break_addr in zap_details.
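
Seen from a caller such as exit_mmap, the shape of the change is simply this
(sketch of the caller side only, matching the mm/mmap.c hunk below):

	end = unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL);
	vm_unacct_memory(nr_accounted);
	free_pgtables(&tlb, vma, 0, 0);
	tlb_finish_mmu(tlb, 0, end);	/* was tlb_finish_mmu(tlb, 0, MM_VM_SIZE(mm)) */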

unmap_vmas used to return a count of the vmas scanned, but that was just a
debug aid which hasn't been useful in a while; and if we ever want the
check that map_count is 0 on exit back, it can easily come from the final
remove_vm_struct loop.
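
For illustration only, if that debug check were ever wanted back, it could be
taken from the teardown loop roughly as sketched here (not part of this patch;
doing the map_count bookkeeping in this loop is an assumption):

	/* sketch: count down map_count in the final teardown loop */
	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		remove_vm_struct(vma);
		mm->map_count--;	/* assumed bookkeeping, not in this patch */
		vma = next;
	}
	BUG_ON(mm->map_count);		/* debug only */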

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

authored by Hugh Dickins and committed by Linus Torvalds ee39b37b e0da382c

+18 -39
-8
include/asm-ia64/processor.h
···
 #define TASK_SIZE		(current->thread.task_size)
 
 /*
- * MM_VM_SIZE(mm) gives the maximum address (plus 1) which may contain a mapping for
- * address-space MM.  Note that with 32-bit tasks, this is still DEFAULT_TASK_SIZE,
- * because the kernel may have installed helper-mappings above TASK_SIZE.  For example,
- * for x86 emulation, the LDT and GDT are mapped above TASK_SIZE.
- */
-#define MM_VM_SIZE(mm)		DEFAULT_TASK_SIZE
-
-/*
  * This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
-4
include/asm-ppc64/processor.h
···
 #define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
 		TASK_SIZE_USER32 : TASK_SIZE_USER64)
 
-/* We can't actually tell the TASK_SIZE given just the mm, but default
- * to the 64-bit case to make sure that enough gets cleaned up. */
-#define MM_VM_SIZE(mm)	TASK_SIZE_USER64
-
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
-2
include/asm-s390/processor.h
···
 
 #endif /* __s390x__ */
 
-#define MM_VM_SIZE(mm)	DEFAULT_TASK_SIZE
-
 #define HAVE_ARCH_PICK_MMAP_LAYOUT
 
 typedef struct {
+2 -7
include/linux/mm.h
···
 #include <asm/processor.h>
 #include <asm/atomic.h>
 
-#ifndef MM_VM_SIZE
-#define MM_VM_SIZE(mm)	((TASK_SIZE + PGDIR_SIZE - 1) & PGDIR_MASK)
-#endif
-
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 
 /*
···
 	pgoff_t first_index;			/* Lowest page->index to unmap */
 	pgoff_t last_index;			/* Highest page->index to unmap */
 	spinlock_t *i_mmap_lock;		/* For unmap_mapping_range: */
-	unsigned long break_addr;		/* Where unmap_vmas stopped */
 	unsigned long truncate_count;		/* Compare vm_truncate_count */
 };
 
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
+unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *);
-int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
+unsigned long unmap_vmas(struct mmu_gather **tlb, struct mm_struct *mm,
 		struct vm_area_struct *start_vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *);
+13 -15
mm/memory.c
···
  * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
  * @details: details of nonlinear truncation or shared cache invalidation
  *
- * Returns the number of vma's which were covered by the unmapping.
+ * Returns the end address of the unmapping (restart addr if interrupted).
  *
  * Unmap all pages in the vma list.  Called under page_table_lock.
···
  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
  * drops the lock and schedules.
  */
-int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
+unsigned long unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
 		struct vm_area_struct *vma, unsigned long start_addr,
 		unsigned long end_addr, unsigned long *nr_accounted,
 		struct zap_details *details)
···
 	unsigned long zap_bytes = ZAP_BLOCK_SIZE;
 	unsigned long tlb_start = 0;	/* For tlb_finish_mmu */
 	int tlb_start_valid = 0;
-	int ret = 0;
+	unsigned long start = start_addr;
 	spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
 	int fullmm = tlb_is_full_mm(*tlbp);
 
 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
-		unsigned long start;
 		unsigned long end;
 
 		start = max(vma->vm_start, start_addr);
···
 		if (vma->vm_flags & VM_ACCOUNT)
 			*nr_accounted += (end - start) >> PAGE_SHIFT;
 
-		ret++;
 		while (start != end) {
 			unsigned long block;
···
 				if (i_mmap_lock) {
 					/* must reset count of rss freed */
 					*tlbp = tlb_gather_mmu(mm, fullmm);
-					details->break_addr = start;
 					goto out;
 				}
 				spin_unlock(&mm->page_table_lock);
···
 		}
 	}
 out:
-	return ret;
+	return start;	/* which is now the end (or restart) address */
 }
 
 /**
···
  * @size: number of bytes to zap
  * @details: details of nonlinear truncation or shared cache invalidation
  */
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
+unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *details)
 {
 	struct mm_struct *mm = vma->vm_mm;
···
 
 	if (is_vm_hugetlb_page(vma)) {
 		zap_hugepage_range(vma, address, size);
-		return;
+		return end;
 	}
 
 	lru_add_drain();
 	spin_lock(&mm->page_table_lock);
 	tlb = tlb_gather_mmu(mm, 0);
-	unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
+	end = unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
 	tlb_finish_mmu(tlb, address, end);
 	spin_unlock(&mm->page_table_lock);
+	return end;
 }
 
 /*
···
  * i_mmap_lock.
  *
  * In order to make forward progress despite repeatedly restarting some
- * large vma, note the break_addr set by unmap_vmas when it breaks out:
+ * large vma, note the restart_addr from unmap_vmas when it breaks out:
  * and restart from that address when we reach that vma again.  It might
  * have been split or merged, shrunk or extended, but never shifted: so
  * restart_addr remains valid so long as it remains in the vma's range.
···
 		}
 	}
 
-	details->break_addr = end_addr;
-	zap_page_range(vma, start_addr, end_addr - start_addr, details);
+	restart_addr = zap_page_range(vma, start_addr,
+					end_addr - start_addr, details);
 
 	/*
 	 * We cannot rely on the break test in unmap_vmas:
···
 	need_break = need_resched() ||
 			need_lockbreak(details->i_mmap_lock);
 
-	if (details->break_addr >= end_addr) {
+	if (restart_addr >= end_addr) {
 		/* We have now completed this vma: mark it so */
 		vma->vm_truncate_count = details->truncate_count;
 		if (!need_break)
 			return 0;
 	} else {
 		/* Note restart_addr in vma's truncate_count field */
-		vma->vm_truncate_count = details->break_addr;
+		vma->vm_truncate_count = restart_addr;
 		if (!need_break)
 			goto again;
 	}
+3 -3
mm/mmap.c
···
 	struct mmu_gather *tlb;
 	struct vm_area_struct *vma = mm->mmap;
 	unsigned long nr_accounted = 0;
+	unsigned long end;
 
 	lru_add_drain();
···
 	flush_cache_mm(mm);
 	tlb = tlb_gather_mmu(mm, 1);
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
-	mm->map_count -= unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL);
+	end = unmap_vmas(&tlb, mm, vma, 0, -1, &nr_accounted, NULL);
 	vm_unacct_memory(nr_accounted);
 	free_pgtables(&tlb, vma, 0, 0);
-	tlb_finish_mmu(tlb, 0, MM_VM_SIZE(mm));
+	tlb_finish_mmu(tlb, 0, end);
 
 	mm->mmap = mm->mmap_cache = NULL;
 	mm->mm_rb = RB_ROOT;
···
 		vma = next;
 	}
 
-	BUG_ON(mm->map_count);	/* This is just debugging */
 	BUG_ON(mm->nr_ptes);	/* This is just debugging */
 }