Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/rmap: use rmap_walk() in try_to_munlock()

Now we have an infrastructure in rmap_walk() to handle the differences
between the variants of the rmap traversing functions.

So, just use it in try_to_munlock().

In this patch, I change the following things.

1. remove some variants of rmap traversing functions.
cf> try_to_unmap_ksm, try_to_unmap_anon, try_to_unmap_file
2. mechanical change to use rmap_walk() in try_to_munlock().
3. copy and paste comments.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Joonsoo Kim and committed by
Linus Torvalds
e8351ac9 52629506

+42 -168
-6
include/linux/ksm.h
··· 75 75 76 76 int page_referenced_ksm(struct page *page, 77 77 struct mem_cgroup *memcg, unsigned long *vm_flags); 78 - int try_to_unmap_ksm(struct page *page, enum ttu_flags flags); 79 78 int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc); 80 79 void ksm_migrate_page(struct page *newpage, struct page *oldpage); 81 80 ··· 109 110 110 111 static inline int page_referenced_ksm(struct page *page, 111 112 struct mem_cgroup *memcg, unsigned long *vm_flags) 112 - { 113 - return 0; 114 - } 115 - 116 - static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) 117 113 { 118 114 return 0; 119 115 }
-50
mm/ksm.c
··· 1946 1946 return referenced; 1947 1947 } 1948 1948 1949 - int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) 1950 - { 1951 - struct stable_node *stable_node; 1952 - struct rmap_item *rmap_item; 1953 - int ret = SWAP_AGAIN; 1954 - int search_new_forks = 0; 1955 - 1956 - VM_BUG_ON(!PageKsm(page)); 1957 - VM_BUG_ON(!PageLocked(page)); 1958 - 1959 - stable_node = page_stable_node(page); 1960 - if (!stable_node) 1961 - return SWAP_FAIL; 1962 - again: 1963 - hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { 1964 - struct anon_vma *anon_vma = rmap_item->anon_vma; 1965 - struct anon_vma_chain *vmac; 1966 - struct vm_area_struct *vma; 1967 - 1968 - anon_vma_lock_read(anon_vma); 1969 - anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, 1970 - 0, ULONG_MAX) { 1971 - vma = vmac->vma; 1972 - if (rmap_item->address < vma->vm_start || 1973 - rmap_item->address >= vma->vm_end) 1974 - continue; 1975 - /* 1976 - * Initially we examine only the vma which covers this 1977 - * rmap_item; but later, if there is still work to do, 1978 - * we examine covering vmas in other mms: in case they 1979 - * were forked from the original since ksmd passed. 1980 - */ 1981 - if ((rmap_item->mm == vma->vm_mm) == search_new_forks) 1982 - continue; 1983 - 1984 - ret = try_to_unmap_one(page, vma, 1985 - rmap_item->address, (void *)flags); 1986 - if (ret != SWAP_AGAIN || !page_mapped(page)) { 1987 - anon_vma_unlock_read(anon_vma); 1988 - goto out; 1989 - } 1990 - } 1991 - anon_vma_unlock_read(anon_vma); 1992 - } 1993 - if (!search_new_forks++) 1994 - goto again; 1995 - out: 1996 - return ret; 1997 - } 1998 - 1999 1949 int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) 2000 1950 { 2001 1951 struct stable_node *stable_node;
+42 -112
mm/rmap.c
··· 1177 1177 } 1178 1178 1179 1179 /* 1180 - * Subfunctions of try_to_unmap: try_to_unmap_one called 1181 - * repeatedly from try_to_unmap_ksm, try_to_unmap_anon or try_to_unmap_file. 1182 - * 1183 1180 * @arg: enum ttu_flags will be passed to this argument 1184 1181 */ 1185 1182 int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, ··· 1518 1521 return is_vma_temporary_stack(vma); 1519 1522 } 1520 1523 1521 - /** 1522 - * try_to_unmap_anon - unmap or unlock anonymous page using the object-based 1523 - * rmap method 1524 - * @page: the page to unmap/unlock 1525 - * @flags: action and flags 1526 - * 1527 - * Find all the mappings of a page using the mapping pointer and the vma chains 1528 - * contained in the anon_vma struct it points to. 1529 - * 1530 - * This function is only called from try_to_unmap/try_to_munlock for 1531 - * anonymous pages. 1532 - * When called from try_to_munlock(), the mmap_sem of the mm containing the vma 1533 - * where the page was found will be held for write. So, we won't recheck 1534 - * vm_flags for that VMA. That should be OK, because that vma shouldn't be 1535 - * 'LOCKED. 1536 - */ 1537 - static int try_to_unmap_anon(struct page *page, enum ttu_flags flags) 1538 - { 1539 - struct anon_vma *anon_vma; 1540 - pgoff_t pgoff; 1541 - struct anon_vma_chain *avc; 1542 - int ret = SWAP_AGAIN; 1543 - 1544 - anon_vma = page_lock_anon_vma_read(page); 1545 - if (!anon_vma) 1546 - return ret; 1547 - 1548 - pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); 1549 - anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) { 1550 - struct vm_area_struct *vma = avc->vma; 1551 - unsigned long address; 1552 - 1553 - /* 1554 - * During exec, a temporary VMA is setup and later moved. 1555 - * The VMA is moved under the anon_vma lock but not the 1556 - * page tables leading to a race where migration cannot 1557 - * find the migration ptes. 
Rather than increasing the 1558 - * locking requirements of exec(), migration skips 1559 - * temporary VMAs until after exec() completes. 1560 - */ 1561 - if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) && 1562 - is_vma_temporary_stack(vma)) 1563 - continue; 1564 - 1565 - address = vma_address(page, vma); 1566 - ret = try_to_unmap_one(page, vma, address, (void *)flags); 1567 - if (ret != SWAP_AGAIN || !page_mapped(page)) 1568 - break; 1569 - } 1570 - 1571 - page_unlock_anon_vma_read(anon_vma); 1572 - return ret; 1573 - } 1574 - 1575 - /** 1576 - * try_to_unmap_file - unmap/unlock file page using the object-based rmap method 1577 - * @page: the page to unmap/unlock 1578 - * @flags: action and flags 1579 - * 1580 - * Find all the mappings of a page using the mapping pointer and the vma chains 1581 - * contained in the address_space struct it points to. 1582 - * 1583 - * This function is only called from try_to_unmap/try_to_munlock for 1584 - * object-based pages. 1585 - * When called from try_to_munlock(), the mmap_sem of the mm containing the vma 1586 - * where the page was found will be held for write. So, we won't recheck 1587 - * vm_flags for that VMA. That should be OK, because that vma shouldn't be 1588 - * 'LOCKED. 
1589 - */ 1590 - static int try_to_unmap_file(struct page *page, enum ttu_flags flags) 1591 - { 1592 - struct address_space *mapping = page->mapping; 1593 - pgoff_t pgoff = page->index << compound_order(page); 1594 - struct vm_area_struct *vma; 1595 - int ret = SWAP_AGAIN; 1596 - 1597 - mutex_lock(&mapping->i_mmap_mutex); 1598 - vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { 1599 - unsigned long address = vma_address(page, vma); 1600 - ret = try_to_unmap_one(page, vma, address, (void *)flags); 1601 - if (ret != SWAP_AGAIN || !page_mapped(page)) 1602 - goto out; 1603 - } 1604 - 1605 - if (list_empty(&mapping->i_mmap_nonlinear)) 1606 - goto out; 1607 - 1608 - /* 1609 - * We don't bother to try to find the munlocked page in nonlinears. 1610 - * It's costly. Instead, later, page reclaim logic may call 1611 - * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily. 1612 - */ 1613 - if (TTU_ACTION(flags) == TTU_MUNLOCK) 1614 - goto out; 1615 - 1616 - ret = try_to_unmap_nonlinear(page, mapping, vma); 1617 - out: 1618 - mutex_unlock(&mapping->i_mmap_mutex); 1619 - return ret; 1620 - } 1621 - 1622 1524 static int page_not_mapped(struct page *page) 1623 1525 { 1624 1526 return !page_mapped(page); ··· 1585 1689 */ 1586 1690 int try_to_munlock(struct page *page) 1587 1691 { 1692 + int ret; 1693 + struct rmap_walk_control rwc = { 1694 + .rmap_one = try_to_unmap_one, 1695 + .arg = (void *)TTU_MUNLOCK, 1696 + .done = page_not_mapped, 1697 + /* 1698 + * We don't bother to try to find the munlocked page in 1699 + * nonlinears. It's costly. Instead, later, page reclaim logic 1700 + * may call try_to_unmap() and recover PG_mlocked lazily. 
1701 + */ 1702 + .file_nonlinear = NULL, 1703 + .anon_lock = page_lock_anon_vma_read, 1704 + 1705 + }; 1706 + 1588 1707 VM_BUG_ON(!PageLocked(page) || PageLRU(page)); 1589 1708 1590 - if (unlikely(PageKsm(page))) 1591 - return try_to_unmap_ksm(page, TTU_MUNLOCK); 1592 - else if (PageAnon(page)) 1593 - return try_to_unmap_anon(page, TTU_MUNLOCK); 1594 - else 1595 - return try_to_unmap_file(page, TTU_MUNLOCK); 1709 + ret = rmap_walk(page, &rwc); 1710 + return ret; 1596 1711 } 1597 1712 1598 1713 void __put_anon_vma(struct anon_vma *anon_vma) ··· 1639 1732 } 1640 1733 1641 1734 /* 1642 - * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file(): 1643 - * Called by migrate.c to remove migration ptes, but might be used more later. 1735 + * rmap_walk_anon - do something to anonymous page using the object-based 1736 + * rmap method 1737 + * @page: the page to be handled 1738 + * @rwc: control variable according to each walk type 1739 + * 1740 + * Find all the mappings of a page using the mapping pointer and the vma chains 1741 + * contained in the anon_vma struct it points to. 1742 + * 1743 + * When called from try_to_munlock(), the mmap_sem of the mm containing the vma 1744 + * where the page was found will be held for write. So, we won't recheck 1745 + * vm_flags for that VMA. That should be OK, because that vma shouldn't be 1746 + * LOCKED. 1644 1747 */ 1645 1748 static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc) 1646 1749 { ··· 1680 1763 return ret; 1681 1764 } 1682 1765 1766 + /* 1767 + * rmap_walk_file - do something to file page using the object-based rmap method 1768 + * @page: the page to be handled 1769 + * @rwc: control variable according to each walk type 1770 + * 1771 + * Find all the mappings of a page using the mapping pointer and the vma chains 1772 + * contained in the address_space struct it points to. 
1773 + * 1774 + * When called from try_to_munlock(), the mmap_sem of the mm containing the vma 1775 + * where the page was found will be held for write. So, we won't recheck 1776 + * vm_flags for that VMA. That should be OK, because that vma shouldn't be 1777 + * LOCKED. 1778 + */ 1683 1779 static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc) 1684 1780 { 1685 1781 struct address_space *mapping = page->mapping;