Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

mm, thp: remove infrastructure for handling splitting PMDs

With the new refcounting we don't need to mark PMDs as splitting. Let's drop
the code that handles this.
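
In practice, the caller-visible change (see the diff below) is that
pmd_trans_huge_lock() and __pmd_trans_huge_lock() now return bool instead of
the old 1/0/-1 convention, and callers no longer need a
pmd_trans_splitting()/wait_split_huge_page() retry path. A minimal caller
sketch, modelled on the fs/proc/task_mmu.c hunks below (smaps_pmd_entry() and
the pte-level fallback stand in for whatever the real page-table walker does):

        spinlock_t *ptl;

        if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
                /* pmd maps a THP; the page table lock is held here */
                smaps_pmd_entry(pmd, addr, walk);
                spin_unlock(ptl);
                return 0;
        }
        /* not a huge pmd: fall back to the pte-level walk */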

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Jerome Marchand <jmarchan@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by Kirill A. Shutemov and committed by Linus Torvalds
4b471e88 1f19617d

13 files changed: +41 -183

Documentation/features/vm/pmdp_splitting_flush/arch-support.txt  (deleted, -40)
-#
-# Feature name: pmdp_splitting_flush
-# Kconfig: __HAVE_ARCH_PMDP_SPLITTING_FLUSH
-# description: arch supports the pmdp_splitting_flush() VM API
-#
-    -----------------------
-    |         arch |status|
-    -----------------------
-    |       alpha: | TODO |
-    |         arc: | TODO |
-    |         arm: |  ok  |
-    |       arm64: |  ok  |
-    |       avr32: | TODO |
-    |    blackfin: | TODO |
-    |         c6x: | TODO |
-    |        cris: | TODO |
-    |         frv: | TODO |
-    |       h8300: | TODO |
-    |     hexagon: | TODO |
-    |        ia64: | TODO |
-    |        m32r: | TODO |
-    |        m68k: | TODO |
-    |       metag: | TODO |
-    |  microblaze: | TODO |
-    |        mips: |  ok  |
-    |     mn10300: | TODO |
-    |       nios2: | TODO |
-    |    openrisc: | TODO |
-    |      parisc: | TODO |
-    |     powerpc: |  ok  |
-    |        s390: |  ok  |
-    |       score: | TODO |
-    |          sh: | TODO |
-    |       sparc: | TODO |
-    |        tile: | TODO |
-    |          um: | TODO |
-    |   unicore32: | TODO |
-    |         x86: |  ok  |
-    |      xtensa: | TODO |
-    -----------------------

fs/proc/task_mmu.c  (+4 -4)
···
         pte_t *pte;
         spinlock_t *ptl;

-        if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
+        if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
                 smaps_pmd_entry(pmd, addr, walk);
                 spin_unlock(ptl);
                 return 0;
···
         spinlock_t *ptl;
         struct page *page;

-        if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
+        if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
                 if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
                         clear_soft_dirty_pmd(vma, addr, pmd);
                         goto out;
···
         int err = 0;

 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-        if (pmd_trans_huge_lock(pmdp, vma, &ptl) == 1) {
+        if (pmd_trans_huge_lock(pmdp, vma, &ptl)) {
                 u64 flags = 0, frame = 0;
                 pmd_t pmd = *pmdp;

···
         pte_t *orig_pte;
         pte_t *pte;

-        if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
+        if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
                 pte_t huge_pte = *(pte_t *)pmd;
                 struct page *page;


include/asm-generic/pgtable.h  (-9)
···
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif

-#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
-extern void pmdp_splitting_flush(struct vm_area_struct *vma,
-                                 unsigned long address, pmd_t *pmdp);
-#endif
-
 #ifndef pmdp_collapse_flush
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
···

 #ifndef CONFIG_TRANSPARENT_HUGEPAGE
 static inline int pmd_trans_huge(pmd_t pmd)
-{
-        return 0;
-}
-static inline int pmd_trans_splitting(pmd_t pmd)
 {
         return 0;
 }

include/linux/huge_mm.h  (+6 -15)
···
 extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                         unsigned long addr, unsigned long end,
                         unsigned char *vec);
-extern int move_huge_pmd(struct vm_area_struct *vma,
+extern bool move_huge_pmd(struct vm_area_struct *vma,
                          struct vm_area_struct *new_vma,
                          unsigned long old_addr,
                          unsigned long new_addr, unsigned long old_end,
···
 #endif
 };

-enum page_check_address_pmd_flag {
-        PAGE_CHECK_ADDRESS_PMD_FLAG,
-        PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG,
-        PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG,
-};
 extern pmd_t *page_check_address_pmd(struct page *page,
                                      struct mm_struct *mm,
                                      unsigned long address,
-                                     enum page_check_address_pmd_flag flag,
                                      spinlock_t **ptl);

 #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
···
 #define split_huge_page(page) BUILD_BUG()
 #define split_huge_pmd(__vma, __pmd, __address) BUILD_BUG()

-#define wait_split_huge_page(__anon_vma, __pmd) BUILD_BUG()
 #if HPAGE_PMD_ORDER >= MAX_ORDER
 #error "hugepages can't be allocated by the buddy allocator"
 #endif
···
                                     unsigned long start,
                                     unsigned long end,
                                     long adjust_next);
-extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
+extern bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
                 spinlock_t **ptl);
 /* mmap_sem must be held on entry */
-static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
+static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
                 spinlock_t **ptl)
 {
         VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
         if (pmd_trans_huge(*pmd))
                 return __pmd_trans_huge_lock(pmd, vma, ptl);
         else
-                return 0;
+                return false;
 }
 static inline int hpage_nr_pages(struct page *page)
 {
···
 {
         return 0;
 }
-#define wait_split_huge_page(__anon_vma, __pmd) \
-        do { } while (0)
 #define split_huge_pmd(__vma, __pmd, __address) \
         do { } while (0)
 static inline int hugepage_madvise(struct vm_area_struct *vma,
···
                                          long adjust_next)
 {
 }
-static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
+static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
                 spinlock_t **ptl)
 {
-        return 0;
+        return false;
 }

 static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,

mm/gup.c  (+1 -11)
···
                 spin_unlock(ptl);
                 return follow_page_pte(vma, address, pmd, flags);
         }
-
-        if (unlikely(pmd_trans_splitting(*pmd))) {
-                spin_unlock(ptl);
-                wait_split_huge_page(vma->anon_vma, pmd);
-                return follow_page_pte(vma, address, pmd, flags);
-        }
-
         if (flags & FOLL_SPLIT) {
                 int ret;
                 page = pmd_page(*pmd);
···
  *  *) HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table is used to free
  *      pages containing page tables.
  *
- *  *) THP splits will broadcast an IPI, this can be achieved by overriding
- *      pmdp_splitting_flush.
- *
  *  *) ptes can be read atomically by the architecture.
  *
  *  *) access_ok is sufficient to validate userspace address ranges.
···
                 pmd_t pmd = READ_ONCE(*pmdp);

                 next = pmd_addr_end(addr, end);
-                if (pmd_none(pmd) || pmd_trans_splitting(pmd))
+                if (pmd_none(pmd))
                         return 0;

                 if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {

mm/huge_memory.c  (+16 -51)
···
                 goto out_unlock;
         }

-        if (unlikely(pmd_trans_splitting(pmd))) {
-                /* split huge page running from under us */
-                spin_unlock(src_ptl);
-                spin_unlock(dst_ptl);
-                pte_free(dst_mm, pgtable);
-
-                wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
-                goto out;
-        }
         src_page = pmd_page(pmd);
         VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
         get_page(src_page);
···
         pmd_t orig_pmd;
         spinlock_t *ptl;

-        if (__pmd_trans_huge_lock(pmd, vma, &ptl) != 1)
+        if (!__pmd_trans_huge_lock(pmd, vma, &ptl))
                 return 0;
         /*
          * For architectures like ppc64 we look at deposited pgtable
···
         return 1;
 }

-int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
+bool move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
                   unsigned long old_addr,
                   unsigned long new_addr, unsigned long old_end,
                   pmd_t *old_pmd, pmd_t *new_pmd)
 {
         spinlock_t *old_ptl, *new_ptl;
-        int ret = 0;
         pmd_t pmd;

         struct mm_struct *mm = vma->vm_mm;
···
             (new_addr & ~HPAGE_PMD_MASK) ||
             old_end - old_addr < HPAGE_PMD_SIZE ||
             (new_vma->vm_flags & VM_NOHUGEPAGE))
-                goto out;
+                return false;

         /*
          * The destination pmd shouldn't be established, free_pgtables()
···
          */
         if (WARN_ON(!pmd_none(*new_pmd))) {
                 VM_BUG_ON(pmd_trans_huge(*new_pmd));
-                goto out;
+                return false;
         }

         /*
          * We don't have to worry about the ordering of src and dst
          * ptlocks because exclusive mmap_sem prevents deadlock.
          */
-        ret = __pmd_trans_huge_lock(old_pmd, vma, &old_ptl);
-        if (ret == 1) {
+        if (__pmd_trans_huge_lock(old_pmd, vma, &old_ptl)) {
                 new_ptl = pmd_lockptr(mm, new_pmd);
                 if (new_ptl != old_ptl)
                         spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
···
                 if (new_ptl != old_ptl)
                         spin_unlock(new_ptl);
                 spin_unlock(old_ptl);
+                return true;
         }
-out:
-        return ret;
+        return false;
 }

 /*
···
         spinlock_t *ptl;
         int ret = 0;

-        if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
+        if (__pmd_trans_huge_lock(pmd, vma, &ptl)) {
                 pmd_t entry;
                 bool preserve_write = prot_numa && pmd_write(*pmd);
                 ret = 1;
···
 }

 /*
- * Returns 1 if a given pmd maps a stable (not under splitting) thp.
- * Returns -1 if it maps a thp under splitting. Returns 0 otherwise.
+ * Returns true if a given pmd maps a thp, false otherwise.
  *
- * Note that if it returns 1, this routine returns without unlocking page
- * table locks. So callers must unlock them.
+ * Note that if it returns true, this routine returns without unlocking page
+ * table lock. So callers must unlock it.
  */
-int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
+bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
                 spinlock_t **ptl)
 {
         *ptl = pmd_lock(vma->vm_mm, pmd);
-        if (likely(pmd_trans_huge(*pmd))) {
-                if (unlikely(pmd_trans_splitting(*pmd))) {
-                        spin_unlock(*ptl);
-                        wait_split_huge_page(vma->anon_vma, pmd);
-                        return -1;
-                } else {
-                        /* Thp mapped by 'pmd' is stable, so we can
-                         * handle it as it is. */
-                        return 1;
-                }
-        }
+        if (likely(pmd_trans_huge(*pmd)))
+                return true;
         spin_unlock(*ptl);
-        return 0;
+        return false;
 }

 /*
···
 pmd_t *page_check_address_pmd(struct page *page,
                               struct mm_struct *mm,
                               unsigned long address,
-                              enum page_check_address_pmd_flag flag,
                               spinlock_t **ptl)
 {
         pgd_t *pgd;
···
                 goto unlock;
         if (pmd_page(*pmd) != page)
                 goto unlock;
-        /*
-         * split_vma() may create temporary aliased mappings. There is
-         * no risk as long as all huge pmd are found and have their
-         * splitting bit set before __split_huge_page_refcount
-         * runs. Finding the same huge pmd more than once during the
-         * same rmap walk is not a problem.
-         */
-        if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
-            pmd_trans_splitting(*pmd))
-                goto unlock;
-        if (pmd_trans_huge(*pmd)) {
-                VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
-                          !pmd_trans_splitting(*pmd));
+        if (pmd_trans_huge(*pmd))
                 return pmd;
-        }
 unlock:
         spin_unlock(*ptl);
         return NULL;

mm/memcontrol.c  (+2 -11)
···
         pte_t *pte;
         spinlock_t *ptl;

-        if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
+        if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
                 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
                         mc.precharge += HPAGE_PMD_NR;
                 spin_unlock(ptl);
···
         union mc_target target;
         struct page *page;

-        /*
-         * No race with splitting thp happens because:
-         *  - if pmd_trans_huge_lock() returns 1, the relevant thp is not
-         *    under splitting, which means there's no concurrent thp split,
-         *  - if another thread runs into split_huge_page() just after we
-         *    entered this if-block, the thread must wait for page table lock
-         *    to be unlocked in __split_huge_page_splitting(), where the main
-         *    part of thp split is not executed yet.
-         */
-        if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
+        if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
                 if (mc.precharge < HPAGE_PMD_NR) {
                         spin_unlock(ptl);
                         return 0;

mm/memory.c  (+2 -16)
···
 {
         spinlock_t *ptl;
         pgtable_t new = pte_alloc_one(mm, address);
-        int wait_split_huge_page;
         if (!new)
                 return -ENOMEM;

···
         smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */

         ptl = pmd_lock(mm, pmd);
-        wait_split_huge_page = 0;
         if (likely(pmd_none(*pmd))) {        /* Has another populated it ? */
                 atomic_long_inc(&mm->nr_ptes);
                 pmd_populate(mm, pmd, new);
                 new = NULL;
-        } else if (unlikely(pmd_trans_splitting(*pmd)))
-                wait_split_huge_page = 1;
+        }
         spin_unlock(ptl);
         if (new)
                 pte_free(mm, new);
-        if (wait_split_huge_page)
-                wait_split_huge_page(vma->anon_vma, pmd);
         return 0;
 }

···
         if (likely(pmd_none(*pmd))) {        /* Has another populated it ? */
                 pmd_populate_kernel(&init_mm, pmd, new);
                 new = NULL;
-        } else
-                VM_BUG_ON(pmd_trans_splitting(*pmd));
+        }
         spin_unlock(&init_mm.page_table_lock);
         if (new)
                 pte_free_kernel(&init_mm, new);
···
                 barrier();
                 if (pmd_trans_huge(orig_pmd)) {
                         unsigned int dirty = flags & FAULT_FLAG_WRITE;
-
-                        /*
-                         * If the pmd is splitting, return and retry the
-                         * the fault. Alternative: wait until the split
-                         * is done, and goto retry.
-                         */
-                        if (pmd_trans_splitting(orig_pmd))
-                                return 0;

                         if (pmd_protnone(orig_pmd))
                                 return do_huge_pmd_numa_page(mm, vma, address,

mm/mincore.c  (+1 -1)
···
         unsigned char *vec = walk->private;
         int nr = (end - addr) >> PAGE_SHIFT;

-        if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
+        if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
                 memset(vec, 1, nr);
                 spin_unlock(ptl);
                 goto out;

mm/mremap.c  (+7 -8)
···
                 if (!new_pmd)
                         break;
                 if (pmd_trans_huge(*old_pmd)) {
-                        int err = 0;
                         if (extent == HPAGE_PMD_SIZE) {
+                                bool moved;
                                 VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma,
                                               vma);
                                 /* See comment in move_ptes() */
                                 if (need_rmap_locks)
                                         anon_vma_lock_write(vma->anon_vma);
-                                err = move_huge_pmd(vma, new_vma, old_addr,
+                                moved = move_huge_pmd(vma, new_vma, old_addr,
                                                     new_addr, old_end,
                                                     old_pmd, new_pmd);
                                 if (need_rmap_locks)
                                         anon_vma_unlock_write(vma->anon_vma);
+                                if (moved) {
+                                        need_flush = true;
+                                        continue;
+                                }
                         }
-                        if (err > 0) {
-                                need_flush = true;
-                                continue;
-                        } else if (!err) {
-                                split_huge_pmd(vma, old_pmd, old_addr);
-                        }
+                        split_huge_pmd(vma, old_pmd, old_addr);
                         VM_BUG_ON(pmd_trans_huge(*old_pmd));
                 }
                 if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma,

mm/page_idle.c  (+1 -2)
···
         bool referenced = false;

         if (unlikely(PageTransHuge(page))) {
-                pmd = page_check_address_pmd(page, mm, addr,
-                                             PAGE_CHECK_ADDRESS_PMD_FLAG, &ptl);
+                pmd = page_check_address_pmd(page, mm, addr, &ptl);
                 if (pmd) {
                         referenced = pmdp_clear_young_notify(vma, addr, pmd);
                         spin_unlock(ptl);

mm/pgtable-generic.c  (-12)
···
 }
 #endif

-#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
-void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
-                          pmd_t *pmdp)
-{
-        pmd_t pmd = pmd_mksplitting(*pmdp);
-        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-        set_pmd_at(vma->vm_mm, address, pmdp, pmd);
-        /* tlb flush only to serialize against gup-fast */
-        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
-}
-#endif
-
 #ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                 pgtable_t pgtable)

mm/rmap.c  (+1 -3)
···
                  * rmap might return false positives; we must filter
                  * these out using page_check_address_pmd().
                  */
-                pmd = page_check_address_pmd(page, mm, address,
-                                             PAGE_CHECK_ADDRESS_PMD_FLAG, &ptl);
+                pmd = page_check_address_pmd(page, mm, address, &ptl);
                 if (!pmd)
                         return SWAP_AGAIN;

···
                         return SWAP_FAIL; /* To break the loop */
                 }

-                /* go ahead even if the pmd is pmd_trans_splitting() */
                 if (pmdp_clear_flush_young_notify(vma, address, pmd))
                         referenced++;
                 spin_unlock(ptl);