Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

hugetlb: convert hugetlb_wp() to use struct vm_fault

hugetlb_wp() can use the struct vm_fault passed in from hugetlb_fault().
This reduces stack usage by consolidating 5 variables into a single
struct.
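
For illustration, a minimal sketch of the pattern (hypothetical names and toy
types, not the real mm/hugetlb.c API): a handler that previously took several
scalar arguments instead reads them through one pointer to state its caller
already holds, so each call site stops materializing the values separately.

	/* Toy sketch of the refactor; 'fault_ctx' and 'handle_wp' are made up. */
	struct fault_ctx {              /* stands in for struct vm_fault */
		unsigned long address;  /* was a separate 'address' argument */
		unsigned int flags;     /* was a separate 'flags' argument */
		int *pte;               /* was a separate 'ptep' argument */
	};

	/* Before: handle_wp(mm, vma, address, ptep, flags, ptl, ...)
	 * After: one pointer to a struct the caller already has on its stack. */
	static int handle_wp(struct fault_ctx *ctx)
	{
		if (ctx->flags & 1u)    /* toy stand-in for a write/unshare bit */
			*ctx->pte |= 1; /* toy stand-in for marking the PTE writable */
		return 0;
	}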

[vishal.moola@gmail.com: simplify hugetlb_wp() arguments]
Link: https://lkml.kernel.org/r/ZhQtoFNZBNwBCeXn@fedora
Link: https://lkml.kernel.org/r/20240401202651.31440-4-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by Vishal Moola (Oracle), committed by Andrew Morton
bd722058 7b6ec181

+32 -32
mm/hugetlb.c
···
 	 * cannot race with other handlers or page migration.
 	 * Keep the pte_same checks anyway to make transition from the mutex easier.
 	 */
-static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
-		       unsigned long address, pte_t *ptep, unsigned int flags,
-		       struct folio *pagecache_folio, spinlock_t *ptl,
+static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
 		       struct vm_fault *vmf)
 {
-	const bool unshare = flags & FAULT_FLAG_UNSHARE;
-	pte_t pte = huge_ptep_get(ptep);
+	struct vm_area_struct *vma = vmf->vma;
+	struct mm_struct *mm = vma->vm_mm;
+	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
+	pte_t pte = huge_ptep_get(vmf->pte);
 	struct hstate *h = hstate_vma(vma);
 	struct folio *old_folio;
 	struct folio *new_folio;
 	int outside_reserve = 0;
 	vm_fault_t ret = 0;
-	unsigned long haddr = address & huge_page_mask(h);
 	struct mmu_notifier_range range;
 
 	/*
···
 
 	/* Let's take out MAP_SHARED mappings first. */
 	if (vma->vm_flags & VM_MAYSHARE) {
-		set_huge_ptep_writable(vma, haddr, ptep);
+		set_huge_ptep_writable(vma, vmf->address, vmf->pte);
 		return 0;
 	}
 
···
 			SetPageAnonExclusive(&old_folio->page);
 		}
 		if (likely(!unshare))
-			set_huge_ptep_writable(vma, haddr, ptep);
+			set_huge_ptep_writable(vma, vmf->address, vmf->pte);
 
 		delayacct_wpcopy_end();
 		return 0;
···
 	 * Drop page table lock as buddy allocator may be called. It will
 	 * be acquired again before returning to the caller, as expected.
 	 */
-	spin_unlock(ptl);
-	new_folio = alloc_hugetlb_folio(vma, haddr, outside_reserve);
+	spin_unlock(vmf->ptl);
+	new_folio = alloc_hugetlb_folio(vma, vmf->address, outside_reserve);
 
 	if (IS_ERR(new_folio)) {
 		/*
···
 		 *
 		 * Reacquire both after unmap operation.
 		 */
-		idx = vma_hugecache_offset(h, vma, haddr);
+		idx = vma_hugecache_offset(h, vma, vmf->address);
 		hash = hugetlb_fault_mutex_hash(mapping, idx);
 		hugetlb_vma_unlock_read(vma);
 		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 
-		unmap_ref_private(mm, vma, &old_folio->page, haddr);
+		unmap_ref_private(mm, vma, &old_folio->page,
+				vmf->address);
 
 		mutex_lock(&hugetlb_fault_mutex_table[hash]);
 		hugetlb_vma_lock_read(vma);
-		spin_lock(ptl);
-		ptep = hugetlb_walk(vma, haddr, huge_page_size(h));
-		if (likely(ptep &&
-			   pte_same(huge_ptep_get(ptep), pte)))
+		spin_lock(vmf->ptl);
+		vmf->pte = hugetlb_walk(vma, vmf->address,
+				huge_page_size(h));
+		if (likely(vmf->pte &&
+			   pte_same(huge_ptep_get(vmf->pte), pte)))
 			goto retry_avoidcopy;
 		/*
 		 * race occurs while re-acquiring page table
···
 	if (unlikely(ret))
 		goto out_release_all;
 
-	if (copy_user_large_folio(new_folio, old_folio, address, vma)) {
+	if (copy_user_large_folio(new_folio, old_folio, vmf->real_address, vma)) {
 		ret = VM_FAULT_HWPOISON_LARGE;
 		goto out_release_all;
 	}
 	__folio_mark_uptodate(new_folio);
 
-	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, haddr,
-				haddr + huge_page_size(h));
+	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, vmf->address,
+				vmf->address + huge_page_size(h));
 	mmu_notifier_invalidate_range_start(&range);
 
 	/*
 	 * Retake the page table lock to check for racing updates
 	 * before the page tables are altered
 	 */
-	spin_lock(ptl);
-	ptep = hugetlb_walk(vma, haddr, huge_page_size(h));
-	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
+	spin_lock(vmf->ptl);
+	vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h));
+	if (likely(vmf->pte && pte_same(huge_ptep_get(vmf->pte), pte))) {
 		pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare);
 
 		/* Break COW or unshare */
-		huge_ptep_clear_flush(vma, haddr, ptep);
+		huge_ptep_clear_flush(vma, vmf->address, vmf->pte);
 		hugetlb_remove_rmap(old_folio);
-		hugetlb_add_new_anon_rmap(new_folio, vma, haddr);
+		hugetlb_add_new_anon_rmap(new_folio, vma, vmf->address);
 		if (huge_pte_uffd_wp(pte))
 			newpte = huge_pte_mkuffd_wp(newpte);
-		set_huge_pte_at(mm, haddr, ptep, newpte, huge_page_size(h));
+		set_huge_pte_at(mm, vmf->address, vmf->pte, newpte,
+				huge_page_size(h));
 		folio_set_hugetlb_migratable(new_folio);
 		/* Make the old page be freed below */
 		new_folio = old_folio;
 	}
-	spin_unlock(ptl);
+	spin_unlock(vmf->ptl);
 	mmu_notifier_invalidate_range_end(&range);
 out_release_all:
 	/*
···
 	 * unshare)
 	 */
 	if (new_folio != old_folio)
-		restore_reserve_on_error(h, vma, haddr, new_folio);
+		restore_reserve_on_error(h, vma, vmf->address, new_folio);
 	folio_put(new_folio);
 out_release_old:
 	folio_put(old_folio);
 
-	spin_lock(ptl); /* Caller expects lock to be held */
+	spin_lock(vmf->ptl); /* Caller expects lock to be held */
 
 	delayacct_wpcopy_end();
 	return ret;
···
 	hugetlb_count_add(pages_per_huge_page(h), mm);
 	if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
 		/* Optimization, do the COW without a second fault */
-		ret = hugetlb_wp(mm, vma, vmf->real_address, vmf->pte,
-				 vmf->flags, folio, vmf->ptl, vmf);
+		ret = hugetlb_wp(folio, vmf);
 	}
 
 	spin_unlock(vmf->ptl);
···
 
 	if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
 		if (!huge_pte_write(vmf.orig_pte)) {
-			ret = hugetlb_wp(mm, vma, address, vmf.pte, flags,
-					 pagecache_folio, vmf.ptl, &vmf);
+			ret = hugetlb_wp(pagecache_folio, &vmf);
 			goto out_put_page;
 		} else if (likely(flags & FAULT_FLAG_WRITE)) {
 			vmf.orig_pte = huge_pte_mkdirty(vmf.orig_pte);
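
Worth noting for anyone following the haddr removal above: the conversion
relies on the caller having already stored the huge-page-aligned address in
vmf->address, with the original faulting address preserved in
vmf->real_address (which is why copy_user_large_folio() now takes
vmf->real_address). A hedged sketch of the initialization this assumes,
roughly as hugetlb_fault() is expected to build its struct vm_fault earlier
in this series:

	/* Sketch of the assumed setup in hugetlb_fault(), not part of this diff. */
	struct vm_fault vmf = {
		.vma = vma,
		.address = address & huge_page_mask(h), /* aligned; replaces the old haddr local */
		.real_address = address,                /* unaligned faulting address */
		.flags = flags,
	};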