Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: s390: Refactor and split some gmap helpers

Refactor some gmap functions; move the implementation into a separate
file with only helper functions. The new helper functions work on vm
addresses, leaving all gmap logic in the gmap functions, which mostly
become just wrappers.

The whole gmap handling is going to be moved inside KVM soon, but the
helper functions need to touch core mm functions, and thus need to
stay in the core of the kernel.

Reviewed-by: Steffen Eiden <seiden@linux.ibm.com>
Reviewed-by: Christoph Schlameuss <schlameuss@linux.ibm.com>
Acked-by: Janosch Frank <frankja@linux.ibm.com>
Link: https://lore.kernel.org/r/20250528095502.226213-4-imbrenda@linux.ibm.com
Signed-off-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Message-ID: <20250528095502.226213-4-imbrenda@linux.ibm.com>

+274 -187
+2
MAINTAINERS
··· 13075 13075 T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git 13076 13076 F: Documentation/virt/kvm/s390* 13077 13077 F: arch/s390/include/asm/gmap.h 13078 + F: arch/s390/include/asm/gmap_helpers.h 13078 13079 F: arch/s390/include/asm/kvm* 13079 13080 F: arch/s390/include/uapi/asm/kvm* 13080 13081 F: arch/s390/include/uapi/asm/uvdevice.h 13081 13082 F: arch/s390/kernel/uv.c 13082 13083 F: arch/s390/kvm/ 13083 13084 F: arch/s390/mm/gmap.c 13085 + F: arch/s390/mm/gmap_helpers.c 13084 13086 F: drivers/s390/char/uvdevice.c 13085 13087 F: tools/testing/selftests/drivers/s390x/uvdevice/ 13086 13088 F: tools/testing/selftests/kvm/*/s390/
-2
arch/s390/include/asm/gmap.h
··· 110 110 int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len); 111 111 unsigned long __gmap_translate(struct gmap *, unsigned long gaddr); 112 112 int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr); 113 - void gmap_discard(struct gmap *, unsigned long from, unsigned long to); 114 113 void __gmap_zap(struct gmap *, unsigned long gaddr); 115 114 void gmap_unlink(struct mm_struct *, unsigned long *table, unsigned long vmaddr); 116 115 ··· 133 134 134 135 void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4], 135 136 unsigned long gaddr, unsigned long vmaddr); 136 - int s390_disable_cow_sharing(void); 137 137 int s390_replace_asce(struct gmap *gmap); 138 138 void s390_uv_destroy_pfns(unsigned long count, unsigned long *pfns); 139 139 int __s390_uv_destroy_range(struct mm_struct *mm, unsigned long start,
+15
arch/s390/include/asm/gmap_helpers.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Helper functions for KVM guest address space mapping code 4 + * 5 + * Copyright IBM Corp. 2025 6 + */ 7 + 8 + #ifndef _ASM_S390_GMAP_HELPERS_H 9 + #define _ASM_S390_GMAP_HELPERS_H 10 + 11 + void gmap_helper_zap_one_page(struct mm_struct *mm, unsigned long vmaddr); 12 + void gmap_helper_discard(struct mm_struct *mm, unsigned long vmaddr, unsigned long end); 13 + int gmap_helper_disable_cow_sharing(void); 14 + 15 + #endif /* _ASM_S390_GMAP_HELPERS_H */
+25 -5
arch/s390/kvm/diag.c
··· 11 11 #include <linux/kvm.h> 12 12 #include <linux/kvm_host.h> 13 13 #include <asm/gmap.h> 14 + #include <asm/gmap_helpers.h> 14 15 #include <asm/virtio-ccw.h> 15 16 #include "kvm-s390.h" 16 17 #include "trace.h" 17 18 #include "trace-s390.h" 18 19 #include "gaccess.h" 20 + 21 + static void do_discard_gfn_range(struct kvm_vcpu *vcpu, gfn_t gfn_start, gfn_t gfn_end) 22 + { 23 + struct kvm_memslot_iter iter; 24 + struct kvm_memory_slot *slot; 25 + struct kvm_memslots *slots; 26 + unsigned long start, end; 27 + 28 + slots = kvm_vcpu_memslots(vcpu); 29 + 30 + kvm_for_each_memslot_in_gfn_range(&iter, slots, gfn_start, gfn_end) { 31 + slot = iter.slot; 32 + start = __gfn_to_hva_memslot(slot, max(gfn_start, slot->base_gfn)); 33 + end = __gfn_to_hva_memslot(slot, min(gfn_end, slot->base_gfn + slot->npages)); 34 + gmap_helper_discard(vcpu->kvm->mm, start, end); 35 + } 36 + } 19 37 20 38 static int diag_release_pages(struct kvm_vcpu *vcpu) 21 39 { ··· 50 32 51 33 VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end); 52 34 35 + mmap_read_lock(vcpu->kvm->mm); 53 36 /* 54 37 * We checked for start >= end above, so lets check for the 55 38 * fast path (no prefix swap page involved) 56 39 */ 57 40 if (end <= prefix || start >= prefix + 2 * PAGE_SIZE) { 58 - gmap_discard(vcpu->arch.gmap, start, end); 41 + do_discard_gfn_range(vcpu, gpa_to_gfn(start), gpa_to_gfn(end)); 59 42 } else { 60 43 /* 61 44 * This is slow path. gmap_discard will check for start ··· 64 45 * prefix and let gmap_discard make some of these calls 65 46 * NOPs. 
66 47 */ 67 - gmap_discard(vcpu->arch.gmap, start, prefix); 48 + do_discard_gfn_range(vcpu, gpa_to_gfn(start), gpa_to_gfn(prefix)); 68 49 if (start <= prefix) 69 - gmap_discard(vcpu->arch.gmap, 0, PAGE_SIZE); 50 + do_discard_gfn_range(vcpu, 0, 1); 70 51 if (end > prefix + PAGE_SIZE) 71 - gmap_discard(vcpu->arch.gmap, PAGE_SIZE, 2 * PAGE_SIZE); 72 - gmap_discard(vcpu->arch.gmap, prefix + 2 * PAGE_SIZE, end); 52 + do_discard_gfn_range(vcpu, 1, 2); 53 + do_discard_gfn_range(vcpu, gpa_to_gfn(prefix) + 2, gpa_to_gfn(end)); 73 54 } 55 + mmap_read_unlock(vcpu->kvm->mm); 74 56 return 0; 75 57 } 76 58
+4 -1
arch/s390/kvm/kvm-s390.c
··· 40 40 #include <asm/machine.h> 41 41 #include <asm/stp.h> 42 42 #include <asm/gmap.h> 43 + #include <asm/gmap_helpers.h> 43 44 #include <asm/nmi.h> 44 45 #include <asm/isc.h> 45 46 #include <asm/sclp.h> ··· 2675 2674 if (r) 2676 2675 break; 2677 2676 2678 - r = s390_disable_cow_sharing(); 2677 + mmap_write_lock(kvm->mm); 2678 + r = gmap_helper_disable_cow_sharing(); 2679 + mmap_write_unlock(kvm->mm); 2679 2680 if (r) 2680 2681 break; 2681 2682
+2
arch/s390/mm/Makefile
··· 12 12 obj-$(CONFIG_PTDUMP) += dump_pagetables.o 13 13 obj-$(CONFIG_PGSTE) += gmap.o 14 14 obj-$(CONFIG_PFAULT) += pfault.o 15 + 16 + obj-$(subst m,y,$(CONFIG_KVM)) += gmap_helpers.o
+5 -179
arch/s390/mm/gmap.c
··· 22 22 #include <asm/page-states.h> 23 23 #include <asm/pgalloc.h> 24 24 #include <asm/machine.h> 25 + #include <asm/gmap_helpers.h> 25 26 #include <asm/gmap.h> 26 27 #include <asm/page.h> 27 28 ··· 620 619 */ 621 620 void __gmap_zap(struct gmap *gmap, unsigned long gaddr) 622 621 { 623 - struct vm_area_struct *vma; 624 622 unsigned long vmaddr; 625 - spinlock_t *ptl; 626 - pte_t *ptep; 623 + 624 + mmap_assert_locked(gmap->mm); 627 625 628 626 /* Find the vm address for the guest address */ 629 627 vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host, 630 628 gaddr >> PMD_SHIFT); 631 629 if (vmaddr) { 632 630 vmaddr |= gaddr & ~PMD_MASK; 633 - 634 - vma = vma_lookup(gmap->mm, vmaddr); 635 - if (!vma || is_vm_hugetlb_page(vma)) 636 - return; 637 - 638 - /* Get pointer to the page table entry */ 639 - ptep = get_locked_pte(gmap->mm, vmaddr, &ptl); 640 - if (likely(ptep)) { 641 - ptep_zap_unused(gmap->mm, vmaddr, ptep, 0); 642 - pte_unmap_unlock(ptep, ptl); 643 - } 631 + gmap_helper_zap_one_page(gmap->mm, vmaddr); 644 632 } 645 633 } 646 634 EXPORT_SYMBOL_GPL(__gmap_zap); 647 - 648 - void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to) 649 - { 650 - unsigned long gaddr, vmaddr, size; 651 - struct vm_area_struct *vma; 652 - 653 - mmap_read_lock(gmap->mm); 654 - for (gaddr = from; gaddr < to; 655 - gaddr = (gaddr + PMD_SIZE) & PMD_MASK) { 656 - /* Find the vm address for the guest address */ 657 - vmaddr = (unsigned long) 658 - radix_tree_lookup(&gmap->guest_to_host, 659 - gaddr >> PMD_SHIFT); 660 - if (!vmaddr) 661 - continue; 662 - vmaddr |= gaddr & ~PMD_MASK; 663 - /* Find vma in the parent mm */ 664 - vma = find_vma(gmap->mm, vmaddr); 665 - if (!vma) 666 - continue; 667 - /* 668 - * We do not discard pages that are backed by 669 - * hugetlbfs, so we don't have to refault them. 
670 - */ 671 - if (is_vm_hugetlb_page(vma)) 672 - continue; 673 - size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK)); 674 - zap_page_range_single(vma, vmaddr, size, NULL); 675 - } 676 - mmap_read_unlock(gmap->mm); 677 - } 678 - EXPORT_SYMBOL_GPL(gmap_discard); 679 635 680 636 static LIST_HEAD(gmap_notifier_list); 681 637 static DEFINE_SPINLOCK(gmap_notifier_lock); ··· 2226 2268 } 2227 2269 EXPORT_SYMBOL_GPL(s390_enable_sie); 2228 2270 2229 - static int find_zeropage_pte_entry(pte_t *pte, unsigned long addr, 2230 - unsigned long end, struct mm_walk *walk) 2231 - { 2232 - unsigned long *found_addr = walk->private; 2233 - 2234 - /* Return 1 of the page is a zeropage. */ 2235 - if (is_zero_pfn(pte_pfn(*pte))) { 2236 - /* 2237 - * Shared zeropage in e.g., a FS DAX mapping? We cannot do the 2238 - * right thing and likely don't care: FAULT_FLAG_UNSHARE 2239 - * currently only works in COW mappings, which is also where 2240 - * mm_forbids_zeropage() is checked. 2241 - */ 2242 - if (!is_cow_mapping(walk->vma->vm_flags)) 2243 - return -EFAULT; 2244 - 2245 - *found_addr = addr; 2246 - return 1; 2247 - } 2248 - return 0; 2249 - } 2250 - 2251 - static const struct mm_walk_ops find_zeropage_ops = { 2252 - .pte_entry = find_zeropage_pte_entry, 2253 - .walk_lock = PGWALK_WRLOCK, 2254 - }; 2255 - 2256 - /* 2257 - * Unshare all shared zeropages, replacing them by anonymous pages. Note that 2258 - * we cannot simply zap all shared zeropages, because this could later 2259 - * trigger unexpected userfaultfd missing events. 2260 - * 2261 - * This must be called after mm->context.allow_cow_sharing was 2262 - * set to 0, to avoid future mappings of shared zeropages. 2263 - * 2264 - * mm contracts with s390, that even if mm were to remove a page table, 2265 - * and racing with walk_page_range_vma() calling pte_offset_map_lock() 2266 - * would fail, it will never insert a page table containing empty zero 2267 - * pages once mm_forbids_zeropage(mm) i.e. 
2268 - * mm->context.allow_cow_sharing is set to 0. 2269 - */ 2270 - static int __s390_unshare_zeropages(struct mm_struct *mm) 2271 - { 2272 - struct vm_area_struct *vma; 2273 - VMA_ITERATOR(vmi, mm, 0); 2274 - unsigned long addr; 2275 - vm_fault_t fault; 2276 - int rc; 2277 - 2278 - for_each_vma(vmi, vma) { 2279 - /* 2280 - * We could only look at COW mappings, but it's more future 2281 - * proof to catch unexpected zeropages in other mappings and 2282 - * fail. 2283 - */ 2284 - if ((vma->vm_flags & VM_PFNMAP) || is_vm_hugetlb_page(vma)) 2285 - continue; 2286 - addr = vma->vm_start; 2287 - 2288 - retry: 2289 - rc = walk_page_range_vma(vma, addr, vma->vm_end, 2290 - &find_zeropage_ops, &addr); 2291 - if (rc < 0) 2292 - return rc; 2293 - else if (!rc) 2294 - continue; 2295 - 2296 - /* addr was updated by find_zeropage_pte_entry() */ 2297 - fault = handle_mm_fault(vma, addr, 2298 - FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE, 2299 - NULL); 2300 - if (fault & VM_FAULT_OOM) 2301 - return -ENOMEM; 2302 - /* 2303 - * See break_ksm(): even after handle_mm_fault() returned 0, we 2304 - * must start the lookup from the current address, because 2305 - * handle_mm_fault() may back out if there's any difficulty. 2306 - * 2307 - * VM_FAULT_SIGBUS and VM_FAULT_SIGSEGV are unexpected but 2308 - * maybe they could trigger in the future on concurrent 2309 - * truncation. In that case, the shared zeropage would be gone 2310 - * and we can simply retry and make progress. 2311 - */ 2312 - cond_resched(); 2313 - goto retry; 2314 - } 2315 - 2316 - return 0; 2317 - } 2318 - 2319 - static int __s390_disable_cow_sharing(struct mm_struct *mm) 2320 - { 2321 - int rc; 2322 - 2323 - if (!mm->context.allow_cow_sharing) 2324 - return 0; 2325 - 2326 - mm->context.allow_cow_sharing = 0; 2327 - 2328 - /* Replace all shared zeropages by anonymous pages. */ 2329 - rc = __s390_unshare_zeropages(mm); 2330 - /* 2331 - * Make sure to disable KSM (if enabled for the whole process or 2332 - * individual VMAs). 
Note that nothing currently hinders user space 2333 - * from re-enabling it. 2334 - */ 2335 - if (!rc) 2336 - rc = ksm_disable(mm); 2337 - if (rc) 2338 - mm->context.allow_cow_sharing = 1; 2339 - return rc; 2340 - } 2341 - 2342 - /* 2343 - * Disable most COW-sharing of memory pages for the whole process: 2344 - * (1) Disable KSM and unmerge/unshare any KSM pages. 2345 - * (2) Disallow shared zeropages and unshare any zerpages that are mapped. 2346 - * 2347 - * Not that we currently don't bother with COW-shared pages that are shared 2348 - * with parent/child processes due to fork(). 2349 - */ 2350 - int s390_disable_cow_sharing(void) 2351 - { 2352 - int rc; 2353 - 2354 - mmap_write_lock(current->mm); 2355 - rc = __s390_disable_cow_sharing(current->mm); 2356 - mmap_write_unlock(current->mm); 2357 - return rc; 2358 - } 2359 - EXPORT_SYMBOL_GPL(s390_disable_cow_sharing); 2360 - 2361 2271 /* 2362 2272 * Enable storage key handling from now on and initialize the storage 2363 2273 * keys with the default key. ··· 2293 2467 goto out_up; 2294 2468 2295 2469 mm->context.uses_skeys = 1; 2296 - rc = __s390_disable_cow_sharing(mm); 2470 + rc = gmap_helper_disable_cow_sharing(); 2297 2471 if (rc) { 2298 2472 mm->context.uses_skeys = 0; 2299 2473 goto out_up;
+221
arch/s390/mm/gmap_helpers.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Helper functions for KVM guest address space mapping code 4 + * 5 + * Copyright IBM Corp. 2007, 2025 6 + */ 7 + #include <linux/mm_types.h> 8 + #include <linux/mmap_lock.h> 9 + #include <linux/mm.h> 10 + #include <linux/hugetlb.h> 11 + #include <linux/swap.h> 12 + #include <linux/swapops.h> 13 + #include <linux/pagewalk.h> 14 + #include <linux/ksm.h> 15 + #include <asm/gmap_helpers.h> 16 + 17 + /** 18 + * ptep_zap_swap_entry() - discard a swap entry. 19 + * @mm: the mm 20 + * @entry: the swap entry that needs to be zapped 21 + * 22 + * Discards the given swap entry. If the swap entry was an actual swap 23 + * entry (and not a migration entry, for example), the actual swapped 24 + * page is also discarded from swap. 25 + */ 26 + static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry) 27 + { 28 + if (!non_swap_entry(entry)) 29 + dec_mm_counter(mm, MM_SWAPENTS); 30 + else if (is_migration_entry(entry)) 31 + dec_mm_counter(mm, mm_counter(pfn_swap_entry_folio(entry))); 32 + free_swap_and_cache(entry); 33 + } 34 + 35 + /** 36 + * gmap_helper_zap_one_page() - discard a page if it was swapped. 37 + * @mm: the mm 38 + * @vmaddr: the userspace virtual address that needs to be discarded 39 + * 40 + * If the given address maps to a swap entry, discard it. 41 + * 42 + * Context: needs to be called while holding the mmap lock. 
43 + */ 44 + void gmap_helper_zap_one_page(struct mm_struct *mm, unsigned long vmaddr) 45 + { 46 + struct vm_area_struct *vma; 47 + spinlock_t *ptl; 48 + pte_t *ptep; 49 + 50 + mmap_assert_locked(mm); 51 + 52 + /* Find the vm address for the guest address */ 53 + vma = vma_lookup(mm, vmaddr); 54 + if (!vma || is_vm_hugetlb_page(vma)) 55 + return; 56 + 57 + /* Get pointer to the page table entry */ 58 + ptep = get_locked_pte(mm, vmaddr, &ptl); 59 + if (unlikely(!ptep)) 60 + return; 61 + if (pte_swap(*ptep)) 62 + ptep_zap_swap_entry(mm, pte_to_swp_entry(*ptep)); 63 + pte_unmap_unlock(ptep, ptl); 64 + } 65 + EXPORT_SYMBOL_GPL(gmap_helper_zap_one_page); 66 + 67 + /** 68 + * gmap_helper_discard() - discard user pages in the given range 69 + * @mm: the mm 70 + * @vmaddr: starting userspace address 71 + * @end: end address (first address outside the range) 72 + * 73 + * All userspace pages in the range [@vmaddr, @end) are discarded and unmapped. 74 + * 75 + * Context: needs to be called while holding the mmap lock. 76 + */ 77 + void gmap_helper_discard(struct mm_struct *mm, unsigned long vmaddr, unsigned long end) 78 + { 79 + struct vm_area_struct *vma; 80 + 81 + mmap_assert_locked(mm); 82 + 83 + while (vmaddr < end) { 84 + vma = find_vma_intersection(mm, vmaddr, end); 85 + if (!vma) 86 + return; 87 + if (!is_vm_hugetlb_page(vma)) 88 + zap_page_range_single(vma, vmaddr, min(end, vma->vm_end) - vmaddr, NULL); 89 + vmaddr = vma->vm_end; 90 + } 91 + } 92 + EXPORT_SYMBOL_GPL(gmap_helper_discard); 93 + 94 + static int find_zeropage_pte_entry(pte_t *pte, unsigned long addr, 95 + unsigned long end, struct mm_walk *walk) 96 + { 97 + unsigned long *found_addr = walk->private; 98 + 99 + /* Return 1 if the page is a zeropage. */ 100 + if (is_zero_pfn(pte_pfn(*pte))) { 101 + /* 102 + * Shared zeropage in e.g., a FS DAX mapping? 
We cannot do the 103 + * right thing and likely don't care: FAULT_FLAG_UNSHARE 104 + * currently only works in COW mappings, which is also where 105 + * mm_forbids_zeropage() is checked. 106 + */ 107 + if (!is_cow_mapping(walk->vma->vm_flags)) 108 + return -EFAULT; 109 + 110 + *found_addr = addr; 111 + return 1; 112 + } 113 + return 0; 114 + } 115 + 116 + static const struct mm_walk_ops find_zeropage_ops = { 117 + .pte_entry = find_zeropage_pte_entry, 118 + .walk_lock = PGWALK_WRLOCK, 119 + }; 120 + 121 + /** __gmap_helper_unshare_zeropages() - unshare all shared zeropages 122 + * @mm: the mm whose zero pages are to be unshared 123 + * 124 + * Unshare all shared zeropages, replacing them by anonymous pages. Note that 125 + * we cannot simply zap all shared zeropages, because this could later 126 + * trigger unexpected userfaultfd missing events. 127 + * 128 + * This must be called after mm->context.allow_cow_sharing was 129 + * set to 0, to avoid future mappings of shared zeropages. 130 + * 131 + * mm contracts with s390, that even if mm were to remove a page table, 132 + * and racing with walk_page_range_vma() calling pte_offset_map_lock() 133 + * would fail, it will never insert a page table containing empty zero 134 + * pages once mm_forbids_zeropage(mm) i.e. 135 + * mm->context.allow_cow_sharing is set to 0. 136 + */ 137 + static int __gmap_helper_unshare_zeropages(struct mm_struct *mm) 138 + { 139 + struct vm_area_struct *vma; 140 + VMA_ITERATOR(vmi, mm, 0); 141 + unsigned long addr; 142 + vm_fault_t fault; 143 + int rc; 144 + 145 + for_each_vma(vmi, vma) { 146 + /* 147 + * We could only look at COW mappings, but it's more future 148 + * proof to catch unexpected zeropages in other mappings and 149 + * fail. 
150 + */ 151 + if ((vma->vm_flags & VM_PFNMAP) || is_vm_hugetlb_page(vma)) 152 + continue; 153 + addr = vma->vm_start; 154 + 155 + retry: 156 + rc = walk_page_range_vma(vma, addr, vma->vm_end, 157 + &find_zeropage_ops, &addr); 158 + if (rc < 0) 159 + return rc; 160 + else if (!rc) 161 + continue; 162 + 163 + /* addr was updated by find_zeropage_pte_entry() */ 164 + fault = handle_mm_fault(vma, addr, 165 + FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE, 166 + NULL); 167 + if (fault & VM_FAULT_OOM) 168 + return -ENOMEM; 169 + /* 170 + * See break_ksm(): even after handle_mm_fault() returned 0, we 171 + * must start the lookup from the current address, because 172 + * handle_mm_fault() may back out if there's any difficulty. 173 + * 174 + * VM_FAULT_SIGBUS and VM_FAULT_SIGSEGV are unexpected but 175 + * maybe they could trigger in the future on concurrent 176 + * truncation. In that case, the shared zeropage would be gone 177 + * and we can simply retry and make progress. 178 + */ 179 + cond_resched(); 180 + goto retry; 181 + } 182 + 183 + return 0; 184 + } 185 + 186 + /** 187 + * gmap_helper_disable_cow_sharing() - disable all COW sharing 188 + * 189 + * Disable most COW-sharing of memory pages for the whole process: 190 + * (1) Disable KSM and unmerge/unshare any KSM pages. 191 + * (2) Disallow shared zeropages and unshare any zeropages that are mapped. 192 + * 193 + * Note that we currently don't bother with COW-shared pages that are shared 194 + * with parent/child processes due to fork(). 195 + */ 196 + int gmap_helper_disable_cow_sharing(void) 197 + { 198 + struct mm_struct *mm = current->mm; 199 + int rc; 200 + 201 + mmap_assert_write_locked(mm); 202 + 203 + if (!mm->context.allow_cow_sharing) 204 + return 0; 205 + 206 + mm->context.allow_cow_sharing = 0; 207 + 208 + /* Replace all shared zeropages by anonymous pages. */ 209 + rc = __gmap_helper_unshare_zeropages(mm); 210 + /* 211 + * Make sure to disable KSM (if enabled for the whole process or 212 + * individual VMAs). 
Note that nothing currently hinders user space 213 + * from re-enabling it. 214 + */ 215 + if (!rc) 216 + rc = ksm_disable(mm); 217 + if (rc) 218 + mm->context.allow_cow_sharing = 1; 219 + return rc; 220 + } 221 + EXPORT_SYMBOL_GPL(gmap_helper_disable_cow_sharing);