Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'mm-stable-2022-10-13' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull more MM updates from Andrew Morton:

- fix a race which causes page refcounting errors in ZONE_DEVICE pages
(Alistair Popple)

- fix userfaultfd test harness instability (Peter Xu)

- various other patches in MM, mainly fixes

* tag 'mm-stable-2022-10-13' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (29 commits)
highmem: fix kmap_to_page() for kmap_local_page() addresses
mm/page_alloc: fix incorrect PGFREE and PGALLOC for high-order page
mm/selftest: uffd: explain the write missing fault check
mm/hugetlb: use hugetlb_pte_stable in migration race check
mm/hugetlb: fix race condition of uffd missing/minor handling
zram: always expose rw_page
LoongArch: update local TLB if PTE entry exists
mm: use update_mmu_tlb() on the second thread
kasan: fix array-bounds warnings in tests
hmm-tests: add test for migrate_device_range()
nouveau/dmem: evict device private memory during release
nouveau/dmem: refactor nouveau_dmem_fault_copy_one()
mm/migrate_device.c: add migrate_device_range()
mm/migrate_device.c: refactor migrate_vma and migrate_deivce_coherent_page()
mm/memremap.c: take a pgmap reference on page allocation
mm: free device private pages have zero refcount
mm/memory.c: fix race when faulting a device private page
mm/damon: use damon_sz_region() in appropriate place
mm/damon: move sz_damon_region to damon_sz_region
lib/test_meminit: add checks for the allocation functions
...

+723 -257
+3
arch/loongarch/include/asm/pgtable.h
··· 412 412 __update_tlb(vma, address, ptep); 413 413 } 414 414 415 + #define __HAVE_ARCH_UPDATE_MMU_TLB 416 + #define update_mmu_tlb update_mmu_cache 417 + 415 418 static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, 416 419 unsigned long address, pmd_t *pmdp) 417 420 {
+12 -9
arch/powerpc/kvm/book3s_hv_uvmem.c
··· 508 508 static int __kvmppc_svm_page_out(struct vm_area_struct *vma, 509 509 unsigned long start, 510 510 unsigned long end, unsigned long page_shift, 511 - struct kvm *kvm, unsigned long gpa) 511 + struct kvm *kvm, unsigned long gpa, struct page *fault_page) 512 512 { 513 513 unsigned long src_pfn, dst_pfn = 0; 514 - struct migrate_vma mig; 514 + struct migrate_vma mig = { 0 }; 515 515 struct page *dpage, *spage; 516 516 struct kvmppc_uvmem_page_pvt *pvt; 517 517 unsigned long pfn; ··· 525 525 mig.dst = &dst_pfn; 526 526 mig.pgmap_owner = &kvmppc_uvmem_pgmap; 527 527 mig.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE; 528 + mig.fault_page = fault_page; 528 529 529 530 /* The requested page is already paged-out, nothing to do */ 530 531 if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL)) ··· 581 580 static inline int kvmppc_svm_page_out(struct vm_area_struct *vma, 582 581 unsigned long start, unsigned long end, 583 582 unsigned long page_shift, 584 - struct kvm *kvm, unsigned long gpa) 583 + struct kvm *kvm, unsigned long gpa, 584 + struct page *fault_page) 585 585 { 586 586 int ret; 587 587 588 588 mutex_lock(&kvm->arch.uvmem_lock); 589 - ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa); 589 + ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa, 590 + fault_page); 590 591 mutex_unlock(&kvm->arch.uvmem_lock); 591 592 592 593 return ret; ··· 637 634 pvt->remove_gfn = true; 638 635 639 636 if (__kvmppc_svm_page_out(vma, addr, addr + PAGE_SIZE, 640 - PAGE_SHIFT, kvm, pvt->gpa)) 637 + PAGE_SHIFT, kvm, pvt->gpa, NULL)) 641 638 pr_err("Can't page out gpa:0x%lx addr:0x%lx\n", 642 639 pvt->gpa, addr); 643 640 } else { ··· 718 715 719 716 dpage = pfn_to_page(uvmem_pfn); 720 717 dpage->zone_device_data = pvt; 721 - lock_page(dpage); 718 + zone_device_page_init(dpage); 722 719 return dpage; 723 720 out_clear: 724 721 spin_lock(&kvmppc_uvmem_bitmap_lock); ··· 739 736 bool pagein) 740 737 { 741 738 unsigned long src_pfn, dst_pfn = 0; 742 - struct migrate_vma mig; 739 + struct migrate_vma mig = { 0 }; 743 740 struct page *spage; 744 741 unsigned long pfn; 745 742 struct page *dpage; ··· 997 994 998 995 if (kvmppc_svm_page_out(vmf->vma, vmf->address, 999 996 vmf->address + PAGE_SIZE, PAGE_SHIFT, 1000 - pvt->kvm, pvt->gpa)) 997 + pvt->kvm, pvt->gpa, vmf->page)) 1001 998 return VM_FAULT_SIGBUS; 1002 999 else 1003 1000 return 0; ··· 1068 1065 if (!vma || vma->vm_start > start || vma->vm_end < end) 1069 1066 goto out; 1070 1067 1071 - if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa)) 1068 + if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa, NULL)) 1072 1069 ret = H_SUCCESS; 1073 1070 out: 1074 1071 mmap_read_unlock(kvm->mm);
+3 -23
drivers/block/zram/zram_drv.c
··· 52 52 static size_t huge_class_size; 53 53 54 54 static const struct block_device_operations zram_devops; 55 - #ifdef CONFIG_ZRAM_WRITEBACK 56 - static const struct block_device_operations zram_wb_devops; 57 - #endif 58 55 59 56 static void zram_free_page(struct zram *zram, size_t index); 60 57 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, ··· 543 546 zram->backing_dev = backing_dev; 544 547 zram->bitmap = bitmap; 545 548 zram->nr_pages = nr_pages; 546 - /* 547 - * With writeback feature, zram does asynchronous IO so it's no longer 548 - * synchronous device so let's remove synchronous io flag. Othewise, 549 - * upper layer(e.g., swap) could wait IO completion rather than 550 - * (submit and return), which will cause system sluggish. 551 - * Furthermore, when the IO function returns(e.g., swap_readpage), 552 - * upper layer expects IO was done so it could deallocate the page 553 - * freely but in fact, IO is going on so finally could cause 554 - * use-after-free when the IO is really done. 555 - */ 556 - zram->disk->fops = &zram_wb_devops; 557 549 up_write(&zram->init_lock); 558 550 559 551 pr_info("setup backing device %s\n", file_name); ··· 1256 1270 struct bio_vec bvec; 1257 1271 1258 1272 zram_slot_unlock(zram, index); 1273 + /* A null bio means rw_page was used, we must fallback to bio */ 1274 + if (!bio) 1275 + return -EOPNOTSUPP; 1259 1276 1260 1277 bvec.bv_page = page; 1261 1278 bvec.bv_len = PAGE_SIZE; ··· 1844 1855 .rw_page = zram_rw_page, 1845 1856 .owner = THIS_MODULE 1846 1857 }; 1847 - 1848 - #ifdef CONFIG_ZRAM_WRITEBACK 1849 - static const struct block_device_operations zram_wb_devops = { 1850 - .open = zram_open, 1851 - .submit_bio = zram_submit_bio, 1852 - .swap_slot_free_notify = zram_slot_free_notify, 1853 - .owner = THIS_MODULE 1854 - }; 1855 - #endif 1856 1858 1857 1859 static DEVICE_ATTR_WO(compact); 1858 1860 static DEVICE_ATTR_RW(disksize);
+11 -8
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
··· 223 223 page = pfn_to_page(pfn); 224 224 svm_range_bo_ref(prange->svm_bo); 225 225 page->zone_device_data = prange->svm_bo; 226 - lock_page(page); 226 + zone_device_page_init(page); 227 227 } 228 228 229 229 static void ··· 410 410 uint64_t npages = (end - start) >> PAGE_SHIFT; 411 411 struct kfd_process_device *pdd; 412 412 struct dma_fence *mfence = NULL; 413 - struct migrate_vma migrate; 413 + struct migrate_vma migrate = { 0 }; 414 414 unsigned long cpages = 0; 415 415 dma_addr_t *scratch; 416 416 void *buf; ··· 666 666 static long 667 667 svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, 668 668 struct vm_area_struct *vma, uint64_t start, uint64_t end, 669 - uint32_t trigger) 669 + uint32_t trigger, struct page *fault_page) 670 670 { 671 671 struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms); 672 672 uint64_t npages = (end - start) >> PAGE_SHIFT; ··· 674 674 unsigned long cpages = 0; 675 675 struct kfd_process_device *pdd; 676 676 struct dma_fence *mfence = NULL; 677 - struct migrate_vma migrate; 677 + struct migrate_vma migrate = { 0 }; 678 678 dma_addr_t *scratch; 679 679 void *buf; 680 680 int r = -ENOMEM; ··· 697 697 698 698 migrate.src = buf; 699 699 migrate.dst = migrate.src + npages; 700 + migrate.fault_page = fault_page; 700 701 scratch = (dma_addr_t *)(migrate.dst + npages); 701 702 702 703 kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid, ··· 765 764 * 0 - OK, otherwise error code 766 765 */ 767 766 int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, 768 - uint32_t trigger) 767 + uint32_t trigger, struct page *fault_page) 769 768 { 770 769 struct amdgpu_device *adev; 771 770 struct vm_area_struct *vma; ··· 806 805 } 807 806 808 807 next = min(vma->vm_end, end); 809 - r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger); 808 + r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next, trigger, 809 + fault_page); 810 810 if (r < 0) { 811 811 pr_debug("failed %ld to migrate prange %p\n", r, prange); 812 812 break; ··· 851 849 pr_debug("from gpu 0x%x to gpu 0x%x\n", prange->actual_loc, best_loc); 852 850 853 851 do { 854 - r = svm_migrate_vram_to_ram(prange, mm, trigger); 852 + r = svm_migrate_vram_to_ram(prange, mm, trigger, NULL); 855 853 if (r) 856 854 return r; 857 855 } while (prange->actual_loc && --retries); ··· 952 950 } 953 951 954 952 r = svm_migrate_vram_to_ram(prange, vmf->vma->vm_mm, 955 - KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU); 953 + KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU, 954 + vmf->page); 956 955 if (r) 957 956 pr_debug("failed %d migrate svms 0x%p range 0x%p [0x%lx 0x%lx]\n", 958 957 r, prange->svms, prange, prange->start, prange->last);
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
··· 43 43 int svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc, 44 44 struct mm_struct *mm, uint32_t trigger); 45 45 int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, 46 - uint32_t trigger); 46 + uint32_t trigger, struct page *fault_page); 47 47 unsigned long 48 48 svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr); 49 49
+7 -4
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
··· 2913 2913 */ 2914 2914 if (prange->actual_loc) 2915 2915 r = svm_migrate_vram_to_ram(prange, mm, 2916 - KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU); 2916 + KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, 2917 + NULL); 2917 2918 else 2918 2919 r = 0; 2919 2920 } 2920 2921 } else { 2921 2922 r = svm_migrate_vram_to_ram(prange, mm, 2922 - KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU); 2923 + KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU, 2924 + NULL); 2923 2925 } 2924 2926 if (r) { 2925 2927 pr_debug("failed %d to migrate svms %p [0x%lx 0x%lx]\n", ··· 3280 3278 return 0; 3281 3279 3282 3280 if (!best_loc) { 3283 - r = svm_migrate_vram_to_ram(prange, mm, KFD_MIGRATE_TRIGGER_PREFETCH); 3281 + r = svm_migrate_vram_to_ram(prange, mm, 3282 + KFD_MIGRATE_TRIGGER_PREFETCH, NULL); 3284 3283 *migrated = !r; 3285 3284 return r; 3286 3285 } ··· 3342 3339 mutex_lock(&prange->migrate_mutex); 3343 3340 do { 3344 3341 r = svm_migrate_vram_to_ram(prange, mm, 3345 - KFD_MIGRATE_TRIGGER_TTM_EVICTION); 3342 + KFD_MIGRATE_TRIGGER_TTM_EVICTION, NULL); 3346 3343 } while (!r && prange->actual_loc && --retries); 3347 3344 3348 3345 if (!r && prange->actual_loc)
+77 -31
drivers/gpu/drm/nouveau/nouveau_dmem.c
··· 139 139 } 140 140 } 141 141 142 - static vm_fault_t nouveau_dmem_fault_copy_one(struct nouveau_drm *drm, 143 - struct vm_fault *vmf, struct migrate_vma *args, 144 - dma_addr_t *dma_addr) 142 + static int nouveau_dmem_copy_one(struct nouveau_drm *drm, struct page *spage, 143 + struct page *dpage, dma_addr_t *dma_addr) 145 144 { 146 145 struct device *dev = drm->dev->dev; 147 - struct page *dpage, *spage; 148 - struct nouveau_svmm *svmm; 149 146 150 - spage = migrate_pfn_to_page(args->src[0]); 151 - if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE)) 152 - return 0; 153 - 154 - dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address); 155 - if (!dpage) 156 - return VM_FAULT_SIGBUS; 157 147 lock_page(dpage); 158 148 159 149 *dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); 160 150 if (dma_mapping_error(dev, *dma_addr)) 161 - goto error_free_page; 151 + return -EIO; 162 152 163 - svmm = spage->zone_device_data; 164 - mutex_lock(&svmm->mutex); 165 - nouveau_svmm_invalidate(svmm, args->start, args->end); 166 153 if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr, 167 - NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage))) 168 - goto error_dma_unmap; 169 - mutex_unlock(&svmm->mutex); 154 + NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage))) { 155 + dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL); 156 + return -EIO; 157 + } 170 158 171 - args->dst[0] = migrate_pfn(page_to_pfn(dpage)); 172 159 return 0; 173 - 174 - error_dma_unmap: 175 - mutex_unlock(&svmm->mutex); 176 - dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL); 177 - error_free_page: 178 - __free_page(dpage); 179 - return VM_FAULT_SIGBUS; 180 160 } 181 161 182 162 static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf) ··· 164 184 struct nouveau_drm *drm = page_to_drm(vmf->page); 165 185 struct nouveau_dmem *dmem = drm->dmem; 166 186 struct nouveau_fence *fence; 187 + struct nouveau_svmm *svmm; 188 + struct page *spage, *dpage; 167 189 unsigned long src = 0, dst = 0; 168 190 dma_addr_t dma_addr = 0; 169 - vm_fault_t ret; 191 + vm_fault_t ret = 0; 170 192 struct migrate_vma args = { 171 193 .vma = vmf->vma, 172 194 .start = vmf->address, ··· 189 207 if (!args.cpages) 190 208 return 0; 191 209 192 - ret = nouveau_dmem_fault_copy_one(drm, vmf, &args, &dma_addr); 193 - if (ret || dst == 0) 210 + spage = migrate_pfn_to_page(src); 211 + if (!spage || !(src & MIGRATE_PFN_MIGRATE)) 194 212 goto done; 213 + 214 + dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address); 215 + if (!dpage) 216 + goto done; 217 + 218 + dst = migrate_pfn(page_to_pfn(dpage)); 219 + 220 + svmm = spage->zone_device_data; 221 + mutex_lock(&svmm->mutex); 222 + nouveau_svmm_invalidate(svmm, args.start, args.end); 223 + ret = nouveau_dmem_copy_one(drm, spage, dpage, &dma_addr); 224 + mutex_unlock(&svmm->mutex); 225 + if (ret) { 226 + ret = VM_FAULT_SIGBUS; 227 + goto done; 228 + } 195 229 196 230 nouveau_fence_new(dmem->migrate.chan, false, &fence); 197 231 migrate_vma_pages(&args); ··· 324 326 return NULL; 325 327 } 326 328 327 - lock_page(page); 329 + zone_device_page_init(page); 328 330 return page; 329 331 } 330 332 ··· 367 369 mutex_unlock(&drm->dmem->mutex); 368 370 } 369 371 372 + /* 373 + * Evict all pages mapping a chunk. 
374 + */ 375 + static void 376 + nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk) 377 + { 378 + unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT; 379 + unsigned long *src_pfns, *dst_pfns; 380 + dma_addr_t *dma_addrs; 381 + struct nouveau_fence *fence; 382 + 383 + src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL); 384 + dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL); 385 + dma_addrs = kcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL); 386 + 387 + migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT, 388 + npages); 389 + 390 + for (i = 0; i < npages; i++) { 391 + if (src_pfns[i] & MIGRATE_PFN_MIGRATE) { 392 + struct page *dpage; 393 + 394 + /* 395 + * _GFP_NOFAIL because the GPU is going away and there 396 + * is nothing sensible we can do if we can't copy the 397 + * data back. 398 + */ 399 + dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL); 400 + dst_pfns[i] = migrate_pfn(page_to_pfn(dpage)); 401 + nouveau_dmem_copy_one(chunk->drm, 402 + migrate_pfn_to_page(src_pfns[i]), dpage, 403 + &dma_addrs[i]); 404 + } 405 + } 406 + 407 + nouveau_fence_new(chunk->drm->dmem->migrate.chan, false, &fence); 408 + migrate_device_pages(src_pfns, dst_pfns, npages); 409 + nouveau_dmem_fence_done(&fence); 410 + migrate_device_finalize(src_pfns, dst_pfns, npages); 411 + kfree(src_pfns); 412 + kfree(dst_pfns); 413 + for (i = 0; i < npages; i++) 414 + dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL); 415 + kfree(dma_addrs); 416 + } 417 + 370 418 void 371 419 nouveau_dmem_fini(struct nouveau_drm *drm) 372 420 { ··· 424 380 mutex_lock(&drm->dmem->mutex); 425 381 426 382 list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) { 383 + nouveau_dmem_evict_chunk(chunk); 427 384 nouveau_bo_unpin(chunk->bo); 428 385 nouveau_bo_ref(NULL, &chunk->bo); 386 + WARN_ON(chunk->callocated); 429 387 list_del(&chunk->list); 430 388 memunmap_pages(&chunk->pagemap); 431 389 release_mem_region(chunk->pagemap.range.start,
+2 -1
fs/ext4/verity.c
··· 363 363 pgoff_t index, 364 364 unsigned long num_ra_pages) 365 365 { 366 - DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index); 367 366 struct page *page; 368 367 369 368 index += ext4_verity_metadata_pos(inode) >> PAGE_SHIFT; 370 369 371 370 page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED); 372 371 if (!page || !PageUptodate(page)) { 372 + DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index); 373 + 373 374 if (page) 374 375 put_page(page); 375 376 else if (num_ra_pages > 1)
+2 -1
fs/f2fs/verity.c
··· 258 258 pgoff_t index, 259 259 unsigned long num_ra_pages) 260 260 { 261 - DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index); 262 261 struct page *page; 263 262 264 263 index += f2fs_verity_metadata_pos(inode) >> PAGE_SHIFT; 265 264 266 265 page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED); 267 266 if (!page || !PageUptodate(page)) { 267 + DEFINE_READAHEAD(ractl, NULL, NULL, inode->i_mapping, index); 268 + 268 269 if (page) 269 270 put_page(page); 270 271 else if (num_ra_pages > 1)
+6
include/linux/damon.h
··· 484 484 return list_first_entry(&t->regions_list, struct damon_region, list); 485 485 } 486 486 487 + static inline unsigned long damon_sz_region(struct damon_region *r) 488 + { 489 + return r->ar.end - r->ar.start; 490 + } 491 + 492 + 487 493 #define damon_for_each_region(r, t) \ 488 494 list_for_each_entry(r, &t->regions_list, list) 489 495
+1
include/linux/memremap.h
··· 187 187 } 188 188 189 189 #ifdef CONFIG_ZONE_DEVICE 190 + void zone_device_page_init(struct page *page); 190 191 void *memremap_pages(struct dev_pagemap *pgmap, int nid); 191 192 void memunmap_pages(struct dev_pagemap *pgmap); 192 193 void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
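
With free device private pages now held at a zero refcount (see the mm/memremap.c hunk further down), a driver hands a page out by calling the new zone_device_page_init() instead of open-coding lock_page() on a page whose count was never dropped. Below is a minimal sketch of a driver-side allocator using the new helper; the mydrv_* names and the free-list layout are hypothetical, modelled on the kvmppc, amdkfd and nouveau hunks above and the test_hmm hunk below.

struct mydrv_device {
	spinlock_t lock;
	struct page *free_pages;	/* chained via page->zone_device_data */
};

static struct page *mydrv_alloc_device_page(struct mydrv_device *mdev)
{
	struct page *dpage;

	spin_lock(&mdev->lock);
	dpage = mdev->free_pages;
	if (!dpage) {
		spin_unlock(&mdev->lock);
		return NULL;
	}
	mdev->free_pages = dpage->zone_device_data;
	spin_unlock(&mdev->lock);

	/* Refcount goes 0 -> 1 and the page is returned locked. */
	zone_device_page_init(dpage);
	return dpage;
}
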
+15
include/linux/migrate.h
··· 62 62 #ifdef CONFIG_MIGRATION 63 63 64 64 extern void putback_movable_pages(struct list_head *l); 65 + int migrate_folio_extra(struct address_space *mapping, struct folio *dst, 66 + struct folio *src, enum migrate_mode mode, int extra_count); 65 67 int migrate_folio(struct address_space *mapping, struct folio *dst, 66 68 struct folio *src, enum migrate_mode mode); 67 69 extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, ··· 199 197 */ 200 198 void *pgmap_owner; 201 199 unsigned long flags; 200 + 201 + /* 202 + * Set to vmf->page if this is being called to migrate a page as part of 203 + * a migrate_to_ram() callback. 204 + */ 205 + struct page *fault_page; 202 206 }; 203 207 204 208 int migrate_vma_setup(struct migrate_vma *args); 205 209 void migrate_vma_pages(struct migrate_vma *migrate); 206 210 void migrate_vma_finalize(struct migrate_vma *migrate); 211 + int migrate_device_range(unsigned long *src_pfns, unsigned long start, 212 + unsigned long npages); 213 + void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns, 214 + unsigned long npages); 215 + void migrate_device_finalize(unsigned long *src_pfns, 216 + unsigned long *dst_pfns, unsigned long npages); 217 + 207 218 #endif /* CONFIG_MIGRATION */ 208 219 209 220 #endif /* _LINUX_MIGRATE_H */
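
The new migrate_device_range() / migrate_device_pages() / migrate_device_finalize() trio lets a driver evict device private memory without knowing the virtual mappings of every page, which is what the nouveau release path above and the test_hmm release path below do. A hedged sketch of that unload-time flow follows; mydrv_copy_to_ram() and the mydrv_device type are assumptions for illustration, not part of the API, and allocation-failure handling is elided.

static void mydrv_evict_range(struct mydrv_device *mdev,
			      unsigned long start_pfn, unsigned long npages)
{
	unsigned long *src = kcalloc(npages, sizeof(*src), GFP_KERNEL);
	unsigned long *dst = kcalloc(npages, sizeof(*dst), GFP_KERNEL);
	unsigned long i;

	/* Take a reference on, lock and unmap any allocated pages in range. */
	migrate_device_range(src, start_pfn, npages);

	for (i = 0; i < npages; i++) {
		struct page *dpage;

		if (!(src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		/* The device is going away, so the copy must not fail. */
		dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL);
		mydrv_copy_to_ram(mdev, dpage, migrate_pfn_to_page(src[i]));
		dst[i] = migrate_pfn(page_to_pfn(dpage));
	}

	/* Repoint CPU mappings at the copies and free the device pages. */
	migrate_device_pages(src, dst, npages);
	migrate_device_finalize(src, dst, npages);

	kfree(src);
	kfree(dst);
}
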
-2
include/linux/sched.h
··· 870 870 struct mm_struct *mm; 871 871 struct mm_struct *active_mm; 872 872 873 - /* Per-thread vma caching: */ 874 - 875 873 #ifdef SPLIT_RSS_COUNTING 876 874 struct task_rss_stat rss_stat; 877 875 #endif
+104 -25
lib/test_hmm.c
··· 100 100 struct dmirror_chunk { 101 101 struct dev_pagemap pagemap; 102 102 struct dmirror_device *mdevice; 103 + bool remove; 103 104 }; 104 105 105 106 /* ··· 193 192 return 0; 194 193 } 195 194 195 + static struct dmirror_chunk *dmirror_page_to_chunk(struct page *page) 196 + { 197 + return container_of(page->pgmap, struct dmirror_chunk, pagemap); 198 + } 199 + 196 200 static struct dmirror_device *dmirror_page_to_device(struct page *page) 197 201 198 202 { 199 - return container_of(page->pgmap, struct dmirror_chunk, 200 - pagemap)->mdevice; 203 + return dmirror_page_to_chunk(page)->mdevice; 201 204 } 202 205 203 206 static int dmirror_do_fault(struct dmirror *dmirror, struct hmm_range *range) ··· 632 627 goto error; 633 628 } 634 629 630 + zone_device_page_init(dpage); 635 631 dpage->zone_device_data = rpage; 636 - lock_page(dpage); 637 632 return dpage; 638 633 639 634 error: ··· 912 907 struct vm_area_struct *vma; 913 908 unsigned long src_pfns[64] = { 0 }; 914 909 unsigned long dst_pfns[64] = { 0 }; 915 - struct migrate_vma args; 910 + struct migrate_vma args = { 0 }; 916 911 unsigned long next; 917 912 int ret; 918 913 ··· 973 968 unsigned long src_pfns[64] = { 0 }; 974 969 unsigned long dst_pfns[64] = { 0 }; 975 970 struct dmirror_bounce bounce; 976 - struct migrate_vma args; 971 + struct migrate_vma args = { 0 }; 977 972 unsigned long next; 978 973 int ret; 979 974 ··· 1223 1218 return ret; 1224 1219 } 1225 1220 1221 + static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk) 1222 + { 1223 + unsigned long start_pfn = chunk->pagemap.range.start >> PAGE_SHIFT; 1224 + unsigned long end_pfn = chunk->pagemap.range.end >> PAGE_SHIFT; 1225 + unsigned long npages = end_pfn - start_pfn + 1; 1226 + unsigned long i; 1227 + unsigned long *src_pfns; 1228 + unsigned long *dst_pfns; 1229 + 1230 + src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL); 1231 + dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL); 1232 + 1233 + migrate_device_range(src_pfns, start_pfn, npages); 1234 + for (i = 0; i < npages; i++) { 1235 + struct page *dpage, *spage; 1236 + 1237 + spage = migrate_pfn_to_page(src_pfns[i]); 1238 + if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE)) 1239 + continue; 1240 + 1241 + if (WARN_ON(!is_device_private_page(spage) && 1242 + !is_device_coherent_page(spage))) 1243 + continue; 1244 + spage = BACKING_PAGE(spage); 1245 + dpage = alloc_page(GFP_HIGHUSER_MOVABLE | __GFP_NOFAIL); 1246 + lock_page(dpage); 1247 + copy_highpage(dpage, spage); 1248 + dst_pfns[i] = migrate_pfn(page_to_pfn(dpage)); 1249 + if (src_pfns[i] & MIGRATE_PFN_WRITE) 1250 + dst_pfns[i] |= MIGRATE_PFN_WRITE; 1251 + } 1252 + migrate_device_pages(src_pfns, dst_pfns, npages); 1253 + migrate_device_finalize(src_pfns, dst_pfns, npages); 1254 + kfree(src_pfns); 1255 + kfree(dst_pfns); 1256 + } 1257 + 1258 + /* Removes free pages from the free list so they can't be re-allocated */ 1259 + static void dmirror_remove_free_pages(struct dmirror_chunk *devmem) 1260 + { 1261 + struct dmirror_device *mdevice = devmem->mdevice; 1262 + struct page *page; 1263 + 1264 + for (page = mdevice->free_pages; page; page = page->zone_device_data) 1265 + if (dmirror_page_to_chunk(page) == devmem) 1266 + mdevice->free_pages = page->zone_device_data; 1267 + } 1268 + 1269 + static void dmirror_device_remove_chunks(struct dmirror_device *mdevice) 1270 + { 1271 + unsigned int i; 1272 + 1273 + mutex_lock(&mdevice->devmem_lock); 1274 + if (mdevice->devmem_chunks) { 1275 + for (i = 0; i < mdevice->devmem_count; i++) { 1276 + struct 
dmirror_chunk *devmem = 1277 + mdevice->devmem_chunks[i]; 1278 + 1279 + spin_lock(&mdevice->lock); 1280 + devmem->remove = true; 1281 + dmirror_remove_free_pages(devmem); 1282 + spin_unlock(&mdevice->lock); 1283 + 1284 + dmirror_device_evict_chunk(devmem); 1285 + memunmap_pages(&devmem->pagemap); 1286 + if (devmem->pagemap.type == MEMORY_DEVICE_PRIVATE) 1287 + release_mem_region(devmem->pagemap.range.start, 1288 + range_len(&devmem->pagemap.range)); 1289 + kfree(devmem); 1290 + } 1291 + mdevice->devmem_count = 0; 1292 + mdevice->devmem_capacity = 0; 1293 + mdevice->free_pages = NULL; 1294 + kfree(mdevice->devmem_chunks); 1295 + mdevice->devmem_chunks = NULL; 1296 + } 1297 + mutex_unlock(&mdevice->devmem_lock); 1298 + } 1299 + 1226 1300 static long dmirror_fops_unlocked_ioctl(struct file *filp, 1227 1301 unsigned int command, 1228 1302 unsigned long arg) ··· 1354 1270 1355 1271 case HMM_DMIRROR_SNAPSHOT: 1356 1272 ret = dmirror_snapshot(dmirror, &cmd); 1273 + break; 1274 + 1275 + case HMM_DMIRROR_RELEASE: 1276 + dmirror_device_remove_chunks(dmirror->mdevice); 1277 + ret = 0; 1357 1278 break; 1358 1279 1359 1280 default: ··· 1415 1326 1416 1327 mdevice = dmirror_page_to_device(page); 1417 1328 spin_lock(&mdevice->lock); 1418 - mdevice->cfree++; 1419 - page->zone_device_data = mdevice->free_pages; 1420 - mdevice->free_pages = page; 1329 + 1330 + /* Return page to our allocator if not freeing the chunk */ 1331 + if (!dmirror_page_to_chunk(page)->remove) { 1332 + mdevice->cfree++; 1333 + page->zone_device_data = mdevice->free_pages; 1334 + mdevice->free_pages = page; 1335 + } 1421 1336 spin_unlock(&mdevice->lock); 1422 1337 } 1423 1338 1424 1339 static vm_fault_t dmirror_devmem_fault(struct vm_fault *vmf) 1425 1340 { 1426 - struct migrate_vma args; 1341 + struct migrate_vma args = { 0 }; 1427 1342 unsigned long src_pfns = 0; 1428 1343 unsigned long dst_pfns = 0; 1429 1344 struct page *rpage; ··· 1450 1357 args.dst = &dst_pfns; 1451 1358 args.pgmap_owner = dmirror->mdevice; 1452 1359 args.flags = dmirror_select_device(dmirror); 1360 + args.fault_page = vmf->page; 1453 1361 1454 1362 if (migrate_vma_setup(&args)) 1455 1363 return VM_FAULT_SIGBUS; ··· 1501 1407 1502 1408 static void dmirror_device_remove(struct dmirror_device *mdevice) 1503 1409 { 1504 - unsigned int i; 1505 - 1506 - if (mdevice->devmem_chunks) { 1507 - for (i = 0; i < mdevice->devmem_count; i++) { 1508 - struct dmirror_chunk *devmem = 1509 - mdevice->devmem_chunks[i]; 1510 - 1511 - memunmap_pages(&devmem->pagemap); 1512 - if (devmem->pagemap.type == MEMORY_DEVICE_PRIVATE) 1513 - release_mem_region(devmem->pagemap.range.start, 1514 - range_len(&devmem->pagemap.range)); 1515 - kfree(devmem); 1516 - } 1517 - kfree(mdevice->devmem_chunks); 1518 - } 1519 - 1410 + dmirror_device_remove_chunks(mdevice); 1520 1411 cdev_device_del(&mdevice->cdevice, &mdevice->device); 1521 1412 } 1522 1413
+1
lib/test_hmm_uapi.h
··· 36 36 #define HMM_DMIRROR_SNAPSHOT _IOWR('H', 0x04, struct hmm_dmirror_cmd) 37 37 #define HMM_DMIRROR_EXCLUSIVE _IOWR('H', 0x05, struct hmm_dmirror_cmd) 38 38 #define HMM_DMIRROR_CHECK_EXCLUSIVE _IOWR('H', 0x06, struct hmm_dmirror_cmd) 39 + #define HMM_DMIRROR_RELEASE _IOWR('H', 0x07, struct hmm_dmirror_cmd) 39 40 40 41 /* 41 42 * Values returned in hmm_dmirror_cmd.ptr for HMM_DMIRROR_SNAPSHOT.
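
For completeness, a hedged guess at how the accompanying selftest ("hmm-tests: add test for migrate_device_range()" in the commit list) drives this from userspace; the surrounding setup (opening the hmm-dmirror device, faulting the buffer into device memory first) is assumed and not shown here.

	struct hmm_dmirror_cmd cmd = {
		.addr	= (__u64)(uintptr_t)buf,	/* buffer already migrated to device memory */
		.ptr	= (__u64)(uintptr_t)buf,
		.npages	= npages,
	};

	/*
	 * Ask the test driver to tear down its chunks; the kernel must
	 * migrate any allocated device private pages back to system
	 * memory via migrate_device_range() before the memory goes away.
	 */
	if (ioctl(fd, HMM_DMIRROR_RELEASE, &cmd) == -1)
		err(1, "HMM_DMIRROR_RELEASE");
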
+21
lib/test_meminit.c
··· 67 67 size_t size = PAGE_SIZE << order; 68 68 69 69 page = alloc_pages(GFP_KERNEL, order); 70 + if (!page) 71 + goto err; 70 72 buf = page_address(page); 71 73 fill_with_garbage(buf, size); 72 74 __free_pages(page, order); 73 75 74 76 page = alloc_pages(GFP_KERNEL, order); 77 + if (!page) 78 + goto err; 75 79 buf = page_address(page); 76 80 if (count_nonzero_bytes(buf, size)) 77 81 (*total_failures)++; 78 82 fill_with_garbage(buf, size); 79 83 __free_pages(page, order); 84 + return 1; 85 + err: 86 + (*total_failures)++; 80 87 return 1; 81 88 } 82 89 ··· 107 100 void *buf; 108 101 109 102 buf = kmalloc(size, GFP_KERNEL); 103 + if (!buf) 104 + goto err; 110 105 fill_with_garbage(buf, size); 111 106 kfree(buf); 112 107 113 108 buf = kmalloc(size, GFP_KERNEL); 109 + if (!buf) 110 + goto err; 114 111 if (count_nonzero_bytes(buf, size)) 115 112 (*total_failures)++; 116 113 fill_with_garbage(buf, size); 117 114 kfree(buf); 115 + return 1; 116 + err: 117 + (*total_failures)++; 118 118 return 1; 119 119 } 120 120 ··· 131 117 void *buf; 132 118 133 119 buf = vmalloc(size); 120 + if (!buf) 121 + goto err; 134 122 fill_with_garbage(buf, size); 135 123 vfree(buf); 136 124 137 125 buf = vmalloc(size); 126 + if (!buf) 127 + goto err; 138 128 if (count_nonzero_bytes(buf, size)) 139 129 (*total_failures)++; 140 130 fill_with_garbage(buf, size); 141 131 vfree(buf); 132 + return 1; 133 + err: 134 + (*total_failures)++; 142 135 return 1; 143 136 } 144 137
-1
mm/compaction.c
··· 1847 1847 pfn = cc->zone->zone_start_pfn; 1848 1848 cc->fast_search_fail = 0; 1849 1849 found_block = true; 1850 - set_pageblock_skip(freepage); 1851 1850 break; 1852 1851 } 1853 1852 }
+10 -16
mm/damon/core.c
··· 491 491 492 492 damon_for_each_target(t, ctx) { 493 493 damon_for_each_region(r, t) 494 - sz += r->ar.end - r->ar.start; 494 + sz += damon_sz_region(r); 495 495 } 496 496 497 497 if (ctx->attrs.min_nr_regions) ··· 674 674 { 675 675 unsigned long sz; 676 676 677 - sz = r->ar.end - r->ar.start; 677 + sz = damon_sz_region(r); 678 678 return s->pattern.min_sz_region <= sz && 679 679 sz <= s->pattern.max_sz_region && 680 680 s->pattern.min_nr_accesses <= r->nr_accesses && ··· 702 702 703 703 damon_for_each_scheme(s, c) { 704 704 struct damos_quota *quota = &s->quota; 705 - unsigned long sz = r->ar.end - r->ar.start; 705 + unsigned long sz = damon_sz_region(r); 706 706 struct timespec64 begin, end; 707 707 unsigned long sz_applied = 0; 708 708 ··· 731 731 sz = ALIGN_DOWN(quota->charge_addr_from - 732 732 r->ar.start, DAMON_MIN_REGION); 733 733 if (!sz) { 734 - if (r->ar.end - r->ar.start <= 735 - DAMON_MIN_REGION) 734 + if (damon_sz_region(r) <= 735 + DAMON_MIN_REGION) 736 736 continue; 737 737 sz = DAMON_MIN_REGION; 738 738 } 739 739 damon_split_region_at(t, r, sz); 740 740 r = damon_next_region(r); 741 - sz = r->ar.end - r->ar.start; 741 + sz = damon_sz_region(r); 742 742 } 743 743 quota->charge_target_from = NULL; 744 744 quota->charge_addr_from = 0; ··· 843 843 continue; 844 844 score = c->ops.get_scheme_score( 845 845 c, t, r, s); 846 - quota->histogram[score] += 847 - r->ar.end - r->ar.start; 846 + quota->histogram[score] += damon_sz_region(r); 848 847 if (score > max_score) 849 848 max_score = score; 850 849 } ··· 864 865 } 865 866 } 866 867 867 - static inline unsigned long sz_damon_region(struct damon_region *r) 868 - { 869 - return r->ar.end - r->ar.start; 870 - } 871 - 872 868 /* 873 869 * Merge two adjacent regions into one region 874 870 */ 875 871 static void damon_merge_two_regions(struct damon_target *t, 876 872 struct damon_region *l, struct damon_region *r) 877 873 { 878 - unsigned long sz_l = sz_damon_region(l), sz_r = sz_damon_region(r); 874 + unsigned long sz_l = damon_sz_region(l), sz_r = damon_sz_region(r); 879 875 880 876 l->nr_accesses = (l->nr_accesses * sz_l + r->nr_accesses * sz_r) / 881 877 (sz_l + sz_r); ··· 899 905 900 906 if (prev && prev->ar.end == r->ar.start && 901 907 abs(prev->nr_accesses - r->nr_accesses) <= thres && 902 - sz_damon_region(prev) + sz_damon_region(r) <= sz_limit) 908 + damon_sz_region(prev) + damon_sz_region(r) <= sz_limit) 903 909 damon_merge_two_regions(t, prev, r); 904 910 else 905 911 prev = r; ··· 957 963 int i; 958 964 959 965 damon_for_each_region_safe(r, next, t) { 960 - sz_region = r->ar.end - r->ar.start; 966 + sz_region = damon_sz_region(r); 961 967 962 968 for (i = 0; i < nr_subs - 1 && 963 969 sz_region > 2 * DAMON_MIN_REGION; i++) {
+2 -2
mm/damon/vaddr.c
··· 72 72 return -EINVAL; 73 73 74 74 orig_end = r->ar.end; 75 - sz_orig = r->ar.end - r->ar.start; 75 + sz_orig = damon_sz_region(r); 76 76 sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION); 77 77 78 78 if (!sz_piece) ··· 618 618 { 619 619 struct mm_struct *mm; 620 620 unsigned long start = PAGE_ALIGN(r->ar.start); 621 - unsigned long len = PAGE_ALIGN(r->ar.end - r->ar.start); 621 + unsigned long len = PAGE_ALIGN(damon_sz_region(r)); 622 622 unsigned long applied; 623 623 624 624 mm = damon_get_mm(target);
+31 -12
mm/highmem.c
··· 30 30 #include <asm/tlbflush.h> 31 31 #include <linux/vmalloc.h> 32 32 33 + #ifdef CONFIG_KMAP_LOCAL 34 + static inline int kmap_local_calc_idx(int idx) 35 + { 36 + return idx + KM_MAX_IDX * smp_processor_id(); 37 + } 38 + 39 + #ifndef arch_kmap_local_map_idx 40 + #define arch_kmap_local_map_idx(idx, pfn) kmap_local_calc_idx(idx) 41 + #endif 42 + #endif /* CONFIG_KMAP_LOCAL */ 43 + 33 44 /* 34 45 * Virtual_count is not a pure "count". 35 46 * 0 means that it is not mapped, and has not been mapped ··· 153 142 154 143 struct page *__kmap_to_page(void *vaddr) 155 144 { 145 + unsigned long base = (unsigned long) vaddr & PAGE_MASK; 146 + struct kmap_ctrl *kctrl = &current->kmap_ctrl; 156 147 unsigned long addr = (unsigned long)vaddr; 148 + int i; 157 149 158 - if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) { 159 - int i = PKMAP_NR(addr); 150 + /* kmap() mappings */ 151 + if (WARN_ON_ONCE(addr >= PKMAP_ADDR(0) && 152 + addr < PKMAP_ADDR(LAST_PKMAP))) 153 + return pte_page(pkmap_page_table[PKMAP_NR(addr)]); 160 154 161 - return pte_page(pkmap_page_table[i]); 155 + /* kmap_local_page() mappings */ 156 + if (WARN_ON_ONCE(base >= __fix_to_virt(FIX_KMAP_END) && 157 + base < __fix_to_virt(FIX_KMAP_BEGIN))) { 158 + for (i = 0; i < kctrl->idx; i++) { 159 + unsigned long base_addr; 160 + int idx; 161 + 162 + idx = arch_kmap_local_map_idx(i, pte_pfn(pteval)); 163 + base_addr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 164 + 165 + if (base_addr == base) 166 + return pte_page(kctrl->pteval[i]); 167 + } 162 168 } 163 169 164 170 return virt_to_page(vaddr); ··· 490 462 # define arch_kmap_local_post_unmap(vaddr) do { } while (0) 491 463 #endif 492 464 493 - #ifndef arch_kmap_local_map_idx 494 - #define arch_kmap_local_map_idx(idx, pfn) kmap_local_calc_idx(idx) 495 - #endif 496 - 497 465 #ifndef arch_kmap_local_unmap_idx 498 466 #define arch_kmap_local_unmap_idx(idx, vaddr) kmap_local_calc_idx(idx) 499 467 #endif ··· 516 492 } 517 493 #endif 518 494 return false; 519 - } 520 - 521 - static inline int kmap_local_calc_idx(int idx) 522 - { 523 - return idx + KM_MAX_IDX * smp_processor_id(); 524 495 } 525 496 526 497 static pte_t *__kmap_pte;
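
The practical effect of the __kmap_to_page() change: an address obtained from kmap_local_page() can now be mapped back to its struct page, where previously only kmap() (PKMAP) addresses were recognised and everything else fell through to virt_to_page(), which is wrong for fixmap addresses on HIGHMEM kernels. A trivial illustrative helper, not taken from the patch:

/* Round-trip a highmem page through a local kmap and back. */
static struct page *roundtrip_local_kmap(struct page *page)
{
	void *vaddr = kmap_local_page(page);
	struct page *p = kmap_to_page(vaddr);	/* now returns 'page' again */

	kunmap_local(vaddr);
	return p;
}
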
+60 -12
mm/hugetlb.c
··· 5096 5096 * unmapped and its refcount is dropped, so just clear pte here. 5097 5097 */ 5098 5098 if (unlikely(!pte_present(pte))) { 5099 + #ifdef CONFIG_PTE_MARKER_UFFD_WP 5099 5100 /* 5100 5101 * If the pte was wr-protected by uffd-wp in any of the 5101 5102 * swap forms, meanwhile the caller does not want to ··· 5108 5107 set_huge_pte_at(mm, address, ptep, 5109 5108 make_pte_marker(PTE_MARKER_UFFD_WP)); 5110 5109 else 5110 + #endif 5111 5111 huge_pte_clear(mm, address, ptep, sz); 5112 5112 spin_unlock(ptl); 5113 5113 continue; ··· 5137 5135 tlb_remove_huge_tlb_entry(h, tlb, ptep, address); 5138 5136 if (huge_pte_dirty(pte)) 5139 5137 set_page_dirty(page); 5138 + #ifdef CONFIG_PTE_MARKER_UFFD_WP 5140 5139 /* Leave a uffd-wp pte marker if needed */ 5141 5140 if (huge_pte_uffd_wp(pte) && 5142 5141 !(zap_flags & ZAP_FLAG_DROP_MARKER)) 5143 5142 set_huge_pte_at(mm, address, ptep, 5144 5143 make_pte_marker(PTE_MARKER_UFFD_WP)); 5144 + #endif 5145 5145 hugetlb_count_sub(pages_per_huge_page(h), mm); 5146 5146 page_remove_rmap(page, vma, true); 5147 5147 ··· 5535 5531 return handle_userfault(&vmf, reason); 5536 5532 } 5537 5533 5534 + /* 5535 + * Recheck pte with pgtable lock. Returns true if pte didn't change, or 5536 + * false if pte changed or is changing. 5537 + */ 5538 + static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, 5539 + pte_t *ptep, pte_t old_pte) 5540 + { 5541 + spinlock_t *ptl; 5542 + bool same; 5543 + 5544 + ptl = huge_pte_lock(h, mm, ptep); 5545 + same = pte_same(huge_ptep_get(ptep), old_pte); 5546 + spin_unlock(ptl); 5547 + 5548 + return same; 5549 + } 5550 + 5538 5551 static vm_fault_t hugetlb_no_page(struct mm_struct *mm, 5539 5552 struct vm_area_struct *vma, 5540 5553 struct address_space *mapping, pgoff_t idx, ··· 5592 5571 if (idx >= size) 5593 5572 goto out; 5594 5573 /* Check for page in userfault range */ 5595 - if (userfaultfd_missing(vma)) 5596 - return hugetlb_handle_userfault(vma, mapping, idx, 5597 - flags, haddr, address, 5598 - VM_UFFD_MISSING); 5574 + if (userfaultfd_missing(vma)) { 5575 + /* 5576 + * Since hugetlb_no_page() was examining pte 5577 + * without pgtable lock, we need to re-test under 5578 + * lock because the pte may not be stable and could 5579 + * have changed from under us. Try to detect 5580 + * either changed or during-changing ptes and retry 5581 + * properly when needed. 5582 + * 5583 + * Note that userfaultfd is actually fine with 5584 + * false positives (e.g. caused by pte changed), 5585 + * but not wrong logical events (e.g. caused by 5586 + * reading a pte during changing). The latter can 5587 + * confuse the userspace, so the strictness is very 5588 + * much preferred. E.g., MISSING event should 5589 + * never happen on the page after UFFDIO_COPY has 5590 + * correctly installed the page and returned. 5591 + */ 5592 + if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) { 5593 + ret = 0; 5594 + goto out; 5595 + } 5596 + 5597 + return hugetlb_handle_userfault(vma, mapping, idx, flags, 5598 + haddr, address, 5599 + VM_UFFD_MISSING); 5600 + } 5599 5601 5600 5602 page = alloc_huge_page(vma, haddr, 0); 5601 5603 if (IS_ERR(page)) { ··· 5634 5590 * here. Before returning error, get ptl and make 5635 5591 * sure there really is no pte entry. 
5636 5592 */ 5637 - ptl = huge_pte_lock(h, mm, ptep); 5638 - ret = 0; 5639 - if (huge_pte_none(huge_ptep_get(ptep))) 5593 + if (hugetlb_pte_stable(h, mm, ptep, old_pte)) 5640 5594 ret = vmf_error(PTR_ERR(page)); 5641 - spin_unlock(ptl); 5595 + else 5596 + ret = 0; 5642 5597 goto out; 5643 5598 } 5644 5599 clear_huge_page(page, address, pages_per_huge_page(h)); ··· 5683 5640 if (userfaultfd_minor(vma)) { 5684 5641 unlock_page(page); 5685 5642 put_page(page); 5686 - return hugetlb_handle_userfault(vma, mapping, idx, 5687 - flags, haddr, address, 5688 - VM_UFFD_MINOR); 5643 + /* See comment in userfaultfd_missing() block above */ 5644 + if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) { 5645 + ret = 0; 5646 + goto out; 5647 + } 5648 + return hugetlb_handle_userfault(vma, mapping, idx, flags, 5649 + haddr, address, 5650 + VM_UFFD_MINOR); 5689 5651 } 5690 5652 } 5691 5653 ··· 6852 6804 kfree(vma_lock); 6853 6805 } 6854 6806 6855 - void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock) 6807 + static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock) 6856 6808 { 6857 6809 struct vm_area_struct *vma = vma_lock->vma; 6858 6810
+8 -1
mm/kasan/kasan_test.c
··· 295 295 ptr2 = krealloc(ptr1, size2, GFP_KERNEL); 296 296 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2); 297 297 298 + /* Suppress -Warray-bounds warnings. */ 299 + OPTIMIZER_HIDE_VAR(ptr2); 300 + 298 301 /* All offsets up to size2 must be accessible. */ 299 302 ptr2[size1 - 1] = 'x'; 300 303 ptr2[size1] = 'x'; ··· 329 326 330 327 ptr2 = krealloc(ptr1, size2, GFP_KERNEL); 331 328 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2); 329 + 330 + /* Suppress -Warray-bounds warnings. */ 331 + OPTIMIZER_HIDE_VAR(ptr2); 332 332 333 333 /* Must be accessible for all modes. */ 334 334 ptr2[size2 - 1] = 'x'; ··· 546 540 { 547 541 char *ptr; 548 542 size_t size = 64; 549 - volatile size_t invalid_size = size; 543 + size_t invalid_size = size; 550 544 551 545 ptr = kmalloc(size, GFP_KERNEL); 552 546 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 553 547 554 548 memset((char *)ptr, 0, 64); 555 549 OPTIMIZER_HIDE_VAR(ptr); 550 + OPTIMIZER_HIDE_VAR(invalid_size); 556 551 KUNIT_EXPECT_KASAN_FAIL(test, 557 552 memmove((char *)ptr, (char *)ptr + 4, invalid_size)); 558 553 kfree(ptr);
+18 -2
mm/memory.c
··· 1393 1393 unsigned long addr, pte_t *pte, 1394 1394 struct zap_details *details, pte_t pteval) 1395 1395 { 1396 + #ifdef CONFIG_PTE_MARKER_UFFD_WP 1396 1397 if (zap_drop_file_uffd_wp(details)) 1397 1398 return; 1398 1399 1399 1400 pte_install_uffd_wp_if_needed(vma, addr, pte, pteval); 1401 + #endif 1400 1402 } 1401 1403 1402 1404 static unsigned long zap_pte_range(struct mmu_gather *tlb, ··· 3750 3748 ret = remove_device_exclusive_entry(vmf); 3751 3749 } else if (is_device_private_entry(entry)) { 3752 3750 vmf->page = pfn_swap_entry_to_page(entry); 3753 - ret = vmf->page->pgmap->ops->migrate_to_ram(vmf); 3751 + vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, 3752 + vmf->address, &vmf->ptl); 3753 + if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { 3754 + spin_unlock(vmf->ptl); 3755 + goto out; 3756 + } 3757 + 3758 + /* 3759 + * Get a page reference while we know the page can't be 3760 + * freed. 3761 + */ 3762 + get_page(vmf->page); 3763 + pte_unmap_unlock(vmf->pte, vmf->ptl); 3764 + vmf->page->pgmap->ops->migrate_to_ram(vmf); 3765 + put_page(vmf->page); 3754 3766 } else if (is_hwpoison_entry(entry)) { 3755 3767 ret = VM_FAULT_HWPOISON; 3756 3768 } else if (is_swapin_error_entry(entry)) { ··· 4134 4118 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, 4135 4119 &vmf->ptl); 4136 4120 if (!pte_none(*vmf->pte)) { 4137 - update_mmu_cache(vma, vmf->address, vmf->pte); 4121 + update_mmu_tlb(vma, vmf->address, vmf->pte); 4138 4122 goto release; 4139 4123 } 4140 4124
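
On the driver side of the same race fix, migrate_to_ram() now runs with an extra reference that do_swap_page() took on vmf->page, and the callback is expected to pass that page in via the new fault_page field so migrate_vma_setup() and the unmap step account for it. A condensed sketch of such a callback, following the test_hmm and nouveau hunks above; the mydrv_* names (including mydrv_pagemap and mydrv_copy_to_ram()) are placeholders.

static vm_fault_t mydrv_migrate_to_ram(struct vm_fault *vmf)
{
	unsigned long src_pfn = 0, dst_pfn = 0;
	struct migrate_vma args = {
		.vma		= vmf->vma,
		.start		= vmf->address,
		.end		= vmf->address + PAGE_SIZE,
		.src		= &src_pfn,
		.dst		= &dst_pfn,
		.pgmap_owner	= &mydrv_pagemap,
		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
		.fault_page	= vmf->page,	/* new in this series */
	};

	if (migrate_vma_setup(&args))
		return VM_FAULT_SIGBUS;

	/* Copy the faulting page back to a freshly allocated system page. */
	if ((src_pfn & MIGRATE_PFN_MIGRATE) &&
	    mydrv_copy_to_ram(&src_pfn, &dst_pfn))
		return VM_FAULT_SIGBUS;

	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
	return 0;
}
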
+29 -7
mm/memremap.c
··· 138 138 int i; 139 139 140 140 percpu_ref_kill(&pgmap->ref); 141 - for (i = 0; i < pgmap->nr_range; i++) 142 - percpu_ref_put_many(&pgmap->ref, pfn_len(pgmap, i)); 141 + if (pgmap->type != MEMORY_DEVICE_PRIVATE && 142 + pgmap->type != MEMORY_DEVICE_COHERENT) 143 + for (i = 0; i < pgmap->nr_range; i++) 144 + percpu_ref_put_many(&pgmap->ref, pfn_len(pgmap, i)); 145 + 143 146 wait_for_completion(&pgmap->done); 144 147 145 148 for (i = 0; i < pgmap->nr_range; i++) ··· 267 264 memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE], 268 265 PHYS_PFN(range->start), 269 266 PHYS_PFN(range_len(range)), pgmap); 270 - percpu_ref_get_many(&pgmap->ref, pfn_len(pgmap, range_id)); 267 + if (pgmap->type != MEMORY_DEVICE_PRIVATE && 268 + pgmap->type != MEMORY_DEVICE_COHERENT) 269 + percpu_ref_get_many(&pgmap->ref, pfn_len(pgmap, range_id)); 271 270 return 0; 272 271 273 272 err_add_memory: ··· 507 502 page->mapping = NULL; 508 503 page->pgmap->ops->page_free(page); 509 504 510 - /* 511 - * Reset the page count to 1 to prepare for handing out the page again. 512 - */ 513 - set_page_count(page, 1); 505 + if (page->pgmap->type != MEMORY_DEVICE_PRIVATE && 506 + page->pgmap->type != MEMORY_DEVICE_COHERENT) 507 + /* 508 + * Reset the page count to 1 to prepare for handing out the page 509 + * again. 510 + */ 511 + set_page_count(page, 1); 512 + else 513 + put_dev_pagemap(page->pgmap); 514 514 } 515 + 516 + void zone_device_page_init(struct page *page) 517 + { 518 + /* 519 + * Drivers shouldn't be allocating pages after calling 520 + * memunmap_pages(). 521 + */ 522 + WARN_ON_ONCE(!percpu_ref_tryget_live(&page->pgmap->ref)); 523 + set_page_count(page, 1); 524 + lock_page(page); 525 + } 526 + EXPORT_SYMBOL_GPL(zone_device_page_init); 515 527 516 528 #ifdef CONFIG_FS_DAX 517 529 bool __put_devmap_managed_page_refs(struct page *page, int refs)
+20 -14
mm/migrate.c
··· 625 625 * Migration functions 626 626 ***********************************************************/ 627 627 628 + int migrate_folio_extra(struct address_space *mapping, struct folio *dst, 629 + struct folio *src, enum migrate_mode mode, int extra_count) 630 + { 631 + int rc; 632 + 633 + BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */ 634 + 635 + rc = folio_migrate_mapping(mapping, dst, src, extra_count); 636 + 637 + if (rc != MIGRATEPAGE_SUCCESS) 638 + return rc; 639 + 640 + if (mode != MIGRATE_SYNC_NO_COPY) 641 + folio_migrate_copy(dst, src); 642 + else 643 + folio_migrate_flags(dst, src); 644 + return MIGRATEPAGE_SUCCESS; 645 + } 646 + 628 647 /** 629 648 * migrate_folio() - Simple folio migration. 630 649 * @mapping: The address_space containing the folio. ··· 659 640 int migrate_folio(struct address_space *mapping, struct folio *dst, 660 641 struct folio *src, enum migrate_mode mode) 661 642 { 662 - int rc; 663 - 664 - BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */ 665 - 666 - rc = folio_migrate_mapping(mapping, dst, src, 0); 667 - 668 - if (rc != MIGRATEPAGE_SUCCESS) 669 - return rc; 670 - 671 - if (mode != MIGRATE_SYNC_NO_COPY) 672 - folio_migrate_copy(dst, src); 673 - else 674 - folio_migrate_flags(dst, src); 675 - return MIGRATEPAGE_SUCCESS; 643 + return migrate_folio_extra(mapping, dst, src, mode, 0); 676 644 } 677 645 EXPORT_SYMBOL(migrate_folio); 678 646
+172 -69
mm/migrate_device.c
··· 325 325 * folio_migrate_mapping(), except that here we allow migration of a 326 326 * ZONE_DEVICE page. 327 327 */ 328 - static bool migrate_vma_check_page(struct page *page) 328 + static bool migrate_vma_check_page(struct page *page, struct page *fault_page) 329 329 { 330 330 /* 331 331 * One extra ref because caller holds an extra reference, either from 332 332 * isolate_lru_page() for a regular page, or migrate_vma_collect() for 333 333 * a device page. 334 334 */ 335 - int extra = 1; 335 + int extra = 1 + (page == fault_page); 336 336 337 337 /* 338 338 * FIXME support THP (transparent huge page), it is bit more complex to ··· 357 357 } 358 358 359 359 /* 360 - * migrate_vma_unmap() - replace page mapping with special migration pte entry 361 - * @migrate: migrate struct containing all migration information 362 - * 363 - * Isolate pages from the LRU and replace mappings (CPU page table pte) with a 364 - * special migration pte entry and check if it has been pinned. Pinned pages are 365 - * restored because we cannot migrate them. 366 - * 367 - * This is the last step before we call the device driver callback to allocate 368 - * destination memory and copy contents of original page over to new page. 360 + * Unmaps pages for migration. Returns number of unmapped pages. 369 361 */ 370 - static void migrate_vma_unmap(struct migrate_vma *migrate) 362 + static unsigned long migrate_device_unmap(unsigned long *src_pfns, 363 + unsigned long npages, 364 + struct page *fault_page) 371 365 { 372 - const unsigned long npages = migrate->npages; 373 366 unsigned long i, restore = 0; 374 367 bool allow_drain = true; 368 + unsigned long unmapped = 0; 375 369 376 370 lru_add_drain(); 377 371 378 372 for (i = 0; i < npages; i++) { 379 - struct page *page = migrate_pfn_to_page(migrate->src[i]); 373 + struct page *page = migrate_pfn_to_page(src_pfns[i]); 380 374 struct folio *folio; 381 375 382 376 if (!page) ··· 385 391 } 386 392 387 393 if (isolate_lru_page(page)) { 388 - migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 389 - migrate->cpages--; 394 + src_pfns[i] &= ~MIGRATE_PFN_MIGRATE; 390 395 restore++; 391 396 continue; 392 397 } ··· 398 405 if (folio_mapped(folio)) 399 406 try_to_migrate(folio, 0); 400 407 401 - if (page_mapped(page) || !migrate_vma_check_page(page)) { 408 + if (page_mapped(page) || 409 + !migrate_vma_check_page(page, fault_page)) { 402 410 if (!is_zone_device_page(page)) { 403 411 get_page(page); 404 412 putback_lru_page(page); 405 413 } 406 414 407 - migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 408 - migrate->cpages--; 415 + src_pfns[i] &= ~MIGRATE_PFN_MIGRATE; 409 416 restore++; 410 417 continue; 411 418 } 419 + 420 + unmapped++; 412 421 } 413 422 414 423 for (i = 0; i < npages && restore; i++) { 415 - struct page *page = migrate_pfn_to_page(migrate->src[i]); 424 + struct page *page = migrate_pfn_to_page(src_pfns[i]); 416 425 struct folio *folio; 417 426 418 - if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) 427 + if (!page || (src_pfns[i] & MIGRATE_PFN_MIGRATE)) 419 428 continue; 420 429 421 430 folio = page_folio(page); 422 431 remove_migration_ptes(folio, folio, false); 423 432 424 - migrate->src[i] = 0; 433 + src_pfns[i] = 0; 425 434 folio_unlock(folio); 426 435 folio_put(folio); 427 436 restore--; 428 437 } 438 + 439 + return unmapped; 440 + } 441 + 442 + /* 443 + * migrate_vma_unmap() - replace page mapping with special migration pte entry 444 + * @migrate: migrate struct containing all migration information 445 + * 446 + * Isolate pages from the LRU and replace mappings (CPU 
page table pte) with a 447 + * special migration pte entry and check if it has been pinned. Pinned pages are 448 + * restored because we cannot migrate them. 449 + * 450 + * This is the last step before we call the device driver callback to allocate 451 + * destination memory and copy contents of original page over to new page. 452 + */ 453 + static void migrate_vma_unmap(struct migrate_vma *migrate) 454 + { 455 + migrate->cpages = migrate_device_unmap(migrate->src, migrate->npages, 456 + migrate->fault_page); 429 457 } 430 458 431 459 /** ··· 530 516 if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end) 531 517 return -EINVAL; 532 518 if (!args->src || !args->dst) 519 + return -EINVAL; 520 + if (args->fault_page && !is_device_private_page(args->fault_page)) 533 521 return -EINVAL; 534 522 535 523 memset(args->src, 0, sizeof(*args->src) * nr_pages); ··· 693 677 *src &= ~MIGRATE_PFN_MIGRATE; 694 678 } 695 679 696 - /** 697 - * migrate_vma_pages() - migrate meta-data from src page to dst page 698 - * @migrate: migrate struct containing all migration information 699 - * 700 - * This migrates struct page meta-data from source struct page to destination 701 - * struct page. This effectively finishes the migration from source page to the 702 - * destination page. 703 - */ 704 - void migrate_vma_pages(struct migrate_vma *migrate) 680 + static void __migrate_device_pages(unsigned long *src_pfns, 681 + unsigned long *dst_pfns, unsigned long npages, 682 + struct migrate_vma *migrate) 705 683 { 706 - const unsigned long npages = migrate->npages; 707 - const unsigned long start = migrate->start; 708 684 struct mmu_notifier_range range; 709 - unsigned long addr, i; 685 + unsigned long i; 710 686 bool notified = false; 711 687 712 - for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) { 713 - struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); 714 - struct page *page = migrate_pfn_to_page(migrate->src[i]); 688 + for (i = 0; i < npages; i++) { 689 + struct page *newpage = migrate_pfn_to_page(dst_pfns[i]); 690 + struct page *page = migrate_pfn_to_page(src_pfns[i]); 715 691 struct address_space *mapping; 716 692 int r; 717 693 718 694 if (!newpage) { 719 - migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 695 + src_pfns[i] &= ~MIGRATE_PFN_MIGRATE; 720 696 continue; 721 697 } 722 698 723 699 if (!page) { 700 + unsigned long addr; 701 + 702 + if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE)) 703 + continue; 704 + 724 705 /* 725 706 * The only time there is no vma is when called from 726 707 * migrate_device_coherent_page(). However this isn't 727 708 * called if the page could not be unmapped. 728 709 */ 729 - VM_BUG_ON(!migrate->vma); 730 - if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE)) 731 - continue; 710 + VM_BUG_ON(!migrate); 711 + addr = migrate->start + i*PAGE_SIZE; 732 712 if (!notified) { 733 713 notified = true; 734 714 ··· 735 723 mmu_notifier_invalidate_range_start(&range); 736 724 } 737 725 migrate_vma_insert_page(migrate, addr, newpage, 738 - &migrate->src[i]); 726 + &src_pfns[i]); 739 727 continue; 740 728 } 741 729 ··· 748 736 * device private or coherent memory. 749 737 */ 750 738 if (mapping) { 751 - migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 739 + src_pfns[i] &= ~MIGRATE_PFN_MIGRATE; 752 740 continue; 753 741 } 754 742 } else if (is_zone_device_page(newpage)) { 755 743 /* 756 744 * Other types of ZONE_DEVICE page are not supported. 
757 745 */ 758 - migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 746 + src_pfns[i] &= ~MIGRATE_PFN_MIGRATE; 759 747 continue; 760 748 } 761 749 762 - r = migrate_folio(mapping, page_folio(newpage), 763 - page_folio(page), MIGRATE_SYNC_NO_COPY); 750 + if (migrate && migrate->fault_page == page) 751 + r = migrate_folio_extra(mapping, page_folio(newpage), 752 + page_folio(page), 753 + MIGRATE_SYNC_NO_COPY, 1); 754 + else 755 + r = migrate_folio(mapping, page_folio(newpage), 756 + page_folio(page), MIGRATE_SYNC_NO_COPY); 764 757 if (r != MIGRATEPAGE_SUCCESS) 765 - migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; 758 + src_pfns[i] &= ~MIGRATE_PFN_MIGRATE; 766 759 } 767 760 768 761 /* ··· 778 761 if (notified) 779 762 mmu_notifier_invalidate_range_only_end(&range); 780 763 } 781 - EXPORT_SYMBOL(migrate_vma_pages); 782 764 783 765 /** 784 - * migrate_vma_finalize() - restore CPU page table entry 766 + * migrate_device_pages() - migrate meta-data from src page to dst page 767 + * @src_pfns: src_pfns returned from migrate_device_range() 768 + * @dst_pfns: array of pfns allocated by the driver to migrate memory to 769 + * @npages: number of pages in the range 770 + * 771 + * Equivalent to migrate_vma_pages(). This is called to migrate struct page 772 + * meta-data from source struct page to destination. 773 + */ 774 + void migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns, 775 + unsigned long npages) 776 + { 777 + __migrate_device_pages(src_pfns, dst_pfns, npages, NULL); 778 + } 779 + EXPORT_SYMBOL(migrate_device_pages); 780 + 781 + /** 782 + * migrate_vma_pages() - migrate meta-data from src page to dst page 785 783 * @migrate: migrate struct containing all migration information 786 784 * 787 - * This replaces the special migration pte entry with either a mapping to the 788 - * new page if migration was successful for that page, or to the original page 789 - * otherwise. 790 - * 791 - * This also unlocks the pages and puts them back on the lru, or drops the extra 792 - * refcount, for device pages. 785 + * This migrates struct page meta-data from source struct page to destination 786 + * struct page. This effectively finishes the migration from source page to the 787 + * destination page. 793 788 */ 794 - void migrate_vma_finalize(struct migrate_vma *migrate) 789 + void migrate_vma_pages(struct migrate_vma *migrate) 795 790 { 796 - const unsigned long npages = migrate->npages; 791 + __migrate_device_pages(migrate->src, migrate->dst, migrate->npages, migrate); 792 + } 793 + EXPORT_SYMBOL(migrate_vma_pages); 794 + 795 + /* 796 + * migrate_device_finalize() - complete page migration 797 + * @src_pfns: src_pfns returned from migrate_device_range() 798 + * @dst_pfns: array of pfns allocated by the driver to migrate memory to 799 + * @npages: number of pages in the range 800 + * 801 + * Completes migration of the page by removing special migration entries. 802 + * Drivers must ensure copying of page data is complete and visible to the CPU 803 + * before calling this. 
804 + */ 805 + void migrate_device_finalize(unsigned long *src_pfns, 806 + unsigned long *dst_pfns, unsigned long npages) 807 + { 797 808 unsigned long i; 798 809 799 810 for (i = 0; i < npages; i++) { 800 811 struct folio *dst, *src; 801 - struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); 802 - struct page *page = migrate_pfn_to_page(migrate->src[i]); 812 + struct page *newpage = migrate_pfn_to_page(dst_pfns[i]); 813 + struct page *page = migrate_pfn_to_page(src_pfns[i]); 803 814 804 815 if (!page) { 805 816 if (newpage) { ··· 837 792 continue; 838 793 } 839 794 840 - if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) { 795 + if (!(src_pfns[i] & MIGRATE_PFN_MIGRATE) || !newpage) { 841 796 if (newpage) { 842 797 unlock_page(newpage); 843 798 put_page(newpage); ··· 864 819 } 865 820 } 866 821 } 822 + EXPORT_SYMBOL(migrate_device_finalize); 823 + 824 + /** 825 + * migrate_vma_finalize() - restore CPU page table entry 826 + * @migrate: migrate struct containing all migration information 827 + * 828 + * This replaces the special migration pte entry with either a mapping to the 829 + * new page if migration was successful for that page, or to the original page 830 + * otherwise. 831 + * 832 + * This also unlocks the pages and puts them back on the lru, or drops the extra 833 + * refcount, for device pages. 834 + */ 835 + void migrate_vma_finalize(struct migrate_vma *migrate) 836 + { 837 + migrate_device_finalize(migrate->src, migrate->dst, migrate->npages); 838 + } 867 839 EXPORT_SYMBOL(migrate_vma_finalize); 840 + 841 + /** 842 + * migrate_device_range() - migrate device private pfns to normal memory. 843 + * @src_pfns: array large enough to hold migrating source device private pfns. 844 + * @start: starting pfn in the range to migrate. 845 + * @npages: number of pages to migrate. 846 + * 847 + * migrate_vma_setup() is similar in concept to migrate_vma_setup() except that 848 + * instead of looking up pages based on virtual address mappings a range of 849 + * device pfns that should be migrated to system memory is used instead. 850 + * 851 + * This is useful when a driver needs to free device memory but doesn't know the 852 + * virtual mappings of every page that may be in device memory. For example this 853 + * is often the case when a driver is being unloaded or unbound from a device. 854 + * 855 + * Like migrate_vma_setup() this function will take a reference and lock any 856 + * migrating pages that aren't free before unmapping them. Drivers may then 857 + * allocate destination pages and start copying data from the device to CPU 858 + * memory before calling migrate_device_pages(). 859 + */ 860 + int migrate_device_range(unsigned long *src_pfns, unsigned long start, 861 + unsigned long npages) 862 + { 863 + unsigned long i, pfn; 864 + 865 + for (pfn = start, i = 0; i < npages; pfn++, i++) { 866 + struct page *page = pfn_to_page(pfn); 867 + 868 + if (!get_page_unless_zero(page)) { 869 + src_pfns[i] = 0; 870 + continue; 871 + } 872 + 873 + if (!trylock_page(page)) { 874 + src_pfns[i] = 0; 875 + put_page(page); 876 + continue; 877 + } 878 + 879 + src_pfns[i] = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE; 880 + } 881 + 882 + migrate_device_unmap(src_pfns, npages, NULL); 883 + 884 + return 0; 885 + } 886 + EXPORT_SYMBOL(migrate_device_range); 868 887 869 888 /* 870 889 * Migrate a device coherent page back to normal memory. 
The caller should have ··· 938 829 int migrate_device_coherent_page(struct page *page) 939 830 { 940 831 unsigned long src_pfn, dst_pfn = 0; 941 - struct migrate_vma args; 942 832 struct page *dpage; 943 833 944 834 WARN_ON_ONCE(PageCompound(page)); 945 835 946 836 lock_page(page); 947 837 src_pfn = migrate_pfn(page_to_pfn(page)) | MIGRATE_PFN_MIGRATE; 948 - args.src = &src_pfn; 949 - args.dst = &dst_pfn; 950 - args.cpages = 1; 951 - args.npages = 1; 952 - args.vma = NULL; 953 838 954 839 /* 955 840 * We don't have a VMA and don't need to walk the page tables to find 956 841 * the source page. So call migrate_vma_unmap() directly to unmap the 957 842 * page as migrate_vma_setup() will fail if args.vma == NULL. 958 843 */ 959 - migrate_vma_unmap(&args); 844 + migrate_device_unmap(&src_pfn, 1, NULL); 960 845 if (!(src_pfn & MIGRATE_PFN_MIGRATE)) 961 846 return -EBUSY; 962 847 ··· 960 857 dst_pfn = migrate_pfn(page_to_pfn(dpage)); 961 858 } 962 859 963 - migrate_vma_pages(&args); 860 + migrate_device_pages(&src_pfn, &dst_pfn, 1); 964 861 if (src_pfn & MIGRATE_PFN_MIGRATE) 965 862 copy_highpage(dpage, page); 966 - migrate_vma_finalize(&args); 863 + migrate_device_finalize(&src_pfn, &dst_pfn, 1); 967 864 968 865 if (src_pfn & MIGRATE_PFN_MIGRATE) 969 866 return 0;
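The migrate_device.c rework above factors the migrate_vma_*() internals into pfn-array helpers so a driver can evict device private memory without knowing its virtual mappings, for example when it is being unloaded or unbound. Below is a minimal sketch of a release path chaining migrate_device_range(), migrate_device_pages() and migrate_device_finalize(); it follows the pattern of the nouveau/dmem and lib/test_hmm eviction code added in this series, but example_evict_device_range(), the allocation flags and the plain CPU copy are illustrative assumptions, not code from the merge.

#include <linux/highmem.h>
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Illustrative sketch only: evict one range of device private pfns. */
static void example_evict_device_range(unsigned long start_pfn,
				       unsigned long npages)
{
	unsigned long *src_pfns, *dst_pfns;
	unsigned long i;

	src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
	dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
	if (!src_pfns || !dst_pfns)
		goto out;

	/* Reference, lock and unmap any device pages still in use. */
	if (migrate_device_range(src_pfns, start_pfn, npages))
		goto out;

	for (i = 0; i < npages; i++) {
		struct page *spage, *dpage;

		spage = migrate_pfn_to_page(src_pfns[i]);
		if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE))
			continue;

		/*
		 * The device memory is about to disappear, so the
		 * destination allocation must not fail; a real driver
		 * would typically copy with its DMA engine instead.
		 */
		dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL);
		lock_page(dpage);
		copy_highpage(dpage, spage);
		dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
	}

	/* Move struct page metadata, then drop the locks and references. */
	migrate_device_pages(src_pfns, dst_pfns, npages);
	migrate_device_finalize(src_pfns, dst_pfns, npages);
out:
	kvfree(dst_pfns);
	kvfree(src_pfns);
}

Entries whose MIGRATE_PFN_MIGRATE bit was cleared (or never set) are simply skipped; migrate_device_finalize() unlocks and releases the corresponding source pages on the driver's behalf.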
+15 -13
mm/mmap.c
··· 2673 2673 if (!arch_validate_flags(vma->vm_flags)) { 2674 2674 error = -EINVAL; 2675 2675 if (file) 2676 - goto unmap_and_free_vma; 2676 + goto close_and_free_vma; 2677 2677 else 2678 2678 goto free_vma; 2679 2679 } ··· 2742 2742 validate_mm(mm); 2743 2743 return addr; 2744 2744 2745 + close_and_free_vma: 2746 + if (vma->vm_ops && vma->vm_ops->close) 2747 + vma->vm_ops->close(vma); 2745 2748 unmap_and_free_vma: 2746 2749 fput(vma->vm_file); 2747 2750 vma->vm_file = NULL; ··· 2945 2942 if (vma && 2946 2943 (!vma->anon_vma || list_is_singular(&vma->anon_vma_chain)) && 2947 2944 ((vma->vm_flags & ~VM_SOFTDIRTY) == flags)) { 2948 - mas->index = vma->vm_start; 2949 - mas->last = addr + len - 1; 2950 - vma_adjust_trans_huge(vma, addr, addr + len, 0); 2945 + mas_set_range(mas, vma->vm_start, addr + len - 1); 2946 + if (mas_preallocate(mas, vma, GFP_KERNEL)) 2947 + return -ENOMEM; 2948 + 2949 + vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0); 2951 2950 if (vma->anon_vma) { 2952 2951 anon_vma_lock_write(vma->anon_vma); 2953 2952 anon_vma_interval_tree_pre_update_vma(vma); 2954 2953 } 2955 2954 vma->vm_end = addr + len; 2956 2955 vma->vm_flags |= VM_SOFTDIRTY; 2957 - if (mas_store_gfp(mas, vma, GFP_KERNEL)) 2958 - goto mas_expand_failed; 2956 + mas_store_prealloc(mas, vma); 2959 2957 2960 2958 if (vma->anon_vma) { 2961 2959 anon_vma_interval_tree_post_update_vma(vma); ··· 2996 2992 vm_area_free(vma); 2997 2993 vma_alloc_fail: 2998 2994 vm_unacct_memory(len >> PAGE_SHIFT); 2999 - return -ENOMEM; 3000 - 3001 - mas_expand_failed: 3002 - if (vma->anon_vma) { 3003 - anon_vma_interval_tree_post_update_vma(vma); 3004 - anon_vma_unlock_write(vma->anon_vma); 3005 - } 3006 2995 return -ENOMEM; 3007 2996 } 3008 2997 ··· 3237 3240 out_vma_link: 3238 3241 if (new_vma->vm_ops && new_vma->vm_ops->close) 3239 3242 new_vma->vm_ops->close(new_vma); 3243 + 3244 + if (new_vma->vm_file) 3245 + fput(new_vma->vm_file); 3246 + 3247 + unlink_anon_vmas(new_vma); 3240 3248 out_free_mempol: 3241 3249 mpol_put(vma_policy(new_vma)); 3242 3250 out_free_vma:
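The brk() expansion hunk above replaces mas_store_gfp() with mas_preallocate() followed by mas_store_prealloc(), so the maple tree nodes are reserved while failure is still recoverable and the old mas_expand_failed rollback can be removed. A minimal sketch of that preallocate-then-store pattern, assuming a caller that already holds the relevant mmap and anon_vma locks (example_grow_vma() is a hypothetical helper, not part of the patch):

#include <linux/maple_tree.h>
#include <linux/mm.h>

/* Illustrative sketch only: extend a VMA with no mid-update failure point. */
static int example_grow_vma(struct ma_state *mas, struct vm_area_struct *vma,
			    unsigned long new_end)
{
	mas_set_range(mas, vma->vm_start, new_end - 1);

	/* The only step that can fail, before any VMA state is touched. */
	if (mas_preallocate(mas, vma, GFP_KERNEL))
		return -ENOMEM;

	vma->vm_end = new_end;		/* mutate the VMA ... */
	mas_store_prealloc(mas, vma);	/* ... then store without allocating */
	return 0;
}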
+10
mm/mmu_gather.c
··· 1 1 #include <linux/gfp.h> 2 2 #include <linux/highmem.h> 3 3 #include <linux/kernel.h> 4 + #include <linux/kmsan-checks.h> 4 5 #include <linux/mmdebug.h> 5 6 #include <linux/mm_types.h> 6 7 #include <linux/mm_inline.h> ··· 266 265 static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, 267 266 bool fullmm) 268 267 { 268 + /* 269 + * struct mmu_gather contains 7 1-bit fields packed into a 32-bit 270 + * unsigned int value. The remaining 25 bits remain uninitialized 271 + * and are never used, but KMSAN updates the origin for them in 272 + * zap_pXX_range() in mm/memory.c, thus creating very long origin 273 + * chains. This is technically correct, but consumes too much memory. 274 + * Unpoisoning the whole structure will prevent creating such chains. 275 + */ 276 + kmsan_unpoison_memory(tlb, sizeof(*tlb)); 269 277 tlb->mm = mm; 270 278 tlb->fullmm = fullmm; 271 279
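kmsan_unpoison_memory() marks a range as initialized for KMSAN, which is why unpoisoning the whole mmu_gather stops the tool from building origin chains for the 25 bits that are never read. The same helper (declared in <linux/kmsan-checks.h> and a no-op on !CONFIG_KMSAN builds) suits any structure whose spare bitfield bits or padding are deliberately left uninitialized; a small sketch with a made-up example_flags structure:

#include <linux/kmsan-checks.h>

/* Illustrative only: two used bits, the rest of the word never read. */
struct example_flags {
	unsigned int dirty : 1;
	unsigned int young : 1;
};

static void example_flags_init(struct example_flags *f)
{
	/*
	 * Tell KMSAN the whole word is initialized so it does not track
	 * origins for the unused bits touched by the read-modify-write
	 * stores into the bitfields below.
	 */
	kmsan_unpoison_memory(f, sizeof(*f));
	f->dirty = 0;
	f->young = 1;
}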
+2
mm/mprotect.c
··· 267 267 } else { 268 268 /* It must be an none page, or what else?.. */ 269 269 WARN_ON_ONCE(!pte_none(oldpte)); 270 + #ifdef CONFIG_PTE_MARKER_UFFD_WP 270 271 if (unlikely(uffd_wp && !vma_is_anonymous(vma))) { 271 272 /* 272 273 * For file-backed mem, we need to be able to ··· 279 278 make_pte_marker(PTE_MARKER_UFFD_WP)); 280 279 pages++; 281 280 } 281 + #endif 282 282 } 283 283 } while (pte++, addr += PAGE_SIZE, addr != end); 284 284 arch_leave_lazy_mmu_mode();
+10 -2
mm/page_alloc.c
··· 3446 3446 int pindex; 3447 3447 bool free_high; 3448 3448 3449 - __count_vm_event(PGFREE); 3449 + __count_vm_events(PGFREE, 1 << order); 3450 3450 pindex = order_to_pindex(migratetype, order); 3451 3451 list_add(&page->pcp_list, &pcp->lists[pindex]); 3452 3452 pcp->count += 1 << order; ··· 3803 3803 pcp_spin_unlock_irqrestore(pcp, flags); 3804 3804 pcp_trylock_finish(UP_flags); 3805 3805 if (page) { 3806 - __count_zid_vm_events(PGALLOC, page_zonenum(page), 1); 3806 + __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); 3807 3807 zone_statistics(preferred_zone, zone, 1); 3808 3808 } 3809 3809 return page; ··· 6823 6823 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 6824 6824 cond_resched(); 6825 6825 } 6826 + 6827 + /* 6828 + * ZONE_DEVICE pages are released directly to the driver page allocator 6829 + * which will set the page count to 1 when allocating the page. 6830 + */ 6831 + if (pgmap->type == MEMORY_DEVICE_PRIVATE || 6832 + pgmap->type == MEMORY_DEVICE_COHERENT) 6833 + set_page_count(page, 0); 6826 6834 } 6827 6835 6828 6836 /*
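The page_alloc change above makes the per-CPU list paths count base pages rather than requests: PGFREE and PGALLOC are now incremented by 1 << order. As a worked example, freeing an order-3 page on a 4 KiB base-page kernel adds 1 << 3 = 8 pages (32 KiB) to PGFREE where the old code added only 1; order-0 behaviour is unchanged since 1 << 0 = 1. The ZONE_DEVICE hunk sets the initial refcount of device private and device coherent pages to zero, matching the convention described in its comment that the driver-side page allocator raises the count to 1 when handing a page out.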
+49
tools/testing/selftests/vm/hmm-tests.c
··· 1054 1054 hmm_buffer_free(buffer); 1055 1055 } 1056 1056 1057 + TEST_F(hmm, migrate_release) 1058 + { 1059 + struct hmm_buffer *buffer; 1060 + unsigned long npages; 1061 + unsigned long size; 1062 + unsigned long i; 1063 + int *ptr; 1064 + int ret; 1065 + 1066 + npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift; 1067 + ASSERT_NE(npages, 0); 1068 + size = npages << self->page_shift; 1069 + 1070 + buffer = malloc(sizeof(*buffer)); 1071 + ASSERT_NE(buffer, NULL); 1072 + 1073 + buffer->fd = -1; 1074 + buffer->size = size; 1075 + buffer->mirror = malloc(size); 1076 + ASSERT_NE(buffer->mirror, NULL); 1077 + 1078 + buffer->ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, 1079 + MAP_PRIVATE | MAP_ANONYMOUS, buffer->fd, 0); 1080 + ASSERT_NE(buffer->ptr, MAP_FAILED); 1081 + 1082 + /* Initialize buffer in system memory. */ 1083 + for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i) 1084 + ptr[i] = i; 1085 + 1086 + /* Migrate memory to device. */ 1087 + ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages); 1088 + ASSERT_EQ(ret, 0); 1089 + ASSERT_EQ(buffer->cpages, npages); 1090 + 1091 + /* Check what the device read. */ 1092 + for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i) 1093 + ASSERT_EQ(ptr[i], i); 1094 + 1095 + /* Release device memory. */ 1096 + ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_RELEASE, buffer, npages); 1097 + ASSERT_EQ(ret, 0); 1098 + 1099 + /* Fault pages back to system memory and check them. */ 1100 + for (i = 0, ptr = buffer->ptr; i < size / (2 * sizeof(*ptr)); ++i) 1101 + ASSERT_EQ(ptr[i], i); 1102 + 1103 + hmm_buffer_free(buffer); 1104 + } 1105 + 1057 1106 /* 1058 1107 * Migrate anonymous shared memory to device private memory. 1059 1108 */
+21 -1
tools/testing/selftests/vm/userfaultfd.c
··· 774 774 continue_range(uffd, msg->arg.pagefault.address, page_size); 775 775 stats->minor_faults++; 776 776 } else { 777 - /* Missing page faults */ 777 + /* 778 + * Missing page faults. 779 + * 780 + * Here we force a write check for each of the missing mode 781 + * faults. It's guaranteed because the only threads that 782 + * will trigger uffd faults are the locking threads, and 783 + * their first instruction to touch the missing page will 784 + * always be pthread_mutex_lock(). 785 + * 786 + * Note that here we relied on an NPTL glibc impl detail to 787 + * always read the lock type at the entry of the lock op 788 + * (pthread_mutex_t.__data.__type, offset 0x10) before 789 + * doing any locking operations to guarantee that. It's 790 + * actually not good to rely on this impl detail because 791 + * logically a pthread-compatible lib can implement the 792 + * locks without types and we can fail when linking with 793 + * them. However since we used to find bugs with this 794 + * strict check we still keep it around. Hopefully this 795 + * could be a good hint when it fails again. If one day 796 + * it'll break on some other impl of glibc we'll revisit. 797 + */ 778 798 if (msg->arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE) 779 799 err("unexpected write fault"); 780 800