Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

lib/test_hmm: add large page allocation failure testing

Add HMM_DMIRROR_FLAG_FAIL_ALLOC flag to simulate large page allocation
failures, enabling testing of split migration code paths.

This test flag allows validation of the fallback behavior when the destination
device cannot allocate compound pages. This is useful for testing the
split migration functionality.

Link: https://lkml.kernel.org/r/20251001065707.920170-13-balbirs@nvidia.com
Signed-off-by: Balbir Singh <balbirs@nvidia.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Ying Huang <ying.huang@linux.alibaba.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Lyude Paul <lyude@redhat.com>
Cc: Danilo Krummrich <dakr@kernel.org>
Cc: David Airlie <airlied@gmail.com>
Cc: Simona Vetter <simona@ffwll.ch>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mika Penttilä <mpenttil@redhat.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Balbir Singh and committed by Andrew Morton
aa3ade42 4265d67e

+44 -20
+41 -20
lib/test_hmm.c
··· 92 92 struct xarray pt; 93 93 struct mmu_interval_notifier notifier; 94 94 struct mutex mutex; 95 + __u64 flags; 95 96 }; 96 97 97 98 /* ··· 700 699 page_to_pfn(spage))) 701 700 goto next; 702 701 703 - dpage = dmirror_devmem_alloc_page(dmirror, is_large); 702 + if (dmirror->flags & HMM_DMIRROR_FLAG_FAIL_ALLOC) { 703 + dmirror->flags &= ~HMM_DMIRROR_FLAG_FAIL_ALLOC; 704 + dpage = NULL; 705 + } else 706 + dpage = dmirror_devmem_alloc_page(dmirror, is_large); 707 + 704 708 if (!dpage) { 705 709 struct folio *folio; 706 710 unsigned long i; ··· 965 959 966 960 spage = BACKING_PAGE(spage); 967 961 order = folio_order(page_folio(spage)); 968 - 969 962 if (order) 963 + *dst = MIGRATE_PFN_COMPOUND; 964 + if (*src & MIGRATE_PFN_WRITE) 965 + *dst |= MIGRATE_PFN_WRITE; 966 + 967 + if (dmirror->flags & HMM_DMIRROR_FLAG_FAIL_ALLOC) { 968 + dmirror->flags &= ~HMM_DMIRROR_FLAG_FAIL_ALLOC; 969 + *dst &= ~MIGRATE_PFN_COMPOUND; 970 + dpage = NULL; 971 + } else if (order) { 970 972 dpage = folio_page(vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 971 973 order, args->vma, addr), 0); 972 - else 974 + } else { 973 975 dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr); 974 - 975 - /* Try with smaller pages if large allocation fails */ 976 - if (!dpage && order) { 977 - dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr); 978 - if (!dpage) 979 - return VM_FAULT_OOM; 980 - order = 0; 981 976 } 977 + 978 + if (!dpage && !order) 979 + return VM_FAULT_OOM; 982 980 983 981 pr_debug("migrating from sys to dev pfn src: 0x%lx pfn dst: 0x%lx\n", 984 982 page_to_pfn(spage), page_to_pfn(dpage)); 985 - lock_page(dpage); 986 - xa_erase(&dmirror->pt, addr >> PAGE_SHIFT); 987 - copy_highpage(dpage, spage); 988 - *dst = migrate_pfn(page_to_pfn(dpage)); 989 - if (*src & MIGRATE_PFN_WRITE) 990 - *dst |= MIGRATE_PFN_WRITE; 991 - if (order) 992 - *dst |= MIGRATE_PFN_COMPOUND; 983 + 984 + if (dpage) { 985 + lock_page(dpage); 986 + *dst |= migrate_pfn(page_to_pfn(dpage)); 987 + } 993 988 994 
989 for (i = 0; i < (1 << order); i++) { 995 990 struct page *src_page; 996 991 struct page *dst_page; 997 992 993 + /* Try with smaller pages if large allocation fails */ 994 + if (!dpage && order) { 995 + dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, args->vma, addr); 996 + lock_page(dpage); 997 + dst[i] = migrate_pfn(page_to_pfn(dpage)); 998 + dst_page = pfn_to_page(page_to_pfn(dpage)); 999 + dpage = NULL; /* For the next iteration */ 1000 + } else { 1001 + dst_page = pfn_to_page(page_to_pfn(dpage) + i); 1002 + } 1003 + 998 1004 src_page = pfn_to_page(page_to_pfn(spage) + i); 999 - dst_page = pfn_to_page(page_to_pfn(dpage) + i); 1000 1005 1001 1006 xa_erase(&dmirror->pt, addr >> PAGE_SHIFT); 1007 + addr += PAGE_SIZE; 1002 1008 copy_highpage(dst_page, src_page); 1003 1009 } 1004 1010 next: 1005 - addr += PAGE_SIZE << order; 1006 1011 src += 1 << order; 1007 1012 dst += 1 << order; 1008 1013 } ··· 1529 1512 1530 1513 case HMM_DMIRROR_RELEASE: 1531 1514 dmirror_device_remove_chunks(dmirror->mdevice); 1515 + ret = 0; 1516 + break; 1517 + case HMM_DMIRROR_FLAGS: 1518 + dmirror->flags = cmd.npages; 1532 1519 ret = 0; 1533 1520 break; 1534 1521
+3
lib/test_hmm_uapi.h
··· 37 37 #define HMM_DMIRROR_EXCLUSIVE _IOWR('H', 0x05, struct hmm_dmirror_cmd) 38 38 #define HMM_DMIRROR_CHECK_EXCLUSIVE _IOWR('H', 0x06, struct hmm_dmirror_cmd) 39 39 #define HMM_DMIRROR_RELEASE _IOWR('H', 0x07, struct hmm_dmirror_cmd) 40 + #define HMM_DMIRROR_FLAGS _IOWR('H', 0x08, struct hmm_dmirror_cmd) 41 + 42 + #define HMM_DMIRROR_FLAG_FAIL_ALLOC (1ULL << 0) 40 43 41 44 /* 42 45 * Values returned in hmm_dmirror_cmd.ptr for HMM_DMIRROR_SNAPSHOT.