Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests: mm: cow: flag vmsplice() hugetlb tests as XFAIL

Patch series "selftests: mm: cow: flag vmsplice() hugetlb tests as XFAIL".

The failing hugetlb vmsplice() COW tests keep confusing people, and having
tests that have been failing for years and likely will keep failing for
years to come because nobody cares enough is rather suboptimal. Let's
mark them as XFAIL and document why fixing them is not as easy as it
would appear at first sight.

More details can be found in [1], especially around how hugetlb pages
cannot really be overcommitted, and why we don't particularly care about
these vmsplice() leaks for hugetlb -- in contrast to ordinary memory.

[1] https://lore.kernel.org/all/8b42a24d-caf0-46ef-9e15-0f88d47d2f21@redhat.com/


This patch (of 2):

The vmsplice() hugetlb tests have been failing right from the start, and
we documented that in the introducing commit 7dad331be781 ("selftests/vm:
anon_cow: hugetlb tests"):

Note that some tests cases still fail. This will, for example, be
fixed once vmsplice properly uses FOLL_PIN instead of FOLL_GET for
pinning. With 2 MiB and 1 GiB hugetlb on x86_64, the expected
failures are:

Until vmsplice() is changed, these tests will likely keep failing: hugetlb
COW reuse logic is harder to change, because using the same COW reuse
logic as we use for !hugetlb could harm other (sane) users when running
out of free hugetlb pages.

More details can be found in [1], especially around how hugetlb pages
cannot really be overcommitted, and why we don't particularly care about
these vmsplice() leaks for hugetlb -- in contrast to ordinary memory.

These (expected) failures keep confusing people, so flag them accordingly.

Before:
$ ./cow
[...]
Bail out! 8 out of 778 tests failed
# Totals: pass:769 fail:8 xfail:0 xpass:0 skip:1 error:0
$ echo $?
1

After:
$ ./cow
[...]
# Totals: pass:769 fail:0 xfail:8 xpass:0 skip:1 error:0
$ echo $?
0

[1] https://lore.kernel.org/all/8b42a24d-caf0-46ef-9e15-0f88d47d2f21@redhat.com/

Link: https://lkml.kernel.org/r/20240502085259.103784-1-david@redhat.com
Link: https://lkml.kernel.org/r/20240502085259.103784-2-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Peter Xu <peterx@redhat.com>
Cc: Shuah Khan <shuah@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

David Hildenbrand and committed by
Andrew Morton
4bf6a4eb 5ee9562c

+71 -35
+71 -35
tools/testing/selftests/mm/cow.c
··· 199 199 typedef int (*child_fn)(char *mem, size_t size, struct comm_pipes *comm_pipes); 200 200 201 201 static void do_test_cow_in_parent(char *mem, size_t size, bool do_mprotect, 202 - child_fn fn) 202 + child_fn fn, bool xfail) 203 203 { 204 204 struct comm_pipes comm_pipes; 205 205 char buf; ··· 247 247 else 248 248 ret = -EINVAL; 249 249 250 - ksft_test_result(!ret, "No leak from parent into child\n"); 250 + if (!ret) { 251 + ksft_test_result_pass("No leak from parent into child\n"); 252 + } else if (xfail) { 253 + /* 254 + * With hugetlb, some vmsplice() tests are currently expected to 255 + * fail because (a) harder to fix and (b) nobody really cares. 256 + * Flag them as expected failure for now. 257 + */ 258 + ksft_test_result_xfail("Leak from parent into child\n"); 259 + } else { 260 + ksft_test_result_fail("Leak from parent into child\n"); 261 + } 251 262 close_comm_pipes: 252 263 close_comm_pipes(&comm_pipes); 253 264 } 254 265 255 - static void test_cow_in_parent(char *mem, size_t size) 266 + static void test_cow_in_parent(char *mem, size_t size, bool is_hugetlb) 256 267 { 257 - do_test_cow_in_parent(mem, size, false, child_memcmp_fn); 268 + do_test_cow_in_parent(mem, size, false, child_memcmp_fn, false); 258 269 } 259 270 260 - static void test_cow_in_parent_mprotect(char *mem, size_t size) 271 + static void test_cow_in_parent_mprotect(char *mem, size_t size, bool is_hugetlb) 261 272 { 262 - do_test_cow_in_parent(mem, size, true, child_memcmp_fn); 273 + do_test_cow_in_parent(mem, size, true, child_memcmp_fn, false); 263 274 } 264 275 265 - static void test_vmsplice_in_child(char *mem, size_t size) 276 + static void test_vmsplice_in_child(char *mem, size_t size, bool is_hugetlb) 266 277 { 267 - do_test_cow_in_parent(mem, size, false, child_vmsplice_memcmp_fn); 278 + do_test_cow_in_parent(mem, size, false, child_vmsplice_memcmp_fn, 279 + is_hugetlb); 268 280 } 269 281 270 - static void test_vmsplice_in_child_mprotect(char *mem, size_t size) 282 + 
static void test_vmsplice_in_child_mprotect(char *mem, size_t size, 283 + bool is_hugetlb) 271 284 { 272 - do_test_cow_in_parent(mem, size, true, child_vmsplice_memcmp_fn); 285 + do_test_cow_in_parent(mem, size, true, child_vmsplice_memcmp_fn, 286 + is_hugetlb); 273 287 } 274 288 275 289 static void do_test_vmsplice_in_parent(char *mem, size_t size, 276 - bool before_fork) 290 + bool before_fork, bool xfail) 277 291 { 278 292 struct iovec iov = { 279 293 .iov_base = mem, ··· 369 355 } 370 356 } 371 357 372 - ksft_test_result(!memcmp(old, new, transferred), 373 - "No leak from child into parent\n"); 358 + if (!memcmp(old, new, transferred)) { 359 + ksft_test_result_pass("No leak from child into parent\n"); 360 + } else if (xfail) { 361 + /* 362 + * With hugetlb, some vmsplice() tests are currently expected to 363 + * fail because (a) harder to fix and (b) nobody really cares. 364 + * Flag them as expected failure for now. 365 + */ 366 + ksft_test_result_xfail("Leak from child into parent\n"); 367 + } else { 368 + ksft_test_result_fail("Leak from child into parent\n"); 369 + } 374 370 close_pipe: 375 371 close(fds[0]); 376 372 close(fds[1]); ··· 391 367 free(new); 392 368 } 393 369 394 - static void test_vmsplice_before_fork(char *mem, size_t size) 370 + static void test_vmsplice_before_fork(char *mem, size_t size, bool is_hugetlb) 395 371 { 396 - do_test_vmsplice_in_parent(mem, size, true); 372 + do_test_vmsplice_in_parent(mem, size, true, is_hugetlb); 397 373 } 398 374 399 - static void test_vmsplice_after_fork(char *mem, size_t size) 375 + static void test_vmsplice_after_fork(char *mem, size_t size, bool is_hugetlb) 400 376 { 401 - do_test_vmsplice_in_parent(mem, size, false); 377 + do_test_vmsplice_in_parent(mem, size, false, is_hugetlb); 402 378 } 403 379 404 380 #ifdef LOCAL_CONFIG_HAVE_LIBURING ··· 553 529 close_comm_pipes(&comm_pipes); 554 530 } 555 531 556 - static void test_iouring_ro(char *mem, size_t size) 532 + static void test_iouring_ro(char *mem, 
size_t size, bool is_hugetlb) 557 533 { 558 534 do_test_iouring(mem, size, false); 559 535 } 560 536 561 - static void test_iouring_fork(char *mem, size_t size) 537 + static void test_iouring_fork(char *mem, size_t size, bool is_hugetlb) 562 538 { 563 539 do_test_iouring(mem, size, true); 564 540 } ··· 702 678 free(tmp); 703 679 } 704 680 705 - static void test_ro_pin_on_shared(char *mem, size_t size) 681 + static void test_ro_pin_on_shared(char *mem, size_t size, bool is_hugetlb) 706 682 { 707 683 do_test_ro_pin(mem, size, RO_PIN_TEST_SHARED, false); 708 684 } 709 685 710 - static void test_ro_fast_pin_on_shared(char *mem, size_t size) 686 + static void test_ro_fast_pin_on_shared(char *mem, size_t size, bool is_hugetlb) 711 687 { 712 688 do_test_ro_pin(mem, size, RO_PIN_TEST_SHARED, true); 713 689 } 714 690 715 - static void test_ro_pin_on_ro_previously_shared(char *mem, size_t size) 691 + static void test_ro_pin_on_ro_previously_shared(char *mem, size_t size, 692 + bool is_hugetlb) 716 693 { 717 694 do_test_ro_pin(mem, size, RO_PIN_TEST_PREVIOUSLY_SHARED, false); 718 695 } 719 696 720 - static void test_ro_fast_pin_on_ro_previously_shared(char *mem, size_t size) 697 + static void test_ro_fast_pin_on_ro_previously_shared(char *mem, size_t size, 698 + bool is_hugetlb) 721 699 { 722 700 do_test_ro_pin(mem, size, RO_PIN_TEST_PREVIOUSLY_SHARED, true); 723 701 } 724 702 725 - static void test_ro_pin_on_ro_exclusive(char *mem, size_t size) 703 + static void test_ro_pin_on_ro_exclusive(char *mem, size_t size, 704 + bool is_hugetlb) 726 705 { 727 706 do_test_ro_pin(mem, size, RO_PIN_TEST_RO_EXCLUSIVE, false); 728 707 } 729 708 730 - static void test_ro_fast_pin_on_ro_exclusive(char *mem, size_t size) 709 + static void test_ro_fast_pin_on_ro_exclusive(char *mem, size_t size, 710 + bool is_hugetlb) 731 711 { 732 712 do_test_ro_pin(mem, size, RO_PIN_TEST_RO_EXCLUSIVE, true); 733 713 } 734 714 735 - typedef void (*test_fn)(char *mem, size_t size); 715 + typedef void 
(*test_fn)(char *mem, size_t size, bool hugetlb); 736 716 737 717 static void do_run_with_base_page(test_fn fn, bool swapout) 738 718 { ··· 768 740 } 769 741 } 770 742 771 - fn(mem, pagesize); 743 + fn(mem, pagesize, false); 772 744 munmap: 773 745 munmap(mem, pagesize); 774 746 } ··· 932 904 break; 933 905 } 934 906 935 - fn(mem, size); 907 + fn(mem, size, false); 936 908 munmap: 937 909 munmap(mmap_mem, mmap_size); 938 910 if (mremap_mem != MAP_FAILED) ··· 1025 997 } 1026 998 munmap(dummy, hugetlbsize); 1027 999 1028 - fn(mem, hugetlbsize); 1000 + fn(mem, hugetlbsize, true); 1029 1001 munmap: 1030 1002 munmap(mem, hugetlbsize); 1031 1003 } ··· 1064 1036 */ 1065 1037 { 1066 1038 "vmsplice() + unmap in child", 1067 - test_vmsplice_in_child 1039 + test_vmsplice_in_child, 1068 1040 }, 1069 1041 /* 1070 1042 * vmsplice() test, but do an additional mprotect(PROT_READ)+ ··· 1072 1044 */ 1073 1045 { 1074 1046 "vmsplice() + unmap in child with mprotect() optimization", 1075 - test_vmsplice_in_child_mprotect 1047 + test_vmsplice_in_child_mprotect, 1076 1048 }, 1077 1049 /* 1078 1050 * vmsplice() [R/O GUP] in parent before fork(), unmap in parent after ··· 1350 1322 close_comm_pipes(&comm_pipes); 1351 1323 } 1352 1324 1353 - static void test_anon_thp_collapse_unshared(char *mem, size_t size) 1325 + static void test_anon_thp_collapse_unshared(char *mem, size_t size, 1326 + bool is_hugetlb) 1354 1327 { 1328 + assert(!is_hugetlb); 1355 1329 do_test_anon_thp_collapse(mem, size, ANON_THP_COLLAPSE_UNSHARED); 1356 1330 } 1357 1331 1358 - static void test_anon_thp_collapse_fully_shared(char *mem, size_t size) 1332 + static void test_anon_thp_collapse_fully_shared(char *mem, size_t size, 1333 + bool is_hugetlb) 1359 1334 { 1335 + assert(!is_hugetlb); 1360 1336 do_test_anon_thp_collapse(mem, size, ANON_THP_COLLAPSE_FULLY_SHARED); 1361 1337 } 1362 1338 1363 - static void test_anon_thp_collapse_lower_shared(char *mem, size_t size) 1339 + static void 
test_anon_thp_collapse_lower_shared(char *mem, size_t size, 1340 + bool is_hugetlb) 1364 1341 { 1342 + assert(!is_hugetlb); 1365 1343 do_test_anon_thp_collapse(mem, size, ANON_THP_COLLAPSE_LOWER_SHARED); 1366 1344 } 1367 1345 1368 - static void test_anon_thp_collapse_upper_shared(char *mem, size_t size) 1346 + static void test_anon_thp_collapse_upper_shared(char *mem, size_t size, 1347 + bool is_hugetlb) 1369 1348 { 1349 + assert(!is_hugetlb); 1370 1350 do_test_anon_thp_collapse(mem, size, ANON_THP_COLLAPSE_UPPER_SHARED); 1371 1351 } 1372 1352