/*
 * Stress userfaultfd syscall.
 *
 * Copyright (C) 2015 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * This test allocates two virtual areas and bounces the physical
 * memory across the two virtual areas (from area_src to area_dst)
 * using userfaultfd.
 *
 * There are three threads running per CPU:
 *
 * 1) one per-CPU thread takes a per-page pthread_mutex in a random
 *    page of the area_dst (while the physical page may still be in
 *    area_src), and increments a per-page counter in the same page,
 *    and checks its value against a verification region.
 *
 * 2) another per-CPU thread handles the userfaults generated by
 *    thread 1 above. userfaultfd blocking reads or poll() modes are
 *    exercised interleaved.
 *
 * 3) one last per-CPU thread transfers the memory in the background
 *    at maximum bandwidth (if not already transferred by thread
 *    2). Each CPU thread takes care of transferring a portion of the
 *    area.
 *
 * When all threads of type 3 completed the transfer, one bounce is
 * complete. area_src and area_dst are then swapped. All threads are
 * respawned and so the bounce is immediately restarted in the
 * opposite direction.
 *
 * The per-CPU threads of type 1, by triggering userfaults inside
 * pthread_mutex_lock, also verify the atomicity of the memory
 * transfer (UFFDIO_COPY).
 */

#define _GNU_SOURCE
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <time.h>
#include <signal.h>
#include <poll.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/wait.h>
#include <pthread.h>
#include <linux/userfaultfd.h>
#include <setjmp.h>
#include <stdbool.h>

#include "../kselftest.h"

#ifdef __NR_userfaultfd

static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;

#define BOUNCE_RANDOM		(1<<0)
#define BOUNCE_RACINGFAULTS	(1<<1)
#define BOUNCE_VERIFY		(1<<2)
#define BOUNCE_POLL		(1<<3)
static int bounces;

#define TEST_ANON	1
#define TEST_HUGETLB	2
#define TEST_SHMEM	3
static int test_type;

/* exercise the test_uffdio_*_eexist every ALARM_INTERVAL_SECS */
#define ALARM_INTERVAL_SECS 10
static volatile bool test_uffdio_copy_eexist = true;
static volatile bool test_uffdio_zeropage_eexist = true;

static bool map_shared;
static int huge_fd;
static char *huge_fd_off0;
static unsigned long long *count_verify;
static int uffd, uffd_flags, finished, *pipefd;
static char *area_src, *area_src_alias, *area_dst, *area_dst_alias;
static char *zeropage;
pthread_attr_t attr;

/* pthread_mutex_t starts at page offset 0 */
#define area_mutex(___area, ___nr)					\
	((pthread_mutex_t *) ((___area) + (___nr)*page_size))
/*
 * count is placed in the page after pthread_mutex_t, naturally aligned
 * to avoid misalignment faults on non-x86 archs.
 */
#define area_count(___area, ___nr)					\
	((volatile unsigned long long *) ((unsigned long)		\
				 ((___area) + (___nr)*page_size +	\
				  sizeof(pthread_mutex_t) +		\
				  sizeof(unsigned long long) - 1) &	\
				 ~(unsigned long)(sizeof(unsigned long long) \
						  - 1)))

const char *examples =
	"# Run anonymous memory test on 100MiB region with 99999 bounces:\n"
	"./userfaultfd anon 100 99999\n\n"
	"# Run shared memory test on 1GiB region with 99 bounces:\n"
	"./userfaultfd shmem 1000 99\n\n"
	"# Run hugetlb memory test on 256MiB region with 50 bounces (using /dev/hugepages/hugefile):\n"
	"./userfaultfd hugetlb 256 50 /dev/hugepages/hugefile\n\n"
	"# Run the same hugetlb test but using shmem:\n"
	"./userfaultfd hugetlb_shared 256 50 /dev/hugepages/hugefile\n\n"
	"# 10MiB-~6GiB 999 bounces anonymous test, "
	"continue forever unless an error triggers\n"
	"while ./userfaultfd anon $[RANDOM % 6000 + 10] 999; do true; done\n\n";

static void usage(void)
{
	fprintf(stderr, "\nUsage: ./userfaultfd <test type> <MiB> <bounces> "
		"[hugetlbfs_file]\n\n");
	fprintf(stderr, "Supported <test type>: anon, hugetlb, "
		"hugetlb_shared, shmem\n\n");
	fprintf(stderr, "Examples:\n\n");
	fprintf(stderr, "%s", examples);
	exit(1);
}

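/* Anonymous memory */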
static int anon_release_pages(char *rel_area)
{
	int ret = 0;

	if (madvise(rel_area, nr_pages * page_size, MADV_DONTNEED)) {
		perror("madvise");
		ret = 1;
	}

	return ret;
}

static void anon_allocate_area(void **alloc_area)
{
	if (posix_memalign(alloc_area, page_size, nr_pages * page_size)) {
		fprintf(stderr, "out of memory\n");
		*alloc_area = NULL;
	}
}

static void noop_alias_mapping(__u64 *start, size_t len, unsigned long offset)
{
}

/* HugeTLB memory */
static int hugetlb_release_pages(char *rel_area)
{
	int ret = 0;

	if (fallocate(huge_fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      rel_area == huge_fd_off0 ? 0 :
		      nr_pages * page_size,
		      nr_pages * page_size)) {
		perror("fallocate");
		ret = 1;
	}

	return ret;
}


static void hugetlb_allocate_area(void **alloc_area)
{
	void *area_alias = NULL;
	char **alloc_area_alias;
	*alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
			   (map_shared ? MAP_SHARED : MAP_PRIVATE) |
			   MAP_HUGETLB,
			   huge_fd, *alloc_area == area_src ? 0 :
			   nr_pages * page_size);
	if (*alloc_area == MAP_FAILED) {
		fprintf(stderr, "mmap of hugetlbfs file failed\n");
		*alloc_area = NULL;
	}

	if (map_shared) {
		area_alias = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
				  MAP_SHARED | MAP_HUGETLB,
				  huge_fd, *alloc_area == area_src ? 0 :
				  nr_pages * page_size);
		if (area_alias == MAP_FAILED) {
			if (munmap(*alloc_area, nr_pages * page_size) < 0)
				perror("hugetlb munmap"), exit(1);
			*alloc_area = NULL;
			return;
		}
	}
	if (*alloc_area == area_src) {
		huge_fd_off0 = *alloc_area;
		alloc_area_alias = &area_src_alias;
	} else {
		alloc_area_alias = &area_dst_alias;
	}
	if (area_alias)
		*alloc_area_alias = area_alias;
}

static void hugetlb_alias_mapping(__u64 *start, size_t len, unsigned long offset)
{
	if (!map_shared)
		return;
	/*
	 * We can't zap just the pagetable with hugetlbfs because
	 * MADV_DONTNEED won't work. So exercise -EEXIST on an alias
	 * mapping where the pagetables are not established initially;
	 * this way we'll exercise the -EEXIST detection at the fs level.
	 */
	*start = (unsigned long) area_dst_alias + offset;
}

/* Shared memory */
static int shmem_release_pages(char *rel_area)
{
	int ret = 0;

	if (madvise(rel_area, nr_pages * page_size, MADV_REMOVE)) {
		perror("madvise");
		ret = 1;
	}

	return ret;
}

static void shmem_allocate_area(void **alloc_area)
{
	*alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
			   MAP_ANONYMOUS | MAP_SHARED, -1, 0);
	if (*alloc_area == MAP_FAILED) {
		fprintf(stderr, "shared memory mmap failed\n");
		*alloc_area = NULL;
	}
}

struct uffd_test_ops {
	unsigned long expected_ioctls;
	void (*allocate_area)(void **alloc_area);
	int (*release_pages)(char *rel_area);
	void (*alias_mapping)(__u64 *start, size_t len, unsigned long offset);
};

#define ANON_EXPECTED_IOCTLS		((1 << _UFFDIO_WAKE) | \
					 (1 << _UFFDIO_COPY) | \
					 (1 << _UFFDIO_ZEROPAGE))

static struct uffd_test_ops anon_uffd_test_ops = {
	.expected_ioctls = ANON_EXPECTED_IOCTLS,
	.allocate_area	= anon_allocate_area,
	.release_pages	= anon_release_pages,
	.alias_mapping = noop_alias_mapping,
};

static struct uffd_test_ops shmem_uffd_test_ops = {
	.expected_ioctls = ANON_EXPECTED_IOCTLS,
	.allocate_area	= shmem_allocate_area,
	.release_pages	= shmem_release_pages,
	.alias_mapping = noop_alias_mapping,
};

static struct uffd_test_ops hugetlb_uffd_test_ops = {
	.expected_ioctls = UFFD_API_RANGE_IOCTLS_BASIC,
	.allocate_area	= hugetlb_allocate_area,
	.release_pages	= hugetlb_release_pages,
	.alias_mapping = hugetlb_alias_mapping,
};

static struct uffd_test_ops *uffd_test_ops;

static int my_bcmp(char *str1, char *str2, size_t n)
{
	unsigned long i;
	for (i = 0; i < n; i++)
		if (str1[i] != str2[i])
			return 1;
	return 0;
}

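/*
 * Thread type 1 (see the comment at the top of the file): pick a page
 * of area_dst (a random one when BOUNCE_RANDOM is set), take its
 * per-page mutex, then increment and verify the per-page counter. The
 * memory accesses are what generates the userfaults resolved by the
 * other two thread types.
 */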
static void *locking_thread(void *arg)
{
	unsigned long cpu = (unsigned long) arg;
	struct random_data rand;
	unsigned long page_nr = *(&(page_nr)); /* uninitialized warning */
	int32_t rand_nr;
	unsigned long long count;
	char randstate[64];
	unsigned int seed;
	time_t start;

	if (bounces & BOUNCE_RANDOM) {
		seed = (unsigned int) time(NULL) - bounces;
		if (!(bounces & BOUNCE_RACINGFAULTS))
			seed += cpu;
		bzero(&rand, sizeof(rand));
		bzero(&randstate, sizeof(randstate));
		if (initstate_r(seed, randstate, sizeof(randstate), &rand))
			fprintf(stderr, "srandom_r error\n"), exit(1);
	} else {
		page_nr = -bounces;
		if (!(bounces & BOUNCE_RACINGFAULTS))
			page_nr += cpu * nr_pages_per_cpu;
	}

	while (!finished) {
		if (bounces & BOUNCE_RANDOM) {
			if (random_r(&rand, &rand_nr))
				fprintf(stderr, "random_r 1 error\n"), exit(1);
			page_nr = rand_nr;
			if (sizeof(page_nr) > sizeof(rand_nr)) {
				if (random_r(&rand, &rand_nr))
					fprintf(stderr, "random_r 2 error\n"), exit(1);
				page_nr |= (((unsigned long) rand_nr) << 16) <<
					   16;
			}
		} else
			page_nr += 1;
		page_nr %= nr_pages;

		start = time(NULL);
		if (bounces & BOUNCE_VERIFY) {
			count = *area_count(area_dst, page_nr);
			if (!count)
				fprintf(stderr,
					"page_nr %lu wrong count %Lu %Lu\n",
					page_nr, count,
					count_verify[page_nr]), exit(1);


			/*
			 * We can't use bcmp (or memcmp) because that
			 * returns 0 erroneously if the memory is
			 * changing under it (even if the end of the
			 * page is never changing and always
			 * different).
			 */
#if 1
			if (!my_bcmp(area_dst + page_nr * page_size, zeropage,
				     page_size))
				fprintf(stderr,
					"my_bcmp page_nr %lu wrong count %Lu %Lu\n",
					page_nr, count,
					count_verify[page_nr]), exit(1);
#else
			unsigned long loops;

			loops = 0;
			/* uncomment the below line to test with mutex */
			/* pthread_mutex_lock(area_mutex(area_dst, page_nr)); */
			while (!bcmp(area_dst + page_nr * page_size, zeropage,
				     page_size)) {
				loops += 1;
				if (loops > 10)
					break;
			}
			/* uncomment below line to test with mutex */
			/* pthread_mutex_unlock(area_mutex(area_dst, page_nr)); */
			if (loops) {
				fprintf(stderr,
					"page_nr %lu all zero thread %lu %p %lu\n",
					page_nr, cpu, area_dst + page_nr * page_size,
					loops);
				if (loops > 10)
					exit(1);
			}
#endif
		}

		pthread_mutex_lock(area_mutex(area_dst, page_nr));
		count = *area_count(area_dst, page_nr);
		if (count != count_verify[page_nr]) {
			fprintf(stderr,
				"page_nr %lu memory corruption %Lu %Lu\n",
				page_nr, count,
				count_verify[page_nr]), exit(1);
		}
		count++;
		*area_count(area_dst, page_nr) = count_verify[page_nr] = count;
		pthread_mutex_unlock(area_mutex(area_dst, page_nr));

		if (time(NULL) - start > 1)
			fprintf(stderr,
				"userfault too slow %ld "
				"possible false positive with overcommit\n",
				time(NULL) - start);
	}

	return NULL;
}

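/*
 * Retry a UFFDIO_COPY on a range that has just been resolved: the
 * ioctl must fail and report -EEXIST in uffdio_copy.copy. For shared
 * hugetlbfs the retry is redirected through the alias mapping so the
 * -EEXIST check is exercised at the filesystem level as well.
 */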
static void retry_copy_page(int ufd, struct uffdio_copy *uffdio_copy,
			    unsigned long offset)
{
	uffd_test_ops->alias_mapping(&uffdio_copy->dst,
				     uffdio_copy->len,
				     offset);
	if (ioctl(ufd, UFFDIO_COPY, uffdio_copy)) {
		/* real retval in uffdio_copy.copy */
		if (uffdio_copy->copy != -EEXIST)
			fprintf(stderr, "UFFDIO_COPY retry error %Ld\n",
				uffdio_copy->copy), exit(1);
	} else {
		fprintf(stderr, "UFFDIO_COPY retry unexpected %Ld\n",
			uffdio_copy->copy), exit(1);
	}
}

static int __copy_page(int ufd, unsigned long offset, bool retry)
{
	struct uffdio_copy uffdio_copy;

	if (offset >= nr_pages * page_size)
		fprintf(stderr, "unexpected offset %lu\n",
			offset), exit(1);
	uffdio_copy.dst = (unsigned long) area_dst + offset;
	uffdio_copy.src = (unsigned long) area_src + offset;
	uffdio_copy.len = page_size;
	uffdio_copy.mode = 0;
	uffdio_copy.copy = 0;
	if (ioctl(ufd, UFFDIO_COPY, &uffdio_copy)) {
		/* real retval in uffdio_copy.copy */
		if (uffdio_copy.copy != -EEXIST)
			fprintf(stderr, "UFFDIO_COPY error %Ld\n",
				uffdio_copy.copy), exit(1);
	} else if (uffdio_copy.copy != page_size) {
		fprintf(stderr, "UFFDIO_COPY unexpected copy %Ld\n",
			uffdio_copy.copy), exit(1);
	} else {
		if (test_uffdio_copy_eexist && retry) {
			test_uffdio_copy_eexist = false;
			retry_copy_page(ufd, &uffdio_copy, offset);
		}
		return 1;
	}
	return 0;
}

static int copy_page_retry(int ufd, unsigned long offset)
{
	return __copy_page(ufd, offset, true);
}

static int copy_page(int ufd, unsigned long offset)
{
	return __copy_page(ufd, offset, false);
}

static int uffd_read_msg(int ufd, struct uffd_msg *msg)
{
	int ret = read(uffd, msg, sizeof(*msg));

	if (ret != sizeof(*msg)) {
		if (ret < 0) {
			if (errno == EAGAIN)
				return 1;
			else
				perror("blocking read error"), exit(1);
		} else {
			fprintf(stderr, "short read\n"), exit(1);
		}
	}

	return 0;
}

/* Return 1 if page fault handled by us; otherwise 0 */
static int uffd_handle_page_fault(struct uffd_msg *msg)
{
	unsigned long offset;

	if (msg->event != UFFD_EVENT_PAGEFAULT)
		fprintf(stderr, "unexpected msg event %u\n",
			msg->event), exit(1);

	if (bounces & BOUNCE_VERIFY &&
	    msg->arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE)
		fprintf(stderr, "unexpected write fault\n"), exit(1);

	offset = (char *)(unsigned long)msg->arg.pagefault.address - area_dst;
	offset &= ~(page_size-1);

	return copy_page(uffd, offset);
}

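/*
 * Thread type 2, poll() mode: wait on the uffd with poll(), resolve
 * pagefaults with UFFDIO_COPY and handle the non-cooperative FORK,
 * REMOVE and REMAP events. A byte written to the per-CPU pipe tells
 * the thread to stop and return the number of userfaults it resolved.
 */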
static void *uffd_poll_thread(void *arg)
{
	unsigned long cpu = (unsigned long) arg;
	struct pollfd pollfd[2];
	struct uffd_msg msg;
	struct uffdio_register uffd_reg;
	int ret;
	char tmp_chr;
	unsigned long userfaults = 0;

	pollfd[0].fd = uffd;
	pollfd[0].events = POLLIN;
	pollfd[1].fd = pipefd[cpu*2];
	pollfd[1].events = POLLIN;

	for (;;) {
		ret = poll(pollfd, 2, -1);
		if (!ret)
			fprintf(stderr, "poll error %d\n", ret), exit(1);
		if (ret < 0)
			perror("poll"), exit(1);
		if (pollfd[1].revents & POLLIN) {
			if (read(pollfd[1].fd, &tmp_chr, 1) != 1)
				fprintf(stderr, "read pipefd error\n"),
					exit(1);
			break;
		}
		if (!(pollfd[0].revents & POLLIN))
			fprintf(stderr, "pollfd[0].revents %d\n",
				pollfd[0].revents), exit(1);
		if (uffd_read_msg(uffd, &msg))
			continue;
		switch (msg.event) {
		default:
			fprintf(stderr, "unexpected msg event %u\n",
				msg.event), exit(1);
			break;
		case UFFD_EVENT_PAGEFAULT:
			userfaults += uffd_handle_page_fault(&msg);
			break;
		case UFFD_EVENT_FORK:
			close(uffd);
			uffd = msg.arg.fork.ufd;
			pollfd[0].fd = uffd;
			break;
		case UFFD_EVENT_REMOVE:
			uffd_reg.range.start = msg.arg.remove.start;
			uffd_reg.range.len = msg.arg.remove.end -
				msg.arg.remove.start;
			if (ioctl(uffd, UFFDIO_UNREGISTER, &uffd_reg.range))
				fprintf(stderr, "remove failure\n"), exit(1);
			break;
		case UFFD_EVENT_REMAP:
			area_dst = (char *)(unsigned long)msg.arg.remap.to;
			break;
		}
	}
	return (void *)userfaults;
}

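/*
 * Thread type 2, blocking read mode: read userfault messages from the
 * uffd and resolve them with UFFDIO_COPY until the thread is
 * cancelled. uffd_read_mutex is held locked by the spawner and
 * released here once the per-CPU counter is initialized, so stress()
 * only proceeds when the thread can be cancelled safely.
 */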
pthread_mutex_t uffd_read_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *uffd_read_thread(void *arg)
{
	unsigned long *this_cpu_userfaults;
	struct uffd_msg msg;

	this_cpu_userfaults = (unsigned long *) arg;
	*this_cpu_userfaults = 0;

	pthread_mutex_unlock(&uffd_read_mutex);
	/* from here cancellation is ok */

	for (;;) {
		if (uffd_read_msg(uffd, &msg))
			continue;
		(*this_cpu_userfaults) += uffd_handle_page_fault(&msg);
	}
	return (void *)NULL;
}

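/*
 * Thread type 3: transfer this CPU's slice of area_src into area_dst
 * at full bandwidth with UFFDIO_COPY; pages already resolved by the
 * uffd threads simply return -EEXIST.
 */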
static void *background_thread(void *arg)
{
	unsigned long cpu = (unsigned long) arg;
	unsigned long page_nr;

	for (page_nr = cpu * nr_pages_per_cpu;
	     page_nr < (cpu+1) * nr_pages_per_cpu;
	     page_nr++)
		copy_page_retry(uffd, page_nr * page_size);

	return NULL;
}

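/*
 * Run a single bounce: spawn the three thread types for every CPU,
 * wait for the background transfer to complete, then stop the locking
 * and uffd threads and collect the per-CPU userfault counts.
 */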
static int stress(unsigned long *userfaults)
{
	unsigned long cpu;
	pthread_t locking_threads[nr_cpus];
	pthread_t uffd_threads[nr_cpus];
	pthread_t background_threads[nr_cpus];
	void **_userfaults = (void **) userfaults;

	finished = 0;
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		if (pthread_create(&locking_threads[cpu], &attr,
				   locking_thread, (void *)cpu))
			return 1;
		if (bounces & BOUNCE_POLL) {
			if (pthread_create(&uffd_threads[cpu], &attr,
					   uffd_poll_thread, (void *)cpu))
				return 1;
		} else {
			if (pthread_create(&uffd_threads[cpu], &attr,
					   uffd_read_thread,
					   &_userfaults[cpu]))
				return 1;
			pthread_mutex_lock(&uffd_read_mutex);
		}
		if (pthread_create(&background_threads[cpu], &attr,
				   background_thread, (void *)cpu))
			return 1;
	}
	for (cpu = 0; cpu < nr_cpus; cpu++)
		if (pthread_join(background_threads[cpu], NULL))
			return 1;

	/*
	 * Be strict and immediately zap area_src, the whole area has
	 * been transferred already by the background threads. The
	 * area_src could then be faulted in in a racy way by still
	 * running uffd_threads reading zeropages after we zapped
	 * area_src (but they're guaranteed to get -EEXIST from
	 * UFFDIO_COPY without writing zero pages into area_dst
	 * because the background threads already completed).
	 */
	if (uffd_test_ops->release_pages(area_src))
		return 1;


	finished = 1;
	for (cpu = 0; cpu < nr_cpus; cpu++)
		if (pthread_join(locking_threads[cpu], NULL))
			return 1;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		char c;
		if (bounces & BOUNCE_POLL) {
			if (write(pipefd[cpu*2+1], &c, 1) != 1) {
				fprintf(stderr, "pipefd write error\n");
				return 1;
			}
			if (pthread_join(uffd_threads[cpu], &_userfaults[cpu]))
				return 1;
		} else {
			if (pthread_cancel(uffd_threads[cpu]))
				return 1;
			if (pthread_join(uffd_threads[cpu], NULL))
				return 1;
		}
	}

	return 0;
}

static int userfaultfd_open(int features)
{
	struct uffdio_api uffdio_api;

	uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	if (uffd < 0) {
		fprintf(stderr,
			"userfaultfd syscall not available in this kernel\n");
		return 1;
	}
	uffd_flags = fcntl(uffd, F_GETFD, NULL);

	uffdio_api.api = UFFD_API;
	uffdio_api.features = features;
	if (ioctl(uffd, UFFDIO_API, &uffdio_api)) {
		fprintf(stderr, "UFFDIO_API\n");
		return 1;
	}
	if (uffdio_api.api != UFFD_API) {
		fprintf(stderr, "UFFDIO_API error %Lu\n", uffdio_api.api);
		return 1;
	}

	return 0;
}

sigjmp_buf jbuf, *sigbuf;

static void sighndl(int sig, siginfo_t *siginfo, void *ptr)
{
	if (sig == SIGBUS) {
		if (sigbuf)
			siglongjmp(*sigbuf, 1);
		abort();
	}
}

/*
 * For the non-cooperative userfaultfd test we fork() a process that
 * will generate pagefaults, will mremap the area monitored by the
 * userfaultfd and at last will release the monitored area.
 * For anonymous and shared memory the area is divided into two
 * parts: the first part is accessed before mremap, and the second
 * part is accessed after mremap. Since hugetlbfs does not support
 * mremap, the entire monitored area is accessed in a single pass for
 * HUGETLB_TEST.
 * The release of the pages currently generates an event only for
 * shmem and anonymous memory (UFFD_EVENT_REMOVE), hence it is not
 * checked for hugetlb.
 * For the signal test (UFFD_FEATURE_SIGBUS), signal_test = 1: we
 * register the monitored area, generate pagefaults and test that the
 * signal is delivered, using UFFDIO_COPY to allocate the missing page
 * and retry. signal_test = 2 tests the robustness use case: we
 * release the monitored area, fork a process that will generate
 * pagefaults and verify the signal is generated. This also tests the
 * UFFD_FEATURE_EVENT_FORK event along with the signal feature. Using
 * the monitor thread, verify that no userfault events are generated.
 */
static int faulting_process(int signal_test)
{
	unsigned long nr;
	unsigned long long count;
	unsigned long split_nr_pages;
	unsigned long lastnr;
	struct sigaction act;
	unsigned long signalled = 0;

	if (test_type != TEST_HUGETLB)
		split_nr_pages = (nr_pages + 1) / 2;
	else
		split_nr_pages = nr_pages;

	if (signal_test) {
		sigbuf = &jbuf;
		memset(&act, 0, sizeof(act));
		act.sa_sigaction = sighndl;
		act.sa_flags = SA_SIGINFO;
		if (sigaction(SIGBUS, &act, 0)) {
			perror("sigaction");
			return 1;
		}
		lastnr = (unsigned long)-1;
	}

	for (nr = 0; nr < split_nr_pages; nr++) {
		if (signal_test) {
			if (sigsetjmp(*sigbuf, 1) != 0) {
				if (nr == lastnr) {
					fprintf(stderr, "Signal repeated\n");
					return 1;
				}

				lastnr = nr;
				if (signal_test == 1) {
					if (copy_page(uffd, nr * page_size))
						signalled++;
				} else {
					signalled++;
					continue;
				}
			}
		}

		count = *area_count(area_dst, nr);
		if (count != count_verify[nr]) {
			fprintf(stderr,
				"nr %lu memory corruption %Lu %Lu\n",
				nr, count,
				count_verify[nr]), exit(1);
		}
	}

	if (signal_test)
		return signalled != split_nr_pages;

	if (test_type == TEST_HUGETLB)
		return 0;

	area_dst = mremap(area_dst, nr_pages * page_size, nr_pages * page_size,
			  MREMAP_MAYMOVE | MREMAP_FIXED, area_src);
	if (area_dst == MAP_FAILED)
		perror("mremap"), exit(1);

	for (; nr < nr_pages; nr++) {
		count = *area_count(area_dst, nr);
		if (count != count_verify[nr]) {
			fprintf(stderr,
				"nr %lu memory corruption %Lu %Lu\n",
				nr, count,
				count_verify[nr]), exit(1);
		}
	}

	if (uffd_test_ops->release_pages(area_dst))
		return 1;

	for (nr = 0; nr < nr_pages; nr++) {
		if (my_bcmp(area_dst + nr * page_size, zeropage, page_size))
			fprintf(stderr, "nr %lu is not zero\n", nr), exit(1);
	}

	return 0;
}

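/*
 * Retry a UFFDIO_ZEROPAGE on a range that was just zero-filled: the
 * ioctl must fail and report -EEXIST in uffdio_zeropage.zeropage.
 */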
static void retry_uffdio_zeropage(int ufd,
				  struct uffdio_zeropage *uffdio_zeropage,
				  unsigned long offset)
{
	uffd_test_ops->alias_mapping(&uffdio_zeropage->range.start,
				     uffdio_zeropage->range.len,
				     offset);
	if (ioctl(ufd, UFFDIO_ZEROPAGE, uffdio_zeropage)) {
		if (uffdio_zeropage->zeropage != -EEXIST)
			fprintf(stderr, "UFFDIO_ZEROPAGE retry error %Ld\n",
				uffdio_zeropage->zeropage), exit(1);
	} else {
		fprintf(stderr, "UFFDIO_ZEROPAGE retry unexpected %Ld\n",
			uffdio_zeropage->zeropage), exit(1);
	}
}

static int __uffdio_zeropage(int ufd, unsigned long offset, bool retry)
{
	struct uffdio_zeropage uffdio_zeropage;
	int ret;
	unsigned long has_zeropage;

	has_zeropage = uffd_test_ops->expected_ioctls & (1 << _UFFDIO_ZEROPAGE);

	if (offset >= nr_pages * page_size)
		fprintf(stderr, "unexpected offset %lu\n",
			offset), exit(1);
	uffdio_zeropage.range.start = (unsigned long) area_dst + offset;
	uffdio_zeropage.range.len = page_size;
	uffdio_zeropage.mode = 0;
	ret = ioctl(ufd, UFFDIO_ZEROPAGE, &uffdio_zeropage);
	if (ret) {
		/* real retval in uffdio_zeropage.zeropage */
		if (has_zeropage) {
			if (uffdio_zeropage.zeropage == -EEXIST)
				fprintf(stderr, "UFFDIO_ZEROPAGE -EEXIST\n"),
					exit(1);
			else
				fprintf(stderr, "UFFDIO_ZEROPAGE error %Ld\n",
					uffdio_zeropage.zeropage), exit(1);
		} else {
			if (uffdio_zeropage.zeropage != -EINVAL)
				fprintf(stderr,
					"UFFDIO_ZEROPAGE not -EINVAL %Ld\n",
					uffdio_zeropage.zeropage), exit(1);
		}
	} else if (has_zeropage) {
		if (uffdio_zeropage.zeropage != page_size) {
			fprintf(stderr, "UFFDIO_ZEROPAGE unexpected %Ld\n",
				uffdio_zeropage.zeropage), exit(1);
		} else {
			if (test_uffdio_zeropage_eexist && retry) {
				test_uffdio_zeropage_eexist = false;
				retry_uffdio_zeropage(ufd, &uffdio_zeropage,
						      offset);
			}
			return 1;
		}
	} else {
		fprintf(stderr,
			"UFFDIO_ZEROPAGE succeeded %Ld\n",
			uffdio_zeropage.zeropage), exit(1);
	}

	return 0;
}

static int uffdio_zeropage(int ufd, unsigned long offset)
{
	return __uffdio_zeropage(ufd, offset, false);
}

/* exercise UFFDIO_ZEROPAGE */
static int userfaultfd_zeropage_test(void)
{
	struct uffdio_register uffdio_register;
	unsigned long expected_ioctls;

	printf("testing UFFDIO_ZEROPAGE: ");
	fflush(stdout);

	if (uffd_test_ops->release_pages(area_dst))
		return 1;

	if (userfaultfd_open(0) < 0)
		return 1;
	uffdio_register.range.start = (unsigned long) area_dst;
	uffdio_register.range.len = nr_pages * page_size;
	uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
	if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
		fprintf(stderr, "register failure\n"), exit(1);

	expected_ioctls = uffd_test_ops->expected_ioctls;
	if ((uffdio_register.ioctls & expected_ioctls) !=
	    expected_ioctls)
		fprintf(stderr,
			"unexpected missing ioctl for anon memory\n"),
			exit(1);

	if (uffdio_zeropage(uffd, 0)) {
		if (my_bcmp(area_dst, zeropage, page_size))
			fprintf(stderr, "zeropage is not zero\n"), exit(1);
	}

	close(uffd);
	printf("done.\n");
	return 0;
}

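/*
 * Exercise the non-cooperative events: fork a child that faults into
 * the monitored area, mremaps it and finally releases it, while
 * uffd_poll_thread resolves the faults and consumes the FORK, REMAP
 * and REMOVE events.
 */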
static int userfaultfd_events_test(void)
{
	struct uffdio_register uffdio_register;
	unsigned long expected_ioctls;
	unsigned long userfaults;
	pthread_t uffd_mon;
	int err, features;
	pid_t pid;
	char c;

	printf("testing events (fork, remap, remove): ");
	fflush(stdout);

	if (uffd_test_ops->release_pages(area_dst))
		return 1;

	features = UFFD_FEATURE_EVENT_FORK | UFFD_FEATURE_EVENT_REMAP |
		UFFD_FEATURE_EVENT_REMOVE;
	if (userfaultfd_open(features) < 0)
		return 1;
	fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);

	uffdio_register.range.start = (unsigned long) area_dst;
	uffdio_register.range.len = nr_pages * page_size;
	uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
	if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
		fprintf(stderr, "register failure\n"), exit(1);

	expected_ioctls = uffd_test_ops->expected_ioctls;
	if ((uffdio_register.ioctls & expected_ioctls) !=
	    expected_ioctls)
		fprintf(stderr,
			"unexpected missing ioctl for anon memory\n"),
			exit(1);

	if (pthread_create(&uffd_mon, &attr, uffd_poll_thread, NULL))
		perror("uffd_poll_thread create"), exit(1);

	pid = fork();
	if (pid < 0)
		perror("fork"), exit(1);

	if (!pid)
		return faulting_process(0);

	waitpid(pid, &err, 0);
	if (err)
		fprintf(stderr, "faulting process failed\n"), exit(1);

	if (write(pipefd[1], &c, sizeof(c)) != sizeof(c))
		perror("pipe write"), exit(1);
	if (pthread_join(uffd_mon, (void **)&userfaults))
		return 1;

	close(uffd);
	printf("userfaults: %ld\n", userfaults);

	return userfaults != nr_pages;
}

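/*
 * Exercise UFFD_FEATURE_SIGBUS: faults on the monitored area must
 * raise SIGBUS instead of being reported through the uffd, both in
 * this process and in a forked child, and the monitor thread must
 * observe no userfault events at all.
 */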
static int userfaultfd_sig_test(void)
{
	struct uffdio_register uffdio_register;
	unsigned long expected_ioctls;
	unsigned long userfaults;
	pthread_t uffd_mon;
	int err, features;
	pid_t pid;
	char c;

	printf("testing signal delivery: ");
	fflush(stdout);

	if (uffd_test_ops->release_pages(area_dst))
		return 1;

	features = UFFD_FEATURE_EVENT_FORK|UFFD_FEATURE_SIGBUS;
	if (userfaultfd_open(features) < 0)
		return 1;
	fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);

	uffdio_register.range.start = (unsigned long) area_dst;
	uffdio_register.range.len = nr_pages * page_size;
	uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
	if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
		fprintf(stderr, "register failure\n"), exit(1);

	expected_ioctls = uffd_test_ops->expected_ioctls;
	if ((uffdio_register.ioctls & expected_ioctls) !=
	    expected_ioctls)
		fprintf(stderr,
			"unexpected missing ioctl for anon memory\n"),
			exit(1);

	if (faulting_process(1))
		fprintf(stderr, "faulting process failed\n"), exit(1);

	if (uffd_test_ops->release_pages(area_dst))
		return 1;

	if (pthread_create(&uffd_mon, &attr, uffd_poll_thread, NULL))
		perror("uffd_poll_thread create"), exit(1);

	pid = fork();
	if (pid < 0)
		perror("fork"), exit(1);

	if (!pid)
		exit(faulting_process(2));

	waitpid(pid, &err, 0);
	if (err)
		fprintf(stderr, "faulting process failed\n"), exit(1);

	if (write(pipefd[1], &c, sizeof(c)) != sizeof(c))
		perror("pipe write"), exit(1);
	if (pthread_join(uffd_mon, (void **)&userfaults))
		return 1;

	printf("done.\n");
	if (userfaults)
		fprintf(stderr, "Signal test failed, userfaults: %ld\n",
			userfaults);
	close(uffd);
	return userfaults != 0;
}

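/*
 * Main stress driver: set up the two areas, the verification array
 * and the zeropage, run the requested number of bounces (swapping
 * area_src and area_dst after each one), then run the remaining
 * functional tests.
 */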
static int userfaultfd_stress(void)
{
	void *area;
	char *tmp_area;
	unsigned long nr;
	struct uffdio_register uffdio_register;
	unsigned long cpu;
	int err;
	unsigned long userfaults[nr_cpus];

	uffd_test_ops->allocate_area((void **)&area_src);
	if (!area_src)
		return 1;
	uffd_test_ops->allocate_area((void **)&area_dst);
	if (!area_dst)
		return 1;

	if (userfaultfd_open(0) < 0)
		return 1;

	count_verify = malloc(nr_pages * sizeof(unsigned long long));
	if (!count_verify) {
		perror("count_verify");
		return 1;
	}

	for (nr = 0; nr < nr_pages; nr++) {
		*area_mutex(area_src, nr) = (pthread_mutex_t)
			PTHREAD_MUTEX_INITIALIZER;
		count_verify[nr] = *area_count(area_src, nr) = 1;
		/*
		 * In the transition from 255 to 256, powerpc will
		 * read out of order in my_bcmp and see both bytes as
		 * zero, so leave a placeholder below, always non-zero,
		 * after the count, to avoid my_bcmp triggering false
		 * positives.
		 */
		*(area_count(area_src, nr) + 1) = 1;
	}

	pipefd = malloc(sizeof(int) * nr_cpus * 2);
	if (!pipefd) {
		perror("pipefd");
		return 1;
	}
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		if (pipe2(&pipefd[cpu*2], O_CLOEXEC | O_NONBLOCK)) {
			perror("pipe");
			return 1;
		}
	}

	if (posix_memalign(&area, page_size, page_size)) {
		fprintf(stderr, "out of memory\n");
		return 1;
	}
	zeropage = area;
	bzero(zeropage, page_size);

	pthread_mutex_lock(&uffd_read_mutex);

	pthread_attr_init(&attr);
	pthread_attr_setstacksize(&attr, 16*1024*1024);

	err = 0;
	while (bounces--) {
		unsigned long expected_ioctls;

		printf("bounces: %d, mode:", bounces);
		if (bounces & BOUNCE_RANDOM)
			printf(" rnd");
		if (bounces & BOUNCE_RACINGFAULTS)
			printf(" racing");
		if (bounces & BOUNCE_VERIFY)
			printf(" ver");
		if (bounces & BOUNCE_POLL)
			printf(" poll");
		printf(", ");
		fflush(stdout);

		if (bounces & BOUNCE_POLL)
			fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
		else
			fcntl(uffd, F_SETFL, uffd_flags & ~O_NONBLOCK);

		/* register */
		uffdio_register.range.start = (unsigned long) area_dst;
		uffdio_register.range.len = nr_pages * page_size;
		uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
		if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register)) {
			fprintf(stderr, "register failure\n");
			return 1;
		}
		expected_ioctls = uffd_test_ops->expected_ioctls;
		if ((uffdio_register.ioctls & expected_ioctls) !=
		    expected_ioctls) {
			fprintf(stderr,
				"unexpected missing ioctl for anon memory\n");
			return 1;
		}

		if (area_dst_alias) {
			uffdio_register.range.start = (unsigned long)
				area_dst_alias;
			if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register)) {
				fprintf(stderr, "register failure alias\n");
				return 1;
			}
		}

		/*
		 * The madvise done previously isn't enough: some
		 * uffd_thread could have read userfaults (one of
		 * those already resolved by the background thread)
		 * and it may be in the process of calling
		 * UFFDIO_COPY. UFFDIO_COPY will read the zapped
		 * area_src and it would map a zero page in it (of
		 * course such a UFFDIO_COPY is perfectly safe as it'd
		 * return -EEXIST). The problem comes at the next
		 * bounce though: that racing UFFDIO_COPY would
		 * generate zeropages in the area_src, so invalidating
		 * the previous MADV_DONTNEED. Without this additional
		 * MADV_DONTNEED those zeropage leftovers in the
		 * area_src would lead to -EEXIST failure during the
		 * next bounce, effectively leaving a zeropage in the
		 * area_dst.
		 *
		 * Try commenting out this madvise to see the memory
		 * corruption being caught pretty quickly.
		 *
		 * khugepaged is also only inhibited from collapsing
		 * THP after the UFFDIO_REGISTER, so the MADV_DONTNEED
		 * must be repeated here, after registering.
		 */
		if (uffd_test_ops->release_pages(area_dst))
			return 1;

		/* bounce pass */
		if (stress(userfaults))
			return 1;

		/* unregister */
		if (ioctl(uffd, UFFDIO_UNREGISTER, &uffdio_register.range)) {
			fprintf(stderr, "unregister failure\n");
			return 1;
		}
		if (area_dst_alias) {
			uffdio_register.range.start = (unsigned long) area_dst;
			if (ioctl(uffd, UFFDIO_UNREGISTER,
				  &uffdio_register.range)) {
				fprintf(stderr, "unregister failure alias\n");
				return 1;
			}
		}

		/* verification */
		if (bounces & BOUNCE_VERIFY) {
			for (nr = 0; nr < nr_pages; nr++) {
				if (*area_count(area_dst, nr) != count_verify[nr]) {
					fprintf(stderr,
						"error area_count %Lu %Lu %lu\n",
						*area_count(area_dst, nr),
						count_verify[nr],
						nr);
					err = 1;
					bounces = 0;
				}
			}
		}

		/* prepare next bounce */
		tmp_area = area_src;
		area_src = area_dst;
		area_dst = tmp_area;

		tmp_area = area_src_alias;
		area_src_alias = area_dst_alias;
		area_dst_alias = tmp_area;

		printf("userfaults:");
		for (cpu = 0; cpu < nr_cpus; cpu++)
			printf(" %lu", userfaults[cpu]);
		printf("\n");
	}

	if (err)
		return err;

	close(uffd);
	return userfaultfd_zeropage_test() || userfaultfd_sig_test()
		|| userfaultfd_events_test();
}

/*
 * Copied from mlock2-tests.c
 */
unsigned long default_huge_page_size(void)
{
	unsigned long hps = 0;
	char *line = NULL;
	size_t linelen = 0;
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return 0;
	while (getline(&line, &linelen, f) > 0) {
		if (sscanf(line, "Hugepagesize: %lu kB", &hps) == 1) {
			hps <<= 10;
			break;
		}
	}

	free(line);
	fclose(f);
	return hps;
}

static void set_test_type(const char *type)
{
	if (!strcmp(type, "anon")) {
		test_type = TEST_ANON;
		uffd_test_ops = &anon_uffd_test_ops;
	} else if (!strcmp(type, "hugetlb")) {
		test_type = TEST_HUGETLB;
		uffd_test_ops = &hugetlb_uffd_test_ops;
	} else if (!strcmp(type, "hugetlb_shared")) {
		map_shared = true;
		test_type = TEST_HUGETLB;
		uffd_test_ops = &hugetlb_uffd_test_ops;
	} else if (!strcmp(type, "shmem")) {
		map_shared = true;
		test_type = TEST_SHMEM;
		uffd_test_ops = &shmem_uffd_test_ops;
	} else {
		fprintf(stderr, "Unknown test type: %s\n", type), exit(1);
	}

	if (test_type == TEST_HUGETLB)
		page_size = default_huge_page_size();
	else
		page_size = sysconf(_SC_PAGE_SIZE);

	if (!page_size)
		fprintf(stderr, "Unable to determine page size\n"),
				exit(2);
	if ((unsigned long) area_count(NULL, 0) + sizeof(unsigned long long) * 2
	    > page_size)
		fprintf(stderr, "Impossible to run this test\n"), exit(2);
}

static void sigalrm(int sig)
{
	if (sig != SIGALRM)
		abort();
	test_uffdio_copy_eexist = true;
	test_uffdio_zeropage_eexist = true;
	alarm(ALARM_INTERVAL_SECS);
}

int main(int argc, char **argv)
{
	if (argc < 4)
		usage();

	if (signal(SIGALRM, sigalrm) == SIG_ERR)
		fprintf(stderr, "failed to arm SIGALRM"), exit(1);
	alarm(ALARM_INTERVAL_SECS);

	set_test_type(argv[1]);

	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	nr_pages_per_cpu = atol(argv[2]) * 1024*1024 / page_size /
		nr_cpus;
	if (!nr_pages_per_cpu) {
		fprintf(stderr, "invalid MiB\n");
		usage();
	}

	bounces = atoi(argv[3]);
	if (bounces <= 0) {
		fprintf(stderr, "invalid bounces\n");
		usage();
	}
	nr_pages = nr_pages_per_cpu * nr_cpus;

	if (test_type == TEST_HUGETLB) {
		if (argc < 5)
			usage();
		huge_fd = open(argv[4], O_CREAT | O_RDWR, 0755);
		if (huge_fd < 0) {
			fprintf(stderr, "Open of %s failed", argv[4]);
			perror("open");
			exit(1);
		}
		if (ftruncate(huge_fd, 0)) {
			fprintf(stderr, "ftruncate %s to size 0 failed", argv[4]);
			perror("ftruncate");
			exit(1);
		}
	}
	printf("nr_pages: %lu, nr_pages_per_cpu: %lu\n",
	       nr_pages, nr_pages_per_cpu);
	return userfaultfd_stress();
}

#else /* __NR_userfaultfd */

#warning "missing __NR_userfaultfd definition"

int main(void)
{
	printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n");
	return KSFT_SKIP;
}

#endif /* __NR_userfaultfd */