Source: Linux kernel mirror (for testing) — git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux
File: tools/testing/selftests/vm/hmm-tests.c
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * HMM stands for Heterogeneous Memory Management, it is a helper layer inside
4 * the linux kernel to help device drivers mirror a process address space in
5 * the device. This allows the device to use the same address space which
6 * makes communication and data exchange a lot easier.
7 *
8 * This framework's sole purpose is to exercise various code paths inside
9 * the kernel to make sure that HMM performs as expected and to flush out any
10 * bugs.
11 */
12
13#include "../kselftest_harness.h"
14
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <time.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
28
29#include "./local_config.h"
30#ifdef LOCAL_CONFIG_HAVE_LIBHUGETLBFS
31#include <hugetlbfs.h>
32#endif
33
34/*
35 * This is a private UAPI to the kernel test module so it isn't exported
36 * in the usual include/uapi/... directory.
37 */
38#include "../../../../lib/test_hmm_uapi.h"
39#include "../../../../mm/gup_test.h"
40
/*
 * One test allocation: a CPU-visible mapping (ptr) plus a same-sized
 * staging area (mirror) used to exchange data with the dmirror device.
 */
struct hmm_buffer {
	void *ptr;		/* CPU mapping (mmap), may be NULL after unmap */
	void *mirror;		/* device-side staging copy (malloc) */
	unsigned long size;	/* size of both areas in bytes */
	int fd;			/* backing file descriptor, or -1 for anonymous */
	uint64_t cpages;	/* pages processed by the last dmirror command */
	uint64_t faults;	/* faults reported by the last dmirror command */
};
49
/*
 * Device instances exposed by the test_hmm kernel module as
 * /dev/hmm_dmirror<N>: two DEVICE_PRIVATE mirrors followed by two
 * DEVICE_COHERENT mirrors (hmm_is_coherent_type() relies on this order).
 */
enum {
	HMM_PRIVATE_DEVICE_ONE,
	HMM_PRIVATE_DEVICE_TWO,
	HMM_COHERENCE_DEVICE_ONE,
	HMM_COHERENCE_DEVICE_TWO,
};
56
#define TWOMEG		(1 << 21)	/* 2 MiB: one PMD-sized huge page */
#define HMM_BUFFER_SIZE (1024 << 12)	/* default test buffer: 4 MiB */
#define HMM_PATH_MAX    64
#define NTIMES          10		/* iterations for the *_multiple tests */

/*
 * Round x up to the next multiple of a (a must be a power of two).
 * Both arguments are fully parenthesized so that expression arguments
 * (e.g. a conditional) expand correctly.
 */
#define ALIGN(x, a) (((x) + ((a) - 1)) & (~((a) - 1)))
/* Just the flags we need, copied from mm.h: */
#define FOLL_WRITE	0x01	/* check pte is writable */
#define FOLL_LONGTERM	0x10000 /* mapping lifetime is indefinite */
66
/* Per-test state for single-device tests: one open mirror device. */
FIXTURE(hmm)
{
	int fd;			/* open /dev/hmm_dmirror<N> */
	unsigned int page_size;
	unsigned int page_shift;
};

/* Each variant selects which mirror device the fixture opens. */
FIXTURE_VARIANT(hmm)
{
	int device_number;
};

FIXTURE_VARIANT_ADD(hmm, hmm_device_private)
{
	.device_number = HMM_PRIVATE_DEVICE_ONE,
};

FIXTURE_VARIANT_ADD(hmm, hmm_device_coherent)
{
	.device_number = HMM_COHERENCE_DEVICE_ONE,
};

/* Per-test state for cross-device tests: two open mirror devices. */
FIXTURE(hmm2)
{
	int fd0;
	int fd1;
	unsigned int page_size;
	unsigned int page_shift;
};

FIXTURE_VARIANT(hmm2)
{
	int device_number0;
	int device_number1;
};

FIXTURE_VARIANT_ADD(hmm2, hmm2_device_private)
{
	.device_number0 = HMM_PRIVATE_DEVICE_ONE,
	.device_number1 = HMM_PRIVATE_DEVICE_TWO,
};

FIXTURE_VARIANT_ADD(hmm2, hmm2_device_coherent)
{
	.device_number0 = HMM_COHERENCE_DEVICE_ONE,
	.device_number1 = HMM_COHERENCE_DEVICE_TWO,
};
114
115static int hmm_open(int unit)
116{
117 char pathname[HMM_PATH_MAX];
118 int fd;
119
120 snprintf(pathname, sizeof(pathname), "/dev/hmm_dmirror%d", unit);
121 fd = open(pathname, O_RDWR, 0);
122 if (fd < 0)
123 fprintf(stderr, "could not open hmm dmirror driver (%s)\n",
124 pathname);
125 return fd;
126}
127
128static bool hmm_is_coherent_type(int dev_num)
129{
130 return (dev_num >= HMM_COHERENCE_DEVICE_ONE);
131}
132
/* Open the variant's mirror device; skip (not fail) when a coherent
 * device is not configured in the running kernel.
 */
FIXTURE_SETUP(hmm)
{
	self->page_size = sysconf(_SC_PAGE_SIZE);
	/* page_size is a power of two, so ffs() gives the shift + 1. */
	self->page_shift = ffs(self->page_size) - 1;

	self->fd = hmm_open(variant->device_number);
	if (self->fd < 0 && hmm_is_coherent_type(variant->device_number))
		SKIP(exit(0), "DEVICE_COHERENT not available");
	ASSERT_GE(self->fd, 0);
}
143
/* Open both mirror devices for cross-device tests. Only the first open
 * is used to detect a missing DEVICE_COHERENT configuration; if it
 * succeeds, the matching second device is expected to exist as well.
 */
FIXTURE_SETUP(hmm2)
{
	self->page_size = sysconf(_SC_PAGE_SIZE);
	self->page_shift = ffs(self->page_size) - 1;

	self->fd0 = hmm_open(variant->device_number0);
	if (self->fd0 < 0 && hmm_is_coherent_type(variant->device_number0))
		SKIP(exit(0), "DEVICE_COHERENT not available");
	ASSERT_GE(self->fd0, 0);
	self->fd1 = hmm_open(variant->device_number1);
	ASSERT_GE(self->fd1, 0);
}
156
/* Close the mirror device and poison the fd so accidental reuse fails. */
FIXTURE_TEARDOWN(hmm)
{
	int ret = close(self->fd);

	ASSERT_EQ(ret, 0);
	self->fd = -1;
}
164
/* Close both mirror devices and poison the fds so accidental reuse fails. */
FIXTURE_TEARDOWN(hmm2)
{
	int ret = close(self->fd0);

	ASSERT_EQ(ret, 0);
	self->fd0 = -1;

	ret = close(self->fd1);
	ASSERT_EQ(ret, 0);
	self->fd1 = -1;
}
176
/*
 * Issue one dmirror ioctl @request against device @fd, retrying while
 * the call is interrupted by a signal (EINTR).
 *
 * On success, copies the per-command results (pages processed, faults
 * taken) back into @buffer and returns 0; otherwise returns -errno.
 */
static int hmm_dmirror_cmd(int fd,
			   unsigned long request,
			   struct hmm_buffer *buffer,
			   unsigned long npages)
{
	struct hmm_dmirror_cmd cmd;
	int ret;

	/* Simulate a device reading system memory. */
	cmd.addr = (__u64)buffer->ptr;
	cmd.ptr = (__u64)buffer->mirror;
	cmd.npages = npages;

	for (;;) {
		ret = ioctl(fd, request, &cmd);
		if (ret == 0)
			break;
		if (errno == EINTR)
			continue;	/* interrupted by a signal: retry */
		return -errno;
	}
	/* Results the tests assert on. */
	buffer->cpages = cmd.cpages;
	buffer->faults = cmd.faults;

	return 0;
}
203
204static void hmm_buffer_free(struct hmm_buffer *buffer)
205{
206 if (buffer == NULL)
207 return;
208
209 if (buffer->ptr)
210 munmap(buffer->ptr, buffer->size);
211 free(buffer->mirror);
212 free(buffer);
213}
214
215/*
216 * Create a temporary file that will be deleted on close.
217 */
218static int hmm_create_file(unsigned long size)
219{
220 char path[HMM_PATH_MAX];
221 int fd;
222
223 strcpy(path, "/tmp");
224 fd = open(path, O_TMPFILE | O_EXCL | O_RDWR, 0600);
225 if (fd >= 0) {
226 int r;
227
228 do {
229 r = ftruncate(fd, size);
230 } while (r == -1 && errno == EINTR);
231 if (!r)
232 return fd;
233 close(fd);
234 }
235 return -1;
236}
237
/*
 * Return a random unsigned number.
 *
 * Reads from /dev/urandom, keeping the fd open across calls. Returns
 * ~0U when the device cannot be opened or read, so callers always get
 * a defined value (the original ignored the read() result and could
 * return an uninitialized value on a short or failed read).
 */
static unsigned int hmm_random(void)
{
	static int fd = -1;
	unsigned int r;

	if (fd < 0) {
		fd = open("/dev/urandom", O_RDONLY);
		if (fd < 0) {
			fprintf(stderr, "%s:%d failed to open /dev/urandom\n",
				__FILE__, __LINE__);
			return ~0U;
		}
	}
	if (read(fd, &r, sizeof(r)) != sizeof(r)) {
		fprintf(stderr, "%s:%d failed to read /dev/urandom\n",
			__FILE__, __LINE__);
		return ~0U;
	}
	return r;
}
257
/*
 * Sleep for roughly @n nanoseconds. Best effort: an early wakeup from
 * a signal is not retried, matching the original behavior. Callers
 * pass values well below one second, so tv_sec stays zero.
 */
static void hmm_nanosleep(unsigned int n)
{
	struct timespec req = {
		.tv_sec = 0,
		.tv_nsec = n,
	};

	nanosleep(&req, NULL);
}
266
/*
 * Migrate the pages backing @buffer from system memory to device
 * memory on @fd. Returns 0 or a negative errno from the ioctl.
 */
static int hmm_migrate_sys_to_dev(int fd,
				  struct hmm_buffer *buffer,
				  unsigned long npages)
{
	return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_DEV, buffer, npages);
}
273
/*
 * Migrate the pages backing @buffer from device memory back to system
 * memory. Returns 0 or a negative errno from the ioctl.
 */
static int hmm_migrate_dev_to_sys(int fd,
				  struct hmm_buffer *buffer,
				  unsigned long npages)
{
	return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_SYS, buffer, npages);
}
280
/*
 * Simple NULL test of device open/close: the fixture setup/teardown do
 * all the work, so an empty body verifies open + close succeed.
 */
TEST_F(hmm, open_close)
{
}
287
/*
 * Read private anonymous memory.
 */
TEST_F(hmm, anon_read)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int val;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/*
	 * Initialize buffer in system memory but leave the first two pages
	 * zero (pte_none and pfn_zero).
	 */
	i = 2 * self->page_size / sizeof(*ptr);
	for (ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Set buffer permission to read-only. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Populate the CPU page table with a special zero page. */
	val = *(int *)(buffer->ptr + self->page_size);
	ASSERT_EQ(val, 0);

	/* Simulate a device reading system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/*
	 * Check what the device read: the two untouched pages must read as
	 * zeroes, the rest must match what was written above.
	 */
	ptr = buffer->mirror;
	for (i = 0; i < 2 * self->page_size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], 0);
	for (; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
350
/*
 * Read private anonymous memory which has been protected with
 * mprotect() PROT_NONE.
 */
TEST_F(hmm, anon_read_prot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize mirror buffer so we can verify it isn't written. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	/* Protect buffer from reading. */
	ret = mprotect(buffer->ptr, size, PROT_NONE);
	ASSERT_EQ(ret, 0);

	/* A device read of a PROT_NONE range must fail with EFAULT. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, -EFAULT);

	/* Allow CPU to read the buffer so we can check it. */
	ret = mprotect(buffer->ptr, size, PROT_READ);
	ASSERT_EQ(ret, 0);
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* The mirror must still hold its sentinel values: nothing was read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	hmm_buffer_free(buffer);
}
410
/*
 * Write private anonymous memory.
 */
TEST_F(hmm, anon_write)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote: CPU view must match the mirror data. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
457
/*
 * Write private anonymous memory which has been protected with
 * mprotect() PROT_READ.
 */
TEST_F(hmm, anon_write_prot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Read-only mapping: device writes should be refused. */
	buffer->ptr = mmap(NULL, size,
			   PROT_READ,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Simulate a device reading a zero page of memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);
	ASSERT_EQ(buffer->faults, 1);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* A device write to a read-only range must fail with EPERM. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, -EPERM);

	/* Check what the device wrote: nothing, buffer still reads zero. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], 0);

	/* Now allow writing and see that the zero page is replaced. */
	ret = mprotect(buffer->ptr, size, PROT_WRITE | PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
523
/*
 * Check that a device writing an anonymous private mapping
 * will copy-on-write if a child process inherits the mapping.
 */
TEST_F(hmm, anon_write_child)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	pid_t pid;
	int child_fd;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer->ptr so we can tell if it is written. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	pid = fork();
	if (pid == -1)
		ASSERT_EQ(pid, 0);	/* force a failure if fork() failed */
	if (pid != 0) {
		/* Parent: wait for the child, then verify CoW isolation. */
		waitpid(pid, &ret, 0);
		ASSERT_EQ(WIFEXITED(ret), 1);

		/* Check that the parent's buffer did not change. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);
		return;
	}

	/* Child: check that we see the parent's values. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	/*
	 * The child process needs its own mirror to its own mm.
	 * NOTE(review): the child always opens device 0, not the variant's
	 * device number — confirm this is intended for coherent variants.
	 */
	child_fd = hmm_open(0);
	ASSERT_GE(child_fd, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	close(child_fd);
	exit(0);
}
601
/*
 * Check that a device writing an anonymous shared mapping
 * will not copy-on-write if a child process inherits the mapping.
 */
TEST_F(hmm, anon_write_child_shared)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	pid_t pid;
	int child_fd;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* MAP_SHARED: the child's writes must be visible to the parent. */
	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer->ptr so we can tell if it is written. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = -i;

	pid = fork();
	if (pid == -1)
		ASSERT_EQ(pid, 0);	/* force a failure if fork() failed */
	if (pid != 0) {
		/* Parent: wait for the child, then verify shared visibility. */
		waitpid(pid, &ret, 0);
		ASSERT_EQ(WIFEXITED(ret), 1);

		/* Check that the parent's buffer did change. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], -i);
		return;
	}

	/* Child: check that we see the parent's values. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	/*
	 * The child process needs its own mirror to its own mm.
	 * NOTE(review): the child always opens device 0, not the variant's
	 * device number — confirm this is intended for coherent variants.
	 */
	child_fd = hmm_open(0);
	ASSERT_GE(child_fd, 0);

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(child_fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], -i);

	close(child_fd);
	exit(0);
}
679
/*
 * Write private anonymous huge page.
 */
TEST_F(hmm, anon_write_huge)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	void *old_ptr;
	void *map;
	int *ptr;
	int ret;

	/* Map twice the huge page size so a 2MB-aligned window exists. */
	size = 2 * TWOMEG;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Work on the aligned 2MB window and ask for a transparent hugepage. */
	size = TWOMEG;
	npages = size >> self->page_shift;
	map = (void *)ALIGN((uintptr_t)buffer->ptr, size);
	ret = madvise(map, size, MADV_HUGEPAGE);
	ASSERT_EQ(ret, 0);
	old_ptr = buffer->ptr;
	buffer->ptr = map;

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Restore the original base so hmm_buffer_free() unmaps correctly. */
	buffer->ptr = old_ptr;
	hmm_buffer_free(buffer);
}
735
736#ifdef LOCAL_CONFIG_HAVE_LIBHUGETLBFS
/*
 * Write huge TLBFS page.
 */
TEST_F(hmm, anon_write_hugetlbfs)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	long pagesizes[4];
	int n, idx;

	/* Skip test if we can't allocate a hugetlbfs page. */

	n = gethugepagesizes(pagesizes, 4);
	if (n <= 0)
		SKIP(return, "Huge page size could not be determined");
	/* Pick the smallest huge page size reported. */
	for (idx = 0; --n > 0; ) {
		if (pagesizes[n] < pagesizes[idx])
			idx = n;
	}
	size = ALIGN(TWOMEG, pagesizes[idx]);
	npages = size >> self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->ptr = get_hugepage_region(size, GHR_STRICT);
	if (buffer->ptr == NULL) {
		free(buffer);
		SKIP(return, "Huge page could not be allocated");
	}

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* The region came from libhugetlbfs, so free it there, not munmap. */
	free_hugepage_region(buffer->ptr);
	buffer->ptr = NULL;
	hmm_buffer_free(buffer);
}
795#endif /* LOCAL_CONFIG_HAVE_LIBHUGETLBFS */
796
/*
 * Read mmap'ed file memory.
 */
TEST_F(hmm, file_read)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int fd;
	ssize_t len;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	fd = hmm_create_file(size);
	ASSERT_GE(fd, 0);

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = fd;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	/* Write initial contents of the file (mirror doubles as scratch). */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;
	len = pwrite(fd, buffer->mirror, size, 0);
	ASSERT_EQ(len, size);
	/* Clear the mirror so the later check proves the device filled it. */
	memset(buffer->mirror, 0, size);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ,
			   MAP_SHARED,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Simulate a device reading system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
851
/*
 * Write mmap'ed file memory.
 */
TEST_F(hmm, file_write)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	int fd;
	ssize_t len;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	fd = hmm_create_file(size);
	ASSERT_GE(fd, 0);

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = fd;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize data that the device will write to buffer->ptr. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Simulate a device writing system memory. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	ASSERT_EQ(buffer->faults, 1);

	/* Check what the device wrote through the shared mapping. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Check that the device also wrote the file. */
	len = pread(fd, buffer->mirror, size, 0);
	ASSERT_EQ(len, size);
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
909
/*
 * Migrate anonymous memory to device private memory.
 */
TEST_F(hmm, migrate)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read during the migration. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
955
/*
 * Migrate anonymous memory to device private memory and fault some of it back
 * to system memory, then try migrating the resulting mix of system and device
 * private memory to the device.
 */
TEST_F(hmm, migrate_fault)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/*
	 * Fault half the pages back to system memory and check them: the
	 * CPU reads below trigger device-to-system migration faults.
	 */
	for (i = 0, ptr = buffer->ptr; i < size / (2 * sizeof(*ptr)); ++i)
		ASSERT_EQ(ptr[i], i);

	/* Migrate the mixed system/device range to the device again. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	hmm_buffer_free(buffer);
}
1016
/*
 * Migrate anonymous shared memory to device private memory.
 * This is expected to fail: only private anonymous pages can be
 * migrated to device private memory.
 */
TEST_F(hmm, migrate_shared)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Migrating a shared mapping must be refused. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, -ENOENT);

	hmm_buffer_free(buffer);
}
1051
1052/*
1053 * Try to migrate various memory types to device private memory.
1054 */
1055TEST_F(hmm2, migrate_mixed)
1056{
1057 struct hmm_buffer *buffer;
1058 unsigned long npages;
1059 unsigned long size;
1060 int *ptr;
1061 unsigned char *p;
1062 int ret;
1063 int val;
1064
1065 npages = 6;
1066 size = npages << self->page_shift;
1067
1068 buffer = malloc(sizeof(*buffer));
1069 ASSERT_NE(buffer, NULL);
1070
1071 buffer->fd = -1;
1072 buffer->size = size;
1073 buffer->mirror = malloc(size);
1074 ASSERT_NE(buffer->mirror, NULL);
1075
1076 /* Reserve a range of addresses. */
1077 buffer->ptr = mmap(NULL, size,
1078 PROT_NONE,
1079 MAP_PRIVATE | MAP_ANONYMOUS,
1080 buffer->fd, 0);
1081 ASSERT_NE(buffer->ptr, MAP_FAILED);
1082 p = buffer->ptr;
1083
1084 /* Migrating a protected area should be an error. */
1085 ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages);
1086 ASSERT_EQ(ret, -EINVAL);
1087
1088 /* Punch a hole after the first page address. */
1089 ret = munmap(buffer->ptr + self->page_size, self->page_size);
1090 ASSERT_EQ(ret, 0);
1091
1092 /* We expect an error if the vma doesn't cover the range. */
1093 ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 3);
1094 ASSERT_EQ(ret, -EINVAL);
1095
1096 /* Page 2 will be a read-only zero page. */
1097 ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
1098 PROT_READ);
1099 ASSERT_EQ(ret, 0);
1100 ptr = (int *)(buffer->ptr + 2 * self->page_size);
1101 val = *ptr + 3;
1102 ASSERT_EQ(val, 3);
1103
1104 /* Page 3 will be read-only. */
1105 ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
1106 PROT_READ | PROT_WRITE);
1107 ASSERT_EQ(ret, 0);
1108 ptr = (int *)(buffer->ptr + 3 * self->page_size);
1109 *ptr = val;
1110 ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
1111 PROT_READ);
1112 ASSERT_EQ(ret, 0);
1113
1114 /* Page 4-5 will be read-write. */
1115 ret = mprotect(buffer->ptr + 4 * self->page_size, 2 * self->page_size,
1116 PROT_READ | PROT_WRITE);
1117 ASSERT_EQ(ret, 0);
1118 ptr = (int *)(buffer->ptr + 4 * self->page_size);
1119 *ptr = val;
1120 ptr = (int *)(buffer->ptr + 5 * self->page_size);
1121 *ptr = val;
1122
1123 /* Now try to migrate pages 2-5 to device 1. */
1124 buffer->ptr = p + 2 * self->page_size;
1125 ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 4);
1126 ASSERT_EQ(ret, 0);
1127 ASSERT_EQ(buffer->cpages, 4);
1128
1129 /* Page 5 won't be migrated to device 0 because it's on device 1. */
1130 buffer->ptr = p + 5 * self->page_size;
1131 ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1);
1132 ASSERT_EQ(ret, -ENOENT);
1133 buffer->ptr = p;
1134
1135 buffer->ptr = p;
1136 hmm_buffer_free(buffer);
1137}
1138
/*
 * Migrate anonymous memory to device memory and back to system memory
 * multiple times. In case of private zone configuration, this is done
 * through fault pages accessed by CPU. In case of coherent zone configuration,
 * the pages from the device should be explicitly migrated back to system memory.
 * The reason is Coherent device zone has coherent access by CPU, therefore
 * it will not generate any page fault.
 */
TEST_F(hmm, migrate_multiple)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	unsigned long c;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	/* Repeat the full allocate/migrate/check/free cycle NTIMES. */
	for (c = 0; c < NTIMES; c++) {
		buffer = malloc(sizeof(*buffer));
		ASSERT_NE(buffer, NULL);

		buffer->fd = -1;
		buffer->size = size;
		buffer->mirror = malloc(size);
		ASSERT_NE(buffer->mirror, NULL);

		buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS,
				   buffer->fd, 0);
		ASSERT_NE(buffer->ptr, MAP_FAILED);

		/* Initialize buffer in system memory. */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ptr[i] = i;

		/* Migrate memory to device. */
		ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
		ASSERT_EQ(ret, 0);
		ASSERT_EQ(buffer->cpages, npages);

		/* Check what the device read. */
		for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);

		/*
		 * Migrate back to system memory and check them. Coherent
		 * devices need an explicit migration; private devices
		 * migrate back via the CPU faults taken by the loop below.
		 */
		if (hmm_is_coherent_type(variant->device_number)) {
			ret = hmm_migrate_dev_to_sys(self->fd, buffer, npages);
			ASSERT_EQ(ret, 0);
			ASSERT_EQ(buffer->cpages, npages);
		}

		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ASSERT_EQ(ptr[i], i);

		hmm_buffer_free(buffer);
	}
}
1202
1203/*
1204 * Read anonymous memory multiple times.
1205 */
1206TEST_F(hmm, anon_read_multiple)
1207{
1208 struct hmm_buffer *buffer;
1209 unsigned long npages;
1210 unsigned long size;
1211 unsigned long i;
1212 unsigned long c;
1213 int *ptr;
1214 int ret;
1215
1216 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1217 ASSERT_NE(npages, 0);
1218 size = npages << self->page_shift;
1219
1220 for (c = 0; c < NTIMES; c++) {
1221 buffer = malloc(sizeof(*buffer));
1222 ASSERT_NE(buffer, NULL);
1223
1224 buffer->fd = -1;
1225 buffer->size = size;
1226 buffer->mirror = malloc(size);
1227 ASSERT_NE(buffer->mirror, NULL);
1228
1229 buffer->ptr = mmap(NULL, size,
1230 PROT_READ | PROT_WRITE,
1231 MAP_PRIVATE | MAP_ANONYMOUS,
1232 buffer->fd, 0);
1233 ASSERT_NE(buffer->ptr, MAP_FAILED);
1234
1235 /* Initialize buffer in system memory. */
1236 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1237 ptr[i] = i + c;
1238
1239 /* Simulate a device reading system memory. */
1240 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
1241 npages);
1242 ASSERT_EQ(ret, 0);
1243 ASSERT_EQ(buffer->cpages, npages);
1244 ASSERT_EQ(buffer->faults, 1);
1245
1246 /* Check what the device read. */
1247 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1248 ASSERT_EQ(ptr[i], i + c);
1249
1250 hmm_buffer_free(buffer);
1251 }
1252}
1253
1254void *unmap_buffer(void *p)
1255{
1256 struct hmm_buffer *buffer = p;
1257
1258 /* Delay for a bit and then unmap buffer while it is being read. */
1259 hmm_nanosleep(hmm_random() % 32000);
1260 munmap(buffer->ptr + buffer->size / 2, buffer->size / 2);
1261 buffer->ptr = NULL;
1262
1263 return NULL;
1264}
1265
1266/*
1267 * Try reading anonymous memory while it is being unmapped.
1268 */
TEST_F(hmm, anon_teardown)
{
	unsigned long npages;
	unsigned long size;
	unsigned long c;
	void *ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	/* Repeat to give the racing unmap different timing windows. */
	for (c = 0; c < NTIMES; ++c) {
		pthread_t thread;
		struct hmm_buffer *buffer;
		unsigned long i;
		int *ptr;
		int rc;

		buffer = malloc(sizeof(*buffer));
		ASSERT_NE(buffer, NULL);

		buffer->fd = -1;
		buffer->size = size;
		buffer->mirror = malloc(size);
		ASSERT_NE(buffer->mirror, NULL);

		buffer->ptr = mmap(NULL, size,
				   PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS,
				   buffer->fd, 0);
		ASSERT_NE(buffer->ptr, MAP_FAILED);

		/* Initialize buffer in system memory (pattern varies per pass). */
		for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
			ptr[i] = i + c;

		/*
		 * unmap_buffer() sleeps a random amount, then unmaps the top
		 * half of the range and NULLs buffer->ptr, racing the read
		 * below on purpose.
		 */
		rc = pthread_create(&thread, NULL, unmap_buffer, buffer);
		ASSERT_EQ(rc, 0);

		/* Simulate a device reading system memory. */
		rc = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer,
				     npages);
		/*
		 * The read may legitimately fail if the unmap won the race;
		 * only validate the mirror contents when it succeeded.
		 */
		if (rc == 0) {
			ASSERT_EQ(buffer->cpages, npages);
			ASSERT_EQ(buffer->faults, 1);

			/* Check what the device read. */
			for (i = 0, ptr = buffer->mirror;
			     i < size / sizeof(*ptr);
			     ++i)
				ASSERT_EQ(ptr[i], i + c);
		}

		/* ret (the thread's NULL return) is intentionally unused. */
		pthread_join(thread, &ret);
		/*
		 * NOTE(review): unmap_buffer() sets buffer->ptr = NULL after
		 * unmapping only the top half — presumably hmm_buffer_free()
		 * tolerates a NULL ptr; the bottom half mapping is left to
		 * process teardown. Verify against hmm_buffer_free().
		 */
		hmm_buffer_free(buffer);
	}
}
1326
1327/*
1328 * Test memory snapshot without faulting in pages accessed by the device.
1329 */
TEST_F(hmm, mixedmap)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned char *m;
	int ret;

	npages = 1;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	/* Snapshot output is one permission byte per page, hence npages. */
	buffer->mirror = malloc(npages);
	ASSERT_NE(buffer->mirror, NULL);


	/*
	 * Reserve a range of addresses by mapping the test device file
	 * itself (self->fd), not anonymous memory — presumably this is what
	 * produces the "mixed map" PTE this test is named after; confirm
	 * against the test_hmm driver's mmap implementation.
	 */
	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE,
			   self->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Simulate a device snapshotting CPU pagetables. */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device saw: readable, but not write-faulted. */
	m = buffer->mirror;
	ASSERT_EQ(m[0], HMM_DMIRROR_PROT_READ);

	hmm_buffer_free(buffer);
}
1368
1369/*
1370 * Test memory snapshot without faulting in pages accessed by the device.
1371 */
TEST_F(hmm2, snapshot)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	int *ptr;
	unsigned char *p;
	unsigned char *m;
	int ret;
	int val;

	/* Seven pages, each set up with a different mapping state below. */
	npages = 7;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	/* Snapshot results are one permission byte per page. */
	buffer->mirror = malloc(npages);
	ASSERT_NE(buffer->mirror, NULL);

	/* Reserve a range of addresses; PROT_NONE so page 0 stays inaccessible. */
	buffer->ptr = mmap(NULL, size,
			   PROT_NONE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);
	p = buffer->ptr;

	/* Punch a hole after the first page address (page 1 is unmapped). */
	ret = munmap(buffer->ptr + self->page_size, self->page_size);
	ASSERT_EQ(ret, 0);

	/* Page 2 will be read-only zero page. */
	ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,
		       PROT_READ);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 2 * self->page_size);
	/* Read fault maps the shared zero page; it must read as 0. */
	val = *ptr + 3;
	ASSERT_EQ(val, 3);

	/* Page 3 will be read-only (faulted in writable first, then downgraded). */
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
		       PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 3 * self->page_size);
	*ptr = val;
	ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,
		       PROT_READ);
	ASSERT_EQ(ret, 0);

	/* Page 4-6 will be read-write. */
	ret = mprotect(buffer->ptr + 4 * self->page_size, 3 * self->page_size,
		       PROT_READ | PROT_WRITE);
	ASSERT_EQ(ret, 0);
	ptr = (int *)(buffer->ptr + 4 * self->page_size);
	*ptr = val;

	/* Page 5 will be migrated to device 0. */
	buffer->ptr = p + 5 * self->page_size;
	ret = hmm_migrate_sys_to_dev(self->fd0, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);

	/* Page 6 will be migrated to device 1. */
	buffer->ptr = p + 6 * self->page_size;
	ret = hmm_migrate_sys_to_dev(self->fd1, buffer, 1);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, 1);

	/* Simulate device 0 snapshotting the CPU pagetables for all 7 pages. */
	buffer->ptr = p;
	ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device saw. */
	m = buffer->mirror;
	ASSERT_EQ(m[0], HMM_DMIRROR_PROT_ERROR);	/* PROT_NONE page */
	ASSERT_EQ(m[1], HMM_DMIRROR_PROT_ERROR);	/* unmapped hole */
	ASSERT_EQ(m[2], HMM_DMIRROR_PROT_ZERO | HMM_DMIRROR_PROT_READ);
	ASSERT_EQ(m[3], HMM_DMIRROR_PROT_READ);
	ASSERT_EQ(m[4], HMM_DMIRROR_PROT_WRITE);
	if (!hmm_is_coherent_type(variant->device_number0)) {
		/* Device-private: own page visible, device 1's page is not. */
		ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_PRIVATE_LOCAL |
				HMM_DMIRROR_PROT_WRITE);
		ASSERT_EQ(m[6], HMM_DMIRROR_PROT_NONE);
	} else {
		/* Device-coherent: both local and remote device pages visible. */
		ASSERT_EQ(m[5], HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL |
				HMM_DMIRROR_PROT_WRITE);
		ASSERT_EQ(m[6], HMM_DMIRROR_PROT_DEV_COHERENT_REMOTE |
				HMM_DMIRROR_PROT_WRITE);
	}

	hmm_buffer_free(buffer);
}
1469
1470#ifdef LOCAL_CONFIG_HAVE_LIBHUGETLBFS
1471/*
1472 * Test the hmm_range_fault() HMM_PFN_PMD flag for large pages that
1473 * should be mapped by a large page table entry.
1474 */
1475TEST_F(hmm, compound)
1476{
1477 struct hmm_buffer *buffer;
1478 unsigned long npages;
1479 unsigned long size;
1480 int *ptr;
1481 unsigned char *m;
1482 int ret;
1483 long pagesizes[4];
1484 int n, idx;
1485 unsigned long i;
1486
1487 /* Skip test if we can't allocate a hugetlbfs page. */
1488
1489 n = gethugepagesizes(pagesizes, 4);
1490 if (n <= 0)
1491 return;
1492 for (idx = 0; --n > 0; ) {
1493 if (pagesizes[n] < pagesizes[idx])
1494 idx = n;
1495 }
1496 size = ALIGN(TWOMEG, pagesizes[idx]);
1497 npages = size >> self->page_shift;
1498
1499 buffer = malloc(sizeof(*buffer));
1500 ASSERT_NE(buffer, NULL);
1501
1502 buffer->ptr = get_hugepage_region(size, GHR_STRICT);
1503 if (buffer->ptr == NULL) {
1504 free(buffer);
1505 return;
1506 }
1507
1508 buffer->size = size;
1509 buffer->mirror = malloc(npages);
1510 ASSERT_NE(buffer->mirror, NULL);
1511
1512 /* Initialize the pages the device will snapshot in buffer->ptr. */
1513 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1514 ptr[i] = i;
1515
1516 /* Simulate a device snapshotting CPU pagetables. */
1517 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1518 ASSERT_EQ(ret, 0);
1519 ASSERT_EQ(buffer->cpages, npages);
1520
1521 /* Check what the device saw. */
1522 m = buffer->mirror;
1523 for (i = 0; i < npages; ++i)
1524 ASSERT_EQ(m[i], HMM_DMIRROR_PROT_WRITE |
1525 HMM_DMIRROR_PROT_PMD);
1526
1527 /* Make the region read-only. */
1528 ret = mprotect(buffer->ptr, size, PROT_READ);
1529 ASSERT_EQ(ret, 0);
1530
1531 /* Simulate a device snapshotting CPU pagetables. */
1532 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1533 ASSERT_EQ(ret, 0);
1534 ASSERT_EQ(buffer->cpages, npages);
1535
1536 /* Check what the device saw. */
1537 m = buffer->mirror;
1538 for (i = 0; i < npages; ++i)
1539 ASSERT_EQ(m[i], HMM_DMIRROR_PROT_READ |
1540 HMM_DMIRROR_PROT_PMD);
1541
1542 free_hugepage_region(buffer->ptr);
1543 buffer->ptr = NULL;
1544 hmm_buffer_free(buffer);
1545}
1546#endif /* LOCAL_CONFIG_HAVE_LIBHUGETLBFS */
1547
1548/*
1549 * Test two devices reading the same memory (double mapped).
1550 */
1551TEST_F(hmm2, double_map)
1552{
1553 struct hmm_buffer *buffer;
1554 unsigned long npages;
1555 unsigned long size;
1556 unsigned long i;
1557 int *ptr;
1558 int ret;
1559
1560 npages = 6;
1561 size = npages << self->page_shift;
1562
1563 buffer = malloc(sizeof(*buffer));
1564 ASSERT_NE(buffer, NULL);
1565
1566 buffer->fd = -1;
1567 buffer->size = size;
1568 buffer->mirror = malloc(npages);
1569 ASSERT_NE(buffer->mirror, NULL);
1570
1571 /* Reserve a range of addresses. */
1572 buffer->ptr = mmap(NULL, size,
1573 PROT_READ | PROT_WRITE,
1574 MAP_PRIVATE | MAP_ANONYMOUS,
1575 buffer->fd, 0);
1576 ASSERT_NE(buffer->ptr, MAP_FAILED);
1577
1578 /* Initialize buffer in system memory. */
1579 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1580 ptr[i] = i;
1581
1582 /* Make region read-only. */
1583 ret = mprotect(buffer->ptr, size, PROT_READ);
1584 ASSERT_EQ(ret, 0);
1585
1586 /* Simulate device 0 reading system memory. */
1587 ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages);
1588 ASSERT_EQ(ret, 0);
1589 ASSERT_EQ(buffer->cpages, npages);
1590 ASSERT_EQ(buffer->faults, 1);
1591
1592 /* Check what the device read. */
1593 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1594 ASSERT_EQ(ptr[i], i);
1595
1596 /* Simulate device 1 reading system memory. */
1597 ret = hmm_dmirror_cmd(self->fd1, HMM_DMIRROR_READ, buffer, npages);
1598 ASSERT_EQ(ret, 0);
1599 ASSERT_EQ(buffer->cpages, npages);
1600 ASSERT_EQ(buffer->faults, 1);
1601
1602 /* Check what the device read. */
1603 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1604 ASSERT_EQ(ptr[i], i);
1605
1606 /* Migrate pages to device 1 and try to read from device 0. */
1607 ret = hmm_migrate_sys_to_dev(self->fd1, buffer, npages);
1608 ASSERT_EQ(ret, 0);
1609 ASSERT_EQ(buffer->cpages, npages);
1610
1611 ret = hmm_dmirror_cmd(self->fd0, HMM_DMIRROR_READ, buffer, npages);
1612 ASSERT_EQ(ret, 0);
1613 ASSERT_EQ(buffer->cpages, npages);
1614 ASSERT_EQ(buffer->faults, 1);
1615
1616 /* Check what device 0 read. */
1617 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1618 ASSERT_EQ(ptr[i], i);
1619
1620 hmm_buffer_free(buffer);
1621}
1622
1623/*
1624 * Basic check of exclusive faulting.
1625 */
TEST_F(hmm, exclusive)
{
	struct hmm_buffer *buffer;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;

	npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
	ASSERT_NE(npages, 0);
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Map memory exclusively for device access (atomic access). */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);

	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/*
	 * Fault pages back to system memory and check them. The CPU writes
	 * below fault on the exclusive entries and should revoke the
	 * device's exclusive access.
	 */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i]++, i);

	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i+1);

	/* Check atomic access revoked */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_CHECK_EXCLUSIVE, buffer, npages);
	ASSERT_EQ(ret, 0);

	hmm_buffer_free(buffer);
}
1679
1680TEST_F(hmm, exclusive_mprotect)
1681{
1682 struct hmm_buffer *buffer;
1683 unsigned long npages;
1684 unsigned long size;
1685 unsigned long i;
1686 int *ptr;
1687 int ret;
1688
1689 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1690 ASSERT_NE(npages, 0);
1691 size = npages << self->page_shift;
1692
1693 buffer = malloc(sizeof(*buffer));
1694 ASSERT_NE(buffer, NULL);
1695
1696 buffer->fd = -1;
1697 buffer->size = size;
1698 buffer->mirror = malloc(size);
1699 ASSERT_NE(buffer->mirror, NULL);
1700
1701 buffer->ptr = mmap(NULL, size,
1702 PROT_READ | PROT_WRITE,
1703 MAP_PRIVATE | MAP_ANONYMOUS,
1704 buffer->fd, 0);
1705 ASSERT_NE(buffer->ptr, MAP_FAILED);
1706
1707 /* Initialize buffer in system memory. */
1708 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1709 ptr[i] = i;
1710
1711 /* Map memory exclusively for device access. */
1712 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
1713 ASSERT_EQ(ret, 0);
1714 ASSERT_EQ(buffer->cpages, npages);
1715
1716 /* Check what the device read. */
1717 for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
1718 ASSERT_EQ(ptr[i], i);
1719
1720 ret = mprotect(buffer->ptr, size, PROT_READ);
1721 ASSERT_EQ(ret, 0);
1722
1723 /* Simulate a device writing system memory. */
1724 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_WRITE, buffer, npages);
1725 ASSERT_EQ(ret, -EPERM);
1726
1727 hmm_buffer_free(buffer);
1728}
1729
1730/*
1731 * Check copy-on-write works.
1732 */
1733TEST_F(hmm, exclusive_cow)
1734{
1735 struct hmm_buffer *buffer;
1736 unsigned long npages;
1737 unsigned long size;
1738 unsigned long i;
1739 int *ptr;
1740 int ret;
1741
1742 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;
1743 ASSERT_NE(npages, 0);
1744 size = npages << self->page_shift;
1745
1746 buffer = malloc(sizeof(*buffer));
1747 ASSERT_NE(buffer, NULL);
1748
1749 buffer->fd = -1;
1750 buffer->size = size;
1751 buffer->mirror = malloc(size);
1752 ASSERT_NE(buffer->mirror, NULL);
1753
1754 buffer->ptr = mmap(NULL, size,
1755 PROT_READ | PROT_WRITE,
1756 MAP_PRIVATE | MAP_ANONYMOUS,
1757 buffer->fd, 0);
1758 ASSERT_NE(buffer->ptr, MAP_FAILED);
1759
1760 /* Initialize buffer in system memory. */
1761 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1762 ptr[i] = i;
1763
1764 /* Map memory exclusively for device access. */
1765 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_EXCLUSIVE, buffer, npages);
1766 ASSERT_EQ(ret, 0);
1767 ASSERT_EQ(buffer->cpages, npages);
1768
1769 fork();
1770
1771 /* Fault pages back to system memory and check them. */
1772 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1773 ASSERT_EQ(ptr[i]++, i);
1774
1775 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1776 ASSERT_EQ(ptr[i], i+1);
1777
1778 hmm_buffer_free(buffer);
1779}
1780
1781static int gup_test_exec(int gup_fd, unsigned long addr, int cmd,
1782 int npages, int size, int flags)
1783{
1784 struct gup_test gup = {
1785 .nr_pages_per_call = npages,
1786 .addr = addr,
1787 .gup_flags = FOLL_WRITE | flags,
1788 .size = size,
1789 };
1790
1791 if (ioctl(gup_fd, cmd, &gup)) {
1792 perror("ioctl on error\n");
1793 return errno;
1794 }
1795
1796 return 0;
1797}
1798
1799/*
1800 * Test get user device pages through gup_test. Setting PIN_LONGTERM flag.
1801 * This should trigger a migration back to system memory for both, private
1802 * and coherent type pages.
1803 * This test makes use of gup_test module. Make sure GUP_TEST_CONFIG is added
1804 * to your configuration before you run it.
1805 */
TEST_F(hmm, hmm_gup_test)
{
	struct hmm_buffer *buffer;
	int gup_fd;
	unsigned long npages;
	unsigned long size;
	unsigned long i;
	int *ptr;
	int ret;
	unsigned char *m;

	gup_fd = open("/sys/kernel/debug/gup_test", O_RDWR);
	if (gup_fd == -1)
		SKIP(return, "Skipping test, could not find gup_test driver");

	/* One page per GUP variant exercised below. */
	npages = 4;
	size = npages << self->page_shift;

	buffer = malloc(sizeof(*buffer));
	ASSERT_NE(buffer, NULL);

	buffer->fd = -1;
	buffer->size = size;
	buffer->mirror = malloc(size);
	ASSERT_NE(buffer->mirror, NULL);

	buffer->ptr = mmap(NULL, size,
			   PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS,
			   buffer->fd, 0);
	ASSERT_NE(buffer->ptr, MAP_FAILED);

	/* Initialize buffer in system memory. */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ptr[i] = i;

	/* Migrate memory to device. */
	ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	/* Check what the device read. */
	for (i = 0, ptr = buffer->mirror; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	/* GUP each page with a different variant; pages 2-3 use longterm pins. */
	ASSERT_EQ(gup_test_exec(gup_fd,
				(unsigned long)buffer->ptr,
				GUP_BASIC_TEST, 1, self->page_size, 0), 0);
	ASSERT_EQ(gup_test_exec(gup_fd,
				(unsigned long)buffer->ptr + 1 * self->page_size,
				GUP_FAST_BENCHMARK, 1, self->page_size, 0), 0);
	ASSERT_EQ(gup_test_exec(gup_fd,
				(unsigned long)buffer->ptr + 2 * self->page_size,
				PIN_FAST_BENCHMARK, 1, self->page_size, FOLL_LONGTERM), 0);
	ASSERT_EQ(gup_test_exec(gup_fd,
				(unsigned long)buffer->ptr + 3 * self->page_size,
				PIN_LONGTERM_BENCHMARK, 1, self->page_size, 0), 0);

	/* Take snapshot to CPU pagetables */
	ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
	ASSERT_EQ(ret, 0);
	ASSERT_EQ(buffer->cpages, npages);
	m = buffer->mirror;
	if (hmm_is_coherent_type(variant->device_number)) {
		/* Non-longterm GUP may leave coherent pages on the device. */
		ASSERT_EQ(HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | HMM_DMIRROR_PROT_WRITE, m[0]);
		ASSERT_EQ(HMM_DMIRROR_PROT_DEV_COHERENT_LOCAL | HMM_DMIRROR_PROT_WRITE, m[1]);
	} else {
		/* GUP of device-private pages migrates them back to system. */
		ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[0]);
		ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[1]);
	}
	/* Longterm pins (pages 2-3) force migration back for both types. */
	ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[2]);
	ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[3]);
	/*
	 * Check again the content on the pages. Make sure there's no
	 * corrupted data.
	 */
	for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
		ASSERT_EQ(ptr[i], i);

	close(gup_fd);
	hmm_buffer_free(buffer);
}
1887
1888/*
1889 * Test copy-on-write in device pages.
1890 * In case of writing to COW private page(s), a page fault will migrate pages
1891 * back to system memory first. Then, these pages will be duplicated. In case
1892 * of COW device coherent type, pages are duplicated directly from device
1893 * memory.
1894 */
1895TEST_F(hmm, hmm_cow_in_device)
1896{
1897 struct hmm_buffer *buffer;
1898 unsigned long npages;
1899 unsigned long size;
1900 unsigned long i;
1901 int *ptr;
1902 int ret;
1903 unsigned char *m;
1904 pid_t pid;
1905 int status;
1906
1907 npages = 4;
1908 size = npages << self->page_shift;
1909
1910 buffer = malloc(sizeof(*buffer));
1911 ASSERT_NE(buffer, NULL);
1912
1913 buffer->fd = -1;
1914 buffer->size = size;
1915 buffer->mirror = malloc(size);
1916 ASSERT_NE(buffer->mirror, NULL);
1917
1918 buffer->ptr = mmap(NULL, size,
1919 PROT_READ | PROT_WRITE,
1920 MAP_PRIVATE | MAP_ANONYMOUS,
1921 buffer->fd, 0);
1922 ASSERT_NE(buffer->ptr, MAP_FAILED);
1923
1924 /* Initialize buffer in system memory. */
1925 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1926 ptr[i] = i;
1927
1928 /* Migrate memory to device. */
1929
1930 ret = hmm_migrate_sys_to_dev(self->fd, buffer, npages);
1931 ASSERT_EQ(ret, 0);
1932 ASSERT_EQ(buffer->cpages, npages);
1933
1934 pid = fork();
1935 if (pid == -1)
1936 ASSERT_EQ(pid, 0);
1937 if (!pid) {
1938 /* Child process waitd for SIGTERM from the parent. */
1939 while (1) {
1940 }
1941 perror("Should not reach this\n");
1942 exit(0);
1943 }
1944 /* Parent process writes to COW pages(s) and gets a
1945 * new copy in system. In case of device private pages,
1946 * this write causes a migration to system mem first.
1947 */
1948 for (i = 0, ptr = buffer->ptr; i < size / sizeof(*ptr); ++i)
1949 ptr[i] = i;
1950
1951 /* Terminate child and wait */
1952 EXPECT_EQ(0, kill(pid, SIGTERM));
1953 EXPECT_EQ(pid, waitpid(pid, &status, 0));
1954 EXPECT_NE(0, WIFSIGNALED(status));
1955 EXPECT_EQ(SIGTERM, WTERMSIG(status));
1956
1957 /* Take snapshot to CPU pagetables */
1958 ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_SNAPSHOT, buffer, npages);
1959 ASSERT_EQ(ret, 0);
1960 ASSERT_EQ(buffer->cpages, npages);
1961 m = buffer->mirror;
1962 for (i = 0; i < npages; i++)
1963 ASSERT_EQ(HMM_DMIRROR_PROT_WRITE, m[i]);
1964
1965 hmm_buffer_free(buffer);
1966}
1967TEST_HARNESS_MAIN