// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/eventfd.h>

#define __EXPORTED_HEADERS__
#include <linux/vfio.h>

#include "iommufd_utils.h"

static unsigned long HUGEPAGE_SIZE;

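/*
 * The mock iommu domain used by these tests advertises a page size of
 * half the CPU page size so that sub-page IOVA alignment is exercised.
 */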
#define MOCK_PAGE_SIZE (PAGE_SIZE / 2)

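/*
 * Read the THP PMD size from sysfs, falling back to 2MiB when the file
 * is absent or unreadable.
 */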
static unsigned long get_huge_page_size(void)
{
	char buf[80];
	int ret;
	int fd;

	fd = open("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size",
		  O_RDONLY);
	if (fd < 0)
		return 2 * 1024 * 1024;

	ret = read(fd, buf, sizeof(buf));
	close(fd);
	if (ret <= 0 || ret == sizeof(buf))
		return 2 * 1024 * 1024;
	buf[ret] = 0;
	return strtoul(buf, NULL, 10);
}

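/*
 * Runs before the harness: sizes the globals from iommufd_utils.h and
 * backs the shared test buffer with a fresh MAP_SHARED anonymous mapping
 * at a hugepage-aligned address.
 */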
static __attribute__((constructor)) void setup_sizes(void)
{
	void *vrc;
	int rc;

	PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
	HUGEPAGE_SIZE = get_huge_page_size();

	BUFFER_SIZE = PAGE_SIZE * 16;
	rc = posix_memalign(&buffer, HUGEPAGE_SIZE, BUFFER_SIZE);
	assert(!rc);
	assert(buffer);
	assert((uintptr_t)buffer % HUGEPAGE_SIZE == 0);
	vrc = mmap(buffer, BUFFER_SIZE, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	assert(vrc == buffer);
}

FIXTURE(iommufd)
{
	int fd;
};

FIXTURE_SETUP(iommufd)
{
	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);
}

FIXTURE_TEARDOWN(iommufd)
{
	teardown_iommufd(self->fd, _metadata);
}

TEST_F(iommufd, simple_close)
{
}

TEST_F(iommufd, cmd_fail)
{
	struct iommu_destroy cmd = { .size = sizeof(cmd), .id = 0 };

	/* object id is invalid */
	EXPECT_ERRNO(ENOENT, _test_ioctl_destroy(self->fd, 0));
	/* Bad pointer */
	EXPECT_ERRNO(EFAULT, ioctl(self->fd, IOMMU_DESTROY, NULL));
	/* Unknown ioctl */
	EXPECT_ERRNO(ENOTTY,
		     ioctl(self->fd, _IO(IOMMUFD_TYPE, IOMMUFD_CMD_BASE - 1),
			   &cmd));
}

TEST_F(iommufd, cmd_length)
{
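	/*
	 * Each iommufd ioctl carries its structure size in .size. The macro
	 * below checks the expected convention: a size below the minimum
	 * fails with EINVAL, a larger size with non-zero trailing bytes
	 * fails with E2BIG, and a larger size whose trailing bytes are zero
	 * must behave exactly like the current size.
	 */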
#define TEST_LENGTH(_struct, _ioctl, _last)                              \
	{                                                                \
		size_t min_size = offsetofend(struct _struct, _last);    \
		struct {                                                 \
			struct _struct cmd;                              \
			uint8_t extra;                                   \
		} cmd = { .cmd = { .size = min_size - 1 },               \
			  .extra = UINT8_MAX };                          \
		int old_errno;                                           \
		int rc;                                                  \
									 \
		EXPECT_ERRNO(EINVAL, ioctl(self->fd, _ioctl, &cmd));     \
		cmd.cmd.size = sizeof(struct _struct) + 1;               \
		EXPECT_ERRNO(E2BIG, ioctl(self->fd, _ioctl, &cmd));      \
		cmd.cmd.size = sizeof(struct _struct);                   \
		rc = ioctl(self->fd, _ioctl, &cmd);                      \
		old_errno = errno;                                       \
		cmd.cmd.size = sizeof(struct _struct) + 1;               \
		cmd.extra = 0;                                           \
		if (rc) {                                                \
			EXPECT_ERRNO(old_errno,                          \
				     ioctl(self->fd, _ioctl, &cmd));     \
		} else {                                                 \
			ASSERT_EQ(0, ioctl(self->fd, _ioctl, &cmd));     \
		}                                                        \
	}

	TEST_LENGTH(iommu_destroy, IOMMU_DESTROY, id);
	TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO, __reserved);
	TEST_LENGTH(iommu_hwpt_alloc, IOMMU_HWPT_ALLOC, __reserved);
	TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC, out_ioas_id);
	TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES,
		    out_iova_alignment);
	TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS,
		    allowed_iovas);
	TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP, iova);
	TEST_LENGTH(iommu_ioas_copy, IOMMU_IOAS_COPY, src_iova);
	TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP, length);
	TEST_LENGTH(iommu_option, IOMMU_OPTION, val64);
	TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS, __reserved);
#undef TEST_LENGTH
}

TEST_F(iommufd, cmd_ex_fail)
{
	struct {
		struct iommu_destroy cmd;
		__u64 future;
	} cmd = { .cmd = { .size = sizeof(cmd), .id = 0 } };

	/* object id is invalid and command is longer */
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
	/* future area is non-zero */
	cmd.future = 1;
	EXPECT_ERRNO(E2BIG, ioctl(self->fd, IOMMU_DESTROY, &cmd));
	/* Original command "works" */
	cmd.cmd.size = sizeof(cmd.cmd);
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
	/* Short command fails */
	cmd.cmd.size = sizeof(cmd.cmd) - 1;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_DESTROY, &cmd));
}

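/*
 * IOMMU_OPTION_RLIMIT_MODE is a global option (object_id is unused); it
 * selects how pinned memory is accounted and only accepts 0 or 1.
 */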
TEST_F(iommufd, global_options)
{
	struct iommu_option cmd = {
		.size = sizeof(cmd),
		.option_id = IOMMU_OPTION_RLIMIT_MODE,
		.op = IOMMU_OPTION_OP_GET,
		.val64 = 1,
	};

	cmd.option_id = IOMMU_OPTION_RLIMIT_MODE;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	ASSERT_EQ(0, cmd.val64);

	/* This requires root */
	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 1;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	cmd.val64 = 2;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));

	cmd.op = IOMMU_OPTION_OP_GET;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	ASSERT_EQ(1, cmd.val64);

	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 0;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	cmd.op = IOMMU_OPTION_OP_GET;
	cmd.option_id = IOMMU_OPTION_HUGE_PAGES;
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
	cmd.op = IOMMU_OPTION_OP_SET;
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
}

FIXTURE(iommufd_ioas)
{
	int fd;
	uint32_t ioas_id;
	uint32_t stdev_id;
	uint32_t hwpt_id;
	uint32_t device_id;
	uint64_t base_iova;
};

FIXTURE_VARIANT(iommufd_ioas)
{
	unsigned int mock_domains;
	unsigned int memory_limit;
};

FIXTURE_SETUP(iommufd_ioas)
{
	unsigned int i;

	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);
	test_ioctl_ioas_alloc(&self->ioas_id);

	if (!variant->memory_limit) {
		test_ioctl_set_default_memory_limit();
	} else {
		test_ioctl_set_temp_memory_limit(variant->memory_limit);
	}

	for (i = 0; i != variant->mock_domains; i++) {
		test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
				     &self->hwpt_id, &self->device_id);
		self->base_iova = MOCK_APERTURE_START;
	}
}

FIXTURE_TEARDOWN(iommufd_ioas)
{
	test_ioctl_set_default_memory_limit();
	teardown_iommufd(self->fd, _metadata);
}

FIXTURE_VARIANT_ADD(iommufd_ioas, no_domain)
{
};

FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain)
{
	.mock_domains = 1,
};

FIXTURE_VARIANT_ADD(iommufd_ioas, two_mock_domain)
{
	.mock_domains = 2,
};

FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain_limit)
{
	.mock_domains = 1,
	.memory_limit = 16,
};

TEST_F(iommufd_ioas, ioas_auto_destroy)
{
}

TEST_F(iommufd_ioas, ioas_destroy)
{
	if (self->stdev_id) {
		/* IOAS cannot be freed while a device has a HWPT using it */
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, self->ioas_id));
	} else {
		/* Can allocate and manually free an IOAS table */
		test_ioctl_destroy(self->ioas_id);
	}
}

TEST_F(iommufd_ioas, alloc_hwpt_nested)
{
	const uint32_t min_data_len =
		offsetofend(struct iommu_hwpt_selftest, iotlb);
	struct iommu_hwpt_selftest data = {
		.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
	};
	uint32_t nested_hwpt_id[2] = {};
	uint32_t parent_hwpt_id = 0;
	uint32_t parent_hwpt_id_not_work = 0;
	uint32_t test_hwpt_id = 0;

	if (self->device_id) {
		/* Negative tests */
		test_err_hwpt_alloc(ENOENT, self->ioas_id, self->device_id, 0,
				    &test_hwpt_id);
		test_err_hwpt_alloc(EINVAL, self->device_id, self->device_id, 0,
				    &test_hwpt_id);

		test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
				    IOMMU_HWPT_ALLOC_NEST_PARENT,
				    &parent_hwpt_id);

		test_cmd_hwpt_alloc(self->device_id, self->ioas_id, 0,
				    &parent_hwpt_id_not_work);

		/* Negative nested tests */
		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_NONE, &data,
					   sizeof(data));
		test_err_hwpt_alloc_nested(EOPNOTSUPP, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST + 1, &data,
					   sizeof(data));
		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   min_data_len - 1);
		test_err_hwpt_alloc_nested(EFAULT, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST, NULL,
					   sizeof(data));
		test_err_hwpt_alloc_nested(
			EOPNOTSUPP, self->device_id, parent_hwpt_id,
			IOMMU_HWPT_ALLOC_NEST_PARENT, &nested_hwpt_id[0],
			IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
					   parent_hwpt_id_not_work, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));

		/* Allocate two nested hwpts sharing one common parent hwpt */
		test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
		test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
					   &nested_hwpt_id[1],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));

		/* Negative test: a nested hwpt on top of a nested hwpt */
		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
					   nested_hwpt_id[0], 0, &test_hwpt_id,
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
		/* Negative test: parent hwpt now cannot be freed */
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, parent_hwpt_id));

		/* Attach device to nested_hwpt_id[0] that then will be busy */
		test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[0]);
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, nested_hwpt_id[0]));

		/* Switch from nested_hwpt_id[0] to nested_hwpt_id[1] */
		test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[1]);
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, nested_hwpt_id[1]));
		test_ioctl_destroy(nested_hwpt_id[0]);

		/* Detach from nested_hwpt_id[1] and destroy it */
		test_cmd_mock_domain_replace(self->stdev_id, parent_hwpt_id);
		test_ioctl_destroy(nested_hwpt_id[1]);

		/* Detach from the parent hw_pagetable and destroy it */
		test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
		test_ioctl_destroy(parent_hwpt_id);
		test_ioctl_destroy(parent_hwpt_id_not_work);
	} else {
		test_err_hwpt_alloc(ENOENT, self->device_id, self->ioas_id, 0,
				    &parent_hwpt_id);
		test_err_hwpt_alloc_nested(ENOENT, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
		test_err_hwpt_alloc_nested(ENOENT, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[1],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
		test_err_mock_domain_replace(ENOENT, self->stdev_id,
					     nested_hwpt_id[0]);
		test_err_mock_domain_replace(ENOENT, self->stdev_id,
					     nested_hwpt_id[1]);
	}
}

TEST_F(iommufd_ioas, hwpt_attach)
{
	/* Create a device attached directly to a hwpt */
	if (self->stdev_id) {
		test_cmd_mock_domain(self->hwpt_id, NULL, NULL, NULL);
	} else {
		test_err_mock_domain(ENOENT, self->hwpt_id, NULL, NULL);
	}
}

TEST_F(iommufd_ioas, ioas_area_destroy)
{
	/* Adding an area does not change ability to destroy */
	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
	if (self->stdev_id)
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, self->ioas_id));
	else
		test_ioctl_destroy(self->ioas_id);
}

TEST_F(iommufd_ioas, ioas_area_auto_destroy)
{
	int i;

	/* Can allocate and automatically free an IOAS table with many areas */
	for (i = 0; i != 10; i++) {
		test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
					  self->base_iova + i * PAGE_SIZE);
	}
}

TEST_F(iommufd_ioas, get_hw_info)
{
	struct iommu_test_hw_info buffer_exact;
	struct iommu_test_hw_info_buffer_larger {
		struct iommu_test_hw_info info;
		uint64_t trailing_bytes;
	} buffer_larger;
	struct iommu_test_hw_info_buffer_smaller {
		__u32 flags;
	} buffer_smaller;

	if (self->device_id) {
		/* Provide a zero-size user_buffer */
		test_cmd_get_hw_info(self->device_id, NULL, 0);
		/* Provide a user_buffer with exact size */
		test_cmd_get_hw_info(self->device_id, &buffer_exact, sizeof(buffer_exact));
		/*
		 * Provide a user_buffer larger than the exact size to check
		 * that the kernel zeroes the trailing bytes.
		 */
		test_cmd_get_hw_info(self->device_id, &buffer_larger, sizeof(buffer_larger));
		/*
		 * Provide a user_buffer smaller than the exact size to check
		 * that the fields within the size range still get updated.
		 */
		test_cmd_get_hw_info(self->device_id, &buffer_smaller, sizeof(buffer_smaller));
	} else {
		test_err_get_hw_info(ENOENT, self->device_id,
				     &buffer_exact, sizeof(buffer_exact));
		test_err_get_hw_info(ENOENT, self->device_id,
				     &buffer_larger, sizeof(buffer_larger));
	}
}

TEST_F(iommufd_ioas, area)
{
	int i;

	/* Unmap fails if nothing is mapped */
	for (i = 0; i != 10; i++)
		test_err_ioctl_ioas_unmap(ENOENT, i * PAGE_SIZE, PAGE_SIZE);

	/* Unmap works */
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
					  self->base_iova + i * PAGE_SIZE);
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(self->base_iova + i * PAGE_SIZE,
				      PAGE_SIZE);

	/* Split fails */
	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE * 2,
				  self->base_iova + 16 * PAGE_SIZE);
	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 16 * PAGE_SIZE,
				  PAGE_SIZE);
	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 17 * PAGE_SIZE,
				  PAGE_SIZE);

	/* Over map fails */
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
				      self->base_iova + 16 * PAGE_SIZE);
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
				      self->base_iova + 16 * PAGE_SIZE);
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
				      self->base_iova + 17 * PAGE_SIZE);
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
				      self->base_iova + 15 * PAGE_SIZE);
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 3,
				      self->base_iova + 15 * PAGE_SIZE);

	/* unmap all works */
	test_ioctl_ioas_unmap(0, UINT64_MAX);

	/* Unmap all succeeds on an empty IOAS */
	test_ioctl_ioas_unmap(0, UINT64_MAX);
}

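/*
 * IOMMU_IOAS_UNMAP only tears down areas that are fully contained in the
 * requested range; partial overlaps fail and the returned length reports
 * the bytes actually unmapped.
 */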
TEST_F(iommufd_ioas, unmap_fully_contained_areas)
{
	uint64_t unmap_len;
	int i;

	/* Give no_domain some space to rewind base_iova */
	self->base_iova += 4 * PAGE_SIZE;

	for (i = 0; i != 4; i++)
		test_ioctl_ioas_map_fixed(buffer, 8 * PAGE_SIZE,
					  self->base_iova + i * 16 * PAGE_SIZE);

	/* Unmapping an area that is not fully contained doesn't work */
	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova - 4 * PAGE_SIZE,
				  8 * PAGE_SIZE);
	test_err_ioctl_ioas_unmap(ENOENT,
				  self->base_iova + 3 * 16 * PAGE_SIZE +
					  8 * PAGE_SIZE - 4 * PAGE_SIZE,
				  8 * PAGE_SIZE);

	/* Unmap fully contained areas works */
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id,
					    self->base_iova - 4 * PAGE_SIZE,
					    3 * 16 * PAGE_SIZE + 8 * PAGE_SIZE +
						    4 * PAGE_SIZE,
					    &unmap_len));
	ASSERT_EQ(32 * PAGE_SIZE, unmap_len);
}

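/*
 * Without IOMMU_IOAS_MAP_FIXED_IOVA the kernel chooses the IOVA; the
 * checks below expect it to be aligned to the largest power of two that
 * divides the mapping length.
 */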
TEST_F(iommufd_ioas, area_auto_iova)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_ADD_RESERVED,
		.id = self->ioas_id,
		.add_reserved = { .start = PAGE_SIZE * 4,
				  .length = PAGE_SIZE * 100 },
	};
	struct iommu_iova_range ranges[1] = {};
	struct iommu_ioas_allow_iovas allow_cmd = {
		.size = sizeof(allow_cmd),
		.ioas_id = self->ioas_id,
		.num_iovas = 1,
		.allowed_iovas = (uintptr_t)ranges,
	};
	__u64 iovas[10];
	int i;

	/* Simple 4k pages */
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_map(buffer, PAGE_SIZE, &iovas[i]);
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE);

	/* Kernel automatically aligns IOVAs properly */
	for (i = 0; i != 10; i++) {
		size_t length = PAGE_SIZE * (i + 1);

		if (self->stdev_id) {
			test_ioctl_ioas_map(buffer, length, &iovas[i]);
		} else {
			test_ioctl_ioas_map((void *)(1UL << 31), length,
					    &iovas[i]);
		}
		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
	}
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));

	/* Avoids a reserved region */
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
			&test_cmd));
	for (i = 0; i != 10; i++) {
		size_t length = PAGE_SIZE * (i + 1);

		test_ioctl_ioas_map(buffer, length, &iovas[i]);
		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
		EXPECT_EQ(false,
			  iovas[i] > test_cmd.add_reserved.start &&
				  iovas[i] <
					  test_cmd.add_reserved.start +
						  test_cmd.add_reserved.length);
	}
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));

	/* Allowed region intersects with a reserved region */
	ranges[0].start = PAGE_SIZE;
	ranges[0].last = PAGE_SIZE * 600;
	EXPECT_ERRNO(EADDRINUSE,
		     ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));

	/* Allocate from an allowed region */
	if (self->stdev_id) {
		ranges[0].start = MOCK_APERTURE_START + PAGE_SIZE;
		ranges[0].last = MOCK_APERTURE_START + PAGE_SIZE * 600 - 1;
	} else {
		ranges[0].start = PAGE_SIZE * 200;
		ranges[0].last = PAGE_SIZE * 600 - 1;
	}
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
	for (i = 0; i != 10; i++) {
		size_t length = PAGE_SIZE * (i + 1);

		test_ioctl_ioas_map(buffer, length, &iovas[i]);
		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
		EXPECT_EQ(true, iovas[i] >= ranges[0].start);
		EXPECT_EQ(true, iovas[i] <= ranges[0].last);
		EXPECT_EQ(true, iovas[i] + length > ranges[0].start);
		EXPECT_EQ(true, iovas[i] + length <= ranges[0].last + 1);
	}
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
}

TEST_F(iommufd_ioas, area_allowed)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_ADD_RESERVED,
		.id = self->ioas_id,
		.add_reserved = { .start = PAGE_SIZE * 4,
				  .length = PAGE_SIZE * 100 },
	};
	struct iommu_iova_range ranges[1] = {};
	struct iommu_ioas_allow_iovas allow_cmd = {
		.size = sizeof(allow_cmd),
		.ioas_id = self->ioas_id,
		.num_iovas = 1,
		.allowed_iovas = (uintptr_t)ranges,
	};

	/* Reserved intersects an allowed */
	allow_cmd.num_iovas = 1;
	ranges[0].start = self->base_iova;
	ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
	test_cmd.add_reserved.start = ranges[0].start + PAGE_SIZE;
	test_cmd.add_reserved.length = PAGE_SIZE;
	EXPECT_ERRNO(EADDRINUSE,
		     ioctl(self->fd,
			   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
			   &test_cmd));
	allow_cmd.num_iovas = 0;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));

	/* Allowed intersects a reserved */
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
			&test_cmd));
	allow_cmd.num_iovas = 1;
	ranges[0].start = self->base_iova;
	ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
	EXPECT_ERRNO(EADDRINUSE,
		     ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
}

TEST_F(iommufd_ioas, copy_area)
{
	struct iommu_ioas_copy copy_cmd = {
		.size = sizeof(copy_cmd),
		.flags = IOMMU_IOAS_MAP_FIXED_IOVA,
		.dst_ioas_id = self->ioas_id,
		.src_ioas_id = self->ioas_id,
		.length = PAGE_SIZE,
	};

	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);

	/* Copy inside a single IOAS */
	copy_cmd.src_iova = self->base_iova;
	copy_cmd.dst_iova = self->base_iova + PAGE_SIZE;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));

	/* Copy between IOAS's */
	copy_cmd.src_iova = self->base_iova;
	copy_cmd.dst_iova = 0;
	test_ioctl_ioas_alloc(&copy_cmd.dst_ioas_id);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
}

TEST_F(iommufd_ioas, iova_ranges)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_ADD_RESERVED,
		.id = self->ioas_id,
		.add_reserved = { .start = PAGE_SIZE, .length = PAGE_SIZE },
	};
	struct iommu_iova_range *ranges = buffer;
	struct iommu_ioas_iova_ranges ranges_cmd = {
		.size = sizeof(ranges_cmd),
		.ioas_id = self->ioas_id,
		.num_iovas = BUFFER_SIZE / sizeof(*ranges),
		.allowed_iovas = (uintptr_t)ranges,
	};

	/* Range can be read */
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
	EXPECT_EQ(1, ranges_cmd.num_iovas);
	if (!self->stdev_id) {
		EXPECT_EQ(0, ranges[0].start);
		EXPECT_EQ(SIZE_MAX, ranges[0].last);
		EXPECT_EQ(1, ranges_cmd.out_iova_alignment);
	} else {
		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
		EXPECT_EQ(MOCK_PAGE_SIZE, ranges_cmd.out_iova_alignment);
	}

	/* Buffer too small */
	memset(ranges, 0, BUFFER_SIZE);
	ranges_cmd.num_iovas = 0;
	EXPECT_ERRNO(EMSGSIZE,
		     ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
	EXPECT_EQ(1, ranges_cmd.num_iovas);
	EXPECT_EQ(0, ranges[0].start);
	EXPECT_EQ(0, ranges[0].last);

	/* 2 ranges */
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
			&test_cmd));
	ranges_cmd.num_iovas = BUFFER_SIZE / sizeof(*ranges);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
	if (!self->stdev_id) {
		EXPECT_EQ(2, ranges_cmd.num_iovas);
		EXPECT_EQ(0, ranges[0].start);
		EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
		EXPECT_EQ(PAGE_SIZE * 2, ranges[1].start);
		EXPECT_EQ(SIZE_MAX, ranges[1].last);
	} else {
		EXPECT_EQ(1, ranges_cmd.num_iovas);
		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
	}

	/* Buffer too small */
	memset(ranges, 0, BUFFER_SIZE);
	ranges_cmd.num_iovas = 1;
	if (!self->stdev_id) {
		EXPECT_ERRNO(EMSGSIZE, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES,
					     &ranges_cmd));
		EXPECT_EQ(2, ranges_cmd.num_iovas);
		EXPECT_EQ(0, ranges[0].start);
		EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
	} else {
		ASSERT_EQ(0,
			  ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
		EXPECT_EQ(1, ranges_cmd.num_iovas);
		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
	}
	EXPECT_EQ(0, ranges[1].start);
	EXPECT_EQ(0, ranges[1].last);
}

TEST_F(iommufd_ioas, access_domain_destroy)
{
	struct iommu_test_cmd access_cmd = {
		.size = sizeof(access_cmd),
		.op = IOMMU_TEST_OP_ACCESS_PAGES,
		.access_pages = { .iova = self->base_iova + PAGE_SIZE,
				  .length = PAGE_SIZE },
	};
	size_t buf_size = 2 * HUGEPAGE_SIZE;
	uint8_t *buf;

	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
		   0);
	ASSERT_NE(MAP_FAILED, buf);
	test_ioctl_ioas_map_fixed(buf, buf_size, self->base_iova);

	test_cmd_create_access(self->ioas_id, &access_cmd.id,
			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
	access_cmd.access_pages.uptr = (uintptr_t)buf + PAGE_SIZE;
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			&access_cmd));

	/* Causes a complicated unpin across a huge page boundary */
	if (self->stdev_id)
		test_ioctl_destroy(self->stdev_id);

	test_cmd_destroy_access_pages(
		access_cmd.id, access_cmd.access_pages.out_access_pages_id);
	test_cmd_destroy_access(access_cmd.id);
	ASSERT_EQ(0, munmap(buf, buf_size));
}

TEST_F(iommufd_ioas, access_pin)
{
	struct iommu_test_cmd access_cmd = {
		.size = sizeof(access_cmd),
		.op = IOMMU_TEST_OP_ACCESS_PAGES,
		.access_pages = { .iova = MOCK_APERTURE_START,
				  .length = BUFFER_SIZE,
				  .uptr = (uintptr_t)buffer },
	};
	struct iommu_test_cmd check_map_cmd = {
		.size = sizeof(check_map_cmd),
		.op = IOMMU_TEST_OP_MD_CHECK_MAP,
		.check_map = { .iova = MOCK_APERTURE_START,
			       .length = BUFFER_SIZE,
			       .uptr = (uintptr_t)buffer },
	};
	uint32_t access_pages_id;
	unsigned int npages;

	test_cmd_create_access(self->ioas_id, &access_cmd.id,
			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);

	for (npages = 1; npages < BUFFER_SIZE / PAGE_SIZE; npages++) {
		uint32_t mock_stdev_id;
		uint32_t mock_hwpt_id;

		access_cmd.access_pages.length = npages * PAGE_SIZE;

		/* Single map/unmap */
		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
					  MOCK_APERTURE_START);
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
				   &access_cmd));
		test_cmd_destroy_access_pages(
			access_cmd.id,
			access_cmd.access_pages.out_access_pages_id);

		/* Double user */
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
				   &access_cmd));
		access_pages_id = access_cmd.access_pages.out_access_pages_id;
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
				   &access_cmd));
		test_cmd_destroy_access_pages(
			access_cmd.id,
			access_cmd.access_pages.out_access_pages_id);
		test_cmd_destroy_access_pages(access_cmd.id, access_pages_id);

		/* Add/remove a domain with a user */
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
				   &access_cmd));
		test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
				     &mock_hwpt_id, NULL);
		check_map_cmd.id = mock_hwpt_id;
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP),
				   &check_map_cmd));

		test_ioctl_destroy(mock_stdev_id);
		test_cmd_destroy_access_pages(
			access_cmd.id,
			access_cmd.access_pages.out_access_pages_id);

		test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
	}
	test_cmd_destroy_access(access_cmd.id);
}

TEST_F(iommufd_ioas, access_pin_unmap)
{
	struct iommu_test_cmd access_pages_cmd = {
		.size = sizeof(access_pages_cmd),
		.op = IOMMU_TEST_OP_ACCESS_PAGES,
		.access_pages = { .iova = MOCK_APERTURE_START,
				  .length = BUFFER_SIZE,
				  .uptr = (uintptr_t)buffer },
	};

	test_cmd_create_access(self->ioas_id, &access_pages_cmd.id,
			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
	test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, MOCK_APERTURE_START);
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			&access_pages_cmd));

	/* Trigger the unmap op */
	test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);

	/* kernel removed the item for us */
	test_err_destroy_access_pages(
		ENOENT, access_pages_cmd.id,
		access_pages_cmd.access_pages.out_access_pages_id);
}

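/*
 * Sweep single-byte-granularity reads and writes through an access object
 * around a page boundary, comparing against the global buffer, then do one
 * large multi-page transfer.
 */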
static void check_access_rw(struct __test_metadata *_metadata, int fd,
			    unsigned int access_id, uint64_t iova,
			    unsigned int def_flags)
{
	uint16_t tmp[32];
	struct iommu_test_cmd access_cmd = {
		.size = sizeof(access_cmd),
		.op = IOMMU_TEST_OP_ACCESS_RW,
		.id = access_id,
		.access_rw = { .uptr = (uintptr_t)tmp },
	};
	uint16_t *buffer16 = buffer;
	unsigned int i;
	void *tmp2;

	for (i = 0; i != BUFFER_SIZE / sizeof(*buffer16); i++)
		buffer16[i] = rand();

	for (access_cmd.access_rw.iova = iova + PAGE_SIZE - 50;
	     access_cmd.access_rw.iova < iova + PAGE_SIZE + 50;
	     access_cmd.access_rw.iova++) {
		for (access_cmd.access_rw.length = 1;
		     access_cmd.access_rw.length < sizeof(tmp);
		     access_cmd.access_rw.length++) {
			access_cmd.access_rw.flags = def_flags;
			ASSERT_EQ(0, ioctl(fd,
					   _IOMMU_TEST_CMD(
						   IOMMU_TEST_OP_ACCESS_RW),
					   &access_cmd));
			ASSERT_EQ(0,
				  memcmp(buffer + (access_cmd.access_rw.iova -
						   iova),
					 tmp, access_cmd.access_rw.length));

			for (i = 0; i != ARRAY_SIZE(tmp); i++)
				tmp[i] = rand();
			access_cmd.access_rw.flags = def_flags |
						     MOCK_ACCESS_RW_WRITE;
			ASSERT_EQ(0, ioctl(fd,
					   _IOMMU_TEST_CMD(
						   IOMMU_TEST_OP_ACCESS_RW),
					   &access_cmd));
			ASSERT_EQ(0,
				  memcmp(buffer + (access_cmd.access_rw.iova -
						   iova),
					 tmp, access_cmd.access_rw.length));
		}
	}

	/* Multi-page test */
	tmp2 = malloc(BUFFER_SIZE);
	ASSERT_NE(NULL, tmp2);
	access_cmd.access_rw.iova = iova;
	access_cmd.access_rw.length = BUFFER_SIZE;
	access_cmd.access_rw.flags = def_flags;
	access_cmd.access_rw.uptr = (uintptr_t)tmp2;
	ASSERT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			   &access_cmd));
	ASSERT_EQ(0, memcmp(buffer, tmp2, access_cmd.access_rw.length));
	free(tmp2);
}

TEST_F(iommufd_ioas, access_rw)
{
	__u32 access_id;
	__u64 iova;

	test_cmd_create_access(self->ioas_id, &access_id, 0);
	test_ioctl_ioas_map(buffer, BUFFER_SIZE, &iova);
	check_access_rw(_metadata, self->fd, access_id, iova, 0);
	check_access_rw(_metadata, self->fd, access_id, iova,
			MOCK_ACCESS_RW_SLOW_PATH);
	test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
	test_cmd_destroy_access(access_id);
}

TEST_F(iommufd_ioas, access_rw_unaligned)
{
	__u32 access_id;
	__u64 iova;

	test_cmd_create_access(self->ioas_id, &access_id, 0);

	/* Unaligned pages */
	iova = self->base_iova + MOCK_PAGE_SIZE;
	test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, iova);
	check_access_rw(_metadata, self->fd, access_id, iova, 0);
	test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
	test_cmd_destroy_access(access_id);
}

TEST_F(iommufd_ioas, fork_gone)
{
	__u32 access_id;
	pid_t child;

	test_cmd_create_access(self->ioas_id, &access_id, 0);

	/* Create a mapping with a different mm */
	child = fork();
	if (!child) {
		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
					  MOCK_APERTURE_START);
		exit(0);
	}
	ASSERT_NE(-1, child);
	ASSERT_EQ(child, waitpid(child, NULL, 0));

	if (self->stdev_id) {
		/*
		 * If a domain already existed then everything was pinned within
		 * the fork, so this copies from one domain to another.
		 */
		test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
		check_access_rw(_metadata, self->fd, access_id,
				MOCK_APERTURE_START, 0);

	} else {
		/*
		 * Otherwise we need to actually pin pages which can't happen
		 * since the fork is gone.
		 */
		test_err_mock_domain(EFAULT, self->ioas_id, NULL, NULL);
	}

	test_cmd_destroy_access(access_id);
}

TEST_F(iommufd_ioas, fork_present)
{
	__u32 access_id;
	int pipefds[2];
	uint64_t tmp;
	pid_t child;
	int efd;

	test_cmd_create_access(self->ioas_id, &access_id, 0);

	ASSERT_EQ(0, pipe2(pipefds, O_CLOEXEC));
	efd = eventfd(0, EFD_CLOEXEC);
	ASSERT_NE(-1, efd);

	/* Create a mapping with a different mm */
	child = fork();
	if (!child) {
		__u64 iova;
		uint64_t one = 1;

		close(pipefds[1]);
		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
					  MOCK_APERTURE_START);
		if (write(efd, &one, sizeof(one)) != sizeof(one))
			exit(100);
		if (read(pipefds[0], &iova, 1) != 1)
			exit(100);
		exit(0);
	}
	close(pipefds[0]);
	ASSERT_NE(-1, child);
	ASSERT_EQ(8, read(efd, &tmp, sizeof(tmp)));

	/* Read pages from the remote process */
	test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
	check_access_rw(_metadata, self->fd, access_id, MOCK_APERTURE_START, 0);

	ASSERT_EQ(0, close(pipefds[1]));
	ASSERT_EQ(child, waitpid(child, NULL, 0));

	test_cmd_destroy_access(access_id);
}

TEST_F(iommufd_ioas, ioas_option_huge_pages)
{
	struct iommu_option cmd = {
		.size = sizeof(cmd),
		.option_id = IOMMU_OPTION_HUGE_PAGES,
		.op = IOMMU_OPTION_OP_GET,
		.val64 = 3,
		.object_id = self->ioas_id,
	};

	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	ASSERT_EQ(1, cmd.val64);

	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 0;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	cmd.op = IOMMU_OPTION_OP_GET;
	cmd.val64 = 3;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	ASSERT_EQ(0, cmd.val64);

	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 2;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));

	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 1;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
}

TEST_F(iommufd_ioas, ioas_iova_alloc)
{
	unsigned int length;
	__u64 iova;

	for (length = 1; length != PAGE_SIZE * 2; length++) {
		if (variant->mock_domains && (length % MOCK_PAGE_SIZE)) {
			test_err_ioctl_ioas_map(EINVAL, buffer, length, &iova);
		} else {
			test_ioctl_ioas_map(buffer, length, &iova);
			test_ioctl_ioas_unmap(iova, length);
		}
	}
}

TEST_F(iommufd_ioas, ioas_align_change)
{
	struct iommu_option cmd = {
		.size = sizeof(cmd),
		.option_id = IOMMU_OPTION_HUGE_PAGES,
		.op = IOMMU_OPTION_OP_SET,
		.object_id = self->ioas_id,
		/* 0 means everything must be aligned to PAGE_SIZE */
		.val64 = 0,
	};

	/*
	 * We cannot upgrade the alignment using OPTION_HUGE_PAGES when a domain
	 * and map are present.
	 */
	if (variant->mock_domains)
		return;

	/*
	 * We can upgrade to PAGE_SIZE alignment when things are aligned right
	 */
	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, MOCK_APERTURE_START);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	/* Misalignment is rejected at map time */
	test_err_ioctl_ioas_map_fixed(EINVAL, buffer + MOCK_PAGE_SIZE,
				      PAGE_SIZE,
				      MOCK_APERTURE_START + PAGE_SIZE);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	/* Reduce alignment */
	cmd.val64 = 1;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	/* Confirm misalignment is rejected during alignment upgrade */
	test_ioctl_ioas_map_fixed(buffer + MOCK_PAGE_SIZE, PAGE_SIZE,
				  MOCK_APERTURE_START + PAGE_SIZE);
	cmd.val64 = 0;
	EXPECT_ERRNO(EADDRINUSE, ioctl(self->fd, IOMMU_OPTION, &cmd));

	test_ioctl_ioas_unmap(MOCK_APERTURE_START + PAGE_SIZE, PAGE_SIZE);
	test_ioctl_ioas_unmap(MOCK_APERTURE_START, PAGE_SIZE);
}

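/*
 * Sweep IOMMU_IOAS_COPY through odd 511-byte source offsets: the copy must
 * fail with ENOENT whenever any part of the source range falls outside the
 * mapped area and succeed otherwise.
 */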
TEST_F(iommufd_ioas, copy_sweep)
{
	struct iommu_ioas_copy copy_cmd = {
		.size = sizeof(copy_cmd),
		.flags = IOMMU_IOAS_MAP_FIXED_IOVA,
		.src_ioas_id = self->ioas_id,
		.dst_iova = MOCK_APERTURE_START,
		.length = MOCK_PAGE_SIZE,
	};
	unsigned int dst_ioas_id;
	uint64_t last_iova;
	uint64_t iova;

	test_ioctl_ioas_alloc(&dst_ioas_id);
	copy_cmd.dst_ioas_id = dst_ioas_id;

	if (variant->mock_domains)
		last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 1;
	else
		last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 2;

	test_ioctl_ioas_map_fixed(buffer, last_iova - MOCK_APERTURE_START + 1,
				  MOCK_APERTURE_START);

	for (iova = MOCK_APERTURE_START - PAGE_SIZE; iova <= last_iova;
	     iova += 511) {
		copy_cmd.src_iova = iova;
		if (iova < MOCK_APERTURE_START ||
		    iova + copy_cmd.length - 1 > last_iova) {
			EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_IOAS_COPY,
						   &copy_cmd));
		} else {
			ASSERT_EQ(0,
				  ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
			test_ioctl_ioas_unmap_id(dst_ioas_id, copy_cmd.dst_iova,
						 copy_cmd.length);
		}
	}

	test_ioctl_destroy(dst_ioas_id);
}

FIXTURE(iommufd_mock_domain)
{
	int fd;
	uint32_t ioas_id;
	uint32_t hwpt_id;
	uint32_t hwpt_ids[2];
	uint32_t stdev_ids[2];
	uint32_t idev_ids[2];
	int mmap_flags;
	size_t mmap_buf_size;
};

FIXTURE_VARIANT(iommufd_mock_domain)
{
	unsigned int mock_domains;
	bool hugepages;
};

FIXTURE_SETUP(iommufd_mock_domain)
{
	unsigned int i;

	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);
	test_ioctl_ioas_alloc(&self->ioas_id);

	ASSERT_GE(ARRAY_SIZE(self->hwpt_ids), variant->mock_domains);

	for (i = 0; i != variant->mock_domains; i++)
		test_cmd_mock_domain(self->ioas_id, &self->stdev_ids[i],
				     &self->hwpt_ids[i], &self->idev_ids[i]);
	self->hwpt_id = self->hwpt_ids[0];

	self->mmap_flags = MAP_SHARED | MAP_ANONYMOUS;
	self->mmap_buf_size = PAGE_SIZE * 8;
	if (variant->hugepages) {
		/*
		 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
		 * not available.
		 */
		self->mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
		self->mmap_buf_size = HUGEPAGE_SIZE * 2;
	}
}

FIXTURE_TEARDOWN(iommufd_mock_domain)
{
	teardown_iommufd(self->fd, _metadata);
}

FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain)
{
	.mock_domains = 1,
	.hugepages = false,
};

FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains)
{
	.mock_domains = 2,
	.hugepages = false,
};

FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_hugepage)
{
	.mock_domains = 1,
	.hugepages = true,
};

FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains_hugepage)
{
	.mock_domains = 2,
	.hugepages = true,
};

/* Have the kernel check that the user pages made it to the iommu_domain */
#define check_mock_iova(_ptr, _iova, _length)                                \
	({                                                                   \
		struct iommu_test_cmd check_map_cmd = {                      \
			.size = sizeof(check_map_cmd),                       \
			.op = IOMMU_TEST_OP_MD_CHECK_MAP,                    \
			.id = self->hwpt_id,                                 \
			.check_map = { .iova = _iova,                        \
				       .length = _length,                    \
				       .uptr = (uintptr_t)(_ptr) },          \
		};                                                           \
		ASSERT_EQ(0,                                                 \
			  ioctl(self->fd,                                    \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP), \
				&check_map_cmd));                            \
		if (self->hwpt_ids[1]) {                                     \
			check_map_cmd.id = self->hwpt_ids[1];                \
			ASSERT_EQ(0,                                         \
				  ioctl(self->fd,                            \
					_IOMMU_TEST_CMD(                     \
						IOMMU_TEST_OP_MD_CHECK_MAP), \
					&check_map_cmd));                    \
		}                                                            \
	})

TEST_F(iommufd_mock_domain, basic)
{
	size_t buf_size = self->mmap_buf_size;
	uint8_t *buf;
	__u64 iova;

	/* Simple one page map */
	test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
	check_mock_iova(buffer, iova, PAGE_SIZE);

	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
		   0);
	ASSERT_NE(MAP_FAILED, buf);

	/* EFAULT half way through mapping */
	ASSERT_EQ(0, munmap(buf + buf_size / 2, buf_size / 2));
	test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);

	/* EFAULT on first page */
	ASSERT_EQ(0, munmap(buf, buf_size / 2));
	test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
}

TEST_F(iommufd_mock_domain, ro_unshare)
{
	uint8_t *buf;
	__u64 iova;
	int fd;

	fd = open("/proc/self/exe", O_RDONLY);
	ASSERT_NE(-1, fd);

	buf = mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	ASSERT_NE(MAP_FAILED, buf);
	close(fd);

	/*
	 * There have been lots of changes to the "unshare" mechanism in
	 * get_user_pages(); make sure it works right. The write to the page
	 * after we map it for reading should not change the assigned PFN.
	 */
	ASSERT_EQ(0,
		  _test_ioctl_ioas_map(self->fd, self->ioas_id, buf, PAGE_SIZE,
				       &iova, IOMMU_IOAS_MAP_READABLE));
	check_mock_iova(buf, iova, PAGE_SIZE);
	memset(buf, 1, PAGE_SIZE);
	check_mock_iova(buf, iova, PAGE_SIZE);
	ASSERT_EQ(0, munmap(buf, PAGE_SIZE));
}

TEST_F(iommufd_mock_domain, all_aligns)
{
	size_t test_step = variant->hugepages ? (self->mmap_buf_size / 16) :
						MOCK_PAGE_SIZE;
	size_t buf_size = self->mmap_buf_size;
	unsigned int start;
	unsigned int end;
	uint8_t *buf;

	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
		   0);
	ASSERT_NE(MAP_FAILED, buf);
	check_refs(buf, buf_size, 0);

	/*
	 * Map every combination of page size and alignment within a big
	 * region; less for the hugepage case as it takes so long to finish.
	 */
	for (start = 0; start < buf_size; start += test_step) {
		if (variant->hugepages)
			end = buf_size;
		else
			end = start + MOCK_PAGE_SIZE;
		for (; end < buf_size; end += MOCK_PAGE_SIZE) {
			size_t length = end - start;
			__u64 iova;

			test_ioctl_ioas_map(buf + start, length, &iova);
			check_mock_iova(buf + start, iova, length);
			check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
				   end / PAGE_SIZE * PAGE_SIZE -
					   start / PAGE_SIZE * PAGE_SIZE,
				   1);

			test_ioctl_ioas_unmap(iova, length);
		}
	}
	check_refs(buf, buf_size, 0);
	ASSERT_EQ(0, munmap(buf, buf_size));
}

TEST_F(iommufd_mock_domain, all_aligns_copy)
{
	size_t test_step = variant->hugepages ? self->mmap_buf_size / 16 :
						MOCK_PAGE_SIZE;
	size_t buf_size = self->mmap_buf_size;
	unsigned int start;
	unsigned int end;
	uint8_t *buf;

	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
		   0);
	ASSERT_NE(MAP_FAILED, buf);
	check_refs(buf, buf_size, 0);

	/*
	 * Map every combination of page size and alignment within a big
	 * region; less for the hugepage case as it takes so long to finish.
	 */
	for (start = 0; start < buf_size; start += test_step) {
		if (variant->hugepages)
			end = buf_size;
		else
			end = start + MOCK_PAGE_SIZE;
		for (; end < buf_size; end += MOCK_PAGE_SIZE) {
			size_t length = end - start;
			unsigned int old_id;
			uint32_t mock_stdev_id;
			__u64 iova;

			test_ioctl_ioas_map(buf + start, length, &iova);

			/* Add and destroy a domain while the area exists */
			old_id = self->hwpt_ids[1];
			test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
					     &self->hwpt_ids[1], NULL);

			check_mock_iova(buf + start, iova, length);
			check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
				   end / PAGE_SIZE * PAGE_SIZE -
					   start / PAGE_SIZE * PAGE_SIZE,
				   1);

			test_ioctl_destroy(mock_stdev_id);
			self->hwpt_ids[1] = old_id;

			test_ioctl_ioas_unmap(iova, length);
		}
	}
	check_refs(buf, buf_size, 0);
	ASSERT_EQ(0, munmap(buf, buf_size));
}

TEST_F(iommufd_mock_domain, user_copy)
{
	struct iommu_test_cmd access_cmd = {
		.size = sizeof(access_cmd),
		.op = IOMMU_TEST_OP_ACCESS_PAGES,
		.access_pages = { .length = BUFFER_SIZE,
				  .uptr = (uintptr_t)buffer },
	};
	struct iommu_ioas_copy copy_cmd = {
		.size = sizeof(copy_cmd),
		.flags = IOMMU_IOAS_MAP_FIXED_IOVA,
		.dst_ioas_id = self->ioas_id,
		.dst_iova = MOCK_APERTURE_START,
		.length = BUFFER_SIZE,
	};
	struct iommu_ioas_unmap unmap_cmd = {
		.size = sizeof(unmap_cmd),
		.ioas_id = self->ioas_id,
		.iova = MOCK_APERTURE_START,
		.length = BUFFER_SIZE,
	};
	unsigned int new_ioas_id, ioas_id;

	/* Pin the pages in an IOAS with no domains then copy to an IOAS with domains */
	test_ioctl_ioas_alloc(&ioas_id);
	test_ioctl_ioas_map_id(ioas_id, buffer, BUFFER_SIZE,
			       &copy_cmd.src_iova);

	test_cmd_create_access(ioas_id, &access_cmd.id,
			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);

	access_cmd.access_pages.iova = copy_cmd.src_iova;
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			&access_cmd));
	copy_cmd.src_ioas_id = ioas_id;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
	check_mock_iova(buffer, MOCK_APERTURE_START, BUFFER_SIZE);

	/* Now replace the ioas with a new one */
	test_ioctl_ioas_alloc(&new_ioas_id);
	test_ioctl_ioas_map_id(new_ioas_id, buffer, BUFFER_SIZE,
			       &copy_cmd.src_iova);
	test_cmd_access_replace_ioas(access_cmd.id, new_ioas_id);

	/* Destroy the old ioas and cleanup copied mapping */
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_UNMAP, &unmap_cmd));
	test_ioctl_destroy(ioas_id);

	/* Then run the same test again with the new ioas */
	access_cmd.access_pages.iova = copy_cmd.src_iova;
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			&access_cmd));
	copy_cmd.src_ioas_id = new_ioas_id;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
	check_mock_iova(buffer, MOCK_APERTURE_START, BUFFER_SIZE);

	test_cmd_destroy_access_pages(
		access_cmd.id, access_cmd.access_pages.out_access_pages_id);
	test_cmd_destroy_access(access_cmd.id);

	test_ioctl_destroy(new_ioas_id);
}

TEST_F(iommufd_mock_domain, replace)
{
	uint32_t ioas_id;

	test_ioctl_ioas_alloc(&ioas_id);

	test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);

	/*
	 * Replacing the IOAS causes the prior HWPT to be deallocated, thus we
	 * should get ENOENT when we try to use it.
	 */
	if (variant->mock_domains == 1)
		test_err_mock_domain_replace(ENOENT, self->stdev_ids[0],
					     self->hwpt_ids[0]);

	test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
	if (variant->mock_domains >= 2) {
		test_cmd_mock_domain_replace(self->stdev_ids[0],
					     self->hwpt_ids[1]);
		test_cmd_mock_domain_replace(self->stdev_ids[0],
					     self->hwpt_ids[1]);
		test_cmd_mock_domain_replace(self->stdev_ids[0],
					     self->hwpt_ids[0]);
	}

	test_cmd_mock_domain_replace(self->stdev_ids[0], self->ioas_id);
	test_ioctl_destroy(ioas_id);
}

TEST_F(iommufd_mock_domain, alloc_hwpt)
{
	int i;

	for (i = 0; i != variant->mock_domains; i++) {
		uint32_t hwpt_id[2];
		uint32_t stddev_id;

		test_err_hwpt_alloc(EOPNOTSUPP,
				    self->idev_ids[i], self->ioas_id,
				    ~IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[0]);
		test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
				    0, &hwpt_id[0]);
		test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
				    IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[1]);

		/* Do a hw_pagetable rotation test */
		test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[0]);
		EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[0]));
		test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[1]);
		EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[1]));
		test_cmd_mock_domain_replace(self->stdev_ids[i], self->ioas_id);
		test_ioctl_destroy(hwpt_id[1]);

		test_cmd_mock_domain(hwpt_id[0], &stddev_id, NULL, NULL);
		test_ioctl_destroy(stddev_id);
		test_ioctl_destroy(hwpt_id[0]);
	}
}

FIXTURE(iommufd_dirty_tracking)
{
	int fd;
	uint32_t ioas_id;
	uint32_t hwpt_id;
	uint32_t stdev_id;
	uint32_t idev_id;
	unsigned long page_size;
	unsigned long bitmap_size;
	void *bitmap;
	void *buffer;
};

FIXTURE_VARIANT(iommufd_dirty_tracking)
{
	unsigned long buffer_size;
};

FIXTURE_SETUP(iommufd_dirty_tracking)
{
	void *vrc;
	int rc;

	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);

	rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, variant->buffer_size);
	if (rc || !self->buffer) {
		SKIP(return, "Skipping buffer_size=%lu due to errno=%d",
		     variant->buffer_size, rc);
	}

	assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0);
	vrc = mmap(self->buffer, variant->buffer_size, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	assert(vrc == self->buffer);

	self->page_size = MOCK_PAGE_SIZE;
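	/* One bitmap bit per mock page of the buffer */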
	self->bitmap_size =
		variant->buffer_size / self->page_size / BITS_PER_BYTE;

	/* Provision with an extra (MOCK_PAGE_SIZE) for the unaligned case */
	rc = posix_memalign(&self->bitmap, PAGE_SIZE,
			    self->bitmap_size + MOCK_PAGE_SIZE);
	assert(!rc);
	assert(self->bitmap);
	assert((uintptr_t)self->bitmap % PAGE_SIZE == 0);

	test_ioctl_ioas_alloc(&self->ioas_id);
	test_cmd_mock_domain(self->ioas_id, &self->stdev_id, &self->hwpt_id,
			     &self->idev_id);
}

FIXTURE_TEARDOWN(iommufd_dirty_tracking)
{
	munmap(self->buffer, variant->buffer_size);
	free(self->bitmap); /* allocated with posix_memalign(), not mmap() */
1608 teardown_iommufd(self->fd, _metadata);
1609}
1610
1611FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k)
1612{
1613 /* one u32 index bitmap */
1614 .buffer_size = 128UL * 1024UL,
1615};
1616
1617FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256k)
1618{
1619 /* one u64 index bitmap */
1620 .buffer_size = 256UL * 1024UL,
1621};
1622
1623FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty640k)
1624{
1625 /* two u64 index and trailing end bitmap */
1626 .buffer_size = 640UL * 1024UL,
1627};
1628
1629FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M)
1630{
1631 /* 4K bitmap (128M IOVA range) */
1632 .buffer_size = 128UL * 1024UL * 1024UL,
1633};
1634
1635FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256M)
1636{
1637 /* 8K bitmap (256M IOVA range) */
1638 .buffer_size = 256UL * 1024UL * 1024UL,
1639};
1640
1641TEST_F(iommufd_dirty_tracking, enforce_dirty)
1642{
1643 uint32_t ioas_id, stddev_id, idev_id;
1644 uint32_t hwpt_id, _hwpt_id;
1645 uint32_t dev_flags;
1646
1647 /* Regular case */
1648 dev_flags = MOCK_FLAGS_DEVICE_NO_DIRTY;
1649 test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
1650 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
1651 test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
1652 test_err_mock_domain_flags(EINVAL, hwpt_id, dev_flags, &stddev_id,
1653 NULL);
1654 test_ioctl_destroy(stddev_id);
1655 test_ioctl_destroy(hwpt_id);
1656
1657 /* IOMMU device does not support dirty tracking */
1658 test_ioctl_ioas_alloc(&ioas_id);
1659 test_cmd_mock_domain_flags(ioas_id, dev_flags, &stddev_id, &_hwpt_id,
1660 &idev_id);
1661 test_err_hwpt_alloc(EOPNOTSUPP, idev_id, ioas_id,
1662 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
1663 test_ioctl_destroy(stddev_id);
1664}
1665
1666TEST_F(iommufd_dirty_tracking, set_dirty_tracking)
1667{
1668 uint32_t stddev_id;
1669 uint32_t hwpt_id;
1670
1671 test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
1672 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
1673 test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
1674 test_cmd_set_dirty_tracking(hwpt_id, true);
1675 test_cmd_set_dirty_tracking(hwpt_id, false);
1676
1677 test_ioctl_destroy(stddev_id);
1678 test_ioctl_destroy(hwpt_id);
1679}
1680
1681TEST_F(iommufd_dirty_tracking, device_dirty_capability)
1682{
1683 uint32_t caps = 0;
1684 uint32_t stddev_id;
1685 uint32_t hwpt_id;
1686
1687 test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, 0, &hwpt_id);
1688 test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
1689 test_cmd_get_hw_capabilities(self->idev_id, caps,
1690 IOMMU_HW_CAP_DIRTY_TRACKING);
1691 ASSERT_EQ(IOMMU_HW_CAP_DIRTY_TRACKING,
1692 caps & IOMMU_HW_CAP_DIRTY_TRACKING);
1693
1694 test_ioctl_destroy(stddev_id);
1695 test_ioctl_destroy(hwpt_id);
1696}
1697
1698TEST_F(iommufd_dirty_tracking, get_dirty_bitmap)
1699{
1700 uint32_t stddev_id;
1701 uint32_t hwpt_id;
1702 uint32_t ioas_id;
1703
1704 test_ioctl_ioas_alloc(&ioas_id);
1705 test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
1706 variant->buffer_size, MOCK_APERTURE_START);
1707
1708 test_cmd_hwpt_alloc(self->idev_id, ioas_id,
1709 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
1710 test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
1711
1712 test_cmd_set_dirty_tracking(hwpt_id, true);
1713
1714 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
1715 MOCK_APERTURE_START, self->page_size,
1716 self->bitmap, self->bitmap_size, 0, _metadata);
1717
1718 /* PAGE_SIZE unaligned bitmap */
1719 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
1720 MOCK_APERTURE_START, self->page_size,
1721 self->bitmap + MOCK_PAGE_SIZE,
1722 self->bitmap_size, 0, _metadata);
1723
1724 test_ioctl_destroy(stddev_id);
1725 test_ioctl_destroy(hwpt_id);
1726}
1727
1728TEST_F(iommufd_dirty_tracking, get_dirty_bitmap_no_clear)
1729{
1730 uint32_t stddev_id;
1731 uint32_t hwpt_id;
1732 uint32_t ioas_id;
1733
1734 test_ioctl_ioas_alloc(&ioas_id);
1735 test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
1736 variant->buffer_size, MOCK_APERTURE_START);
1737
1738 test_cmd_hwpt_alloc(self->idev_id, ioas_id,
1739 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
1740 test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
1741
1742 test_cmd_set_dirty_tracking(hwpt_id, true);
1743
1744 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
1745 MOCK_APERTURE_START, self->page_size,
1746 self->bitmap, self->bitmap_size,
1747 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
1748 _metadata);
1749
1750 /* Unaligned bitmap */
1751 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
1752 MOCK_APERTURE_START, self->page_size,
1753 self->bitmap + MOCK_PAGE_SIZE,
1754 self->bitmap_size,
1755 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
1756 _metadata);
1757
1758 test_ioctl_destroy(stddev_id);
1759 test_ioctl_destroy(hwpt_id);
1760}
1761
1762/* VFIO compatibility IOCTLs */
1763
1764TEST_F(iommufd, simple_ioctls)
1765{
1766 ASSERT_EQ(VFIO_API_VERSION, ioctl(self->fd, VFIO_GET_API_VERSION));
1767 ASSERT_EQ(1, ioctl(self->fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU));
1768}
1769
1770TEST_F(iommufd, unmap_cmd)
1771{
1772 struct vfio_iommu_type1_dma_unmap unmap_cmd = {
1773 .iova = MOCK_APERTURE_START,
1774 .size = PAGE_SIZE,
1775 };
1776
1777 unmap_cmd.argsz = 1;
1778 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
1779
1780 unmap_cmd.argsz = sizeof(unmap_cmd);
1781 unmap_cmd.flags = 1 << 31;
1782 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
1783
1784 unmap_cmd.flags = 0;
1785 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
1786}
1787
1788TEST_F(iommufd, map_cmd)
1789{
1790 struct vfio_iommu_type1_dma_map map_cmd = {
1791 .iova = MOCK_APERTURE_START,
1792 .size = PAGE_SIZE,
1793 .vaddr = (__u64)buffer,
1794 };
1795
1796 map_cmd.argsz = 1;
1797 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
1798
1799 map_cmd.argsz = sizeof(map_cmd);
1800 map_cmd.flags = 1 << 31;
1801 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
1802
1803 /* Requires a domain to be attached */
1804 map_cmd.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
1805 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
1806}
1807
1808TEST_F(iommufd, info_cmd)
1809{
1810 struct vfio_iommu_type1_info info_cmd = {};
1811
1812 /* Invalid argsz */
1813 info_cmd.argsz = 1;
1814 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
1815
1816 info_cmd.argsz = sizeof(info_cmd);
1817 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
1818}
1819
1820TEST_F(iommufd, set_iommu_cmd)
1821{
1822 /* Requires a domain to be attached */
1823 EXPECT_ERRNO(ENODEV,
1824 ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU));
1825 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU));
1826}
1827
1828TEST_F(iommufd, vfio_ioas)
1829{
1830 struct iommu_vfio_ioas vfio_ioas_cmd = {
1831 .size = sizeof(vfio_ioas_cmd),
1832 .op = IOMMU_VFIO_IOAS_GET,
1833 };
1834 __u32 ioas_id;
1835
1836 /* ENODEV if there is no compat ioas */
1837 EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
1838
1839 /* Invalid id for set */
1840 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_SET;
1841 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
1842
1843 /* Valid id for set*/
1844 test_ioctl_ioas_alloc(&ioas_id);
1845 vfio_ioas_cmd.ioas_id = ioas_id;
1846 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
1847
1848 /* Same id comes back from get */
1849 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
1850 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
1851 ASSERT_EQ(ioas_id, vfio_ioas_cmd.ioas_id);
1852
1853 /* Clear works */
1854 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_CLEAR;
1855 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
1856 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
1857 EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
1858}

FIXTURE(vfio_compat_mock_domain)
{
	int fd;
	uint32_t ioas_id;
};

FIXTURE_VARIANT(vfio_compat_mock_domain)
{
	unsigned int version;
};

FIXTURE_SETUP(vfio_compat_mock_domain)
{
	struct iommu_vfio_ioas vfio_ioas_cmd = {
		.size = sizeof(vfio_ioas_cmd),
		.op = IOMMU_VFIO_IOAS_SET,
	};

	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);

	/* Create what VFIO would consider a group */
	test_ioctl_ioas_alloc(&self->ioas_id);
	test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);

	/* Attach it to the vfio compat */
	vfio_ioas_cmd.ioas_id = self->ioas_id;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
	ASSERT_EQ(0, ioctl(self->fd, VFIO_SET_IOMMU, variant->version));
}

FIXTURE_TEARDOWN(vfio_compat_mock_domain)
{
	teardown_iommufd(self->fd, _metadata);
}

FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v2)
{
	.version = VFIO_TYPE1v2_IOMMU,
};

FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v0)
{
	.version = VFIO_TYPE1_IOMMU,
};

TEST_F(vfio_compat_mock_domain, simple_close)
{
}

TEST_F(vfio_compat_mock_domain, option_huge_pages)
{
	struct iommu_option cmd = {
		.size = sizeof(cmd),
		.option_id = IOMMU_OPTION_HUGE_PAGES,
		.op = IOMMU_OPTION_OP_GET,
		.val64 = 3,
		.object_id = self->ioas_id,
	};

	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
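	/*
	 * Type1 v1 semantics allow unmap to split existing mappings, so the
	 * v1 compat mode is expected to disable huge pages on the compat
	 * IOAS; type1v2 leaves them enabled.
	 */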
	if (variant->version == VFIO_TYPE1_IOMMU) {
		ASSERT_EQ(0, cmd.val64);
	} else {
		ASSERT_EQ(1, cmd.val64);
	}
}

1928/*
1929 * Execute an ioctl command stored in buffer and check that the result does not
1930 * overflow memory.
1931 */
1932static bool is_filled(const void *buf, uint8_t c, size_t len)
1933{
1934 const uint8_t *cbuf = buf;
1935
1936 for (; len; cbuf++, len--)
1937 if (*cbuf != c)
1938 return false;
1939 return true;
1940}
1941
1942#define ioctl_check_buf(fd, cmd) \
1943 ({ \
1944 size_t _cmd_len = *(__u32 *)buffer; \
1945 \
1946 memset(buffer + _cmd_len, 0xAA, BUFFER_SIZE - _cmd_len); \
1947 ASSERT_EQ(0, ioctl(fd, cmd, buffer)); \
1948 ASSERT_EQ(true, is_filled(buffer + _cmd_len, 0xAA, \
1949 BUFFER_SIZE - _cmd_len)); \
1950 })

static void check_vfio_info_cap_chain(struct __test_metadata *_metadata,
				      struct vfio_iommu_type1_info *info_cmd)
{
	const struct vfio_info_cap_header *cap;

	ASSERT_GE(info_cmd->argsz, info_cmd->cap_offset + sizeof(*cap));
	cap = buffer + info_cmd->cap_offset;
	while (true) {
		size_t cap_size;

		if (cap->next)
			cap_size = (buffer + cap->next) - (void *)cap;
		else
			cap_size = (buffer + info_cmd->argsz) - (void *)cap;

		switch (cap->id) {
		case VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE: {
			struct vfio_iommu_type1_info_cap_iova_range *data =
				(void *)cap;

			ASSERT_EQ(1, data->header.version);
			ASSERT_EQ(1, data->nr_iovas);
			EXPECT_EQ(MOCK_APERTURE_START,
				  data->iova_ranges[0].start);
			EXPECT_EQ(MOCK_APERTURE_LAST, data->iova_ranges[0].end);
			break;
		}
		case VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL: {
			struct vfio_iommu_type1_info_dma_avail *data =
				(void *)cap;

			ASSERT_EQ(1, data->header.version);
			ASSERT_EQ(sizeof(*data), cap_size);
			break;
		}
		default:
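			/* Unexpected capability ID */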
			ASSERT_EQ(false, true);
			break;
		}
		if (!cap->next)
			break;

		ASSERT_GE(info_cmd->argsz, cap->next + sizeof(*cap));
		ASSERT_GE(buffer + cap->next, (void *)cap);
		cap = buffer + cap->next;
	}
}

TEST_F(vfio_compat_mock_domain, get_info)
{
	struct vfio_iommu_type1_info *info_cmd = buffer;
	unsigned int i;
	size_t caplen;

	/* Pre-cap ABI */
	*info_cmd = (struct vfio_iommu_type1_info){
		.argsz = offsetof(struct vfio_iommu_type1_info, cap_offset),
	};
	ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
	ASSERT_NE(0, info_cmd->iova_pgsizes);
	ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
		  info_cmd->flags);

	/* Read the cap chain size */
	*info_cmd = (struct vfio_iommu_type1_info){
		.argsz = sizeof(*info_cmd),
	};
	ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
	ASSERT_NE(0, info_cmd->iova_pgsizes);
	ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
		  info_cmd->flags);
	ASSERT_EQ(0, info_cmd->cap_offset);
	ASSERT_LT(sizeof(*info_cmd), info_cmd->argsz);

	/* Read the caps; the kernel must never corrupt the cap chain */
	caplen = info_cmd->argsz;
	for (i = sizeof(*info_cmd); i < caplen; i++) {
		*info_cmd = (struct vfio_iommu_type1_info){
			.argsz = i,
		};
		ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
		ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
			  info_cmd->flags);
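		/* Caps that don't fit are signalled by cap_offset == 0 */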
		if (!info_cmd->cap_offset)
			continue;
		check_vfio_info_cap_chain(_metadata, info_cmd);
	}
}

static void shuffle_array(unsigned long *array, size_t nelms)
{
	unsigned int i;

	/* Fisher-Yates: swap each slot with a random element from the tail */
	for (i = 0; i != nelms; i++) {
		unsigned long tmp = array[i];
		unsigned int other = i + rand() % (nelms - i);

		array[i] = array[other];
		array[other] = tmp;
	}
}

TEST_F(vfio_compat_mock_domain, map)
{
	struct vfio_iommu_type1_dma_map map_cmd = {
		.argsz = sizeof(map_cmd),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
		.vaddr = (uintptr_t)buffer,
		.size = BUFFER_SIZE,
		.iova = MOCK_APERTURE_START,
	};
	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
		.argsz = sizeof(unmap_cmd),
		.size = BUFFER_SIZE,
		.iova = MOCK_APERTURE_START,
	};
	unsigned long pages_iova[BUFFER_SIZE / PAGE_SIZE];
	unsigned int i;

	/* Simple map/unmap */
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
	ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);

	/* UNMAP_FLAG_ALL requires 0 iova/size */
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
	unmap_cmd.flags = VFIO_DMA_UNMAP_FLAG_ALL;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));

	unmap_cmd.iova = 0;
	unmap_cmd.size = 0;
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
	ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);

	/* Small pages */
	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
		map_cmd.iova = pages_iova[i] =
			MOCK_APERTURE_START + i * PAGE_SIZE;
		map_cmd.vaddr = (uintptr_t)buffer + i * PAGE_SIZE;
		map_cmd.size = PAGE_SIZE;
		ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
	}
	shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));

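	/* Unmap each page individually, in random order */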
	unmap_cmd.flags = 0;
	unmap_cmd.size = PAGE_SIZE;
	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
		unmap_cmd.iova = pages_iova[i];
		ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
	}
}

TEST_F(vfio_compat_mock_domain, huge_map)
{
	size_t buf_size = HUGEPAGE_SIZE * 2;
	struct vfio_iommu_type1_dma_map map_cmd = {
		.argsz = sizeof(map_cmd),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
		.size = buf_size,
		.iova = MOCK_APERTURE_START,
	};
	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
		.argsz = sizeof(unmap_cmd),
	};
	unsigned long pages_iova[16];
	unsigned int i;
	void *buf;

	/* Test huge pages and splitting */
	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
		   0);
	ASSERT_NE(MAP_FAILED, buf);
	map_cmd.vaddr = (uintptr_t)buf;
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));

	unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
	for (i = 0; i != ARRAY_SIZE(pages_iova); i++)
		pages_iova[i] = MOCK_APERTURE_START + (i * unmap_cmd.size);
	shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));

	/*
	 * type1 mode can cut up larger mappings, type1v2 cannot split
	 * mappings so these partial unmaps always fail
	 */
	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
		unmap_cmd.iova = pages_iova[i];
		unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
		if (variant->version == VFIO_TYPE1_IOMMU) {
			ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
					   &unmap_cmd));
		} else {
			EXPECT_ERRNO(ENOENT,
				     ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
					   &unmap_cmd));
		}
	}
}

TEST_HARNESS_MAIN