Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0-only
2/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
3#include <stdlib.h>
4#include <sys/mman.h>
5#include <sys/eventfd.h>
6
7#define __EXPORTED_HEADERS__
8#include <linux/vfio.h>
9
10#include "iommufd_utils.h"
11
/* Filled in by the setup_sizes() constructor; 2MiB fallback if sysfs fails. */
static unsigned long HUGEPAGE_SIZE;

/* The mock iommu domain in the selftest driver uses half the CPU page size. */
#define MOCK_PAGE_SIZE (PAGE_SIZE / 2)
15
/*
 * Return the transparent-hugepage PMD size advertised by sysfs, falling
 * back to 2MiB when the file is absent, empty, or oversized.
 */
static unsigned long get_huge_page_size(void)
{
	const unsigned long fallback = 2 * 1024 * 1024;
	char size_buf[80];
	int nread;
	int fd;

	fd = open("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size",
		  O_RDONLY);
	if (fd < 0)
		return fallback;

	nread = read(fd, size_buf, sizeof(size_buf));
	close(fd);
	/* A full buffer would leave no room for the terminator */
	if (nread <= 0 || nread == sizeof(size_buf))
		return fallback;

	size_buf[nread] = '\0';
	return strtoul(size_buf, NULL, 10);
}
34
35static __attribute__((constructor)) void setup_sizes(void)
36{
37 void *vrc;
38 int rc;
39
40 PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
41 HUGEPAGE_SIZE = get_huge_page_size();
42
43 BUFFER_SIZE = PAGE_SIZE * 16;
44 rc = posix_memalign(&buffer, HUGEPAGE_SIZE, BUFFER_SIZE);
45 assert(!rc);
46 assert(buffer);
47 assert((uintptr_t)buffer % HUGEPAGE_SIZE == 0);
48 vrc = mmap(buffer, BUFFER_SIZE, PROT_READ | PROT_WRITE,
49 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
50 assert(vrc == buffer);
51}
52
/* Minimal fixture: only an open /dev/iommu fd, no objects pre-created. */
FIXTURE(iommufd)
{
	int fd;
};
57
FIXTURE_SETUP(iommufd)
{
	/* Every test gets its own iommufd context */
	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);
}
63
FIXTURE_TEARDOWN(iommufd)
{
	/* Closes the fd and checks no kernel objects were leaked */
	teardown_iommufd(self->fd, _metadata);
}
68
/* Open and close /dev/iommu with no other activity; must not leak or oops. */
TEST_F(iommufd, simple_close)
{
}
72
/* Basic ioctl error paths: bad object id, bad pointer, unknown command. */
TEST_F(iommufd, cmd_fail)
{
	struct iommu_destroy cmd = { .size = sizeof(cmd), .id = 0 };

	/* object id is invalid */
	EXPECT_ERRNO(ENOENT, _test_ioctl_destroy(self->fd, 0));
	/* Bad pointer */
	EXPECT_ERRNO(EFAULT, ioctl(self->fd, IOMMU_DESTROY, NULL));
	/* Unknown ioctl */
	EXPECT_ERRNO(ENOTTY,
		     ioctl(self->fd, _IO(IOMMUFD_TYPE, IOMMUFD_CMD_BASE - 1),
			   &cmd));
}
86
/*
 * Verify the extensible-ioctl size contract for every command: a size
 * below the minimum fails with EINVAL, a size beyond the struct with
 * non-zero trailing bytes fails with E2BIG, and a longer-but-zeroed
 * trailing area behaves exactly like the exact-size call.
 */
TEST_F(iommufd, cmd_length)
{
#define TEST_LENGTH(_struct, _ioctl, _last)                              \
	{                                                                \
		size_t min_size = offsetofend(struct _struct, _last);    \
		struct {                                                 \
			struct _struct cmd;                              \
			uint8_t extra;                                   \
		} cmd = { .cmd = { .size = min_size - 1 },               \
			  .extra = UINT8_MAX };                          \
		int old_errno;                                           \
		int rc;                                                  \
									 \
		EXPECT_ERRNO(EINVAL, ioctl(self->fd, _ioctl, &cmd));     \
		cmd.cmd.size = sizeof(struct _struct) + 1;               \
		EXPECT_ERRNO(E2BIG, ioctl(self->fd, _ioctl, &cmd));      \
		cmd.cmd.size = sizeof(struct _struct);                   \
		rc = ioctl(self->fd, _ioctl, &cmd);                      \
		old_errno = errno;                                       \
		cmd.cmd.size = sizeof(struct _struct) + 1;               \
		cmd.extra = 0;                                           \
		if (rc) {                                                \
			EXPECT_ERRNO(old_errno,                          \
				     ioctl(self->fd, _ioctl, &cmd));     \
		} else {                                                 \
			ASSERT_EQ(0, ioctl(self->fd, _ioctl, &cmd));     \
		}                                                        \
	}

	TEST_LENGTH(iommu_destroy, IOMMU_DESTROY, id);
	TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO, __reserved);
	TEST_LENGTH(iommu_hwpt_alloc, IOMMU_HWPT_ALLOC, __reserved);
	TEST_LENGTH(iommu_hwpt_invalidate, IOMMU_HWPT_INVALIDATE, __reserved);
	TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC, out_ioas_id);
	TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES,
		    out_iova_alignment);
	TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS,
		    allowed_iovas);
	TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP, iova);
	TEST_LENGTH(iommu_ioas_copy, IOMMU_IOAS_COPY, src_iova);
	TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP, length);
	TEST_LENGTH(iommu_option, IOMMU_OPTION, val64);
	TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS, __reserved);
#undef TEST_LENGTH
}
132
/*
 * Extended (longer-than-known) command handling: zeroed trailing space is
 * accepted, non-zero trailing space is rejected with E2BIG.
 */
TEST_F(iommufd, cmd_ex_fail)
{
	struct {
		struct iommu_destroy cmd;
		__u64 future;
	} cmd = { .cmd = { .size = sizeof(cmd), .id = 0 } };

	/* object id is invalid and command is longer */
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
	/* future area is non-zero */
	cmd.future = 1;
	EXPECT_ERRNO(E2BIG, ioctl(self->fd, IOMMU_DESTROY, &cmd));
	/* Original command "works" */
	cmd.cmd.size = sizeof(cmd.cmd);
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
	/* Short command fails */
	cmd.cmd.size = sizeof(cmd.cmd) - 1;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_DESTROY, &cmd));
}
152
/*
 * Exercise IOMMU_OPTION at context scope: RLIMIT_MODE get/set round-trip
 * (valid values 0/1 only), and confirm HUGE_PAGES is rejected without an
 * object id since it is a per-IOAS option.
 */
TEST_F(iommufd, global_options)
{
	struct iommu_option cmd = {
		.size = sizeof(cmd),
		.option_id = IOMMU_OPTION_RLIMIT_MODE,
		.op = IOMMU_OPTION_OP_GET,
		.val64 = 1,
	};

	cmd.option_id = IOMMU_OPTION_RLIMIT_MODE;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	ASSERT_EQ(0, cmd.val64);

	/* This requires root */
	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 1;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	cmd.val64 = 2;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));

	cmd.op = IOMMU_OPTION_OP_GET;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	ASSERT_EQ(1, cmd.val64);

	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 0;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	cmd.op = IOMMU_OPTION_OP_GET;
	cmd.option_id = IOMMU_OPTION_HUGE_PAGES;
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
	cmd.op = IOMMU_OPTION_OP_SET;
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
}
187
/* Fixture with a pre-allocated IOAS and optionally attached mock domains. */
FIXTURE(iommufd_ioas)
{
	int fd;
	uint32_t ioas_id;
	uint32_t stdev_id;	/* 0 when no mock domain was created */
	uint32_t hwpt_id;
	uint32_t device_id;	/* 0 when no mock domain was created */
	uint64_t base_iova;
};
197
FIXTURE_VARIANT(iommufd_ioas)
{
	unsigned int mock_domains;	/* number of mock domains to attach */
	unsigned int memory_limit;	/* 0 means use the default limit */
};
203
204FIXTURE_SETUP(iommufd_ioas)
205{
206 unsigned int i;
207
208
209 self->fd = open("/dev/iommu", O_RDWR);
210 ASSERT_NE(-1, self->fd);
211 test_ioctl_ioas_alloc(&self->ioas_id);
212
213 if (!variant->memory_limit) {
214 test_ioctl_set_default_memory_limit();
215 } else {
216 test_ioctl_set_temp_memory_limit(variant->memory_limit);
217 }
218
219 for (i = 0; i != variant->mock_domains; i++) {
220 test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
221 &self->hwpt_id, &self->device_id);
222 self->base_iova = MOCK_APERTURE_START;
223 }
224}
225
FIXTURE_TEARDOWN(iommufd_ioas)
{
	/* Undo any temporary memory limit before checking for leaks */
	test_ioctl_set_default_memory_limit();
	teardown_iommufd(self->fd, _metadata);
}
231
/* No mock domain: stdev_id/device_id stay 0, negative paths are taken */
FIXTURE_VARIANT_ADD(iommufd_ioas, no_domain)
{
};

FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain)
{
	.mock_domains = 1,
};

FIXTURE_VARIANT_ADD(iommufd_ioas, two_mock_domain)
{
	.mock_domains = 2,
};

/* Small pinned-memory limit to exercise limit enforcement */
FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain_limit)
{
	.mock_domains = 1,
	.memory_limit = 16,
};
251
/* The IOAS allocated by the fixture must be freed automatically on close. */
TEST_F(iommufd_ioas, ioas_auto_destroy)
{
}
255
/* Manual IOMMU_DESTROY of the IOAS: busy while a HWPT uses it, else OK. */
TEST_F(iommufd_ioas, ioas_destroy)
{
	if (self->stdev_id) {
		/* IOAS cannot be freed while a device has a HWPT using it */
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, self->ioas_id));
	} else {
		/* Can allocate and manually free an IOAS table */
		test_ioctl_destroy(self->ioas_id);
	}
}
267
/*
 * End-to-end coverage of nested hw_pagetable allocation and invalidation:
 * negative allocation paths, two nested HWPTs on one NEST_PARENT, the
 * selftest IOTLB invalidation array protocol (including partial-failure
 * semantics where num_inv reports how many entries were consumed), and
 * replace/destroy ordering constraints.  Statement order is significant
 * throughout: object lifetimes depend on the preceding attach/replace.
 */
TEST_F(iommufd_ioas, alloc_hwpt_nested)
{
	const uint32_t min_data_len =
		offsetofend(struct iommu_hwpt_selftest, iotlb);
	struct iommu_hwpt_selftest data = {
		.iotlb = IOMMU_TEST_IOTLB_DEFAULT,
	};
	struct iommu_hwpt_invalidate_selftest inv_reqs[2] = {};
	uint32_t nested_hwpt_id[2] = {};
	uint32_t num_inv;
	uint32_t parent_hwpt_id = 0;
	uint32_t parent_hwpt_id_not_work = 0;
	uint32_t test_hwpt_id = 0;

	if (self->device_id) {
		/* Negative tests */
		test_err_hwpt_alloc(ENOENT, self->ioas_id, self->device_id, 0,
				    &test_hwpt_id);
		test_err_hwpt_alloc(EINVAL, self->device_id, self->device_id, 0,
				    &test_hwpt_id);

		test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
				    IOMMU_HWPT_ALLOC_NEST_PARENT,
				    &parent_hwpt_id);

		/* Allocated without NEST_PARENT, so cannot parent a nest */
		test_cmd_hwpt_alloc(self->device_id, self->ioas_id, 0,
				    &parent_hwpt_id_not_work);

		/* Negative nested tests */
		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_NONE, &data,
					   sizeof(data));
		test_err_hwpt_alloc_nested(EOPNOTSUPP, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST + 1, &data,
					   sizeof(data));
		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   min_data_len - 1);
		test_err_hwpt_alloc_nested(EFAULT, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST, NULL,
					   sizeof(data));
		test_err_hwpt_alloc_nested(
			EOPNOTSUPP, self->device_id, parent_hwpt_id,
			IOMMU_HWPT_ALLOC_NEST_PARENT, &nested_hwpt_id[0],
			IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
					   parent_hwpt_id_not_work, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));

		/* Allocate two nested hwpts sharing one common parent hwpt */
		test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
		test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
					   &nested_hwpt_id[1],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0],
					      IOMMU_TEST_IOTLB_DEFAULT);
		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1],
					      IOMMU_TEST_IOTLB_DEFAULT);

		/* Negative test: a nested hwpt on top of a nested hwpt */
		test_err_hwpt_alloc_nested(EINVAL, self->device_id,
					   nested_hwpt_id[0], 0, &test_hwpt_id,
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
		/* Negative test: parent hwpt now cannot be freed */
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, parent_hwpt_id));

		/* hwpt_invalidate only supports a user-managed hwpt (nested) */
		num_inv = 1;
		test_err_hwpt_invalidate(ENOENT, parent_hwpt_id, inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Check data_type by passing zero-length array */
		num_inv = 0;
		test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: Invalid data_type */
		num_inv = 1;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST_INVALID,
					 sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: structure size sanity */
		num_inv = 1;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs) + 1, &num_inv);
		assert(!num_inv);

		num_inv = 1;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 1, &num_inv);
		assert(!num_inv);

		/* Negative test: invalid flag is passed */
		num_inv = 1;
		inv_reqs[0].flags = 0xffffffff;
		test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: invalid data_uptr when array is not empty */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], NULL,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/* Negative test: invalid entry_len when array is not empty */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 0, &num_inv);
		assert(!num_inv);

		/* Negative test: invalid iotlb_id */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		inv_reqs[0].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(!num_inv);

		/*
		 * Invalidate the 1st iotlb entry but fail the 2nd request
		 * due to invalid flags configuration in the 2nd request.
		 */
		num_inv = 2;
		inv_reqs[0].flags = 0;
		inv_reqs[0].iotlb_id = 0;
		inv_reqs[1].flags = 0xffffffff;
		inv_reqs[1].iotlb_id = 1;
		test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 1);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
					  IOMMU_TEST_IOTLB_DEFAULT);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
					  IOMMU_TEST_IOTLB_DEFAULT);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
					  IOMMU_TEST_IOTLB_DEFAULT);

		/*
		 * Invalidate the 1st iotlb entry but fail the 2nd request
		 * due to invalid iotlb_id configuration in the 2nd request.
		 */
		num_inv = 2;
		inv_reqs[0].flags = 0;
		inv_reqs[0].iotlb_id = 0;
		inv_reqs[1].flags = 0;
		inv_reqs[1].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
		test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 1);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
					  IOMMU_TEST_IOTLB_DEFAULT);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
					  IOMMU_TEST_IOTLB_DEFAULT);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
					  IOMMU_TEST_IOTLB_DEFAULT);

		/* Invalidate the 2nd iotlb entry and verify */
		num_inv = 1;
		inv_reqs[0].flags = 0;
		inv_reqs[0].iotlb_id = 1;
		test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 1);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1, 0);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
					  IOMMU_TEST_IOTLB_DEFAULT);
		test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
					  IOMMU_TEST_IOTLB_DEFAULT);

		/* Invalidate the 3rd and 4th iotlb entries and verify */
		num_inv = 2;
		inv_reqs[0].flags = 0;
		inv_reqs[0].iotlb_id = 2;
		inv_reqs[1].flags = 0;
		inv_reqs[1].iotlb_id = 3;
		test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 2);
		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0], 0);

		/* Invalidate all iotlb entries for nested_hwpt_id[1] and verify */
		num_inv = 1;
		inv_reqs[0].flags = IOMMU_TEST_INVALIDATE_FLAG_ALL;
		test_cmd_hwpt_invalidate(nested_hwpt_id[1], inv_reqs,
					 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
					 sizeof(*inv_reqs), &num_inv);
		assert(num_inv == 1);
		test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1], 0);

		/* Attach device to nested_hwpt_id[0] that then will be busy */
		test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[0]);
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, nested_hwpt_id[0]));

		/* Switch from nested_hwpt_id[0] to nested_hwpt_id[1] */
		test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[1]);
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, nested_hwpt_id[1]));
		test_ioctl_destroy(nested_hwpt_id[0]);

		/* Detach from nested_hwpt_id[1] and destroy it */
		test_cmd_mock_domain_replace(self->stdev_id, parent_hwpt_id);
		test_ioctl_destroy(nested_hwpt_id[1]);

		/* Detach from the parent hw_pagetable and destroy it */
		test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
		test_ioctl_destroy(parent_hwpt_id);
		test_ioctl_destroy(parent_hwpt_id_not_work);
	} else {
		/* No device: every allocation/replace must fail with ENOENT */
		test_err_hwpt_alloc(ENOENT, self->device_id, self->ioas_id, 0,
				    &parent_hwpt_id);
		test_err_hwpt_alloc_nested(ENOENT, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[0],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
		test_err_hwpt_alloc_nested(ENOENT, self->device_id,
					   parent_hwpt_id, 0,
					   &nested_hwpt_id[1],
					   IOMMU_HWPT_DATA_SELFTEST, &data,
					   sizeof(data));
		test_err_mock_domain_replace(ENOENT, self->stdev_id,
					     nested_hwpt_id[0]);
		test_err_mock_domain_replace(ENOENT, self->stdev_id,
					     nested_hwpt_id[1]);
	}
}
533
/* A mock domain can be created against a HWPT id, not just an IOAS id. */
TEST_F(iommufd_ioas, hwpt_attach)
{
	/* Create a device attached directly to a hwpt */
	if (self->stdev_id) {
		test_cmd_mock_domain(self->hwpt_id, NULL, NULL, NULL);
	} else {
		/* hwpt_id is 0 here, so lookup must fail */
		test_err_mock_domain(ENOENT, self->hwpt_id, NULL, NULL);
	}
}
543
/* Mapping an area does not change whether the IOAS can be destroyed. */
TEST_F(iommufd_ioas, ioas_area_destroy)
{
	/* Adding an area does not change ability to destroy */
	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
	if (self->stdev_id)
		EXPECT_ERRNO(EBUSY,
			     _test_ioctl_destroy(self->fd, self->ioas_id));
	else
		test_ioctl_destroy(self->ioas_id);
}
554
555TEST_F(iommufd_ioas, ioas_area_auto_destroy)
556{
557 int i;
558
559 /* Can allocate and automatically free an IOAS table with many areas */
560 for (i = 0; i != 10; i++) {
561 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
562 self->base_iova + i * PAGE_SIZE);
563 }
564}
565
/*
 * IOMMU_GET_HW_INFO with user buffers of various sizes: zero, exact,
 * larger (trailing bytes must be zeroed) and smaller (leading fields must
 * still be filled).  Without a device the lookup fails with ENOENT.
 */
TEST_F(iommufd_ioas, get_hw_info)
{
	struct iommu_test_hw_info buffer_exact;
	struct iommu_test_hw_info_buffer_larger {
		struct iommu_test_hw_info info;
		uint64_t trailing_bytes;
	} buffer_larger;
	struct iommu_test_hw_info_buffer_smaller {
		__u32 flags;
	} buffer_smaller;

	if (self->device_id) {
		/* Provide a zero-size user_buffer */
		test_cmd_get_hw_info(self->device_id, NULL, 0);
		/* Provide a user_buffer with exact size */
		test_cmd_get_hw_info(self->device_id, &buffer_exact, sizeof(buffer_exact));
		/*
		 * Provide a user_buffer with size larger than the exact size to check if
		 * kernel zeroes the trailing bytes.
		 */
		test_cmd_get_hw_info(self->device_id, &buffer_larger, sizeof(buffer_larger));
		/*
		 * Provide a user_buffer with size smaller than the exact size to check if
		 * the fields within the size range still get updated.
		 */
		test_cmd_get_hw_info(self->device_id, &buffer_smaller, sizeof(buffer_smaller));
	} else {
		test_err_get_hw_info(ENOENT, self->device_id,
				     &buffer_exact, sizeof(buffer_exact));
		test_err_get_hw_info(ENOENT, self->device_id,
				     &buffer_larger, sizeof(buffer_larger));
	}
}
599
/*
 * Basic area lifecycle: unmap of unmapped IOVA fails, fixed map/unmap
 * round-trips, partial unmap of a larger area is rejected (no splitting),
 * overlapping maps are rejected, and unmap-all works (also on empty IOAS).
 */
TEST_F(iommufd_ioas, area)
{
	int i;

	/* Unmap fails if nothing is mapped */
	for (i = 0; i != 10; i++)
		test_err_ioctl_ioas_unmap(ENOENT, i * PAGE_SIZE, PAGE_SIZE);

	/* Unmap works */
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
					  self->base_iova + i * PAGE_SIZE);
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(self->base_iova + i * PAGE_SIZE,
				      PAGE_SIZE);

	/* Split fails */
	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE * 2,
				  self->base_iova + 16 * PAGE_SIZE);
	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 16 * PAGE_SIZE,
				  PAGE_SIZE);
	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 17 * PAGE_SIZE,
				  PAGE_SIZE);

	/* Over map fails */
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
				      self->base_iova + 16 * PAGE_SIZE);
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
				      self->base_iova + 16 * PAGE_SIZE);
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
				      self->base_iova + 17 * PAGE_SIZE);
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
				      self->base_iova + 15 * PAGE_SIZE);
	test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 3,
				      self->base_iova + 15 * PAGE_SIZE);

	/* unmap all works */
	test_ioctl_ioas_unmap(0, UINT64_MAX);

	/* Unmap all succeeds on an empty IOAS */
	test_ioctl_ioas_unmap(0, UINT64_MAX);
}
642
/*
 * A ranged unmap only removes areas fully contained in the range: partial
 * overlap at either end fails, while a range spanning all four areas
 * removes exactly 32 pages.
 */
TEST_F(iommufd_ioas, unmap_fully_contained_areas)
{
	uint64_t unmap_len;
	int i;

	/* Give no_domain some space to rewind base_iova */
	self->base_iova += 4 * PAGE_SIZE;

	for (i = 0; i != 4; i++)
		test_ioctl_ioas_map_fixed(buffer, 8 * PAGE_SIZE,
					  self->base_iova + i * 16 * PAGE_SIZE);

	/* Unmap not fully contained area doesn't work */
	test_err_ioctl_ioas_unmap(ENOENT, self->base_iova - 4 * PAGE_SIZE,
				  8 * PAGE_SIZE);
	test_err_ioctl_ioas_unmap(ENOENT,
				  self->base_iova + 3 * 16 * PAGE_SIZE +
					  8 * PAGE_SIZE - 4 * PAGE_SIZE,
				  8 * PAGE_SIZE);

	/* Unmap fully contained areas works */
	ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id,
					    self->base_iova - 4 * PAGE_SIZE,
					    3 * 16 * PAGE_SIZE + 8 * PAGE_SIZE +
						    4 * PAGE_SIZE,
					    &unmap_len));
	ASSERT_EQ(32 * PAGE_SIZE, unmap_len);
}
671
/*
 * Automatic IOVA allocation: natural alignment of the chosen IOVA,
 * avoidance of reserved regions, interaction of ALLOW_IOVAS with reserved
 * regions, and allocation constrained to an allowed range.
 */
TEST_F(iommufd_ioas, area_auto_iova)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_ADD_RESERVED,
		.id = self->ioas_id,
		.add_reserved = { .start = PAGE_SIZE * 4,
				  .length = PAGE_SIZE * 100 },
	};
	struct iommu_iova_range ranges[1] = {};
	struct iommu_ioas_allow_iovas allow_cmd = {
		.size = sizeof(allow_cmd),
		.ioas_id = self->ioas_id,
		.num_iovas = 1,
		.allowed_iovas = (uintptr_t)ranges,
	};
	__u64 iovas[10];
	int i;

	/* Simple 4k pages */
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_map(buffer, PAGE_SIZE, &iovas[i]);
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE);

	/* Kernel automatically aligns IOVAs properly */
	for (i = 0; i != 10; i++) {
		size_t length = PAGE_SIZE * (i + 1);

		if (self->stdev_id) {
			test_ioctl_ioas_map(buffer, length, &iovas[i]);
		} else {
			/* No domain: any user VA works as the source */
			test_ioctl_ioas_map((void *)(1UL << 31), length,
					    &iovas[i]);
		}
		/* IOVA is aligned to the lowest set bit of the length */
		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
	}
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));

	/* Avoids a reserved region */
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
			&test_cmd));
	for (i = 0; i != 10; i++) {
		size_t length = PAGE_SIZE * (i + 1);

		test_ioctl_ioas_map(buffer, length, &iovas[i]);
		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
		EXPECT_EQ(false,
			  iovas[i] > test_cmd.add_reserved.start &&
				  iovas[i] <
					  test_cmd.add_reserved.start +
						  test_cmd.add_reserved.length);
	}
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));

	/* Allowed region intersects with a reserved region */
	ranges[0].start = PAGE_SIZE;
	ranges[0].last = PAGE_SIZE * 600;
	EXPECT_ERRNO(EADDRINUSE,
		     ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));

	/* Allocate from an allowed region */
	if (self->stdev_id) {
		ranges[0].start = MOCK_APERTURE_START + PAGE_SIZE;
		ranges[0].last = MOCK_APERTURE_START + PAGE_SIZE * 600 - 1;
	} else {
		ranges[0].start = PAGE_SIZE * 200;
		ranges[0].last = PAGE_SIZE * 600 - 1;
	}
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
	for (i = 0; i != 10; i++) {
		size_t length = PAGE_SIZE * (i + 1);

		test_ioctl_ioas_map(buffer, length, &iovas[i]);
		EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
		EXPECT_EQ(true, iovas[i] >= ranges[0].start);
		EXPECT_EQ(true, iovas[i] <= ranges[0].last);
		EXPECT_EQ(true, iovas[i] + length > ranges[0].start);
		EXPECT_EQ(true, iovas[i] + length <= ranges[0].last + 1);
	}
	for (i = 0; i != 10; i++)
		test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
}
758
/*
 * ALLOW_IOVAS vs reserved regions is rejected in both orders: a reserved
 * region cannot land inside an allowed range, and an allowed range cannot
 * be set over an existing reserved region.
 */
TEST_F(iommufd_ioas, area_allowed)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_ADD_RESERVED,
		.id = self->ioas_id,
		.add_reserved = { .start = PAGE_SIZE * 4,
				  .length = PAGE_SIZE * 100 },
	};
	struct iommu_iova_range ranges[1] = {};
	struct iommu_ioas_allow_iovas allow_cmd = {
		.size = sizeof(allow_cmd),
		.ioas_id = self->ioas_id,
		.num_iovas = 1,
		.allowed_iovas = (uintptr_t)ranges,
	};

	/* Reserved intersects an allowed */
	allow_cmd.num_iovas = 1;
	ranges[0].start = self->base_iova;
	ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
	test_cmd.add_reserved.start = ranges[0].start + PAGE_SIZE;
	test_cmd.add_reserved.length = PAGE_SIZE;
	EXPECT_ERRNO(EADDRINUSE,
		     ioctl(self->fd,
			   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
			   &test_cmd));
	/* num_iovas = 0 clears the allowed list again */
	allow_cmd.num_iovas = 0;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));

	/* Allowed intersects a reserved */
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
			&test_cmd));
	allow_cmd.num_iovas = 1;
	ranges[0].start = self->base_iova;
	ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
	EXPECT_ERRNO(EADDRINUSE,
		     ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
}
800
801TEST_F(iommufd_ioas, copy_area)
802{
803 struct iommu_ioas_copy copy_cmd = {
804 .size = sizeof(copy_cmd),
805 .flags = IOMMU_IOAS_MAP_FIXED_IOVA,
806 .dst_ioas_id = self->ioas_id,
807 .src_ioas_id = self->ioas_id,
808 .length = PAGE_SIZE,
809 };
810
811 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
812
813 /* Copy inside a single IOAS */
814 copy_cmd.src_iova = self->base_iova;
815 copy_cmd.dst_iova = self->base_iova + PAGE_SIZE;
816 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, ©_cmd));
817
818 /* Copy between IOAS's */
819 copy_cmd.src_iova = self->base_iova;
820 copy_cmd.dst_iova = 0;
821 test_ioctl_ioas_alloc(©_cmd.dst_ioas_id);
822 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, ©_cmd));
823}
824
/*
 * IOMMU_IOAS_IOVA_RANGES reporting: a single full-span range without a
 * domain vs the mock aperture with one, EMSGSIZE + required count when the
 * user array is too small, and range splitting after adding a reserved
 * page.
 */
TEST_F(iommufd_ioas, iova_ranges)
{
	struct iommu_test_cmd test_cmd = {
		.size = sizeof(test_cmd),
		.op = IOMMU_TEST_OP_ADD_RESERVED,
		.id = self->ioas_id,
		.add_reserved = { .start = PAGE_SIZE, .length = PAGE_SIZE },
	};
	struct iommu_iova_range *ranges = buffer;
	struct iommu_ioas_iova_ranges ranges_cmd = {
		.size = sizeof(ranges_cmd),
		.ioas_id = self->ioas_id,
		.num_iovas = BUFFER_SIZE / sizeof(*ranges),
		.allowed_iovas = (uintptr_t)ranges,
	};

	/* Range can be read */
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
	EXPECT_EQ(1, ranges_cmd.num_iovas);
	if (!self->stdev_id) {
		EXPECT_EQ(0, ranges[0].start);
		EXPECT_EQ(SIZE_MAX, ranges[0].last);
		EXPECT_EQ(1, ranges_cmd.out_iova_alignment);
	} else {
		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
		EXPECT_EQ(MOCK_PAGE_SIZE, ranges_cmd.out_iova_alignment);
	}

	/* Buffer too small */
	memset(ranges, 0, BUFFER_SIZE);
	ranges_cmd.num_iovas = 0;
	EXPECT_ERRNO(EMSGSIZE,
		     ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
	/* num_iovas reports the required count; array is untouched */
	EXPECT_EQ(1, ranges_cmd.num_iovas);
	EXPECT_EQ(0, ranges[0].start);
	EXPECT_EQ(0, ranges[0].last);

	/* 2 ranges */
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
			&test_cmd));
	ranges_cmd.num_iovas = BUFFER_SIZE / sizeof(*ranges);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
	if (!self->stdev_id) {
		EXPECT_EQ(2, ranges_cmd.num_iovas);
		EXPECT_EQ(0, ranges[0].start);
		EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
		EXPECT_EQ(PAGE_SIZE * 2, ranges[1].start);
		EXPECT_EQ(SIZE_MAX, ranges[1].last);
	} else {
		/* The reserved page is outside the mock aperture */
		EXPECT_EQ(1, ranges_cmd.num_iovas);
		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
	}

	/* Buffer too small */
	memset(ranges, 0, BUFFER_SIZE);
	ranges_cmd.num_iovas = 1;
	if (!self->stdev_id) {
		EXPECT_ERRNO(EMSGSIZE, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES,
					     &ranges_cmd));
		EXPECT_EQ(2, ranges_cmd.num_iovas);
		EXPECT_EQ(0, ranges[0].start);
		EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
	} else {
		ASSERT_EQ(0,
			  ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
		EXPECT_EQ(1, ranges_cmd.num_iovas);
		EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
		EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
	}
	/* Second slot must not have been written */
	EXPECT_EQ(0, ranges[1].start);
	EXPECT_EQ(0, ranges[1].last);
}
900
/*
 * Destroying the mock device while an access holds pinned pages forces a
 * complicated unpin across a huge page boundary.
 *
 * NOTE(review): the test name "destory" is a historical typo; kept because
 * the name is the test's public identity in kselftest output.
 */
TEST_F(iommufd_ioas, access_domain_destory)
{
	struct iommu_test_cmd access_cmd = {
		.size = sizeof(access_cmd),
		.op = IOMMU_TEST_OP_ACCESS_PAGES,
		.access_pages = { .iova = self->base_iova + PAGE_SIZE,
				  .length = PAGE_SIZE},
	};
	size_t buf_size = 2 * HUGEPAGE_SIZE;
	uint8_t *buf;

	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
		   0);
	ASSERT_NE(MAP_FAILED, buf);
	test_ioctl_ioas_map_fixed(buf, buf_size, self->base_iova);

	test_cmd_create_access(self->ioas_id, &access_cmd.id,
			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
	/* Pin a single page that straddles into the mapped hugepage area */
	access_cmd.access_pages.uptr = (uintptr_t)buf + PAGE_SIZE;
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			&access_cmd));

	/* Causes a complicated unpin across a huge page boundary */
	if (self->stdev_id)
		test_ioctl_destroy(self->stdev_id);

	test_cmd_destroy_access_pages(
		access_cmd.id, access_cmd.access_pages.out_access_pages_id);
	test_cmd_destroy_access(access_cmd.id);
	ASSERT_EQ(0, munmap(buf, buf_size));
}
934
/*
 * Access pinning across many lengths: single pin/unpin, two concurrent
 * pins of the same range, and pinning that survives adding/removing a
 * mock domain (verified with MD_CHECK_MAP against the new hwpt).
 */
TEST_F(iommufd_ioas, access_pin)
{
	struct iommu_test_cmd access_cmd = {
		.size = sizeof(access_cmd),
		.op = IOMMU_TEST_OP_ACCESS_PAGES,
		.access_pages = { .iova = MOCK_APERTURE_START,
				  .length = BUFFER_SIZE,
				  .uptr = (uintptr_t)buffer },
	};
	struct iommu_test_cmd check_map_cmd = {
		.size = sizeof(check_map_cmd),
		.op = IOMMU_TEST_OP_MD_CHECK_MAP,
		.check_map = { .iova = MOCK_APERTURE_START,
			       .length = BUFFER_SIZE,
			       .uptr = (uintptr_t)buffer },
	};
	uint32_t access_pages_id;
	unsigned int npages;

	test_cmd_create_access(self->ioas_id, &access_cmd.id,
			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);

	for (npages = 1; npages < BUFFER_SIZE / PAGE_SIZE; npages++) {
		uint32_t mock_stdev_id;
		uint32_t mock_hwpt_id;

		access_cmd.access_pages.length = npages * PAGE_SIZE;

		/* Single map/unmap */
		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
					  MOCK_APERTURE_START);
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
				   &access_cmd));
		test_cmd_destroy_access_pages(
			access_cmd.id,
			access_cmd.access_pages.out_access_pages_id);

		/* Double user */
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
				   &access_cmd));
		access_pages_id = access_cmd.access_pages.out_access_pages_id;
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
				   &access_cmd));
		test_cmd_destroy_access_pages(
			access_cmd.id,
			access_cmd.access_pages.out_access_pages_id);
		test_cmd_destroy_access_pages(access_cmd.id, access_pages_id);

		/* Add/remove a domain with a user */
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
				   &access_cmd));
		test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
				     &mock_hwpt_id, NULL);
		check_map_cmd.id = mock_hwpt_id;
		ASSERT_EQ(0, ioctl(self->fd,
				   _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP),
				   &check_map_cmd));

		test_ioctl_destroy(mock_stdev_id);
		test_cmd_destroy_access_pages(
			access_cmd.id,
			access_cmd.access_pages.out_access_pages_id);

		test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
	}
	test_cmd_destroy_access(access_cmd.id);
}
1006
/*
 * Unmapping a range with pinned access pages triggers the access unmap
 * callback; the kernel tears down the pin itself, so a later explicit
 * destroy of the access pages must fail with ENOENT.
 */
TEST_F(iommufd_ioas, access_pin_unmap)
{
	struct iommu_test_cmd access_pages_cmd = {
		.size = sizeof(access_pages_cmd),
		.op = IOMMU_TEST_OP_ACCESS_PAGES,
		.access_pages = { .iova = MOCK_APERTURE_START,
				  .length = BUFFER_SIZE,
				  .uptr = (uintptr_t)buffer },
	};

	test_cmd_create_access(self->ioas_id, &access_pages_cmd.id,
			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
	test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, MOCK_APERTURE_START);
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			&access_pages_cmd));

	/* Trigger the unmap op */
	test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);

	/* kernel removed the item for us */
	test_err_destroy_access_pages(
		ENOENT, access_pages_cmd.id,
		access_pages_cmd.access_pages.out_access_pages_id);
}
1032
/*
 * Exercise IOMMU_TEST_OP_ACCESS_RW against the global buffer mapped at @iova.
 *
 * Fills the buffer with random data, then for every IOVA within +/-50 bytes
 * of the first page boundary and every length up to sizeof(tmp):
 *  - reads through the access and memcmp()s against the buffer contents;
 *  - writes fresh random data through the access and memcmp()s it landed
 *    in the buffer at the right offset.
 * Finishes with one BUFFER_SIZE transfer that spans multiple pages.
 * @def_flags is OR'd into every op (e.g. MOCK_ACCESS_RW_SLOW_PATH).
 */
static void check_access_rw(struct __test_metadata *_metadata, int fd,
			    unsigned int access_id, uint64_t iova,
			    unsigned int def_flags)
{
	uint16_t tmp[32];
	struct iommu_test_cmd access_cmd = {
		.size = sizeof(access_cmd),
		.op = IOMMU_TEST_OP_ACCESS_RW,
		.id = access_id,
		.access_rw = { .uptr = (uintptr_t)tmp },
	};
	uint16_t *buffer16 = buffer;
	unsigned int i;
	void *tmp2;

	/* Randomize the buffer so mismatches are detectable */
	for (i = 0; i != BUFFER_SIZE / sizeof(*buffer16); i++)
		buffer16[i] = rand();

	/* Sweep unaligned IOVAs straddling the first page boundary */
	for (access_cmd.access_rw.iova = iova + PAGE_SIZE - 50;
	     access_cmd.access_rw.iova < iova + PAGE_SIZE + 50;
	     access_cmd.access_rw.iova++) {
		for (access_cmd.access_rw.length = 1;
		     access_cmd.access_rw.length < sizeof(tmp);
		     access_cmd.access_rw.length++) {
			/* Read direction: kernel copies buffer -> tmp */
			access_cmd.access_rw.flags = def_flags;
			ASSERT_EQ(0, ioctl(fd,
					   _IOMMU_TEST_CMD(
						   IOMMU_TEST_OP_ACCESS_RW),
					   &access_cmd));
			ASSERT_EQ(0,
				  memcmp(buffer + (access_cmd.access_rw.iova -
						   iova),
					 tmp, access_cmd.access_rw.length));

			/* Write direction: kernel copies tmp -> buffer */
			for (i = 0; i != ARRAY_SIZE(tmp); i++)
				tmp[i] = rand();
			access_cmd.access_rw.flags = def_flags |
						     MOCK_ACCESS_RW_WRITE;
			ASSERT_EQ(0, ioctl(fd,
					   _IOMMU_TEST_CMD(
						   IOMMU_TEST_OP_ACCESS_RW),
					   &access_cmd));
			ASSERT_EQ(0,
				  memcmp(buffer + (access_cmd.access_rw.iova -
						   iova),
					 tmp, access_cmd.access_rw.length));
		}
	}

	/* Multi-page test */
	tmp2 = malloc(BUFFER_SIZE);
	ASSERT_NE(NULL, tmp2);
	access_cmd.access_rw.iova = iova;
	access_cmd.access_rw.length = BUFFER_SIZE;
	access_cmd.access_rw.flags = def_flags;
	access_cmd.access_rw.uptr = (uintptr_t)tmp2;
	ASSERT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
			   &access_cmd));
	ASSERT_EQ(0, memcmp(buffer, tmp2, access_cmd.access_rw.length));
	free(tmp2);
}
1094
/* Run the access read/write checks on both the fast and slow kernel paths */
TEST_F(iommufd_ioas, access_rw)
{
	__u32 access_id;
	__u64 iova;

	test_cmd_create_access(self->ioas_id, &access_id, 0);
	test_ioctl_ioas_map(buffer, BUFFER_SIZE, &iova);
	check_access_rw(_metadata, self->fd, access_id, iova, 0);
	check_access_rw(_metadata, self->fd, access_id, iova,
			MOCK_ACCESS_RW_SLOW_PATH);
	test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
	test_cmd_destroy_access(access_id);
}
1108
/* Same as access_rw but with the mapping offset by half a mock page */
TEST_F(iommufd_ioas, access_rw_unaligned)
{
	__u32 access_id;
	__u64 iova;

	test_cmd_create_access(self->ioas_id, &access_id, 0);

	/* Unaligned pages */
	iova = self->base_iova + MOCK_PAGE_SIZE;
	test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, iova);
	check_access_rw(_metadata, self->fd, access_id, iova, 0);
	test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
	test_cmd_destroy_access(access_id);
}
1123
/*
 * Create a mapping from a forked child's mm and let the child exit.  Whether
 * the mapping is still usable afterwards depends on whether a domain existed
 * at map time (pages pinned inside the fork) or not (pinning must happen
 * later, which fails because the owning mm is gone).
 */
TEST_F(iommufd_ioas, fork_gone)
{
	__u32 access_id;
	pid_t child;

	test_cmd_create_access(self->ioas_id, &access_id, 0);

	/* Create a mapping with a different mm */
	child = fork();
	if (!child) {
		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
					  MOCK_APERTURE_START);
		exit(0);
	}
	ASSERT_NE(-1, child);
	ASSERT_EQ(child, waitpid(child, NULL, 0));

	if (self->stdev_id) {
		/*
		 * If a domain already existed then everything was pinned within
		 * the fork, so this copies from one domain to another.
		 */
		test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
		check_access_rw(_metadata, self->fd, access_id,
				MOCK_APERTURE_START, 0);

	} else {
		/*
		 * Otherwise we need to actually pin pages which can't happen
		 * since the fork is gone.
		 */
		test_err_mock_domain(EFAULT, self->ioas_id, NULL, NULL);
	}

	test_cmd_destroy_access(access_id);
}
1160
/*
 * Like fork_gone, but the child stays alive (blocked on a pipe read) while
 * the parent attaches a domain and reads through the mapping, so the child's
 * mm is still present when pinning happens.
 */
TEST_F(iommufd_ioas, fork_present)
{
	__u32 access_id;
	int pipefds[2];
	uint64_t tmp;
	pid_t child;
	int efd;

	test_cmd_create_access(self->ioas_id, &access_id, 0);

	ASSERT_EQ(0, pipe2(pipefds, O_CLOEXEC));
	efd = eventfd(0, EFD_CLOEXEC);
	ASSERT_NE(-1, efd);

	/* Create a mapping with a different mm */
	child = fork();
	if (!child) {
		__u64 iova;
		uint64_t one = 1;

		close(pipefds[1]);
		test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
					  MOCK_APERTURE_START);
		/* Signal the parent that the map is in place */
		if (write(efd, &one, sizeof(one)) != sizeof(one))
			exit(100);
		/* Block until the parent closes the write end of the pipe */
		if (read(pipefds[0], &iova, 1) != 1)
			exit(100);
		exit(0);
	}
	close(pipefds[0]);
	ASSERT_NE(-1, child);
	/* eventfd reads always transfer 8 bytes */
	ASSERT_EQ(8, read(efd, &tmp, sizeof(tmp)));

	/* Read pages from the remote process */
	test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
	check_access_rw(_metadata, self->fd, access_id, MOCK_APERTURE_START, 0);

	/* Release the child; its exit status is deliberately ignored */
	ASSERT_EQ(0, close(pipefds[1]));
	ASSERT_EQ(child, waitpid(child, NULL, 0));

	test_cmd_destroy_access(access_id);
}
1203
/*
 * IOMMU_OPTION_HUGE_PAGES defaults to 1, can be toggled to 0 and back, and
 * rejects any value other than 0/1.
 */
TEST_F(iommufd_ioas, ioas_option_huge_pages)
{
	struct iommu_option cmd = {
		.size = sizeof(cmd),
		.option_id = IOMMU_OPTION_HUGE_PAGES,
		.op = IOMMU_OPTION_OP_GET,
		.val64 = 3, /* poison value, must be overwritten by GET */
		.object_id = self->ioas_id,
	};

	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	ASSERT_EQ(1, cmd.val64);

	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 0;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	cmd.op = IOMMU_OPTION_OP_GET;
	cmd.val64 = 3;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	ASSERT_EQ(0, cmd.val64);

	/* Only 0 and 1 are valid values */
	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 2;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));

	/* Restore the default */
	cmd.op = IOMMU_OPTION_OP_SET;
	cmd.val64 = 1;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
}
1234
/*
 * Sweep map lengths from 1 byte to just under two pages.  With a mock domain
 * attached, lengths that are not a multiple of MOCK_PAGE_SIZE must be
 * rejected; otherwise every length maps and unmaps cleanly.
 */
TEST_F(iommufd_ioas, ioas_iova_alloc)
{
	unsigned int length;
	__u64 iova;

	for (length = 1; length != PAGE_SIZE * 2; length++) {
		if (variant->mock_domains && (length % MOCK_PAGE_SIZE)) {
			test_err_ioctl_ioas_map(EINVAL, buffer, length, &iova);
		} else {
			test_ioctl_ioas_map(buffer, length, &iova);
			test_ioctl_ioas_unmap(iova, length);
		}
	}
}
1249
/*
 * Changing IOMMU_OPTION_HUGE_PAGES changes the required mapping alignment.
 * Upgrading alignment (val64 = 0) is only allowed while all existing maps
 * already satisfy it, and is refused with EADDRINUSE otherwise.
 */
TEST_F(iommufd_ioas, ioas_align_change)
{
	struct iommu_option cmd = {
		.size = sizeof(cmd),
		.option_id = IOMMU_OPTION_HUGE_PAGES,
		.op = IOMMU_OPTION_OP_SET,
		.object_id = self->ioas_id,
		/* 0 means everything must be aligned to PAGE_SIZE */
		.val64 = 0,
	};

	/*
	 * We cannot upgrade the alignment using OPTION_HUGE_PAGES when a domain
	 * and map are present.
	 */
	if (variant->mock_domains)
		return;

	/*
	 * We can upgrade to PAGE_SIZE alignment when things are aligned right
	 */
	test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, MOCK_APERTURE_START);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	/* Misalignment is rejected at map time */
	test_err_ioctl_ioas_map_fixed(EINVAL, buffer + MOCK_PAGE_SIZE,
				      PAGE_SIZE,
				      MOCK_APERTURE_START + PAGE_SIZE);
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	/* Reduce alignment */
	cmd.val64 = 1;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));

	/* Confirm misalignment is rejected during alignment upgrade */
	test_ioctl_ioas_map_fixed(buffer + MOCK_PAGE_SIZE, PAGE_SIZE,
				  MOCK_APERTURE_START + PAGE_SIZE);
	cmd.val64 = 0;
	EXPECT_ERRNO(EADDRINUSE, ioctl(self->fd, IOMMU_OPTION, &cmd));

	test_ioctl_ioas_unmap(MOCK_APERTURE_START + PAGE_SIZE, PAGE_SIZE);
	test_ioctl_ioas_unmap(MOCK_APERTURE_START, PAGE_SIZE);
}
1293
/*
 * Sweep IOMMU_IOAS_COPY source IOVAs in 511-byte steps across a window that
 * starts one page before the mapped range.  Copies whose source window falls
 * even partly outside the mapping must fail with ENOENT; in-range copies
 * succeed and are unmapped from the destination IOAS again.
 */
TEST_F(iommufd_ioas, copy_sweep)
{
	struct iommu_ioas_copy copy_cmd = {
		.size = sizeof(copy_cmd),
		.flags = IOMMU_IOAS_MAP_FIXED_IOVA,
		.src_ioas_id = self->ioas_id,
		.dst_iova = MOCK_APERTURE_START,
		.length = MOCK_PAGE_SIZE,
	};
	unsigned int dst_ioas_id;
	uint64_t last_iova;
	uint64_t iova;

	test_ioctl_ioas_alloc(&dst_ioas_id);
	copy_cmd.dst_ioas_id = dst_ioas_id;

	/*
	 * NOTE(review): without mock domains the mapping is made one byte
	 * shorter — presumably to exercise a length that no domain could
	 * round up; confirm against the kernel side.
	 */
	if (variant->mock_domains)
		last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 1;
	else
		last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 2;

	test_ioctl_ioas_map_fixed(buffer, last_iova - MOCK_APERTURE_START + 1,
				  MOCK_APERTURE_START);

	for (iova = MOCK_APERTURE_START - PAGE_SIZE; iova <= last_iova;
	     iova += 511) {
		copy_cmd.src_iova = iova;
		if (iova < MOCK_APERTURE_START ||
		    iova + copy_cmd.length - 1 > last_iova) {
			EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_IOAS_COPY,
						   &copy_cmd));
		} else {
			ASSERT_EQ(0,
				  ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
			test_ioctl_ioas_unmap_id(dst_ioas_id, copy_cmd.dst_iova,
						 copy_cmd.length);
		}
	}

	test_ioctl_destroy(dst_ioas_id);
}
1335
/*
 * Fixture with one or two mock domains attached to a single IOAS, optionally
 * backed by hugepage mmaps.
 */
FIXTURE(iommufd_mock_domain)
{
	int fd;
	uint32_t ioas_id;
	uint32_t hwpt_id;	/* convenience copy of hwpt_ids[0] */
	uint32_t hwpt_ids[2];
	uint32_t stdev_ids[2];
	uint32_t idev_ids[2];
	int mmap_flags;		/* flags tests should use for their mmaps */
	size_t mmap_buf_size;	/* buffer size tests should mmap */
};

FIXTURE_VARIANT(iommufd_mock_domain)
{
	unsigned int mock_domains;	/* how many mock domains to attach */
	bool hugepages;
};

FIXTURE_SETUP(iommufd_mock_domain)
{
	unsigned int i;

	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);
	test_ioctl_ioas_alloc(&self->ioas_id);

	ASSERT_GE(ARRAY_SIZE(self->hwpt_ids), variant->mock_domains);

	for (i = 0; i != variant->mock_domains; i++)
		test_cmd_mock_domain(self->ioas_id, &self->stdev_ids[i],
				     &self->hwpt_ids[i], &self->idev_ids[i]);
	self->hwpt_id = self->hwpt_ids[0];

	self->mmap_flags = MAP_SHARED | MAP_ANONYMOUS;
	self->mmap_buf_size = PAGE_SIZE * 8;
	if (variant->hugepages) {
		/*
		 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
		 * not available.
		 */
		self->mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
		self->mmap_buf_size = HUGEPAGE_SIZE * 2;
	}
}

FIXTURE_TEARDOWN(iommufd_mock_domain)
{
	teardown_iommufd(self->fd, _metadata);
}

FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain)
{
	.mock_domains = 1,
	.hugepages = false,
};

FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains)
{
	.mock_domains = 2,
	.hugepages = false,
};

FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_hugepage)
{
	.mock_domains = 1,
	.hugepages = true,
};

FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains_hugepage)
{
	.mock_domains = 2,
	.hugepages = true,
};
1409
/*
 * Have the kernel check that the user pages made it to the iommu_domain.
 * Verifies against self->hwpt_id, and also against self->hwpt_ids[1] when a
 * second domain exists (two_domains variants).
 */
#define check_mock_iova(_ptr, _iova, _length)                                \
	({                                                                   \
		struct iommu_test_cmd check_map_cmd = {                      \
			.size = sizeof(check_map_cmd),                       \
			.op = IOMMU_TEST_OP_MD_CHECK_MAP,                    \
			.id = self->hwpt_id,                                 \
			.check_map = { .iova = _iova,                        \
				       .length = _length,                    \
				       .uptr = (uintptr_t)(_ptr) },          \
		};                                                           \
		ASSERT_EQ(0,                                                 \
			  ioctl(self->fd,                                    \
				_IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP), \
				&check_map_cmd));                            \
		if (self->hwpt_ids[1]) {                                     \
			check_map_cmd.id = self->hwpt_ids[1];                \
			ASSERT_EQ(0,                                         \
				  ioctl(self->fd,                            \
					_IOMMU_TEST_CMD(                     \
						IOMMU_TEST_OP_MD_CHECK_MAP), \
					&check_map_cmd));                    \
		}                                                            \
	})
1434
/*
 * Basic map into a mock domain, plus EFAULT behavior when part or all of the
 * user VA range is unmapped before the ioas map.
 */
TEST_F(iommufd_mock_domain, basic)
{
	size_t buf_size = self->mmap_buf_size;
	uint8_t *buf;
	__u64 iova;

	/* Simple one page map */
	test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
	check_mock_iova(buffer, iova, PAGE_SIZE);

	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
		   0);
	ASSERT_NE(MAP_FAILED, buf);

	/* EFAULT half way through mapping */
	ASSERT_EQ(0, munmap(buf + buf_size / 2, buf_size / 2));
	test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);

	/* EFAULT on first page */
	ASSERT_EQ(0, munmap(buf, buf_size / 2));
	test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
}
1457
/*
 * Map a read-only private file mapping, then dirty it.  The PFN the iommu
 * sees must not change across the CoW write (gup "unshare" behavior).
 */
TEST_F(iommufd_mock_domain, ro_unshare)
{
	uint8_t *buf;
	__u64 iova;
	int fd;

	fd = open("/proc/self/exe", O_RDONLY);
	ASSERT_NE(-1, fd);

	buf = mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	ASSERT_NE(MAP_FAILED, buf);
	close(fd);

	/*
	 * There have been lots of changes to the "unshare" mechanism in
	 * get_user_pages(), make sure it works right. The write to the page
	 * after we map it for reading should not change the assigned PFN.
	 */
	ASSERT_EQ(0,
		  _test_ioctl_ioas_map(self->fd, self->ioas_id, buf, PAGE_SIZE,
				       &iova, IOMMU_IOAS_MAP_READABLE));
	check_mock_iova(buf, iova, PAGE_SIZE);
	memset(buf, 1, PAGE_SIZE);
	check_mock_iova(buf, iova, PAGE_SIZE);
	ASSERT_EQ(0, munmap(buf, PAGE_SIZE));
}
1484
1485TEST_F(iommufd_mock_domain, all_aligns)
1486{
1487 size_t test_step = variant->hugepages ? (self->mmap_buf_size / 16) :
1488 MOCK_PAGE_SIZE;
1489 size_t buf_size = self->mmap_buf_size;
1490 unsigned int start;
1491 unsigned int end;
1492 uint8_t *buf;
1493
1494 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1495 0);
1496 ASSERT_NE(MAP_FAILED, buf);
1497 check_refs(buf, buf_size, 0);
1498
1499 /*
1500 * Map every combination of page size and alignment within a big region,
1501 * less for hugepage case as it takes so long to finish.
1502 */
1503 for (start = 0; start < buf_size; start += test_step) {
1504 if (variant->hugepages)
1505 end = buf_size;
1506 else
1507 end = start + MOCK_PAGE_SIZE;
1508 for (; end < buf_size; end += MOCK_PAGE_SIZE) {
1509 size_t length = end - start;
1510 __u64 iova;
1511
1512 test_ioctl_ioas_map(buf + start, length, &iova);
1513 check_mock_iova(buf + start, iova, length);
1514 check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1515 end / PAGE_SIZE * PAGE_SIZE -
1516 start / PAGE_SIZE * PAGE_SIZE,
1517 1);
1518
1519 test_ioctl_ioas_unmap(iova, length);
1520 }
1521 }
1522 check_refs(buf, buf_size, 0);
1523 ASSERT_EQ(0, munmap(buf, buf_size));
1524}
1525
1526TEST_F(iommufd_mock_domain, all_aligns_copy)
1527{
1528 size_t test_step = variant->hugepages ? self->mmap_buf_size / 16 :
1529 MOCK_PAGE_SIZE;
1530 size_t buf_size = self->mmap_buf_size;
1531 unsigned int start;
1532 unsigned int end;
1533 uint8_t *buf;
1534
1535 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1536 0);
1537 ASSERT_NE(MAP_FAILED, buf);
1538 check_refs(buf, buf_size, 0);
1539
1540 /*
1541 * Map every combination of page size and alignment within a big region,
1542 * less for hugepage case as it takes so long to finish.
1543 */
1544 for (start = 0; start < buf_size; start += test_step) {
1545 if (variant->hugepages)
1546 end = buf_size;
1547 else
1548 end = start + MOCK_PAGE_SIZE;
1549 for (; end < buf_size; end += MOCK_PAGE_SIZE) {
1550 size_t length = end - start;
1551 unsigned int old_id;
1552 uint32_t mock_stdev_id;
1553 __u64 iova;
1554
1555 test_ioctl_ioas_map(buf + start, length, &iova);
1556
1557 /* Add and destroy a domain while the area exists */
1558 old_id = self->hwpt_ids[1];
1559 test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
1560 &self->hwpt_ids[1], NULL);
1561
1562 check_mock_iova(buf + start, iova, length);
1563 check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1564 end / PAGE_SIZE * PAGE_SIZE -
1565 start / PAGE_SIZE * PAGE_SIZE,
1566 1);
1567
1568 test_ioctl_destroy(mock_stdev_id);
1569 self->hwpt_ids[1] = old_id;
1570
1571 test_ioctl_ioas_unmap(iova, length);
1572 }
1573 }
1574 check_refs(buf, buf_size, 0);
1575 ASSERT_EQ(0, munmap(buf, buf_size));
1576}
1577
/*
 * Pin pages in a domain-less IOAS, IOAS_COPY them into the fixture's IOAS
 * (which has domains), then replace the access's IOAS with a new one and
 * repeat to prove the copied mapping survives the source teardown.
 */
TEST_F(iommufd_mock_domain, user_copy)
{
	struct iommu_test_cmd access_cmd = {
		.size = sizeof(access_cmd),
		.op = IOMMU_TEST_OP_ACCESS_PAGES,
		.access_pages = { .length = BUFFER_SIZE,
				  .uptr = (uintptr_t)buffer },
	};
	struct iommu_ioas_copy copy_cmd = {
		.size = sizeof(copy_cmd),
		.flags = IOMMU_IOAS_MAP_FIXED_IOVA,
		.dst_ioas_id = self->ioas_id,
		.dst_iova = MOCK_APERTURE_START,
		.length = BUFFER_SIZE,
	};
	struct iommu_ioas_unmap unmap_cmd = {
		.size = sizeof(unmap_cmd),
		.ioas_id = self->ioas_id,
		.iova = MOCK_APERTURE_START,
		.length = BUFFER_SIZE,
	};
	unsigned int new_ioas_id, ioas_id;

	/* Pin the pages in an IOAS with no domains then copy to an IOAS with domains */
	test_ioctl_ioas_alloc(&ioas_id);
	test_ioctl_ioas_map_id(ioas_id, buffer, BUFFER_SIZE,
			       &copy_cmd.src_iova);

	test_cmd_create_access(ioas_id, &access_cmd.id,
			       MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);

	access_cmd.access_pages.iova = copy_cmd.src_iova;
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			&access_cmd));
	copy_cmd.src_ioas_id = ioas_id;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
	check_mock_iova(buffer, MOCK_APERTURE_START, BUFFER_SIZE);

	/* Now replace the ioas with a new one */
	test_ioctl_ioas_alloc(&new_ioas_id);
	test_ioctl_ioas_map_id(new_ioas_id, buffer, BUFFER_SIZE,
			       &copy_cmd.src_iova);
	test_cmd_access_replace_ioas(access_cmd.id, new_ioas_id);

	/* Destroy the old ioas and cleanup copied mapping */
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_UNMAP, &unmap_cmd));
	test_ioctl_destroy(ioas_id);

	/* Then run the same test again with the new ioas */
	access_cmd.access_pages.iova = copy_cmd.src_iova;
	ASSERT_EQ(0,
		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
			&access_cmd));
	copy_cmd.src_ioas_id = new_ioas_id;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
	check_mock_iova(buffer, MOCK_APERTURE_START, BUFFER_SIZE);

	test_cmd_destroy_access_pages(
		access_cmd.id, access_cmd.access_pages.out_access_pages_id);
	test_cmd_destroy_access(access_cmd.id);

	test_ioctl_destroy(new_ioas_id);
}
1642
/*
 * Exercise replacing the IOAS/HWPT a mock device is attached to, including
 * re-attaching to the same object and reusing the second domain's hwpt.
 */
TEST_F(iommufd_mock_domain, replace)
{
	uint32_t ioas_id;

	test_ioctl_ioas_alloc(&ioas_id);

	test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);

	/*
	 * Replacing the IOAS causes the prior HWPT to be deallocated, thus we
	 * should get enoent when we try to use it.
	 */
	if (variant->mock_domains == 1)
		test_err_mock_domain_replace(ENOENT, self->stdev_ids[0],
					     self->hwpt_ids[0]);

	/* Replace with the same object, then rotate through hwpts */
	test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
	if (variant->mock_domains >= 2) {
		test_cmd_mock_domain_replace(self->stdev_ids[0],
					     self->hwpt_ids[1]);
		test_cmd_mock_domain_replace(self->stdev_ids[0],
					     self->hwpt_ids[1]);
		test_cmd_mock_domain_replace(self->stdev_ids[0],
					     self->hwpt_ids[0]);
	}

	/* Return to the fixture's IOAS so teardown is clean */
	test_cmd_mock_domain_replace(self->stdev_ids[0], self->ioas_id);
	test_ioctl_destroy(ioas_id);
}
1672
/*
 * HWPT allocation: invalid flags are rejected, both plain and NEST_PARENT
 * hwpts allocate, an attached hwpt cannot be destroyed (EBUSY), and a hwpt
 * can serve as the attach target for a new mock device.
 */
TEST_F(iommufd_mock_domain, alloc_hwpt)
{
	int i;

	for (i = 0; i != variant->mock_domains; i++) {
		uint32_t hwpt_id[2];
		uint32_t stddev_id;

		/* All bits other than NEST_PARENT are invalid flags */
		test_err_hwpt_alloc(EOPNOTSUPP,
				    self->idev_ids[i], self->ioas_id,
				    ~IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[0]);
		test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
				    0, &hwpt_id[0]);
		test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
				    IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[1]);

		/* Do a hw_pagetable rotation test */
		test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[0]);
		EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[0]));
		test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[1]);
		EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[1]));
		test_cmd_mock_domain_replace(self->stdev_ids[i], self->ioas_id);
		test_ioctl_destroy(hwpt_id[1]);

		test_cmd_mock_domain(hwpt_id[0], &stddev_id, NULL, NULL);
		test_ioctl_destroy(stddev_id);
		test_ioctl_destroy(hwpt_id[0]);
	}
}
1702
/*
 * Fixture for dirty-tracking tests: a hugepage-aligned test buffer plus a
 * dirty bitmap sized for MOCK_PAGE_SIZE granularity, and one mock domain.
 */
FIXTURE(iommufd_dirty_tracking)
{
	int fd;
	uint32_t ioas_id;
	uint32_t hwpt_id;
	uint32_t stdev_id;
	uint32_t idev_id;
	unsigned long page_size;	/* dirty-tracking granule (MOCK_PAGE_SIZE) */
	unsigned long bitmap_size;	/* bytes, one bit per page_size unit */
	void *bitmap;
	void *buffer;
};

FIXTURE_VARIANT(iommufd_dirty_tracking)
{
	unsigned long buffer_size;
};

FIXTURE_SETUP(iommufd_dirty_tracking)
{
	void *vrc;
	int rc;

	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);

	/* Large variants may exceed available memory - skip, don't fail */
	rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, variant->buffer_size);
	if (rc || !self->buffer) {
		SKIP(return, "Skipping buffer_size=%lu due to errno=%d",
		     variant->buffer_size, rc);
	}

	assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0);
	/* Replace the heap range with a MAP_SHARED anonymous mapping in place */
	vrc = mmap(self->buffer, variant->buffer_size, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	assert(vrc == self->buffer);

	self->page_size = MOCK_PAGE_SIZE;
	self->bitmap_size =
		variant->buffer_size / self->page_size / BITS_PER_BYTE;

	/* Provision with an extra (MOCK_PAGE_SIZE) for the unaligned case */
	rc = posix_memalign(&self->bitmap, PAGE_SIZE,
			    self->bitmap_size + MOCK_PAGE_SIZE);
	assert(!rc);
	assert(self->bitmap);
	assert((uintptr_t)self->bitmap % PAGE_SIZE == 0);

	test_ioctl_ioas_alloc(&self->ioas_id);
	test_cmd_mock_domain(self->ioas_id, &self->stdev_id, &self->hwpt_id,
			     &self->idev_id);
}
1755
1756FIXTURE_TEARDOWN(iommufd_dirty_tracking)
1757{
1758 munmap(self->buffer, variant->buffer_size);
1759 munmap(self->bitmap, self->bitmap_size);
1760 teardown_iommufd(self->fd, _metadata);
1761}
1762
/* Buffer sizes chosen to hit every bitmap-width edge case */
FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k)
{
	/* one u32 index bitmap */
	.buffer_size = 128UL * 1024UL,
};

FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256k)
{
	/* one u64 index bitmap */
	.buffer_size = 256UL * 1024UL,
};

FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty640k)
{
	/* two u64 index and trailing end bitmap */
	.buffer_size = 640UL * 1024UL,
};

FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M)
{
	/* 4K bitmap (128M IOVA range) */
	.buffer_size = 128UL * 1024UL * 1024UL,
};

FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256M)
{
	/* 8K bitmap (256M IOVA range) */
	.buffer_size = 256UL * 1024UL * 1024UL,
};
1792
/*
 * A dirty-tracking HWPT must refuse devices that cannot do dirty tracking
 * (EINVAL), and a device without the capability must not be able to allocate
 * a dirty-tracking HWPT at all (EOPNOTSUPP).
 */
TEST_F(iommufd_dirty_tracking, enforce_dirty)
{
	uint32_t ioas_id, stddev_id, idev_id;
	uint32_t hwpt_id, _hwpt_id;
	uint32_t dev_flags;

	/* Regular case */
	dev_flags = MOCK_FLAGS_DEVICE_NO_DIRTY;
	test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
	/* A no-dirty device cannot attach to a dirty-tracking hwpt */
	test_err_mock_domain_flags(EINVAL, hwpt_id, dev_flags, &stddev_id,
				   NULL);
	test_ioctl_destroy(stddev_id);
	test_ioctl_destroy(hwpt_id);

	/* IOMMU device does not support dirty tracking */
	test_ioctl_ioas_alloc(&ioas_id);
	test_cmd_mock_domain_flags(ioas_id, dev_flags, &stddev_id, &_hwpt_id,
				   &idev_id);
	test_err_hwpt_alloc(EOPNOTSUPP, idev_id, ioas_id,
			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
	test_ioctl_destroy(stddev_id);
}
1817
/* Dirty tracking can be toggled on and off on a dirty-capable HWPT */
TEST_F(iommufd_dirty_tracking, set_dirty_tracking)
{
	uint32_t stddev_id;
	uint32_t hwpt_id;

	test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
	test_cmd_set_dirty_tracking(hwpt_id, true);
	test_cmd_set_dirty_tracking(hwpt_id, false);

	test_ioctl_destroy(stddev_id);
	test_ioctl_destroy(hwpt_id);
}
1832
/* The mock device must report IOMMU_HW_CAP_DIRTY_TRACKING in its caps */
TEST_F(iommufd_dirty_tracking, device_dirty_capability)
{
	uint32_t caps = 0;
	uint32_t stddev_id;
	uint32_t hwpt_id;

	test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, 0, &hwpt_id);
	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
	test_cmd_get_hw_capabilities(self->idev_id, caps,
				     IOMMU_HW_CAP_DIRTY_TRACKING);
	ASSERT_EQ(IOMMU_HW_CAP_DIRTY_TRACKING,
		  caps & IOMMU_HW_CAP_DIRTY_TRACKING);

	test_ioctl_destroy(stddev_id);
	test_ioctl_destroy(hwpt_id);
}
1849
/*
 * Read back dirty bitmaps for the whole mapped range, with the user bitmap
 * both page-aligned and deliberately misaligned by MOCK_PAGE_SIZE.
 */
TEST_F(iommufd_dirty_tracking, get_dirty_bitmap)
{
	uint32_t stddev_id;
	uint32_t hwpt_id;
	uint32_t ioas_id;

	test_ioctl_ioas_alloc(&ioas_id);
	test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
				     variant->buffer_size, MOCK_APERTURE_START);

	test_cmd_hwpt_alloc(self->idev_id, ioas_id,
			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);

	test_cmd_set_dirty_tracking(hwpt_id, true);

	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
				MOCK_APERTURE_START, self->page_size,
				self->bitmap, self->bitmap_size, 0, _metadata);

	/* PAGE_SIZE unaligned bitmap */
	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
				MOCK_APERTURE_START, self->page_size,
				self->bitmap + MOCK_PAGE_SIZE,
				self->bitmap_size, 0, _metadata);

	test_ioctl_destroy(stddev_id);
	test_ioctl_destroy(hwpt_id);
}
1879
/*
 * Same as get_dirty_bitmap but with IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR, so
 * reading the bitmap does not reset the kernel's dirty state.
 */
TEST_F(iommufd_dirty_tracking, get_dirty_bitmap_no_clear)
{
	uint32_t stddev_id;
	uint32_t hwpt_id;
	uint32_t ioas_id;

	test_ioctl_ioas_alloc(&ioas_id);
	test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
				     variant->buffer_size, MOCK_APERTURE_START);

	test_cmd_hwpt_alloc(self->idev_id, ioas_id,
			    IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
	test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);

	test_cmd_set_dirty_tracking(hwpt_id, true);

	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
				MOCK_APERTURE_START, self->page_size,
				self->bitmap, self->bitmap_size,
				IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
				_metadata);

	/* Unaligned bitmap */
	test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
				MOCK_APERTURE_START, self->page_size,
				self->bitmap + MOCK_PAGE_SIZE,
				self->bitmap_size,
				IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
				_metadata);

	test_ioctl_destroy(stddev_id);
	test_ioctl_destroy(hwpt_id);
}
1913
1914/* VFIO compatibility IOCTLs */
1915
/* iommufd answers the basic VFIO container discovery ioctls */
TEST_F(iommufd, simple_ioctls)
{
	ASSERT_EQ(VFIO_API_VERSION, ioctl(self->fd, VFIO_GET_API_VERSION));
	ASSERT_EQ(1, ioctl(self->fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU));
}
1921
/* VFIO_IOMMU_UNMAP_DMA argument validation without a compat ioas attached */
TEST_F(iommufd, unmap_cmd)
{
	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
		.iova = MOCK_APERTURE_START,
		.size = PAGE_SIZE,
	};

	/* argsz smaller than the struct is rejected */
	unmap_cmd.argsz = 1;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));

	/* Unknown flag bit is rejected */
	unmap_cmd.argsz = sizeof(unmap_cmd);
	unmap_cmd.flags = 1 << 31;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));

	/* Valid args but no VFIO compat ioas has been set up */
	unmap_cmd.flags = 0;
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
}
1939
/* VFIO_IOMMU_MAP_DMA argument validation without a compat ioas attached */
TEST_F(iommufd, map_cmd)
{
	struct vfio_iommu_type1_dma_map map_cmd = {
		.iova = MOCK_APERTURE_START,
		.size = PAGE_SIZE,
		.vaddr = (__u64)buffer,
	};

	/* argsz smaller than the struct is rejected */
	map_cmd.argsz = 1;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));

	/* Unknown flag bit is rejected */
	map_cmd.argsz = sizeof(map_cmd);
	map_cmd.flags = 1 << 31;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));

	/* Requires a domain to be attached */
	map_cmd.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
}
1959
/* VFIO_IOMMU_GET_INFO argument validation without a compat ioas attached */
TEST_F(iommufd, info_cmd)
{
	struct vfio_iommu_type1_info info_cmd = {};

	/* Invalid argsz */
	info_cmd.argsz = 1;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));

	/* Valid argsz but no VFIO compat ioas has been set up */
	info_cmd.argsz = sizeof(info_cmd);
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
}
1971
/* VFIO_SET_IOMMU fails before any mock domain/compat ioas exists */
TEST_F(iommufd, set_iommu_cmd)
{
	/* Requires a domain to be attached */
	EXPECT_ERRNO(ENODEV,
		     ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU));
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU));
}
1979
/* IOMMU_VFIO_IOAS get/set/clear life cycle for the VFIO compat IOAS */
TEST_F(iommufd, vfio_ioas)
{
	struct iommu_vfio_ioas vfio_ioas_cmd = {
		.size = sizeof(vfio_ioas_cmd),
		.op = IOMMU_VFIO_IOAS_GET,
	};
	__u32 ioas_id;

	/* ENODEV if there is no compat ioas */
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));

	/* Invalid id for set */
	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_SET;
	EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));

	/* Valid id for set*/
	test_ioctl_ioas_alloc(&ioas_id);
	vfio_ioas_cmd.ioas_id = ioas_id;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));

	/* Same id comes back from get */
	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
	ASSERT_EQ(ioas_id, vfio_ioas_cmd.ioas_id);

	/* Clear works */
	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_CLEAR;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
	vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
	EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
}
2011
/*
 * Fixture exercising iommufd's VFIO type1 compatibility layer: one IOAS with
 * a mock domain, attached as the VFIO compat ioas under the variant's
 * VFIO_SET_IOMMU version.
 */
FIXTURE(vfio_compat_mock_domain)
{
	int fd;
	uint32_t ioas_id;
};

FIXTURE_VARIANT(vfio_compat_mock_domain)
{
	unsigned int version;	/* VFIO_TYPE1_IOMMU or VFIO_TYPE1v2_IOMMU */
};

FIXTURE_SETUP(vfio_compat_mock_domain)
{
	struct iommu_vfio_ioas vfio_ioas_cmd = {
		.size = sizeof(vfio_ioas_cmd),
		.op = IOMMU_VFIO_IOAS_SET,
	};

	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);

	/* Create what VFIO would consider a group */
	test_ioctl_ioas_alloc(&self->ioas_id);
	test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);

	/* Attach it to the vfio compat */
	vfio_ioas_cmd.ioas_id = self->ioas_id;
	ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
	ASSERT_EQ(0, ioctl(self->fd, VFIO_SET_IOMMU, variant->version));
}

FIXTURE_TEARDOWN(vfio_compat_mock_domain)
{
	teardown_iommufd(self->fd, _metadata);
}

FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v2)
{
	.version = VFIO_TYPE1v2_IOMMU,
};

FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v0)
{
	.version = VFIO_TYPE1_IOMMU,
};
2057
/* Setup/teardown alone must succeed */
TEST_F(vfio_compat_mock_domain, simple_close)
{
}

/*
 * The huge-pages option default depends on the compat version: type1 forces
 * it off (PAGE_SIZE granular unmap semantics), type1v2 leaves it on.
 */
TEST_F(vfio_compat_mock_domain, option_huge_pages)
{
	struct iommu_option cmd = {
		.size = sizeof(cmd),
		.option_id = IOMMU_OPTION_HUGE_PAGES,
		.op = IOMMU_OPTION_OP_GET,
		.val64 = 3, /* poison value, must be overwritten by GET */
		.object_id = self->ioas_id,
	};

	ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
	if (variant->version == VFIO_TYPE1_IOMMU) {
		ASSERT_EQ(0, cmd.val64);
	} else {
		ASSERT_EQ(1, cmd.val64);
	}
}
2079
/*
 * Helpers for checking that executing an ioctl on a command stored in
 * 'buffer' does not write beyond the command's declared size.
 */
/* Return true when every one of the 'len' bytes at 'buf' equals 'c' */
static bool is_filled(const void *buf, uint8_t c, size_t len)
{
	const uint8_t *bytes = buf;
	size_t i;

	for (i = 0; i != len; i++) {
		if (bytes[i] != c)
			return false;
	}
	return true;
}
2093
/*
 * Run 'cmd' on 'fd' with the command struct placed at the start of the global
 * 'buffer'.  The tail of 'buffer', past the size/argsz stored in the struct's
 * leading __u32, is filled with 0xAA first; the ioctl must succeed and must
 * leave that fill pattern intact (i.e. not write past the declared size).
 */
#define ioctl_check_buf(fd, cmd)                                         \
	({                                                               \
		size_t _cmd_len = *(__u32 *)buffer;                      \
									 \
		memset(buffer + _cmd_len, 0xAA, BUFFER_SIZE - _cmd_len); \
		ASSERT_EQ(0, ioctl(fd, cmd, buffer));                    \
		ASSERT_EQ(true, is_filled(buffer + _cmd_len, 0xAA,       \
					  BUFFER_SIZE - _cmd_len));      \
	})
2103
/*
 * Walk the vfio_info_cap_header chain that VFIO_IOMMU_GET_INFO wrote into the
 * global 'buffer' and validate each capability's contents and bounds.
 * 'info_cmd' points at the start of 'buffer', so cap_offset and each 'next'
 * are byte offsets from 'buffer'.
 */
static void check_vfio_info_cap_chain(struct __test_metadata *_metadata,
				      struct vfio_iommu_type1_info *info_cmd)
{
	const struct vfio_info_cap_header *cap;

	/* The first header must fit inside the reported argsz */
	ASSERT_GE(info_cmd->argsz, info_cmd->cap_offset + sizeof(*cap));
	cap = buffer + info_cmd->cap_offset;
	while (true) {
		size_t cap_size;

		/* An entry extends to the next entry, or to argsz for the last */
		if (cap->next)
			cap_size = (buffer + cap->next) - (void *)cap;
		else
			cap_size = (buffer + info_cmd->argsz) - (void *)cap;

		switch (cap->id) {
		case VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE: {
			struct vfio_iommu_type1_info_cap_iova_range *data =
				(void *)cap;

			/* The mock domain exposes exactly one IOVA range */
			ASSERT_EQ(1, data->header.version);
			ASSERT_EQ(1, data->nr_iovas);
			EXPECT_EQ(MOCK_APERTURE_START,
				  data->iova_ranges[0].start);
			EXPECT_EQ(MOCK_APERTURE_LAST, data->iova_ranges[0].end);
			break;
		}
		case VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL: {
			struct vfio_iommu_type1_info_dma_avail *data =
				(void *)cap;

			ASSERT_EQ(1, data->header.version);
			ASSERT_EQ(sizeof(*data), cap_size);
			break;
		}
		default:
			/* Any capability ID not handled above is a kernel bug */
			ASSERT_EQ(false, true);
			break;
		}
		if (!cap->next)
			break;

		/* 'next' must leave room for a header and move forward */
		ASSERT_GE(info_cmd->argsz, cap->next + sizeof(*cap));
		ASSERT_GE(buffer + cap->next, (void *)cap);
		cap = buffer + cap->next;
	}
}
2151
/*
 * Exercise VFIO_IOMMU_GET_INFO across every argsz from the pre-capability
 * ABI up through the full capability chain, verifying the kernel never
 * emits a corrupted chain and never writes past the user-supplied size
 * (enforced by ioctl_check_buf's fill-pattern check).
 */
TEST_F(vfio_compat_mock_domain, get_info)
{
	struct vfio_iommu_type1_info *info_cmd = buffer;
	unsigned int i;
	size_t caplen;

	/* Pre-cap ABI */
	*info_cmd = (struct vfio_iommu_type1_info){
		.argsz = offsetof(struct vfio_iommu_type1_info, cap_offset),
	};
	ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
	ASSERT_NE(0, info_cmd->iova_pgsizes);
	ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
		  info_cmd->flags);

	/* Read the cap chain size */
	*info_cmd = (struct vfio_iommu_type1_info){
		.argsz = sizeof(*info_cmd),
	};
	ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
	ASSERT_NE(0, info_cmd->iova_pgsizes);
	ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
		  info_cmd->flags);
	/* argsz too small for any caps: cap_offset stays 0, argsz is updated */
	ASSERT_EQ(0, info_cmd->cap_offset);
	ASSERT_LT(sizeof(*info_cmd), info_cmd->argsz);

	/* Read the caps, kernel should never create a corrupted caps */
	caplen = info_cmd->argsz;
	for (i = sizeof(*info_cmd); i < caplen; i++) {
		*info_cmd = (struct vfio_iommu_type1_info){
			.argsz = i,
		};
		ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
		ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
			  info_cmd->flags);
		if (!info_cmd->cap_offset)
			continue;
		check_vfio_info_cap_chain(_metadata, info_cmd);
	}
}
2192
/*
 * Fisher-Yates shuffle of 'array' in place, driven by rand().
 *
 * Each slot i receives an element drawn uniformly from the not-yet-placed
 * suffix [i, nelms).  The previous code drew 'other' from [0, nelms - i),
 * which selects from the already-placed prefix and produces a biased,
 * non-uniform permutation.
 */
static void shuffle_array(unsigned long *array, size_t nelms)
{
	unsigned int i;

	/* Shuffle */
	for (i = 0; i != nelms; i++) {
		unsigned long tmp = array[i];
		/* Pick from the unshuffled tail, including slot i itself */
		unsigned int other = i + rand() % (nelms - i);

		array[i] = array[other];
		array[other] = tmp;
	}
}
2206
/*
 * Cover VFIO_IOMMU_MAP_DMA/VFIO_IOMMU_UNMAP_DMA through the compat layer:
 * whole-buffer map/unmap, UNMAP_FLAG_ALL argument validation, then per-page
 * maps unmapped in random order.
 */
TEST_F(vfio_compat_mock_domain, map)
{
	struct vfio_iommu_type1_dma_map map_cmd = {
		.argsz = sizeof(map_cmd),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
		.vaddr = (uintptr_t)buffer,
		.size = BUFFER_SIZE,
		.iova = MOCK_APERTURE_START,
	};
	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
		.argsz = sizeof(unmap_cmd),
		.size = BUFFER_SIZE,
		.iova = MOCK_APERTURE_START,
	};
	/* VLA: PAGE_SIZE is only known at runtime (set in setup_sizes()) */
	unsigned long pages_iova[BUFFER_SIZE / PAGE_SIZE];
	unsigned int i;

	/* Simple map/unmap; the kernel reports back the unmapped byte count */
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
	ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);

	/* UNMAP_FLAG_ALL requires 0 iova/size */
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
	unmap_cmd.flags = VFIO_DMA_UNMAP_FLAG_ALL;
	EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));

	unmap_cmd.iova = 0;
	unmap_cmd.size = 0;
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
	ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);

	/* Small pages */
	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
		map_cmd.iova = pages_iova[i] =
			MOCK_APERTURE_START + i * PAGE_SIZE;
		map_cmd.vaddr = (uintptr_t)buffer + i * PAGE_SIZE;
		map_cmd.size = PAGE_SIZE;
		ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
	}
	/* Unmap in a random order to stress the IOVA lookup */
	shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));

	unmap_cmd.flags = 0;
	unmap_cmd.size = PAGE_SIZE;
	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
		unmap_cmd.iova = pages_iova[i];
		ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
	}
}
2256
/*
 * Map a MAP_HUGETLB buffer, then unmap it in randomly ordered sub-huge-page
 * pieces: the type1 compat mode may split up mappings, while type1v2 must
 * refuse partial unmaps with ENOENT.
 */
TEST_F(vfio_compat_mock_domain, huge_map)
{
	size_t buf_size = HUGEPAGE_SIZE * 2;
	struct vfio_iommu_type1_dma_map map_cmd = {
		.argsz = sizeof(map_cmd),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
		.size = buf_size,
		.iova = MOCK_APERTURE_START,
	};
	struct vfio_iommu_type1_dma_unmap unmap_cmd = {
		.argsz = sizeof(unmap_cmd),
	};
	unsigned long pages_iova[16];
	unsigned int i;
	void *buf;

	/* Test huge pages and splitting */
	buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
		   0);
	/* NOTE(review): skips rather than fails only if MAP_FAILED asserts;
	 * requires hugetlb pages to be reserved on the system */
	ASSERT_NE(MAP_FAILED, buf);
	map_cmd.vaddr = (uintptr_t)buf;
	ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));

	/* Carve the mapping into 16 equal pieces, unmapped in random order */
	unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
	for (i = 0; i != ARRAY_SIZE(pages_iova); i++)
		pages_iova[i] = MOCK_APERTURE_START + (i * unmap_cmd.size);
	shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));

	/* type1 mode can cut up larger mappings, type1v2 always fails */
	for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
		unmap_cmd.iova = pages_iova[i];
		unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
		if (variant->version == VFIO_TYPE1_IOMMU) {
			ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
					   &unmap_cmd));
		} else {
			EXPECT_ERRNO(ENOENT,
				     ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
					   &unmap_cmd));
		}
	}
}
2300
2301TEST_HARNESS_MAIN