
iommufd: Add a selftest

Cover the essential functionality of the iommufd with a directed test from
userspace. This aims to achieve reasonable functional coverage using the
in-kernel selftest framework.

A second test does a failure injection sweep of the success paths to study
error unwind behaviors.
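
The failure injection sweep relies on the kernel's fault-injection
framework: iommufd_fail_nth.c flips the fail* knobs under
/sys/kernel/debug and drives /proc/self/task/<tid>/fail-nth, and it
skips itself when fault injection is unavailable. As a rough guide
(not part of this patch), a kernel built for that sweep typically
needs something along the lines of:

  CONFIG_FAULT_INJECTION=y
  CONFIG_FAULT_INJECTION_DEBUG_FS=y
  CONFIG_FAILSLAB=y
  CONFIG_FAIL_PAGE_ALLOC=y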

This allows achieving high coverage of the corner cases in pages.c.

The selftest requires CONFIG_IOMMUFD_TEST to be enabled and several huge
pages, which may require:

echo 4 > /proc/sys/vm/nr_hugepages
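
For reference, a minimal sketch of building and running the suite
through kselftest, using the paths added by this patch (the exact
invocation may vary by tree; root is assumed because the tests open
/dev/iommu and exercise the rlimit accounting option):

  make -C tools/testing/selftests TARGETS=iommu
  sudo make -C tools/testing/selftests TARGETS=iommu run_tests

  # or run the binaries directly
  sudo ./tools/testing/selftests/iommu/iommufd
  sudo ./tools/testing/selftests/iommu/iommufd_fail_nth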

Link: https://lore.kernel.org/r/19-v6-a196d26f289e+11787-iommufd_jgg@nvidia.com
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Tested-by: Matthew Rosato <mjrosato@linux.ibm.com> # s390
Tested-by: Yi Liu <yi.l.liu@intel.com>
Tested-by: Eric Auger <eric.auger@redhat.com> # aarch64
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Yi Liu <yi.l.liu@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

2530 insertions(+)

tools/testing/selftests/Makefile (+1)

@@ -27,6 +27,7 @@
 TARGETS += futex
 TARGETS += gpio
 TARGETS += intel_pstate
+TARGETS += iommu
 TARGETS += ipc
 TARGETS += ir
 TARGETS += kcmp
tools/testing/selftests/iommu/.gitignore (new file, +3)

# SPDX-License-Identifier: GPL-2.0-only
/iommufd
/iommufd_fail_nth
tools/testing/selftests/iommu/Makefile (new file, +12)

# SPDX-License-Identifier: GPL-2.0-only
CFLAGS += -Wall -O2 -Wno-unused-function
CFLAGS += -I../../../../include/uapi/
CFLAGS += -I../../../../include/

CFLAGS += -D_GNU_SOURCE

TEST_GEN_PROGS :=
TEST_GEN_PROGS += iommufd
TEST_GEN_PROGS += iommufd_fail_nth

include ../lib.mk
tools/testing/selftests/iommu/config (new file, +2)

CONFIG_IOMMUFD
CONFIG_IOMMUFD_TEST
tools/testing/selftests/iommu/iommufd.c (new file, +1654)
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */ 3 + #include <stdlib.h> 4 + #include <sys/mman.h> 5 + #include <sys/eventfd.h> 6 + 7 + #define __EXPORTED_HEADERS__ 8 + #include <linux/vfio.h> 9 + 10 + #include "iommufd_utils.h" 11 + 12 + static void *buffer; 13 + 14 + static unsigned long PAGE_SIZE; 15 + static unsigned long HUGEPAGE_SIZE; 16 + 17 + #define MOCK_PAGE_SIZE (PAGE_SIZE / 2) 18 + 19 + static unsigned long get_huge_page_size(void) 20 + { 21 + char buf[80]; 22 + int ret; 23 + int fd; 24 + 25 + fd = open("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", 26 + O_RDONLY); 27 + if (fd < 0) 28 + return 2 * 1024 * 1024; 29 + 30 + ret = read(fd, buf, sizeof(buf)); 31 + close(fd); 32 + if (ret <= 0 || ret == sizeof(buf)) 33 + return 2 * 1024 * 1024; 34 + buf[ret] = 0; 35 + return strtoul(buf, NULL, 10); 36 + } 37 + 38 + static __attribute__((constructor)) void setup_sizes(void) 39 + { 40 + void *vrc; 41 + int rc; 42 + 43 + PAGE_SIZE = sysconf(_SC_PAGE_SIZE); 44 + HUGEPAGE_SIZE = get_huge_page_size(); 45 + 46 + BUFFER_SIZE = PAGE_SIZE * 16; 47 + rc = posix_memalign(&buffer, HUGEPAGE_SIZE, BUFFER_SIZE); 48 + assert(!rc); 49 + assert(buffer); 50 + assert((uintptr_t)buffer % HUGEPAGE_SIZE == 0); 51 + vrc = mmap(buffer, BUFFER_SIZE, PROT_READ | PROT_WRITE, 52 + MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0); 53 + assert(vrc == buffer); 54 + } 55 + 56 + FIXTURE(iommufd) 57 + { 58 + int fd; 59 + }; 60 + 61 + FIXTURE_SETUP(iommufd) 62 + { 63 + self->fd = open("/dev/iommu", O_RDWR); 64 + ASSERT_NE(-1, self->fd); 65 + } 66 + 67 + FIXTURE_TEARDOWN(iommufd) 68 + { 69 + teardown_iommufd(self->fd, _metadata); 70 + } 71 + 72 + TEST_F(iommufd, simple_close) 73 + { 74 + } 75 + 76 + TEST_F(iommufd, cmd_fail) 77 + { 78 + struct iommu_destroy cmd = { .size = sizeof(cmd), .id = 0 }; 79 + 80 + /* object id is invalid */ 81 + EXPECT_ERRNO(ENOENT, _test_ioctl_destroy(self->fd, 0)); 82 + /* Bad pointer */ 83 + EXPECT_ERRNO(EFAULT, ioctl(self->fd, IOMMU_DESTROY, NULL)); 84 + /* Unknown ioctl */ 85 + EXPECT_ERRNO(ENOTTY, 86 + ioctl(self->fd, _IO(IOMMUFD_TYPE, IOMMUFD_CMD_BASE - 1), 87 + &cmd)); 88 + } 89 + 90 + TEST_F(iommufd, cmd_length) 91 + { 92 + #define TEST_LENGTH(_struct, _ioctl) \ 93 + { \ 94 + struct { \ 95 + struct _struct cmd; \ 96 + uint8_t extra; \ 97 + } cmd = { .cmd = { .size = sizeof(struct _struct) - 1 }, \ 98 + .extra = UINT8_MAX }; \ 99 + int old_errno; \ 100 + int rc; \ 101 + \ 102 + EXPECT_ERRNO(EINVAL, ioctl(self->fd, _ioctl, &cmd)); \ 103 + cmd.cmd.size = sizeof(struct _struct) + 1; \ 104 + EXPECT_ERRNO(E2BIG, ioctl(self->fd, _ioctl, &cmd)); \ 105 + cmd.cmd.size = sizeof(struct _struct); \ 106 + rc = ioctl(self->fd, _ioctl, &cmd); \ 107 + old_errno = errno; \ 108 + cmd.cmd.size = sizeof(struct _struct) + 1; \ 109 + cmd.extra = 0; \ 110 + if (rc) { \ 111 + EXPECT_ERRNO(old_errno, \ 112 + ioctl(self->fd, _ioctl, &cmd)); \ 113 + } else { \ 114 + ASSERT_EQ(0, ioctl(self->fd, _ioctl, &cmd)); \ 115 + } \ 116 + } 117 + 118 + TEST_LENGTH(iommu_destroy, IOMMU_DESTROY); 119 + TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC); 120 + TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES); 121 + TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS); 122 + TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP); 123 + TEST_LENGTH(iommu_ioas_copy, IOMMU_IOAS_COPY); 124 + TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP); 125 + TEST_LENGTH(iommu_option, IOMMU_OPTION); 126 + TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS); 127 + #undef 
TEST_LENGTH 128 + } 129 + 130 + TEST_F(iommufd, cmd_ex_fail) 131 + { 132 + struct { 133 + struct iommu_destroy cmd; 134 + __u64 future; 135 + } cmd = { .cmd = { .size = sizeof(cmd), .id = 0 } }; 136 + 137 + /* object id is invalid and command is longer */ 138 + EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd)); 139 + /* future area is non-zero */ 140 + cmd.future = 1; 141 + EXPECT_ERRNO(E2BIG, ioctl(self->fd, IOMMU_DESTROY, &cmd)); 142 + /* Original command "works" */ 143 + cmd.cmd.size = sizeof(cmd.cmd); 144 + EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd)); 145 + /* Short command fails */ 146 + cmd.cmd.size = sizeof(cmd.cmd) - 1; 147 + EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_DESTROY, &cmd)); 148 + } 149 + 150 + TEST_F(iommufd, global_options) 151 + { 152 + struct iommu_option cmd = { 153 + .size = sizeof(cmd), 154 + .option_id = IOMMU_OPTION_RLIMIT_MODE, 155 + .op = IOMMU_OPTION_OP_GET, 156 + .val64 = 1, 157 + }; 158 + 159 + cmd.option_id = IOMMU_OPTION_RLIMIT_MODE; 160 + ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd)); 161 + ASSERT_EQ(0, cmd.val64); 162 + 163 + /* This requires root */ 164 + cmd.op = IOMMU_OPTION_OP_SET; 165 + cmd.val64 = 1; 166 + ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd)); 167 + cmd.val64 = 2; 168 + EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd)); 169 + 170 + cmd.op = IOMMU_OPTION_OP_GET; 171 + ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd)); 172 + ASSERT_EQ(1, cmd.val64); 173 + 174 + cmd.op = IOMMU_OPTION_OP_SET; 175 + cmd.val64 = 0; 176 + ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd)); 177 + 178 + cmd.op = IOMMU_OPTION_OP_GET; 179 + cmd.option_id = IOMMU_OPTION_HUGE_PAGES; 180 + EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd)); 181 + cmd.op = IOMMU_OPTION_OP_SET; 182 + EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd)); 183 + } 184 + 185 + FIXTURE(iommufd_ioas) 186 + { 187 + int fd; 188 + uint32_t ioas_id; 189 + uint32_t domain_id; 190 + uint64_t base_iova; 191 + }; 192 + 193 + FIXTURE_VARIANT(iommufd_ioas) 194 + { 195 + unsigned int mock_domains; 196 + unsigned int memory_limit; 197 + }; 198 + 199 + FIXTURE_SETUP(iommufd_ioas) 200 + { 201 + unsigned int i; 202 + 203 + 204 + self->fd = open("/dev/iommu", O_RDWR); 205 + ASSERT_NE(-1, self->fd); 206 + test_ioctl_ioas_alloc(&self->ioas_id); 207 + 208 + if (!variant->memory_limit) { 209 + test_ioctl_set_default_memory_limit(); 210 + } else { 211 + test_ioctl_set_temp_memory_limit(variant->memory_limit); 212 + } 213 + 214 + for (i = 0; i != variant->mock_domains; i++) { 215 + test_cmd_mock_domain(self->ioas_id, NULL, &self->domain_id); 216 + self->base_iova = MOCK_APERTURE_START; 217 + } 218 + } 219 + 220 + FIXTURE_TEARDOWN(iommufd_ioas) 221 + { 222 + test_ioctl_set_default_memory_limit(); 223 + teardown_iommufd(self->fd, _metadata); 224 + } 225 + 226 + FIXTURE_VARIANT_ADD(iommufd_ioas, no_domain) 227 + { 228 + }; 229 + 230 + FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain) 231 + { 232 + .mock_domains = 1, 233 + }; 234 + 235 + FIXTURE_VARIANT_ADD(iommufd_ioas, two_mock_domain) 236 + { 237 + .mock_domains = 2, 238 + }; 239 + 240 + FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain_limit) 241 + { 242 + .mock_domains = 1, 243 + .memory_limit = 16, 244 + }; 245 + 246 + TEST_F(iommufd_ioas, ioas_auto_destroy) 247 + { 248 + } 249 + 250 + TEST_F(iommufd_ioas, ioas_destroy) 251 + { 252 + if (self->domain_id) { 253 + /* IOAS cannot be freed while a domain is on it */ 254 + EXPECT_ERRNO(EBUSY, 255 + _test_ioctl_destroy(self->fd, self->ioas_id)); 256 + } else { 257 + /* Can 
allocate and manually free an IOAS table */ 258 + test_ioctl_destroy(self->ioas_id); 259 + } 260 + } 261 + 262 + TEST_F(iommufd_ioas, ioas_area_destroy) 263 + { 264 + /* Adding an area does not change ability to destroy */ 265 + test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova); 266 + if (self->domain_id) 267 + EXPECT_ERRNO(EBUSY, 268 + _test_ioctl_destroy(self->fd, self->ioas_id)); 269 + else 270 + test_ioctl_destroy(self->ioas_id); 271 + } 272 + 273 + TEST_F(iommufd_ioas, ioas_area_auto_destroy) 274 + { 275 + int i; 276 + 277 + /* Can allocate and automatically free an IOAS table with many areas */ 278 + for (i = 0; i != 10; i++) { 279 + test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, 280 + self->base_iova + i * PAGE_SIZE); 281 + } 282 + } 283 + 284 + TEST_F(iommufd_ioas, area) 285 + { 286 + int i; 287 + 288 + /* Unmap fails if nothing is mapped */ 289 + for (i = 0; i != 10; i++) 290 + test_err_ioctl_ioas_unmap(ENOENT, i * PAGE_SIZE, PAGE_SIZE); 291 + 292 + /* Unmap works */ 293 + for (i = 0; i != 10; i++) 294 + test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, 295 + self->base_iova + i * PAGE_SIZE); 296 + for (i = 0; i != 10; i++) 297 + test_ioctl_ioas_unmap(self->base_iova + i * PAGE_SIZE, 298 + PAGE_SIZE); 299 + 300 + /* Split fails */ 301 + test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE * 2, 302 + self->base_iova + 16 * PAGE_SIZE); 303 + test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 16 * PAGE_SIZE, 304 + PAGE_SIZE); 305 + test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 17 * PAGE_SIZE, 306 + PAGE_SIZE); 307 + 308 + /* Over map fails */ 309 + test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2, 310 + self->base_iova + 16 * PAGE_SIZE); 311 + test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE, 312 + self->base_iova + 16 * PAGE_SIZE); 313 + test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE, 314 + self->base_iova + 17 * PAGE_SIZE); 315 + test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2, 316 + self->base_iova + 15 * PAGE_SIZE); 317 + test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 3, 318 + self->base_iova + 15 * PAGE_SIZE); 319 + 320 + /* unmap all works */ 321 + test_ioctl_ioas_unmap(0, UINT64_MAX); 322 + 323 + /* Unmap all succeeds on an empty IOAS */ 324 + test_ioctl_ioas_unmap(0, UINT64_MAX); 325 + } 326 + 327 + TEST_F(iommufd_ioas, unmap_fully_contained_areas) 328 + { 329 + uint64_t unmap_len; 330 + int i; 331 + 332 + /* Give no_domain some space to rewind base_iova */ 333 + self->base_iova += 4 * PAGE_SIZE; 334 + 335 + for (i = 0; i != 4; i++) 336 + test_ioctl_ioas_map_fixed(buffer, 8 * PAGE_SIZE, 337 + self->base_iova + i * 16 * PAGE_SIZE); 338 + 339 + /* Unmap not fully contained area doesn't work */ 340 + test_err_ioctl_ioas_unmap(ENOENT, self->base_iova - 4 * PAGE_SIZE, 341 + 8 * PAGE_SIZE); 342 + test_err_ioctl_ioas_unmap(ENOENT, 343 + self->base_iova + 3 * 16 * PAGE_SIZE + 344 + 8 * PAGE_SIZE - 4 * PAGE_SIZE, 345 + 8 * PAGE_SIZE); 346 + 347 + /* Unmap fully contained areas works */ 348 + ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, 349 + self->base_iova - 4 * PAGE_SIZE, 350 + 3 * 16 * PAGE_SIZE + 8 * PAGE_SIZE + 351 + 4 * PAGE_SIZE, 352 + &unmap_len)); 353 + ASSERT_EQ(32 * PAGE_SIZE, unmap_len); 354 + } 355 + 356 + TEST_F(iommufd_ioas, area_auto_iova) 357 + { 358 + struct iommu_test_cmd test_cmd = { 359 + .size = sizeof(test_cmd), 360 + .op = IOMMU_TEST_OP_ADD_RESERVED, 361 + .id = self->ioas_id, 362 + .add_reserved = { .start = PAGE_SIZE * 4, 363 + .length = PAGE_SIZE * 100 }, 364 + }; 365 + struct 
iommu_iova_range ranges[1] = {}; 366 + struct iommu_ioas_allow_iovas allow_cmd = { 367 + .size = sizeof(allow_cmd), 368 + .ioas_id = self->ioas_id, 369 + .num_iovas = 1, 370 + .allowed_iovas = (uintptr_t)ranges, 371 + }; 372 + __u64 iovas[10]; 373 + int i; 374 + 375 + /* Simple 4k pages */ 376 + for (i = 0; i != 10; i++) 377 + test_ioctl_ioas_map(buffer, PAGE_SIZE, &iovas[i]); 378 + for (i = 0; i != 10; i++) 379 + test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE); 380 + 381 + /* Kernel automatically aligns IOVAs properly */ 382 + for (i = 0; i != 10; i++) { 383 + size_t length = PAGE_SIZE * (i + 1); 384 + 385 + if (self->domain_id) { 386 + test_ioctl_ioas_map(buffer, length, &iovas[i]); 387 + } else { 388 + test_ioctl_ioas_map((void *)(1UL << 31), length, 389 + &iovas[i]); 390 + } 391 + EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1))); 392 + } 393 + for (i = 0; i != 10; i++) 394 + test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1)); 395 + 396 + /* Avoids a reserved region */ 397 + ASSERT_EQ(0, 398 + ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED), 399 + &test_cmd)); 400 + for (i = 0; i != 10; i++) { 401 + size_t length = PAGE_SIZE * (i + 1); 402 + 403 + test_ioctl_ioas_map(buffer, length, &iovas[i]); 404 + EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1))); 405 + EXPECT_EQ(false, 406 + iovas[i] > test_cmd.add_reserved.start && 407 + iovas[i] < 408 + test_cmd.add_reserved.start + 409 + test_cmd.add_reserved.length); 410 + } 411 + for (i = 0; i != 10; i++) 412 + test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1)); 413 + 414 + /* Allowed region intersects with a reserved region */ 415 + ranges[0].start = PAGE_SIZE; 416 + ranges[0].last = PAGE_SIZE * 600; 417 + EXPECT_ERRNO(EADDRINUSE, 418 + ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd)); 419 + 420 + /* Allocate from an allowed region */ 421 + if (self->domain_id) { 422 + ranges[0].start = MOCK_APERTURE_START + PAGE_SIZE; 423 + ranges[0].last = MOCK_APERTURE_START + PAGE_SIZE * 600 - 1; 424 + } else { 425 + ranges[0].start = PAGE_SIZE * 200; 426 + ranges[0].last = PAGE_SIZE * 600 - 1; 427 + } 428 + ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd)); 429 + for (i = 0; i != 10; i++) { 430 + size_t length = PAGE_SIZE * (i + 1); 431 + 432 + test_ioctl_ioas_map(buffer, length, &iovas[i]); 433 + EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1))); 434 + EXPECT_EQ(true, iovas[i] >= ranges[0].start); 435 + EXPECT_EQ(true, iovas[i] <= ranges[0].last); 436 + EXPECT_EQ(true, iovas[i] + length > ranges[0].start); 437 + EXPECT_EQ(true, iovas[i] + length <= ranges[0].last + 1); 438 + } 439 + for (i = 0; i != 10; i++) 440 + test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1)); 441 + } 442 + 443 + TEST_F(iommufd_ioas, area_allowed) 444 + { 445 + struct iommu_test_cmd test_cmd = { 446 + .size = sizeof(test_cmd), 447 + .op = IOMMU_TEST_OP_ADD_RESERVED, 448 + .id = self->ioas_id, 449 + .add_reserved = { .start = PAGE_SIZE * 4, 450 + .length = PAGE_SIZE * 100 }, 451 + }; 452 + struct iommu_iova_range ranges[1] = {}; 453 + struct iommu_ioas_allow_iovas allow_cmd = { 454 + .size = sizeof(allow_cmd), 455 + .ioas_id = self->ioas_id, 456 + .num_iovas = 1, 457 + .allowed_iovas = (uintptr_t)ranges, 458 + }; 459 + 460 + /* Reserved intersects an allowed */ 461 + allow_cmd.num_iovas = 1; 462 + ranges[0].start = self->base_iova; 463 + ranges[0].last = ranges[0].start + PAGE_SIZE * 600; 464 + ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd)); 465 + test_cmd.add_reserved.start = ranges[0].start + PAGE_SIZE; 466 + 
test_cmd.add_reserved.length = PAGE_SIZE; 467 + EXPECT_ERRNO(EADDRINUSE, 468 + ioctl(self->fd, 469 + _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED), 470 + &test_cmd)); 471 + allow_cmd.num_iovas = 0; 472 + ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd)); 473 + 474 + /* Allowed intersects a reserved */ 475 + ASSERT_EQ(0, 476 + ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED), 477 + &test_cmd)); 478 + allow_cmd.num_iovas = 1; 479 + ranges[0].start = self->base_iova; 480 + ranges[0].last = ranges[0].start + PAGE_SIZE * 600; 481 + EXPECT_ERRNO(EADDRINUSE, 482 + ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd)); 483 + } 484 + 485 + TEST_F(iommufd_ioas, copy_area) 486 + { 487 + struct iommu_ioas_copy copy_cmd = { 488 + .size = sizeof(copy_cmd), 489 + .flags = IOMMU_IOAS_MAP_FIXED_IOVA, 490 + .dst_ioas_id = self->ioas_id, 491 + .src_ioas_id = self->ioas_id, 492 + .length = PAGE_SIZE, 493 + }; 494 + 495 + test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova); 496 + 497 + /* Copy inside a single IOAS */ 498 + copy_cmd.src_iova = self->base_iova; 499 + copy_cmd.dst_iova = self->base_iova + PAGE_SIZE; 500 + ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd)); 501 + 502 + /* Copy between IOAS's */ 503 + copy_cmd.src_iova = self->base_iova; 504 + copy_cmd.dst_iova = 0; 505 + test_ioctl_ioas_alloc(&copy_cmd.dst_ioas_id); 506 + ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd)); 507 + } 508 + 509 + TEST_F(iommufd_ioas, iova_ranges) 510 + { 511 + struct iommu_test_cmd test_cmd = { 512 + .size = sizeof(test_cmd), 513 + .op = IOMMU_TEST_OP_ADD_RESERVED, 514 + .id = self->ioas_id, 515 + .add_reserved = { .start = PAGE_SIZE, .length = PAGE_SIZE }, 516 + }; 517 + struct iommu_iova_range *ranges = buffer; 518 + struct iommu_ioas_iova_ranges ranges_cmd = { 519 + .size = sizeof(ranges_cmd), 520 + .ioas_id = self->ioas_id, 521 + .num_iovas = BUFFER_SIZE / sizeof(*ranges), 522 + .allowed_iovas = (uintptr_t)ranges, 523 + }; 524 + 525 + /* Range can be read */ 526 + ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd)); 527 + EXPECT_EQ(1, ranges_cmd.num_iovas); 528 + if (!self->domain_id) { 529 + EXPECT_EQ(0, ranges[0].start); 530 + EXPECT_EQ(SIZE_MAX, ranges[0].last); 531 + EXPECT_EQ(1, ranges_cmd.out_iova_alignment); 532 + } else { 533 + EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start); 534 + EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last); 535 + EXPECT_EQ(MOCK_PAGE_SIZE, ranges_cmd.out_iova_alignment); 536 + } 537 + 538 + /* Buffer too small */ 539 + memset(ranges, 0, BUFFER_SIZE); 540 + ranges_cmd.num_iovas = 0; 541 + EXPECT_ERRNO(EMSGSIZE, 542 + ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd)); 543 + EXPECT_EQ(1, ranges_cmd.num_iovas); 544 + EXPECT_EQ(0, ranges[0].start); 545 + EXPECT_EQ(0, ranges[0].last); 546 + 547 + /* 2 ranges */ 548 + ASSERT_EQ(0, 549 + ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED), 550 + &test_cmd)); 551 + ranges_cmd.num_iovas = BUFFER_SIZE / sizeof(*ranges); 552 + ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd)); 553 + if (!self->domain_id) { 554 + EXPECT_EQ(2, ranges_cmd.num_iovas); 555 + EXPECT_EQ(0, ranges[0].start); 556 + EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last); 557 + EXPECT_EQ(PAGE_SIZE * 2, ranges[1].start); 558 + EXPECT_EQ(SIZE_MAX, ranges[1].last); 559 + } else { 560 + EXPECT_EQ(1, ranges_cmd.num_iovas); 561 + EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start); 562 + EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last); 563 + } 564 + 565 + /* Buffer too small */ 566 + memset(ranges, 0, 
BUFFER_SIZE); 567 + ranges_cmd.num_iovas = 1; 568 + if (!self->domain_id) { 569 + EXPECT_ERRNO(EMSGSIZE, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, 570 + &ranges_cmd)); 571 + EXPECT_EQ(2, ranges_cmd.num_iovas); 572 + EXPECT_EQ(0, ranges[0].start); 573 + EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last); 574 + } else { 575 + ASSERT_EQ(0, 576 + ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd)); 577 + EXPECT_EQ(1, ranges_cmd.num_iovas); 578 + EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start); 579 + EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last); 580 + } 581 + EXPECT_EQ(0, ranges[1].start); 582 + EXPECT_EQ(0, ranges[1].last); 583 + } 584 + 585 + TEST_F(iommufd_ioas, access_pin) 586 + { 587 + struct iommu_test_cmd access_cmd = { 588 + .size = sizeof(access_cmd), 589 + .op = IOMMU_TEST_OP_ACCESS_PAGES, 590 + .access_pages = { .iova = MOCK_APERTURE_START, 591 + .length = BUFFER_SIZE, 592 + .uptr = (uintptr_t)buffer }, 593 + }; 594 + struct iommu_test_cmd check_map_cmd = { 595 + .size = sizeof(check_map_cmd), 596 + .op = IOMMU_TEST_OP_MD_CHECK_MAP, 597 + .check_map = { .iova = MOCK_APERTURE_START, 598 + .length = BUFFER_SIZE, 599 + .uptr = (uintptr_t)buffer }, 600 + }; 601 + uint32_t access_pages_id; 602 + unsigned int npages; 603 + 604 + test_cmd_create_access(self->ioas_id, &access_cmd.id, 605 + MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES); 606 + 607 + for (npages = 1; npages < BUFFER_SIZE / PAGE_SIZE; npages++) { 608 + uint32_t mock_device_id; 609 + uint32_t mock_hwpt_id; 610 + 611 + access_cmd.access_pages.length = npages * PAGE_SIZE; 612 + 613 + /* Single map/unmap */ 614 + test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, 615 + MOCK_APERTURE_START); 616 + ASSERT_EQ(0, ioctl(self->fd, 617 + _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES), 618 + &access_cmd)); 619 + test_cmd_destroy_access_pages( 620 + access_cmd.id, 621 + access_cmd.access_pages.out_access_pages_id); 622 + 623 + /* Double user */ 624 + ASSERT_EQ(0, ioctl(self->fd, 625 + _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES), 626 + &access_cmd)); 627 + access_pages_id = access_cmd.access_pages.out_access_pages_id; 628 + ASSERT_EQ(0, ioctl(self->fd, 629 + _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES), 630 + &access_cmd)); 631 + test_cmd_destroy_access_pages( 632 + access_cmd.id, 633 + access_cmd.access_pages.out_access_pages_id); 634 + test_cmd_destroy_access_pages(access_cmd.id, access_pages_id); 635 + 636 + /* Add/remove a domain with a user */ 637 + ASSERT_EQ(0, ioctl(self->fd, 638 + _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES), 639 + &access_cmd)); 640 + test_cmd_mock_domain(self->ioas_id, &mock_device_id, 641 + &mock_hwpt_id); 642 + check_map_cmd.id = mock_hwpt_id; 643 + ASSERT_EQ(0, ioctl(self->fd, 644 + _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP), 645 + &check_map_cmd)); 646 + 647 + test_ioctl_destroy(mock_device_id); 648 + test_ioctl_destroy(mock_hwpt_id); 649 + test_cmd_destroy_access_pages( 650 + access_cmd.id, 651 + access_cmd.access_pages.out_access_pages_id); 652 + 653 + test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE); 654 + } 655 + test_cmd_destroy_access(access_cmd.id); 656 + } 657 + 658 + TEST_F(iommufd_ioas, access_pin_unmap) 659 + { 660 + struct iommu_test_cmd access_pages_cmd = { 661 + .size = sizeof(access_pages_cmd), 662 + .op = IOMMU_TEST_OP_ACCESS_PAGES, 663 + .access_pages = { .iova = MOCK_APERTURE_START, 664 + .length = BUFFER_SIZE, 665 + .uptr = (uintptr_t)buffer }, 666 + }; 667 + 668 + test_cmd_create_access(self->ioas_id, &access_pages_cmd.id, 669 + MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES); 670 + 
test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, MOCK_APERTURE_START); 671 + ASSERT_EQ(0, 672 + ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES), 673 + &access_pages_cmd)); 674 + 675 + /* Trigger the unmap op */ 676 + test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE); 677 + 678 + /* kernel removed the item for us */ 679 + test_err_destroy_access_pages( 680 + ENOENT, access_pages_cmd.id, 681 + access_pages_cmd.access_pages.out_access_pages_id); 682 + } 683 + 684 + static void check_access_rw(struct __test_metadata *_metadata, int fd, 685 + unsigned int access_id, uint64_t iova, 686 + unsigned int def_flags) 687 + { 688 + uint16_t tmp[32]; 689 + struct iommu_test_cmd access_cmd = { 690 + .size = sizeof(access_cmd), 691 + .op = IOMMU_TEST_OP_ACCESS_RW, 692 + .id = access_id, 693 + .access_rw = { .uptr = (uintptr_t)tmp }, 694 + }; 695 + uint16_t *buffer16 = buffer; 696 + unsigned int i; 697 + void *tmp2; 698 + 699 + for (i = 0; i != BUFFER_SIZE / sizeof(*buffer16); i++) 700 + buffer16[i] = rand(); 701 + 702 + for (access_cmd.access_rw.iova = iova + PAGE_SIZE - 50; 703 + access_cmd.access_rw.iova < iova + PAGE_SIZE + 50; 704 + access_cmd.access_rw.iova++) { 705 + for (access_cmd.access_rw.length = 1; 706 + access_cmd.access_rw.length < sizeof(tmp); 707 + access_cmd.access_rw.length++) { 708 + access_cmd.access_rw.flags = def_flags; 709 + ASSERT_EQ(0, ioctl(fd, 710 + _IOMMU_TEST_CMD( 711 + IOMMU_TEST_OP_ACCESS_RW), 712 + &access_cmd)); 713 + ASSERT_EQ(0, 714 + memcmp(buffer + (access_cmd.access_rw.iova - 715 + iova), 716 + tmp, access_cmd.access_rw.length)); 717 + 718 + for (i = 0; i != ARRAY_SIZE(tmp); i++) 719 + tmp[i] = rand(); 720 + access_cmd.access_rw.flags = def_flags | 721 + MOCK_ACCESS_RW_WRITE; 722 + ASSERT_EQ(0, ioctl(fd, 723 + _IOMMU_TEST_CMD( 724 + IOMMU_TEST_OP_ACCESS_RW), 725 + &access_cmd)); 726 + ASSERT_EQ(0, 727 + memcmp(buffer + (access_cmd.access_rw.iova - 728 + iova), 729 + tmp, access_cmd.access_rw.length)); 730 + } 731 + } 732 + 733 + /* Multi-page test */ 734 + tmp2 = malloc(BUFFER_SIZE); 735 + ASSERT_NE(NULL, tmp2); 736 + access_cmd.access_rw.iova = iova; 737 + access_cmd.access_rw.length = BUFFER_SIZE; 738 + access_cmd.access_rw.flags = def_flags; 739 + access_cmd.access_rw.uptr = (uintptr_t)tmp2; 740 + ASSERT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW), 741 + &access_cmd)); 742 + ASSERT_EQ(0, memcmp(buffer, tmp2, access_cmd.access_rw.length)); 743 + free(tmp2); 744 + } 745 + 746 + TEST_F(iommufd_ioas, access_rw) 747 + { 748 + __u32 access_id; 749 + __u64 iova; 750 + 751 + test_cmd_create_access(self->ioas_id, &access_id, 0); 752 + test_ioctl_ioas_map(buffer, BUFFER_SIZE, &iova); 753 + check_access_rw(_metadata, self->fd, access_id, iova, 0); 754 + check_access_rw(_metadata, self->fd, access_id, iova, 755 + MOCK_ACCESS_RW_SLOW_PATH); 756 + test_ioctl_ioas_unmap(iova, BUFFER_SIZE); 757 + test_cmd_destroy_access(access_id); 758 + } 759 + 760 + TEST_F(iommufd_ioas, access_rw_unaligned) 761 + { 762 + __u32 access_id; 763 + __u64 iova; 764 + 765 + test_cmd_create_access(self->ioas_id, &access_id, 0); 766 + 767 + /* Unaligned pages */ 768 + iova = self->base_iova + MOCK_PAGE_SIZE; 769 + test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, iova); 770 + check_access_rw(_metadata, self->fd, access_id, iova, 0); 771 + test_ioctl_ioas_unmap(iova, BUFFER_SIZE); 772 + test_cmd_destroy_access(access_id); 773 + } 774 + 775 + TEST_F(iommufd_ioas, fork_gone) 776 + { 777 + __u32 access_id; 778 + pid_t child; 779 + 780 + test_cmd_create_access(self->ioas_id, 
&access_id, 0); 781 + 782 + /* Create a mapping with a different mm */ 783 + child = fork(); 784 + if (!child) { 785 + test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, 786 + MOCK_APERTURE_START); 787 + exit(0); 788 + } 789 + ASSERT_NE(-1, child); 790 + ASSERT_EQ(child, waitpid(child, NULL, 0)); 791 + 792 + if (self->domain_id) { 793 + /* 794 + * If a domain already existed then everything was pinned within 795 + * the fork, so this copies from one domain to another. 796 + */ 797 + test_cmd_mock_domain(self->ioas_id, NULL, NULL); 798 + check_access_rw(_metadata, self->fd, access_id, 799 + MOCK_APERTURE_START, 0); 800 + 801 + } else { 802 + /* 803 + * Otherwise we need to actually pin pages which can't happen 804 + * since the fork is gone. 805 + */ 806 + test_err_mock_domain(EFAULT, self->ioas_id, NULL, NULL); 807 + } 808 + 809 + test_cmd_destroy_access(access_id); 810 + } 811 + 812 + TEST_F(iommufd_ioas, fork_present) 813 + { 814 + __u32 access_id; 815 + int pipefds[2]; 816 + uint64_t tmp; 817 + pid_t child; 818 + int efd; 819 + 820 + test_cmd_create_access(self->ioas_id, &access_id, 0); 821 + 822 + ASSERT_EQ(0, pipe2(pipefds, O_CLOEXEC)); 823 + efd = eventfd(0, EFD_CLOEXEC); 824 + ASSERT_NE(-1, efd); 825 + 826 + /* Create a mapping with a different mm */ 827 + child = fork(); 828 + if (!child) { 829 + __u64 iova; 830 + uint64_t one = 1; 831 + 832 + close(pipefds[1]); 833 + test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, 834 + MOCK_APERTURE_START); 835 + if (write(efd, &one, sizeof(one)) != sizeof(one)) 836 + exit(100); 837 + if (read(pipefds[0], &iova, 1) != 1) 838 + exit(100); 839 + exit(0); 840 + } 841 + close(pipefds[0]); 842 + ASSERT_NE(-1, child); 843 + ASSERT_EQ(8, read(efd, &tmp, sizeof(tmp))); 844 + 845 + /* Read pages from the remote process */ 846 + test_cmd_mock_domain(self->ioas_id, NULL, NULL); 847 + check_access_rw(_metadata, self->fd, access_id, MOCK_APERTURE_START, 0); 848 + 849 + ASSERT_EQ(0, close(pipefds[1])); 850 + ASSERT_EQ(child, waitpid(child, NULL, 0)); 851 + 852 + test_cmd_destroy_access(access_id); 853 + } 854 + 855 + TEST_F(iommufd_ioas, ioas_option_huge_pages) 856 + { 857 + struct iommu_option cmd = { 858 + .size = sizeof(cmd), 859 + .option_id = IOMMU_OPTION_HUGE_PAGES, 860 + .op = IOMMU_OPTION_OP_GET, 861 + .val64 = 3, 862 + .object_id = self->ioas_id, 863 + }; 864 + 865 + ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd)); 866 + ASSERT_EQ(1, cmd.val64); 867 + 868 + cmd.op = IOMMU_OPTION_OP_SET; 869 + cmd.val64 = 0; 870 + ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd)); 871 + 872 + cmd.op = IOMMU_OPTION_OP_GET; 873 + cmd.val64 = 3; 874 + ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd)); 875 + ASSERT_EQ(0, cmd.val64); 876 + 877 + cmd.op = IOMMU_OPTION_OP_SET; 878 + cmd.val64 = 2; 879 + EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd)); 880 + 881 + cmd.op = IOMMU_OPTION_OP_SET; 882 + cmd.val64 = 1; 883 + ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd)); 884 + } 885 + 886 + TEST_F(iommufd_ioas, ioas_iova_alloc) 887 + { 888 + unsigned int length; 889 + __u64 iova; 890 + 891 + for (length = 1; length != PAGE_SIZE * 2; length++) { 892 + if (variant->mock_domains && (length % MOCK_PAGE_SIZE)) { 893 + test_err_ioctl_ioas_map(EINVAL, buffer, length, &iova); 894 + } else { 895 + test_ioctl_ioas_map(buffer, length, &iova); 896 + test_ioctl_ioas_unmap(iova, length); 897 + } 898 + } 899 + } 900 + 901 + TEST_F(iommufd_ioas, ioas_align_change) 902 + { 903 + struct iommu_option cmd = { 904 + .size = sizeof(cmd), 905 + .option_id = IOMMU_OPTION_HUGE_PAGES, 906 + .op = 
IOMMU_OPTION_OP_SET, 907 + .object_id = self->ioas_id, 908 + /* 0 means everything must be aligned to PAGE_SIZE */ 909 + .val64 = 0, 910 + }; 911 + 912 + /* 913 + * We cannot upgrade the alignment using OPTION_HUGE_PAGES when a domain 914 + * and map are present. 915 + */ 916 + if (variant->mock_domains) 917 + return; 918 + 919 + /* 920 + * We can upgrade to PAGE_SIZE alignment when things are aligned right 921 + */ 922 + test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, MOCK_APERTURE_START); 923 + ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd)); 924 + 925 + /* Misalignment is rejected at map time */ 926 + test_err_ioctl_ioas_map_fixed(EINVAL, buffer + MOCK_PAGE_SIZE, 927 + PAGE_SIZE, 928 + MOCK_APERTURE_START + PAGE_SIZE); 929 + ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd)); 930 + 931 + /* Reduce alignment */ 932 + cmd.val64 = 1; 933 + ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd)); 934 + 935 + /* Confirm misalignment is rejected during alignment upgrade */ 936 + test_ioctl_ioas_map_fixed(buffer + MOCK_PAGE_SIZE, PAGE_SIZE, 937 + MOCK_APERTURE_START + PAGE_SIZE); 938 + cmd.val64 = 0; 939 + EXPECT_ERRNO(EADDRINUSE, ioctl(self->fd, IOMMU_OPTION, &cmd)); 940 + 941 + test_ioctl_ioas_unmap(MOCK_APERTURE_START + PAGE_SIZE, PAGE_SIZE); 942 + test_ioctl_ioas_unmap(MOCK_APERTURE_START, PAGE_SIZE); 943 + } 944 + 945 + TEST_F(iommufd_ioas, copy_sweep) 946 + { 947 + struct iommu_ioas_copy copy_cmd = { 948 + .size = sizeof(copy_cmd), 949 + .flags = IOMMU_IOAS_MAP_FIXED_IOVA, 950 + .src_ioas_id = self->ioas_id, 951 + .dst_iova = MOCK_APERTURE_START, 952 + .length = MOCK_PAGE_SIZE, 953 + }; 954 + unsigned int dst_ioas_id; 955 + uint64_t last_iova; 956 + uint64_t iova; 957 + 958 + test_ioctl_ioas_alloc(&dst_ioas_id); 959 + copy_cmd.dst_ioas_id = dst_ioas_id; 960 + 961 + if (variant->mock_domains) 962 + last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 1; 963 + else 964 + last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 2; 965 + 966 + test_ioctl_ioas_map_fixed(buffer, last_iova - MOCK_APERTURE_START + 1, 967 + MOCK_APERTURE_START); 968 + 969 + for (iova = MOCK_APERTURE_START - PAGE_SIZE; iova <= last_iova; 970 + iova += 511) { 971 + copy_cmd.src_iova = iova; 972 + if (iova < MOCK_APERTURE_START || 973 + iova + copy_cmd.length - 1 > last_iova) { 974 + EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_IOAS_COPY, 975 + &copy_cmd)); 976 + } else { 977 + ASSERT_EQ(0, 978 + ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd)); 979 + test_ioctl_ioas_unmap_id(dst_ioas_id, copy_cmd.dst_iova, 980 + copy_cmd.length); 981 + } 982 + } 983 + 984 + test_ioctl_destroy(dst_ioas_id); 985 + } 986 + 987 + FIXTURE(iommufd_mock_domain) 988 + { 989 + int fd; 990 + uint32_t ioas_id; 991 + uint32_t domain_id; 992 + uint32_t domain_ids[2]; 993 + int mmap_flags; 994 + size_t mmap_buf_size; 995 + }; 996 + 997 + FIXTURE_VARIANT(iommufd_mock_domain) 998 + { 999 + unsigned int mock_domains; 1000 + bool hugepages; 1001 + }; 1002 + 1003 + FIXTURE_SETUP(iommufd_mock_domain) 1004 + { 1005 + unsigned int i; 1006 + 1007 + self->fd = open("/dev/iommu", O_RDWR); 1008 + ASSERT_NE(-1, self->fd); 1009 + test_ioctl_ioas_alloc(&self->ioas_id); 1010 + 1011 + ASSERT_GE(ARRAY_SIZE(self->domain_ids), variant->mock_domains); 1012 + 1013 + for (i = 0; i != variant->mock_domains; i++) 1014 + test_cmd_mock_domain(self->ioas_id, NULL, &self->domain_ids[i]); 1015 + self->domain_id = self->domain_ids[0]; 1016 + 1017 + self->mmap_flags = MAP_SHARED | MAP_ANONYMOUS; 1018 + self->mmap_buf_size = PAGE_SIZE * 8; 1019 + if (variant->hugepages) { 1020 + /* 1021 + * MAP_POPULATE 
will cause the kernel to fail mmap if THPs are 1022 + * not available. 1023 + */ 1024 + self->mmap_flags |= MAP_HUGETLB | MAP_POPULATE; 1025 + self->mmap_buf_size = HUGEPAGE_SIZE * 2; 1026 + } 1027 + } 1028 + 1029 + FIXTURE_TEARDOWN(iommufd_mock_domain) 1030 + { 1031 + teardown_iommufd(self->fd, _metadata); 1032 + } 1033 + 1034 + FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain) 1035 + { 1036 + .mock_domains = 1, 1037 + .hugepages = false, 1038 + }; 1039 + 1040 + FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains) 1041 + { 1042 + .mock_domains = 2, 1043 + .hugepages = false, 1044 + }; 1045 + 1046 + FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_hugepage) 1047 + { 1048 + .mock_domains = 1, 1049 + .hugepages = true, 1050 + }; 1051 + 1052 + FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains_hugepage) 1053 + { 1054 + .mock_domains = 2, 1055 + .hugepages = true, 1056 + }; 1057 + 1058 + /* Have the kernel check that the user pages made it to the iommu_domain */ 1059 + #define check_mock_iova(_ptr, _iova, _length) \ 1060 + ({ \ 1061 + struct iommu_test_cmd check_map_cmd = { \ 1062 + .size = sizeof(check_map_cmd), \ 1063 + .op = IOMMU_TEST_OP_MD_CHECK_MAP, \ 1064 + .id = self->domain_id, \ 1065 + .check_map = { .iova = _iova, \ 1066 + .length = _length, \ 1067 + .uptr = (uintptr_t)(_ptr) }, \ 1068 + }; \ 1069 + ASSERT_EQ(0, \ 1070 + ioctl(self->fd, \ 1071 + _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP), \ 1072 + &check_map_cmd)); \ 1073 + if (self->domain_ids[1]) { \ 1074 + check_map_cmd.id = self->domain_ids[1]; \ 1075 + ASSERT_EQ(0, \ 1076 + ioctl(self->fd, \ 1077 + _IOMMU_TEST_CMD( \ 1078 + IOMMU_TEST_OP_MD_CHECK_MAP), \ 1079 + &check_map_cmd)); \ 1080 + } \ 1081 + }) 1082 + 1083 + TEST_F(iommufd_mock_domain, basic) 1084 + { 1085 + size_t buf_size = self->mmap_buf_size; 1086 + uint8_t *buf; 1087 + __u64 iova; 1088 + 1089 + /* Simple one page map */ 1090 + test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova); 1091 + check_mock_iova(buffer, iova, PAGE_SIZE); 1092 + 1093 + buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1, 1094 + 0); 1095 + ASSERT_NE(MAP_FAILED, buf); 1096 + 1097 + /* EFAULT half way through mapping */ 1098 + ASSERT_EQ(0, munmap(buf + buf_size / 2, buf_size / 2)); 1099 + test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova); 1100 + 1101 + /* EFAULT on first page */ 1102 + ASSERT_EQ(0, munmap(buf, buf_size / 2)); 1103 + test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova); 1104 + } 1105 + 1106 + TEST_F(iommufd_mock_domain, ro_unshare) 1107 + { 1108 + uint8_t *buf; 1109 + __u64 iova; 1110 + int fd; 1111 + 1112 + fd = open("/proc/self/exe", O_RDONLY); 1113 + ASSERT_NE(-1, fd); 1114 + 1115 + buf = mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); 1116 + ASSERT_NE(MAP_FAILED, buf); 1117 + close(fd); 1118 + 1119 + /* 1120 + * There have been lots of changes to the "unshare" mechanism in 1121 + * get_user_pages(), make sure it works right. The write to the page 1122 + * after we map it for reading should not change the assigned PFN. 1123 + */ 1124 + ASSERT_EQ(0, 1125 + _test_ioctl_ioas_map(self->fd, self->ioas_id, buf, PAGE_SIZE, 1126 + &iova, IOMMU_IOAS_MAP_READABLE)); 1127 + check_mock_iova(buf, iova, PAGE_SIZE); 1128 + memset(buf, 1, PAGE_SIZE); 1129 + check_mock_iova(buf, iova, PAGE_SIZE); 1130 + ASSERT_EQ(0, munmap(buf, PAGE_SIZE)); 1131 + } 1132 + 1133 + TEST_F(iommufd_mock_domain, all_aligns) 1134 + { 1135 + size_t test_step = variant->hugepages ? 
(self->mmap_buf_size / 16) : 1136 + MOCK_PAGE_SIZE; 1137 + size_t buf_size = self->mmap_buf_size; 1138 + unsigned int start; 1139 + unsigned int end; 1140 + uint8_t *buf; 1141 + 1142 + buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1, 1143 + 0); 1144 + ASSERT_NE(MAP_FAILED, buf); 1145 + check_refs(buf, buf_size, 0); 1146 + 1147 + /* 1148 + * Map every combination of page size and alignment within a big region, 1149 + * less for hugepage case as it takes so long to finish. 1150 + */ 1151 + for (start = 0; start < buf_size; start += test_step) { 1152 + if (variant->hugepages) 1153 + end = buf_size; 1154 + else 1155 + end = start + MOCK_PAGE_SIZE; 1156 + for (; end < buf_size; end += MOCK_PAGE_SIZE) { 1157 + size_t length = end - start; 1158 + __u64 iova; 1159 + 1160 + test_ioctl_ioas_map(buf + start, length, &iova); 1161 + check_mock_iova(buf + start, iova, length); 1162 + check_refs(buf + start / PAGE_SIZE * PAGE_SIZE, 1163 + end / PAGE_SIZE * PAGE_SIZE - 1164 + start / PAGE_SIZE * PAGE_SIZE, 1165 + 1); 1166 + 1167 + test_ioctl_ioas_unmap(iova, length); 1168 + } 1169 + } 1170 + check_refs(buf, buf_size, 0); 1171 + ASSERT_EQ(0, munmap(buf, buf_size)); 1172 + } 1173 + 1174 + TEST_F(iommufd_mock_domain, all_aligns_copy) 1175 + { 1176 + size_t test_step = variant->hugepages ? self->mmap_buf_size / 16 : 1177 + MOCK_PAGE_SIZE; 1178 + size_t buf_size = self->mmap_buf_size; 1179 + unsigned int start; 1180 + unsigned int end; 1181 + uint8_t *buf; 1182 + 1183 + buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1, 1184 + 0); 1185 + ASSERT_NE(MAP_FAILED, buf); 1186 + check_refs(buf, buf_size, 0); 1187 + 1188 + /* 1189 + * Map every combination of page size and alignment within a big region, 1190 + * less for hugepage case as it takes so long to finish. 
1191 + */ 1192 + for (start = 0; start < buf_size; start += test_step) { 1193 + if (variant->hugepages) 1194 + end = buf_size; 1195 + else 1196 + end = start + MOCK_PAGE_SIZE; 1197 + for (; end < buf_size; end += MOCK_PAGE_SIZE) { 1198 + size_t length = end - start; 1199 + unsigned int old_id; 1200 + uint32_t mock_device_id; 1201 + __u64 iova; 1202 + 1203 + test_ioctl_ioas_map(buf + start, length, &iova); 1204 + 1205 + /* Add and destroy a domain while the area exists */ 1206 + old_id = self->domain_ids[1]; 1207 + test_cmd_mock_domain(self->ioas_id, &mock_device_id, 1208 + &self->domain_ids[1]); 1209 + 1210 + check_mock_iova(buf + start, iova, length); 1211 + check_refs(buf + start / PAGE_SIZE * PAGE_SIZE, 1212 + end / PAGE_SIZE * PAGE_SIZE - 1213 + start / PAGE_SIZE * PAGE_SIZE, 1214 + 1); 1215 + 1216 + test_ioctl_destroy(mock_device_id); 1217 + test_ioctl_destroy(self->domain_ids[1]); 1218 + self->domain_ids[1] = old_id; 1219 + 1220 + test_ioctl_ioas_unmap(iova, length); 1221 + } 1222 + } 1223 + check_refs(buf, buf_size, 0); 1224 + ASSERT_EQ(0, munmap(buf, buf_size)); 1225 + } 1226 + 1227 + TEST_F(iommufd_mock_domain, user_copy) 1228 + { 1229 + struct iommu_test_cmd access_cmd = { 1230 + .size = sizeof(access_cmd), 1231 + .op = IOMMU_TEST_OP_ACCESS_PAGES, 1232 + .access_pages = { .length = BUFFER_SIZE, 1233 + .uptr = (uintptr_t)buffer }, 1234 + }; 1235 + struct iommu_ioas_copy copy_cmd = { 1236 + .size = sizeof(copy_cmd), 1237 + .flags = IOMMU_IOAS_MAP_FIXED_IOVA, 1238 + .dst_ioas_id = self->ioas_id, 1239 + .dst_iova = MOCK_APERTURE_START, 1240 + .length = BUFFER_SIZE, 1241 + }; 1242 + unsigned int ioas_id; 1243 + 1244 + /* Pin the pages in an IOAS with no domains then copy to an IOAS with domains */ 1245 + test_ioctl_ioas_alloc(&ioas_id); 1246 + test_ioctl_ioas_map_id(ioas_id, buffer, BUFFER_SIZE, 1247 + &copy_cmd.src_iova); 1248 + 1249 + test_cmd_create_access(ioas_id, &access_cmd.id, 1250 + MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES); 1251 + 1252 + access_cmd.access_pages.iova = copy_cmd.src_iova; 1253 + ASSERT_EQ(0, 1254 + ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES), 1255 + &access_cmd)); 1256 + copy_cmd.src_ioas_id = ioas_id; 1257 + ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd)); 1258 + check_mock_iova(buffer, MOCK_APERTURE_START, BUFFER_SIZE); 1259 + 1260 + test_cmd_destroy_access_pages( 1261 + access_cmd.id, access_cmd.access_pages.out_access_pages_id); 1262 + test_cmd_destroy_access(access_cmd.id) test_ioctl_destroy(ioas_id); 1263 + 1264 + test_ioctl_destroy(ioas_id); 1265 + } 1266 + 1267 + /* VFIO compatibility IOCTLs */ 1268 + 1269 + TEST_F(iommufd, simple_ioctls) 1270 + { 1271 + ASSERT_EQ(VFIO_API_VERSION, ioctl(self->fd, VFIO_GET_API_VERSION)); 1272 + ASSERT_EQ(1, ioctl(self->fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU)); 1273 + } 1274 + 1275 + TEST_F(iommufd, unmap_cmd) 1276 + { 1277 + struct vfio_iommu_type1_dma_unmap unmap_cmd = { 1278 + .iova = MOCK_APERTURE_START, 1279 + .size = PAGE_SIZE, 1280 + }; 1281 + 1282 + unmap_cmd.argsz = 1; 1283 + EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd)); 1284 + 1285 + unmap_cmd.argsz = sizeof(unmap_cmd); 1286 + unmap_cmd.flags = 1 << 31; 1287 + EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd)); 1288 + 1289 + unmap_cmd.flags = 0; 1290 + EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd)); 1291 + } 1292 + 1293 + TEST_F(iommufd, map_cmd) 1294 + { 1295 + struct vfio_iommu_type1_dma_map map_cmd = { 1296 + .iova = MOCK_APERTURE_START, 1297 + .size = 
PAGE_SIZE, 1298 + .vaddr = (__u64)buffer, 1299 + }; 1300 + 1301 + map_cmd.argsz = 1; 1302 + EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd)); 1303 + 1304 + map_cmd.argsz = sizeof(map_cmd); 1305 + map_cmd.flags = 1 << 31; 1306 + EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd)); 1307 + 1308 + /* Requires a domain to be attached */ 1309 + map_cmd.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE; 1310 + EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd)); 1311 + } 1312 + 1313 + TEST_F(iommufd, info_cmd) 1314 + { 1315 + struct vfio_iommu_type1_info info_cmd = {}; 1316 + 1317 + /* Invalid argsz */ 1318 + info_cmd.argsz = 1; 1319 + EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd)); 1320 + 1321 + info_cmd.argsz = sizeof(info_cmd); 1322 + EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd)); 1323 + } 1324 + 1325 + TEST_F(iommufd, set_iommu_cmd) 1326 + { 1327 + /* Requires a domain to be attached */ 1328 + EXPECT_ERRNO(ENODEV, 1329 + ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU)); 1330 + EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU)); 1331 + } 1332 + 1333 + TEST_F(iommufd, vfio_ioas) 1334 + { 1335 + struct iommu_vfio_ioas vfio_ioas_cmd = { 1336 + .size = sizeof(vfio_ioas_cmd), 1337 + .op = IOMMU_VFIO_IOAS_GET, 1338 + }; 1339 + __u32 ioas_id; 1340 + 1341 + /* ENODEV if there is no compat ioas */ 1342 + EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd)); 1343 + 1344 + /* Invalid id for set */ 1345 + vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_SET; 1346 + EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd)); 1347 + 1348 + /* Valid id for set*/ 1349 + test_ioctl_ioas_alloc(&ioas_id); 1350 + vfio_ioas_cmd.ioas_id = ioas_id; 1351 + ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd)); 1352 + 1353 + /* Same id comes back from get */ 1354 + vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET; 1355 + ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd)); 1356 + ASSERT_EQ(ioas_id, vfio_ioas_cmd.ioas_id); 1357 + 1358 + /* Clear works */ 1359 + vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_CLEAR; 1360 + ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd)); 1361 + vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET; 1362 + EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd)); 1363 + } 1364 + 1365 + FIXTURE(vfio_compat_mock_domain) 1366 + { 1367 + int fd; 1368 + uint32_t ioas_id; 1369 + }; 1370 + 1371 + FIXTURE_VARIANT(vfio_compat_mock_domain) 1372 + { 1373 + unsigned int version; 1374 + }; 1375 + 1376 + FIXTURE_SETUP(vfio_compat_mock_domain) 1377 + { 1378 + struct iommu_vfio_ioas vfio_ioas_cmd = { 1379 + .size = sizeof(vfio_ioas_cmd), 1380 + .op = IOMMU_VFIO_IOAS_SET, 1381 + }; 1382 + 1383 + self->fd = open("/dev/iommu", O_RDWR); 1384 + ASSERT_NE(-1, self->fd); 1385 + 1386 + /* Create what VFIO would consider a group */ 1387 + test_ioctl_ioas_alloc(&self->ioas_id); 1388 + test_cmd_mock_domain(self->ioas_id, NULL, NULL); 1389 + 1390 + /* Attach it to the vfio compat */ 1391 + vfio_ioas_cmd.ioas_id = self->ioas_id; 1392 + ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd)); 1393 + ASSERT_EQ(0, ioctl(self->fd, VFIO_SET_IOMMU, variant->version)); 1394 + } 1395 + 1396 + FIXTURE_TEARDOWN(vfio_compat_mock_domain) 1397 + { 1398 + teardown_iommufd(self->fd, _metadata); 1399 + } 1400 + 1401 + FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v2) 1402 + { 1403 + .version = VFIO_TYPE1v2_IOMMU, 1404 + }; 1405 + 1406 + 
FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v0) 1407 + { 1408 + .version = VFIO_TYPE1_IOMMU, 1409 + }; 1410 + 1411 + TEST_F(vfio_compat_mock_domain, simple_close) 1412 + { 1413 + } 1414 + 1415 + TEST_F(vfio_compat_mock_domain, option_huge_pages) 1416 + { 1417 + struct iommu_option cmd = { 1418 + .size = sizeof(cmd), 1419 + .option_id = IOMMU_OPTION_HUGE_PAGES, 1420 + .op = IOMMU_OPTION_OP_GET, 1421 + .val64 = 3, 1422 + .object_id = self->ioas_id, 1423 + }; 1424 + 1425 + ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd)); 1426 + if (variant->version == VFIO_TYPE1_IOMMU) { 1427 + ASSERT_EQ(0, cmd.val64); 1428 + } else { 1429 + ASSERT_EQ(1, cmd.val64); 1430 + } 1431 + } 1432 + 1433 + /* 1434 + * Execute an ioctl command stored in buffer and check that the result does not 1435 + * overflow memory. 1436 + */ 1437 + static bool is_filled(const void *buf, uint8_t c, size_t len) 1438 + { 1439 + const uint8_t *cbuf = buf; 1440 + 1441 + for (; len; cbuf++, len--) 1442 + if (*cbuf != c) 1443 + return false; 1444 + return true; 1445 + } 1446 + 1447 + #define ioctl_check_buf(fd, cmd) \ 1448 + ({ \ 1449 + size_t _cmd_len = *(__u32 *)buffer; \ 1450 + \ 1451 + memset(buffer + _cmd_len, 0xAA, BUFFER_SIZE - _cmd_len); \ 1452 + ASSERT_EQ(0, ioctl(fd, cmd, buffer)); \ 1453 + ASSERT_EQ(true, is_filled(buffer + _cmd_len, 0xAA, \ 1454 + BUFFER_SIZE - _cmd_len)); \ 1455 + }) 1456 + 1457 + static void check_vfio_info_cap_chain(struct __test_metadata *_metadata, 1458 + struct vfio_iommu_type1_info *info_cmd) 1459 + { 1460 + const struct vfio_info_cap_header *cap; 1461 + 1462 + ASSERT_GE(info_cmd->argsz, info_cmd->cap_offset + sizeof(*cap)); 1463 + cap = buffer + info_cmd->cap_offset; 1464 + while (true) { 1465 + size_t cap_size; 1466 + 1467 + if (cap->next) 1468 + cap_size = (buffer + cap->next) - (void *)cap; 1469 + else 1470 + cap_size = (buffer + info_cmd->argsz) - (void *)cap; 1471 + 1472 + switch (cap->id) { 1473 + case VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE: { 1474 + struct vfio_iommu_type1_info_cap_iova_range *data = 1475 + (void *)cap; 1476 + 1477 + ASSERT_EQ(1, data->header.version); 1478 + ASSERT_EQ(1, data->nr_iovas); 1479 + EXPECT_EQ(MOCK_APERTURE_START, 1480 + data->iova_ranges[0].start); 1481 + EXPECT_EQ(MOCK_APERTURE_LAST, data->iova_ranges[0].end); 1482 + break; 1483 + } 1484 + case VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL: { 1485 + struct vfio_iommu_type1_info_dma_avail *data = 1486 + (void *)cap; 1487 + 1488 + ASSERT_EQ(1, data->header.version); 1489 + ASSERT_EQ(sizeof(*data), cap_size); 1490 + break; 1491 + } 1492 + default: 1493 + ASSERT_EQ(false, true); 1494 + break; 1495 + } 1496 + if (!cap->next) 1497 + break; 1498 + 1499 + ASSERT_GE(info_cmd->argsz, cap->next + sizeof(*cap)); 1500 + ASSERT_GE(buffer + cap->next, (void *)cap); 1501 + cap = buffer + cap->next; 1502 + } 1503 + } 1504 + 1505 + TEST_F(vfio_compat_mock_domain, get_info) 1506 + { 1507 + struct vfio_iommu_type1_info *info_cmd = buffer; 1508 + unsigned int i; 1509 + size_t caplen; 1510 + 1511 + /* Pre-cap ABI */ 1512 + *info_cmd = (struct vfio_iommu_type1_info){ 1513 + .argsz = offsetof(struct vfio_iommu_type1_info, cap_offset), 1514 + }; 1515 + ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO); 1516 + ASSERT_NE(0, info_cmd->iova_pgsizes); 1517 + ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS, 1518 + info_cmd->flags); 1519 + 1520 + /* Read the cap chain size */ 1521 + *info_cmd = (struct vfio_iommu_type1_info){ 1522 + .argsz = sizeof(*info_cmd), 1523 + }; 1524 + ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO); 1525 + ASSERT_NE(0, 
info_cmd->iova_pgsizes); 1526 + ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS, 1527 + info_cmd->flags); 1528 + ASSERT_EQ(0, info_cmd->cap_offset); 1529 + ASSERT_LT(sizeof(*info_cmd), info_cmd->argsz); 1530 + 1531 + /* Read the caps, kernel should never create a corrupted caps */ 1532 + caplen = info_cmd->argsz; 1533 + for (i = sizeof(*info_cmd); i < caplen; i++) { 1534 + *info_cmd = (struct vfio_iommu_type1_info){ 1535 + .argsz = i, 1536 + }; 1537 + ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO); 1538 + ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS, 1539 + info_cmd->flags); 1540 + if (!info_cmd->cap_offset) 1541 + continue; 1542 + check_vfio_info_cap_chain(_metadata, info_cmd); 1543 + } 1544 + } 1545 + 1546 + static void shuffle_array(unsigned long *array, size_t nelms) 1547 + { 1548 + unsigned int i; 1549 + 1550 + /* Shuffle */ 1551 + for (i = 0; i != nelms; i++) { 1552 + unsigned long tmp = array[i]; 1553 + unsigned int other = rand() % (nelms - i); 1554 + 1555 + array[i] = array[other]; 1556 + array[other] = tmp; 1557 + } 1558 + } 1559 + 1560 + TEST_F(vfio_compat_mock_domain, map) 1561 + { 1562 + struct vfio_iommu_type1_dma_map map_cmd = { 1563 + .argsz = sizeof(map_cmd), 1564 + .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE, 1565 + .vaddr = (uintptr_t)buffer, 1566 + .size = BUFFER_SIZE, 1567 + .iova = MOCK_APERTURE_START, 1568 + }; 1569 + struct vfio_iommu_type1_dma_unmap unmap_cmd = { 1570 + .argsz = sizeof(unmap_cmd), 1571 + .size = BUFFER_SIZE, 1572 + .iova = MOCK_APERTURE_START, 1573 + }; 1574 + unsigned long pages_iova[BUFFER_SIZE / PAGE_SIZE]; 1575 + unsigned int i; 1576 + 1577 + /* Simple map/unmap */ 1578 + ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd)); 1579 + ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd)); 1580 + ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size); 1581 + 1582 + /* UNMAP_FLAG_ALL requres 0 iova/size */ 1583 + ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd)); 1584 + unmap_cmd.flags = VFIO_DMA_UNMAP_FLAG_ALL; 1585 + EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd)); 1586 + 1587 + unmap_cmd.iova = 0; 1588 + unmap_cmd.size = 0; 1589 + ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd)); 1590 + ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size); 1591 + 1592 + /* Small pages */ 1593 + for (i = 0; i != ARRAY_SIZE(pages_iova); i++) { 1594 + map_cmd.iova = pages_iova[i] = 1595 + MOCK_APERTURE_START + i * PAGE_SIZE; 1596 + map_cmd.vaddr = (uintptr_t)buffer + i * PAGE_SIZE; 1597 + map_cmd.size = PAGE_SIZE; 1598 + ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd)); 1599 + } 1600 + shuffle_array(pages_iova, ARRAY_SIZE(pages_iova)); 1601 + 1602 + unmap_cmd.flags = 0; 1603 + unmap_cmd.size = PAGE_SIZE; 1604 + for (i = 0; i != ARRAY_SIZE(pages_iova); i++) { 1605 + unmap_cmd.iova = pages_iova[i]; 1606 + ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd)); 1607 + } 1608 + } 1609 + 1610 + TEST_F(vfio_compat_mock_domain, huge_map) 1611 + { 1612 + size_t buf_size = HUGEPAGE_SIZE * 2; 1613 + struct vfio_iommu_type1_dma_map map_cmd = { 1614 + .argsz = sizeof(map_cmd), 1615 + .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE, 1616 + .size = buf_size, 1617 + .iova = MOCK_APERTURE_START, 1618 + }; 1619 + struct vfio_iommu_type1_dma_unmap unmap_cmd = { 1620 + .argsz = sizeof(unmap_cmd), 1621 + }; 1622 + unsigned long pages_iova[16]; 1623 + unsigned int i; 1624 + void *buf; 1625 + 1626 + /* Test huge pages and splitting */ 1627 + buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, 
1628 + MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1, 1629 + 0); 1630 + ASSERT_NE(MAP_FAILED, buf); 1631 + map_cmd.vaddr = (uintptr_t)buf; 1632 + ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd)); 1633 + 1634 + unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova); 1635 + for (i = 0; i != ARRAY_SIZE(pages_iova); i++) 1636 + pages_iova[i] = MOCK_APERTURE_START + (i * unmap_cmd.size); 1637 + shuffle_array(pages_iova, ARRAY_SIZE(pages_iova)); 1638 + 1639 + /* type1 mode can cut up larger mappings, type1v2 always fails */ 1640 + for (i = 0; i != ARRAY_SIZE(pages_iova); i++) { 1641 + unmap_cmd.iova = pages_iova[i]; 1642 + unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova); 1643 + if (variant->version == VFIO_TYPE1_IOMMU) { 1644 + ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, 1645 + &unmap_cmd)); 1646 + } else { 1647 + EXPECT_ERRNO(ENOENT, 1648 + ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, 1649 + &unmap_cmd)); 1650 + } 1651 + } 1652 + } 1653 + 1654 + TEST_HARNESS_MAIN
tools/testing/selftests/iommu/iommufd_fail_nth.c (new file, +580)
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES 3 + * 4 + * These tests are "kernel integrity" tests. They are looking for kernel 5 + * WARN/OOPS/kasn/etc splats triggered by kernel sanitizers & debugging 6 + * features. It does not attempt to verify that the system calls are doing what 7 + * they are supposed to do. 8 + * 9 + * The basic philosophy is to run a sequence of calls that will succeed and then 10 + * sweep every failure injection point on that call chain to look for 11 + * interesting things in error handling. 12 + * 13 + * This test is best run with: 14 + * echo 1 > /proc/sys/kernel/panic_on_warn 15 + * If something is actually going wrong. 16 + */ 17 + #include <fcntl.h> 18 + #include <dirent.h> 19 + 20 + #define __EXPORTED_HEADERS__ 21 + #include <linux/vfio.h> 22 + 23 + #include "iommufd_utils.h" 24 + 25 + static bool have_fault_injection; 26 + 27 + static int writeat(int dfd, const char *fn, const char *val) 28 + { 29 + size_t val_len = strlen(val); 30 + ssize_t res; 31 + int fd; 32 + 33 + fd = openat(dfd, fn, O_WRONLY); 34 + if (fd == -1) 35 + return -1; 36 + res = write(fd, val, val_len); 37 + assert(res == val_len); 38 + close(fd); 39 + return 0; 40 + } 41 + 42 + static __attribute__((constructor)) void setup_buffer(void) 43 + { 44 + BUFFER_SIZE = 2*1024*1024; 45 + 46 + buffer = mmap(0, BUFFER_SIZE, PROT_READ | PROT_WRITE, 47 + MAP_SHARED | MAP_ANONYMOUS, -1, 0); 48 + } 49 + 50 + /* 51 + * This sets up fail_injection in a way that is useful for this test. 52 + * It does not attempt to restore things back to how they were. 53 + */ 54 + static __attribute__((constructor)) void setup_fault_injection(void) 55 + { 56 + DIR *debugfs = opendir("/sys/kernel/debug/"); 57 + struct dirent *dent; 58 + 59 + if (!debugfs) 60 + return; 61 + 62 + /* Allow any allocation call to be fault injected */ 63 + if (writeat(dirfd(debugfs), "failslab/ignore-gfp-wait", "N")) 64 + return; 65 + writeat(dirfd(debugfs), "fail_page_alloc/ignore-gfp-wait", "N"); 66 + writeat(dirfd(debugfs), "fail_page_alloc/ignore-gfp-highmem", "N"); 67 + 68 + while ((dent = readdir(debugfs))) { 69 + char fn[300]; 70 + 71 + if (strncmp(dent->d_name, "fail", 4) != 0) 72 + continue; 73 + 74 + /* We are looking for kernel splats, quiet down the log */ 75 + snprintf(fn, sizeof(fn), "%s/verbose", dent->d_name); 76 + writeat(dirfd(debugfs), fn, "0"); 77 + } 78 + closedir(debugfs); 79 + have_fault_injection = true; 80 + } 81 + 82 + struct fail_nth_state { 83 + int proc_fd; 84 + unsigned int iteration; 85 + }; 86 + 87 + static void fail_nth_first(struct __test_metadata *_metadata, 88 + struct fail_nth_state *nth_state) 89 + { 90 + char buf[300]; 91 + 92 + snprintf(buf, sizeof(buf), "/proc/self/task/%u/fail-nth", getpid()); 93 + nth_state->proc_fd = open(buf, O_RDWR); 94 + ASSERT_NE(-1, nth_state->proc_fd); 95 + } 96 + 97 + static bool fail_nth_next(struct __test_metadata *_metadata, 98 + struct fail_nth_state *nth_state, 99 + int test_result) 100 + { 101 + static const char disable_nth[] = "0"; 102 + char buf[300]; 103 + 104 + /* 105 + * This is just an arbitrary limit based on the current kernel 106 + * situation. Changes in the kernel can dramtically change the number of 107 + * required fault injection sites, so if this hits it doesn't 108 + * necessarily mean a test failure, just that the limit has to be made 109 + * bigger. 
110 + */ 111 + ASSERT_GT(400, nth_state->iteration); 112 + if (nth_state->iteration != 0) { 113 + ssize_t res; 114 + ssize_t res2; 115 + 116 + buf[0] = 0; 117 + /* 118 + * Annoyingly disabling the nth can also fail. This means 119 + * the test passed without triggering failure 120 + */ 121 + res = pread(nth_state->proc_fd, buf, sizeof(buf), 0); 122 + if (res == -1 && errno == EFAULT) { 123 + buf[0] = '1'; 124 + buf[1] = '\n'; 125 + res = 2; 126 + } 127 + 128 + res2 = pwrite(nth_state->proc_fd, disable_nth, 129 + ARRAY_SIZE(disable_nth) - 1, 0); 130 + if (res2 == -1 && errno == EFAULT) { 131 + res2 = pwrite(nth_state->proc_fd, disable_nth, 132 + ARRAY_SIZE(disable_nth) - 1, 0); 133 + buf[0] = '1'; 134 + buf[1] = '\n'; 135 + } 136 + ASSERT_EQ(ARRAY_SIZE(disable_nth) - 1, res2); 137 + 138 + /* printf(" nth %u result=%d nth=%u\n", nth_state->iteration, 139 + test_result, atoi(buf)); */ 140 + fflush(stdout); 141 + ASSERT_LT(1, res); 142 + if (res != 2 || buf[0] != '0' || buf[1] != '\n') 143 + return false; 144 + } else { 145 + /* printf(" nth %u result=%d\n", nth_state->iteration, 146 + test_result); */ 147 + } 148 + nth_state->iteration++; 149 + return true; 150 + } 151 + 152 + /* 153 + * This is called during the test to start failure injection. It allows the test 154 + * to do some setup that has already been swept and thus reduce the required 155 + * iterations. 156 + */ 157 + void __fail_nth_enable(struct __test_metadata *_metadata, 158 + struct fail_nth_state *nth_state) 159 + { 160 + char buf[300]; 161 + size_t len; 162 + 163 + if (!nth_state->iteration) 164 + return; 165 + 166 + len = snprintf(buf, sizeof(buf), "%u", nth_state->iteration); 167 + ASSERT_EQ(len, pwrite(nth_state->proc_fd, buf, len, 0)); 168 + } 169 + #define fail_nth_enable() __fail_nth_enable(_metadata, _nth_state) 170 + 171 + #define TEST_FAIL_NTH(fixture_name, name) \ 172 + static int test_nth_##name(struct __test_metadata *_metadata, \ 173 + FIXTURE_DATA(fixture_name) *self, \ 174 + const FIXTURE_VARIANT(fixture_name) \ 175 + *variant, \ 176 + struct fail_nth_state *_nth_state); \ 177 + TEST_F(fixture_name, name) \ 178 + { \ 179 + struct fail_nth_state nth_state = {}; \ 180 + int test_result = 0; \ 181 + \ 182 + if (!have_fault_injection) \ 183 + SKIP(return, \ 184 + "fault injection is not enabled in the kernel"); \ 185 + fail_nth_first(_metadata, &nth_state); \ 186 + ASSERT_EQ(0, test_nth_##name(_metadata, self, variant, \ 187 + &nth_state)); \ 188 + while (fail_nth_next(_metadata, &nth_state, test_result)) { \ 189 + fixture_name##_teardown(_metadata, self, variant); \ 190 + fixture_name##_setup(_metadata, self, variant); \ 191 + test_result = test_nth_##name(_metadata, self, \ 192 + variant, &nth_state); \ 193 + }; \ 194 + ASSERT_EQ(0, test_result); \ 195 + } \ 196 + static int test_nth_##name( \ 197 + struct __test_metadata __attribute__((unused)) *_metadata, \ 198 + FIXTURE_DATA(fixture_name) __attribute__((unused)) *self, \ 199 + const FIXTURE_VARIANT(fixture_name) __attribute__((unused)) \ 200 + *variant, \ 201 + struct fail_nth_state *_nth_state) 202 + 203 + FIXTURE(basic_fail_nth) 204 + { 205 + int fd; 206 + uint32_t access_id; 207 + }; 208 + 209 + FIXTURE_SETUP(basic_fail_nth) 210 + { 211 + self->fd = -1; 212 + self->access_id = 0; 213 + } 214 + 215 + FIXTURE_TEARDOWN(basic_fail_nth) 216 + { 217 + int rc; 218 + 219 + if (self->access_id) { 220 + /* The access FD holds the iommufd open until it closes */ 221 + rc = _test_cmd_destroy_access(self->access_id); 222 + assert(rc == 0); 223 + } 224 + 
teardown_iommufd(self->fd, _metadata); 225 + } 226 + 227 + /* Cover ioas.c */ 228 + TEST_FAIL_NTH(basic_fail_nth, basic) 229 + { 230 + struct iommu_iova_range ranges[10]; 231 + uint32_t ioas_id; 232 + __u64 iova; 233 + 234 + fail_nth_enable(); 235 + 236 + self->fd = open("/dev/iommu", O_RDWR); 237 + if (self->fd == -1) 238 + return -1; 239 + 240 + if (_test_ioctl_ioas_alloc(self->fd, &ioas_id)) 241 + return -1; 242 + 243 + { 244 + struct iommu_ioas_iova_ranges ranges_cmd = { 245 + .size = sizeof(ranges_cmd), 246 + .num_iovas = ARRAY_SIZE(ranges), 247 + .ioas_id = ioas_id, 248 + .allowed_iovas = (uintptr_t)ranges, 249 + }; 250 + if (ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd)) 251 + return -1; 252 + } 253 + 254 + { 255 + struct iommu_ioas_allow_iovas allow_cmd = { 256 + .size = sizeof(allow_cmd), 257 + .ioas_id = ioas_id, 258 + .num_iovas = 1, 259 + .allowed_iovas = (uintptr_t)ranges, 260 + }; 261 + 262 + ranges[0].start = 16*1024; 263 + ranges[0].last = BUFFER_SIZE + 16 * 1024 * 600 - 1; 264 + if (ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd)) 265 + return -1; 266 + } 267 + 268 + if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova, 269 + IOMMU_IOAS_MAP_WRITEABLE | 270 + IOMMU_IOAS_MAP_READABLE)) 271 + return -1; 272 + 273 + { 274 + struct iommu_ioas_copy copy_cmd = { 275 + .size = sizeof(copy_cmd), 276 + .flags = IOMMU_IOAS_MAP_WRITEABLE | 277 + IOMMU_IOAS_MAP_READABLE, 278 + .dst_ioas_id = ioas_id, 279 + .src_ioas_id = ioas_id, 280 + .src_iova = iova, 281 + .length = sizeof(ranges), 282 + }; 283 + 284 + if (ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd)) 285 + return -1; 286 + } 287 + 288 + if (_test_ioctl_ioas_unmap(self->fd, ioas_id, iova, BUFFER_SIZE, 289 + NULL)) 290 + return -1; 291 + /* Failure path of no IOVA to unmap */ 292 + _test_ioctl_ioas_unmap(self->fd, ioas_id, iova, BUFFER_SIZE, NULL); 293 + return 0; 294 + } 295 + 296 + /* iopt_area_fill_domains() and iopt_area_fill_domain() */ 297 + TEST_FAIL_NTH(basic_fail_nth, map_domain) 298 + { 299 + uint32_t ioas_id; 300 + __u32 device_id; 301 + __u32 hwpt_id; 302 + __u64 iova; 303 + 304 + self->fd = open("/dev/iommu", O_RDWR); 305 + if (self->fd == -1) 306 + return -1; 307 + 308 + if (_test_ioctl_ioas_alloc(self->fd, &ioas_id)) 309 + return -1; 310 + 311 + if (_test_ioctl_set_temp_memory_limit(self->fd, 32)) 312 + return -1; 313 + 314 + fail_nth_enable(); 315 + 316 + if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id, &hwpt_id)) 317 + return -1; 318 + 319 + if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova, 320 + IOMMU_IOAS_MAP_WRITEABLE | 321 + IOMMU_IOAS_MAP_READABLE)) 322 + return -1; 323 + 324 + if (_test_ioctl_destroy(self->fd, device_id)) 325 + return -1; 326 + if (_test_ioctl_destroy(self->fd, hwpt_id)) 327 + return -1; 328 + 329 + if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id, &hwpt_id)) 330 + return -1; 331 + return 0; 332 + } 333 + 334 + TEST_FAIL_NTH(basic_fail_nth, map_two_domains) 335 + { 336 + uint32_t ioas_id; 337 + __u32 device_id2; 338 + __u32 device_id; 339 + __u32 hwpt_id2; 340 + __u32 hwpt_id; 341 + __u64 iova; 342 + 343 + self->fd = open("/dev/iommu", O_RDWR); 344 + if (self->fd == -1) 345 + return -1; 346 + 347 + if (_test_ioctl_ioas_alloc(self->fd, &ioas_id)) 348 + return -1; 349 + 350 + if (_test_ioctl_set_temp_memory_limit(self->fd, 32)) 351 + return -1; 352 + 353 + if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id, &hwpt_id)) 354 + return -1; 355 + 356 + fail_nth_enable(); 357 + 358 + if (_test_cmd_mock_domain(self->fd, ioas_id, 
&device_id2, &hwpt_id2)) 359 + return -1; 360 + 361 + if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova, 362 + IOMMU_IOAS_MAP_WRITEABLE | 363 + IOMMU_IOAS_MAP_READABLE)) 364 + return -1; 365 + 366 + if (_test_ioctl_destroy(self->fd, device_id)) 367 + return -1; 368 + if (_test_ioctl_destroy(self->fd, hwpt_id)) 369 + return -1; 370 + 371 + if (_test_ioctl_destroy(self->fd, device_id2)) 372 + return -1; 373 + if (_test_ioctl_destroy(self->fd, hwpt_id2)) 374 + return -1; 375 + 376 + if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id, &hwpt_id)) 377 + return -1; 378 + if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id2, &hwpt_id2)) 379 + return -1; 380 + return 0; 381 + } 382 + 383 + TEST_FAIL_NTH(basic_fail_nth, access_rw) 384 + { 385 + uint64_t tmp_big[4096]; 386 + uint32_t ioas_id; 387 + uint16_t tmp[32]; 388 + __u64 iova; 389 + 390 + self->fd = open("/dev/iommu", O_RDWR); 391 + if (self->fd == -1) 392 + return -1; 393 + 394 + if (_test_ioctl_ioas_alloc(self->fd, &ioas_id)) 395 + return -1; 396 + 397 + if (_test_ioctl_set_temp_memory_limit(self->fd, 32)) 398 + return -1; 399 + 400 + if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova, 401 + IOMMU_IOAS_MAP_WRITEABLE | 402 + IOMMU_IOAS_MAP_READABLE)) 403 + return -1; 404 + 405 + fail_nth_enable(); 406 + 407 + if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id, 0)) 408 + return -1; 409 + 410 + { 411 + struct iommu_test_cmd access_cmd = { 412 + .size = sizeof(access_cmd), 413 + .op = IOMMU_TEST_OP_ACCESS_RW, 414 + .id = self->access_id, 415 + .access_rw = { .iova = iova, 416 + .length = sizeof(tmp), 417 + .uptr = (uintptr_t)tmp }, 418 + }; 419 + 420 + // READ 421 + if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW), 422 + &access_cmd)) 423 + return -1; 424 + 425 + access_cmd.access_rw.flags = MOCK_ACCESS_RW_WRITE; 426 + if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW), 427 + &access_cmd)) 428 + return -1; 429 + 430 + access_cmd.access_rw.flags = MOCK_ACCESS_RW_SLOW_PATH; 431 + if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW), 432 + &access_cmd)) 433 + return -1; 434 + access_cmd.access_rw.flags = MOCK_ACCESS_RW_SLOW_PATH | 435 + MOCK_ACCESS_RW_WRITE; 436 + if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW), 437 + &access_cmd)) 438 + return -1; 439 + } 440 + 441 + { 442 + struct iommu_test_cmd access_cmd = { 443 + .size = sizeof(access_cmd), 444 + .op = IOMMU_TEST_OP_ACCESS_RW, 445 + .id = self->access_id, 446 + .access_rw = { .iova = iova, 447 + .flags = MOCK_ACCESS_RW_SLOW_PATH, 448 + .length = sizeof(tmp_big), 449 + .uptr = (uintptr_t)tmp_big }, 450 + }; 451 + 452 + if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW), 453 + &access_cmd)) 454 + return -1; 455 + } 456 + if (_test_cmd_destroy_access(self->access_id)) 457 + return -1; 458 + self->access_id = 0; 459 + return 0; 460 + } 461 + 462 + /* pages.c access functions */ 463 + TEST_FAIL_NTH(basic_fail_nth, access_pin) 464 + { 465 + uint32_t access_pages_id; 466 + uint32_t ioas_id; 467 + __u64 iova; 468 + 469 + self->fd = open("/dev/iommu", O_RDWR); 470 + if (self->fd == -1) 471 + return -1; 472 + 473 + if (_test_ioctl_ioas_alloc(self->fd, &ioas_id)) 474 + return -1; 475 + 476 + if (_test_ioctl_set_temp_memory_limit(self->fd, 32)) 477 + return -1; 478 + 479 + if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova, 480 + IOMMU_IOAS_MAP_WRITEABLE | 481 + IOMMU_IOAS_MAP_READABLE)) 482 + return -1; 483 + 484 + if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id, 
485 + MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES)) 486 + return -1; 487 + 488 + fail_nth_enable(); 489 + 490 + { 491 + struct iommu_test_cmd access_cmd = { 492 + .size = sizeof(access_cmd), 493 + .op = IOMMU_TEST_OP_ACCESS_PAGES, 494 + .id = self->access_id, 495 + .access_pages = { .iova = iova, 496 + .length = BUFFER_SIZE, 497 + .uptr = (uintptr_t)buffer }, 498 + }; 499 + 500 + if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW), 501 + &access_cmd)) 502 + return -1; 503 + access_pages_id = access_cmd.access_pages.out_access_pages_id; 504 + } 505 + 506 + if (_test_cmd_destroy_access_pages(self->fd, self->access_id, 507 + access_pages_id)) 508 + return -1; 509 + 510 + if (_test_cmd_destroy_access(self->access_id)) 511 + return -1; 512 + self->access_id = 0; 513 + return 0; 514 + } 515 + 516 + /* iopt_pages_fill_xarray() */ 517 + TEST_FAIL_NTH(basic_fail_nth, access_pin_domain) 518 + { 519 + uint32_t access_pages_id; 520 + uint32_t ioas_id; 521 + __u32 device_id; 522 + __u32 hwpt_id; 523 + __u64 iova; 524 + 525 + self->fd = open("/dev/iommu", O_RDWR); 526 + if (self->fd == -1) 527 + return -1; 528 + 529 + if (_test_ioctl_ioas_alloc(self->fd, &ioas_id)) 530 + return -1; 531 + 532 + if (_test_ioctl_set_temp_memory_limit(self->fd, 32)) 533 + return -1; 534 + 535 + if (_test_cmd_mock_domain(self->fd, ioas_id, &device_id, &hwpt_id)) 536 + return -1; 537 + 538 + if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova, 539 + IOMMU_IOAS_MAP_WRITEABLE | 540 + IOMMU_IOAS_MAP_READABLE)) 541 + return -1; 542 + 543 + if (_test_cmd_create_access(self->fd, ioas_id, &self->access_id, 544 + MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES)) 545 + return -1; 546 + 547 + fail_nth_enable(); 548 + 549 + { 550 + struct iommu_test_cmd access_cmd = { 551 + .size = sizeof(access_cmd), 552 + .op = IOMMU_TEST_OP_ACCESS_PAGES, 553 + .id = self->access_id, 554 + .access_pages = { .iova = iova, 555 + .length = BUFFER_SIZE, 556 + .uptr = (uintptr_t)buffer }, 557 + }; 558 + 559 + if (ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW), 560 + &access_cmd)) 561 + return -1; 562 + access_pages_id = access_cmd.access_pages.out_access_pages_id; 563 + } 564 + 565 + if (_test_cmd_destroy_access_pages(self->fd, self->access_id, 566 + access_pages_id)) 567 + return -1; 568 + 569 + if (_test_cmd_destroy_access(self->access_id)) 570 + return -1; 571 + self->access_id = 0; 572 + 573 + if (_test_ioctl_destroy(self->fd, device_id)) 574 + return -1; 575 + if (_test_ioctl_destroy(self->fd, hwpt_id)) 576 + return -1; 577 + return 0; 578 + } 579 + 580 + TEST_HARNESS_MAIN
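The header comment at the top of iommufd_fail_nth.c describes the sweep, and fail_nth_first()/fail_nth_next() drive it through the standard fault-injection proc interface: write an iteration count to /proc/self/task/<tid>/fail-nth to arm the Nth injectable call, rerun the fixture, then read the file back. A read of "0\n" means the armed failure actually fired; anything else means the run completed before reaching the Nth site, so the whole call chain has been swept. A stripped-down sketch of that loop (hypothetical; the iommufd-specific body is reduced to a stub and error handling is trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Stub standing in for one run of the operation being swept,
 * e.g. the body of TEST_FAIL_NTH(basic_fail_nth, basic) above. */
static void run_test_once(void)
{
}

int main(void)
{
	char path[64];
	char buf[16];
	unsigned int nth;
	int fd;

	snprintf(path, sizeof(path), "/proc/self/task/%u/fail-nth", getpid());
	fd = open(path, O_RDWR);
	if (fd < 0)
		return 1;

	/* The 400 cap mirrors the harness's arbitrary iteration limit */
	for (nth = 1; nth < 400; nth++) {
		int len = snprintf(buf, sizeof(buf), "%u", nth);

		/* Arm fault injection: the nth injectable call will fail */
		if (pwrite(fd, buf, len, 0) != len)
			break;

		run_test_once();

		/* "0\n" means the armed failure was consumed during the run */
		if (pread(fd, buf, sizeof(buf), 0) != 2 || buf[0] != '0')
			break;	/* ran to completion before site nth: sweep done */

		pwrite(fd, "0", 1, 0);	/* disarm before the next iteration */
	}
	close(fd);
	return 0;
}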
+278
tools/testing/selftests/iommu/iommufd_utils.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */ 3 + #ifndef __SELFTEST_IOMMUFD_UTILS 4 + #define __SELFTEST_IOMMUFD_UTILS 5 + 6 + #include <unistd.h> 7 + #include <stddef.h> 8 + #include <sys/fcntl.h> 9 + #include <sys/ioctl.h> 10 + #include <stdint.h> 11 + #include <assert.h> 12 + 13 + #include "../kselftest_harness.h" 14 + #include "../../../../drivers/iommu/iommufd/iommufd_test.h" 15 + 16 + /* Hack to make assertions more readable */ 17 + #define _IOMMU_TEST_CMD(x) IOMMU_TEST_CMD 18 + 19 + static void *buffer; 20 + static unsigned long BUFFER_SIZE; 21 + 22 + /* 23 + * Have the kernel check the refcount on pages. I don't know why a freshly 24 + * mmap'd anon non-compound page starts out with a ref of 3 25 + */ 26 + #define check_refs(_ptr, _length, _refs) \ 27 + ({ \ 28 + struct iommu_test_cmd test_cmd = { \ 29 + .size = sizeof(test_cmd), \ 30 + .op = IOMMU_TEST_OP_MD_CHECK_REFS, \ 31 + .check_refs = { .length = _length, \ 32 + .uptr = (uintptr_t)(_ptr), \ 33 + .refs = _refs }, \ 34 + }; \ 35 + ASSERT_EQ(0, \ 36 + ioctl(self->fd, \ 37 + _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS), \ 38 + &test_cmd)); \ 39 + }) 40 + 41 + static int _test_cmd_mock_domain(int fd, unsigned int ioas_id, __u32 *device_id, 42 + __u32 *hwpt_id) 43 + { 44 + struct iommu_test_cmd cmd = { 45 + .size = sizeof(cmd), 46 + .op = IOMMU_TEST_OP_MOCK_DOMAIN, 47 + .id = ioas_id, 48 + .mock_domain = {}, 49 + }; 50 + int ret; 51 + 52 + ret = ioctl(fd, IOMMU_TEST_CMD, &cmd); 53 + if (ret) 54 + return ret; 55 + if (device_id) 56 + *device_id = cmd.mock_domain.out_device_id; 57 + assert(cmd.id != 0); 58 + if (hwpt_id) 59 + *hwpt_id = cmd.mock_domain.out_hwpt_id; 60 + return 0; 61 + } 62 + #define test_cmd_mock_domain(ioas_id, device_id, hwpt_id) \ 63 + ASSERT_EQ(0, _test_cmd_mock_domain(self->fd, ioas_id, device_id, \ 64 + hwpt_id)) 65 + #define test_err_mock_domain(_errno, ioas_id, device_id, hwpt_id) \ 66 + EXPECT_ERRNO(_errno, _test_cmd_mock_domain(self->fd, ioas_id, \ 67 + device_id, hwpt_id)) 68 + 69 + static int _test_cmd_create_access(int fd, unsigned int ioas_id, 70 + __u32 *access_id, unsigned int flags) 71 + { 72 + struct iommu_test_cmd cmd = { 73 + .size = sizeof(cmd), 74 + .op = IOMMU_TEST_OP_CREATE_ACCESS, 75 + .id = ioas_id, 76 + .create_access = { .flags = flags }, 77 + }; 78 + int ret; 79 + 80 + ret = ioctl(fd, IOMMU_TEST_CMD, &cmd); 81 + if (ret) 82 + return ret; 83 + *access_id = cmd.create_access.out_access_fd; 84 + return 0; 85 + } 86 + #define test_cmd_create_access(ioas_id, access_id, flags) \ 87 + ASSERT_EQ(0, _test_cmd_create_access(self->fd, ioas_id, access_id, \ 88 + flags)) 89 + 90 + static int _test_cmd_destroy_access(unsigned int access_id) 91 + { 92 + return close(access_id); 93 + } 94 + #define test_cmd_destroy_access(access_id) \ 95 + ASSERT_EQ(0, _test_cmd_destroy_access(access_id)) 96 + 97 + static int _test_cmd_destroy_access_pages(int fd, unsigned int access_id, 98 + unsigned int access_pages_id) 99 + { 100 + struct iommu_test_cmd cmd = { 101 + .size = sizeof(cmd), 102 + .op = IOMMU_TEST_OP_DESTROY_ACCESS_PAGES, 103 + .id = access_id, 104 + .destroy_access_pages = { .access_pages_id = access_pages_id }, 105 + }; 106 + return ioctl(fd, IOMMU_TEST_CMD, &cmd); 107 + } 108 + #define test_cmd_destroy_access_pages(access_id, access_pages_id) \ 109 + ASSERT_EQ(0, _test_cmd_destroy_access_pages(self->fd, access_id, \ 110 + access_pages_id)) 111 + #define test_err_destroy_access_pages(_errno, access_id, access_pages_id) \ 112 + 
EXPECT_ERRNO(_errno, _test_cmd_destroy_access_pages( \ 113 + self->fd, access_id, access_pages_id)) 114 + 115 + static int _test_ioctl_destroy(int fd, unsigned int id) 116 + { 117 + struct iommu_destroy cmd = { 118 + .size = sizeof(cmd), 119 + .id = id, 120 + }; 121 + return ioctl(fd, IOMMU_DESTROY, &cmd); 122 + } 123 + #define test_ioctl_destroy(id) ASSERT_EQ(0, _test_ioctl_destroy(self->fd, id)) 124 + 125 + static int _test_ioctl_ioas_alloc(int fd, __u32 *id) 126 + { 127 + struct iommu_ioas_alloc cmd = { 128 + .size = sizeof(cmd), 129 + }; 130 + int ret; 131 + 132 + ret = ioctl(fd, IOMMU_IOAS_ALLOC, &cmd); 133 + if (ret) 134 + return ret; 135 + *id = cmd.out_ioas_id; 136 + return 0; 137 + } 138 + #define test_ioctl_ioas_alloc(id) \ 139 + ({ \ 140 + ASSERT_EQ(0, _test_ioctl_ioas_alloc(self->fd, id)); \ 141 + ASSERT_NE(0, *(id)); \ 142 + }) 143 + 144 + static int _test_ioctl_ioas_map(int fd, unsigned int ioas_id, void *buffer, 145 + size_t length, __u64 *iova, unsigned int flags) 146 + { 147 + struct iommu_ioas_map cmd = { 148 + .size = sizeof(cmd), 149 + .flags = flags, 150 + .ioas_id = ioas_id, 151 + .user_va = (uintptr_t)buffer, 152 + .length = length, 153 + }; 154 + int ret; 155 + 156 + if (flags & IOMMU_IOAS_MAP_FIXED_IOVA) 157 + cmd.iova = *iova; 158 + 159 + ret = ioctl(fd, IOMMU_IOAS_MAP, &cmd); 160 + *iova = cmd.iova; 161 + return ret; 162 + } 163 + #define test_ioctl_ioas_map(buffer, length, iova_p) \ 164 + ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \ 165 + length, iova_p, \ 166 + IOMMU_IOAS_MAP_WRITEABLE | \ 167 + IOMMU_IOAS_MAP_READABLE)) 168 + 169 + #define test_err_ioctl_ioas_map(_errno, buffer, length, iova_p) \ 170 + EXPECT_ERRNO(_errno, \ 171 + _test_ioctl_ioas_map(self->fd, self->ioas_id, buffer, \ 172 + length, iova_p, \ 173 + IOMMU_IOAS_MAP_WRITEABLE | \ 174 + IOMMU_IOAS_MAP_READABLE)) 175 + 176 + #define test_ioctl_ioas_map_id(ioas_id, buffer, length, iova_p) \ 177 + ASSERT_EQ(0, _test_ioctl_ioas_map(self->fd, ioas_id, buffer, length, \ 178 + iova_p, \ 179 + IOMMU_IOAS_MAP_WRITEABLE | \ 180 + IOMMU_IOAS_MAP_READABLE)) 181 + 182 + #define test_ioctl_ioas_map_fixed(buffer, length, iova) \ 183 + ({ \ 184 + __u64 __iova = iova; \ 185 + ASSERT_EQ(0, _test_ioctl_ioas_map( \ 186 + self->fd, self->ioas_id, buffer, length, \ 187 + &__iova, \ 188 + IOMMU_IOAS_MAP_FIXED_IOVA | \ 189 + IOMMU_IOAS_MAP_WRITEABLE | \ 190 + IOMMU_IOAS_MAP_READABLE)); \ 191 + }) 192 + 193 + #define test_err_ioctl_ioas_map_fixed(_errno, buffer, length, iova) \ 194 + ({ \ 195 + __u64 __iova = iova; \ 196 + EXPECT_ERRNO(_errno, \ 197 + _test_ioctl_ioas_map( \ 198 + self->fd, self->ioas_id, buffer, length, \ 199 + &__iova, \ 200 + IOMMU_IOAS_MAP_FIXED_IOVA | \ 201 + IOMMU_IOAS_MAP_WRITEABLE | \ 202 + IOMMU_IOAS_MAP_READABLE)); \ 203 + }) 204 + 205 + static int _test_ioctl_ioas_unmap(int fd, unsigned int ioas_id, uint64_t iova, 206 + size_t length, uint64_t *out_len) 207 + { 208 + struct iommu_ioas_unmap cmd = { 209 + .size = sizeof(cmd), 210 + .ioas_id = ioas_id, 211 + .iova = iova, 212 + .length = length, 213 + }; 214 + int ret; 215 + 216 + ret = ioctl(fd, IOMMU_IOAS_UNMAP, &cmd); 217 + if (out_len) 218 + *out_len = cmd.length; 219 + return ret; 220 + } 221 + #define test_ioctl_ioas_unmap(iova, length) \ 222 + ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, iova, \ 223 + length, NULL)) 224 + 225 + #define test_ioctl_ioas_unmap_id(ioas_id, iova, length) \ 226 + ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, ioas_id, iova, length, \ 227 + NULL)) 228 + 229 + #define 
test_err_ioctl_ioas_unmap(_errno, iova, length) \ 230 + EXPECT_ERRNO(_errno, _test_ioctl_ioas_unmap(self->fd, self->ioas_id, \ 231 + iova, length, NULL)) 232 + 233 + static int _test_ioctl_set_temp_memory_limit(int fd, unsigned int limit) 234 + { 235 + struct iommu_test_cmd memlimit_cmd = { 236 + .size = sizeof(memlimit_cmd), 237 + .op = IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT, 238 + .memory_limit = { .limit = limit }, 239 + }; 240 + 241 + return ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT), 242 + &memlimit_cmd); 243 + } 244 + 245 + #define test_ioctl_set_temp_memory_limit(limit) \ 246 + ASSERT_EQ(0, _test_ioctl_set_temp_memory_limit(self->fd, limit)) 247 + 248 + #define test_ioctl_set_default_memory_limit() \ 249 + test_ioctl_set_temp_memory_limit(65536) 250 + 251 + static void teardown_iommufd(int fd, struct __test_metadata *_metadata) 252 + { 253 + struct iommu_test_cmd test_cmd = { 254 + .size = sizeof(test_cmd), 255 + .op = IOMMU_TEST_OP_MD_CHECK_REFS, 256 + .check_refs = { .length = BUFFER_SIZE, 257 + .uptr = (uintptr_t)buffer }, 258 + }; 259 + 260 + if (fd == -1) 261 + return; 262 + 263 + EXPECT_EQ(0, close(fd)); 264 + 265 + fd = open("/dev/iommu", O_RDWR); 266 + EXPECT_NE(-1, fd); 267 + EXPECT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_REFS), 268 + &test_cmd)); 269 + EXPECT_EQ(0, close(fd)); 270 + } 271 + 272 + #define EXPECT_ERRNO(expected_errno, cmd) \ 273 + ({ \ 274 + ASSERT_EQ(-1, cmd); \ 275 + EXPECT_EQ(expected_errno, errno); \ 276 + }) 277 + 278 + #endif
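Taken together, these helpers keep the tests themselves short: a fixture supplies the fd (and usually an ioas_id) that the self-> references in the wrapper macros expect, and each ioctl becomes a one-line assertion. A minimal sketch of a test written against this header is below; it is illustrative only, and the fixture shape, the constructor-allocated buffer, and the ENOENT expectation on the second unmap are assumptions modelled on the selftests above rather than part of the patch.

#include <errno.h>
#include <sys/mman.h>

#include "iommufd_utils.h"

/* Back the map helpers with a real buffer, as the selftests do */
static __attribute__((constructor)) void setup_buffer(void)
{
	BUFFER_SIZE = 2 * 1024 * 1024;
	buffer = mmap(0, BUFFER_SIZE, PROT_READ | PROT_WRITE,
		      MAP_SHARED | MAP_ANONYMOUS, -1, 0);
}

FIXTURE(ioas_smoke)
{
	int fd;
	uint32_t ioas_id;
};

FIXTURE_SETUP(ioas_smoke)
{
	self->fd = open("/dev/iommu", O_RDWR);
	ASSERT_NE(-1, self->fd);
	test_ioctl_ioas_alloc(&self->ioas_id);
}

FIXTURE_TEARDOWN(ioas_smoke)
{
	teardown_iommufd(self->fd, _metadata);
}

TEST_F(ioas_smoke, map_unmap)
{
	__u64 iova;

	test_ioctl_ioas_map(buffer, BUFFER_SIZE, &iova);
	test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
	/* A second unmap of the now-empty range should fail; ENOENT assumed */
	test_err_ioctl_ioas_unmap(ENOENT, iova, BUFFER_SIZE);
}

TEST_HARNESS_MAIN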