1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
5 #include <sys/eventfd.h>
7 #define __EXPORTED_HEADERS__
8 #include <linux/vfio.h>
10 #include "iommufd_utils.h"
12 static unsigned long HUGEPAGE_SIZE;
14 #define MOCK_PAGE_SIZE (PAGE_SIZE / 2)
16 static unsigned long get_huge_page_size(void)
22 fd = open("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size",
25 return 2 * 1024 * 1024;
27 ret = read(fd, buf, sizeof(buf));
29 if (ret <= 0 || ret == sizeof(buf))
30 return 2 * 1024 * 1024;
32 return strtoul(buf, NULL, 10);
35 static __attribute__((constructor)) void setup_sizes(void)
40 PAGE_SIZE = sysconf(_SC_PAGE_SIZE);
41 HUGEPAGE_SIZE = get_huge_page_size();
43 BUFFER_SIZE = PAGE_SIZE * 16;
44 rc = posix_memalign(&buffer, HUGEPAGE_SIZE, BUFFER_SIZE);
47 assert((uintptr_t)buffer % HUGEPAGE_SIZE == 0);
48 vrc = mmap(buffer, BUFFER_SIZE, PROT_READ | PROT_WRITE,
49 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
50 assert(vrc == buffer);
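/*
 * Note: __attribute__((constructor)) runs setup_sizes() once before main().
 * The BUFFER_SIZE allocation is aligned to the huge page size and then
 * re-mapped in place as MAP_SHARED | MAP_ANONYMOUS; presumably the shared
 * anonymous mapping is what lets the fork-based tests later in this file see
 * the same pages from a child process.
 */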
58 FIXTURE_SETUP(iommufd)
60 self->fd = open("/dev/iommu", O_RDWR);
61 ASSERT_NE(-1, self->fd);
64 FIXTURE_TEARDOWN(iommufd)
66 teardown_iommufd(self->fd, _metadata);
69 TEST_F(iommufd, simple_close)
73 TEST_F(iommufd, cmd_fail)
75 struct iommu_destroy cmd = { .size = sizeof(cmd), .id = 0 };
77 /* object id is invalid */
78 EXPECT_ERRNO(ENOENT, _test_ioctl_destroy(self->fd, 0));
80 EXPECT_ERRNO(EFAULT, ioctl(self->fd, IOMMU_DESTROY, NULL));
83 ioctl(self->fd, _IO(IOMMUFD_TYPE, IOMMUFD_CMD_BASE - 1),
87 TEST_F(iommufd, cmd_length)
89 #define TEST_LENGTH(_struct, _ioctl, _last) \
91 size_t min_size = offsetofend(struct _struct, _last); \
95 } cmd = { .cmd = { .size = min_size - 1 }, \
96 .extra = UINT8_MAX }; \
100 EXPECT_ERRNO(EINVAL, ioctl(self->fd, _ioctl, &cmd)); \
101 cmd.cmd.size = sizeof(struct _struct) + 1; \
102 EXPECT_ERRNO(E2BIG, ioctl(self->fd, _ioctl, &cmd)); \
103 cmd.cmd.size = sizeof(struct _struct); \
104 rc = ioctl(self->fd, _ioctl, &cmd); \
106 cmd.cmd.size = sizeof(struct _struct) + 1; \
109 EXPECT_ERRNO(old_errno, \
110 ioctl(self->fd, _ioctl, &cmd)); \
112 ASSERT_EQ(0, ioctl(self->fd, _ioctl, &cmd)); \
116 TEST_LENGTH(iommu_destroy, IOMMU_DESTROY, id);
117 TEST_LENGTH(iommu_hw_info, IOMMU_GET_HW_INFO, __reserved);
118 TEST_LENGTH(iommu_hwpt_alloc, IOMMU_HWPT_ALLOC, __reserved);
119 TEST_LENGTH(iommu_hwpt_invalidate, IOMMU_HWPT_INVALIDATE, __reserved);
120 TEST_LENGTH(iommu_ioas_alloc, IOMMU_IOAS_ALLOC, out_ioas_id);
121 TEST_LENGTH(iommu_ioas_iova_ranges, IOMMU_IOAS_IOVA_RANGES,
123 TEST_LENGTH(iommu_ioas_allow_iovas, IOMMU_IOAS_ALLOW_IOVAS,
125 TEST_LENGTH(iommu_ioas_map, IOMMU_IOAS_MAP, iova);
126 TEST_LENGTH(iommu_ioas_copy, IOMMU_IOAS_COPY, src_iova);
127 TEST_LENGTH(iommu_ioas_unmap, IOMMU_IOAS_UNMAP, length);
128 TEST_LENGTH(iommu_option, IOMMU_OPTION, val64);
129 TEST_LENGTH(iommu_vfio_ioas, IOMMU_VFIO_IOAS, __reserved);
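/*
 * The invocations above exercise the common ucmd size handling: any size
 * below offsetofend(struct, _last) fails with EINVAL, a size past the end of
 * the kernel struct fails with E2BIG while the trailing byte (.extra) is
 * non-zero, and the exact struct size is accepted. (The elided lines
 * presumably zero .extra before retrying the oversized call, which then
 * behaves like the exact-size one.) offsetofend() is the kernel helper for
 * "first byte past MEMBER", roughly offsetof(TYPE, MEMBER) plus the
 * member's size.
 */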
133 TEST_F(iommufd, cmd_ex_fail)
136 struct iommu_destroy cmd;
138 } cmd = { .cmd = { .size = sizeof(cmd), .id = 0 } };
140 /* object id is invalid and command is longer */
141 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
142 /* future area is non-zero */
144 EXPECT_ERRNO(E2BIG, ioctl(self->fd, IOMMU_DESTROY, &cmd));
145 /* Original command "works" */
146 cmd.cmd.size = sizeof(cmd.cmd);
147 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_DESTROY, &cmd));
148 /* Short command fails */
149 cmd.cmd.size = sizeof(cmd.cmd) - 1;
150 EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_DESTROY, &cmd));
153 TEST_F(iommufd, global_options)
155 struct iommu_option cmd = {
157 .option_id = IOMMU_OPTION_RLIMIT_MODE,
158 .op = IOMMU_OPTION_OP_GET,
162 cmd.option_id = IOMMU_OPTION_RLIMIT_MODE;
163 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
164 ASSERT_EQ(0, cmd.val64);
166 /* This requires root */
167 cmd.op = IOMMU_OPTION_OP_SET;
169 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
171 EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));
173 cmd.op = IOMMU_OPTION_OP_GET;
174 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
175 ASSERT_EQ(1, cmd.val64);
177 cmd.op = IOMMU_OPTION_OP_SET;
179 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
181 cmd.op = IOMMU_OPTION_OP_GET;
182 cmd.option_id = IOMMU_OPTION_HUGE_PAGES;
183 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
184 cmd.op = IOMMU_OPTION_OP_SET;
185 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_OPTION, &cmd));
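/*
 * IOMMU_OPTION_RLIMIT_MODE is a global option, so it works with no
 * object_id, while IOMMU_OPTION_HUGE_PAGES is a per-IOAS option (see
 * ioas_option_huge_pages below); with no object to target the last two
 * calls fail with ENOENT.
 */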
188 FIXTURE(iommufd_ioas)
198 FIXTURE_VARIANT(iommufd_ioas)
200 unsigned int mock_domains;
201 unsigned int memory_limit;
204 FIXTURE_SETUP(iommufd_ioas)
209 self->fd = open("/dev/iommu", O_RDWR);
210 ASSERT_NE(-1, self->fd);
211 test_ioctl_ioas_alloc(&self->ioas_id);
213 if (!variant->memory_limit) {
214 test_ioctl_set_default_memory_limit();
216 test_ioctl_set_temp_memory_limit(variant->memory_limit);
219 for (i = 0; i != variant->mock_domains; i++) {
220 test_cmd_mock_domain(self->ioas_id, &self->stdev_id,
221 &self->hwpt_id, &self->device_id);
222 self->base_iova = MOCK_APERTURE_START;
226 FIXTURE_TEARDOWN(iommufd_ioas)
228 test_ioctl_set_default_memory_limit();
229 teardown_iommufd(self->fd, _metadata);
232 FIXTURE_VARIANT_ADD(iommufd_ioas, no_domain)
236 FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain)
241 FIXTURE_VARIANT_ADD(iommufd_ioas, two_mock_domain)
246 FIXTURE_VARIANT_ADD(iommufd_ioas, mock_domain_limit)
252 TEST_F(iommufd_ioas, ioas_auto_destroy)
256 TEST_F(iommufd_ioas, ioas_destroy)
258 if (self->stdev_id) {
259 /* IOAS cannot be freed while a device has a HWPT using it */
261 _test_ioctl_destroy(self->fd, self->ioas_id));
263 /* Can allocate and manually free an IOAS table */
264 test_ioctl_destroy(self->ioas_id);
268 TEST_F(iommufd_ioas, alloc_hwpt_nested)
270 const uint32_t min_data_len =
271 offsetofend(struct iommu_hwpt_selftest, iotlb);
272 struct iommu_hwpt_selftest data = {
273 .iotlb = IOMMU_TEST_IOTLB_DEFAULT,
275 struct iommu_hwpt_invalidate_selftest inv_reqs[2] = {};
276 uint32_t nested_hwpt_id[2] = {};
278 uint32_t parent_hwpt_id = 0;
279 uint32_t parent_hwpt_id_not_work = 0;
280 uint32_t test_hwpt_id = 0;
282 if (self->device_id) {
284 test_err_hwpt_alloc(ENOENT, self->ioas_id, self->device_id, 0,
286 test_err_hwpt_alloc(EINVAL, self->device_id, self->device_id, 0,
289 test_cmd_hwpt_alloc(self->device_id, self->ioas_id,
290 IOMMU_HWPT_ALLOC_NEST_PARENT,
293 test_cmd_hwpt_alloc(self->device_id, self->ioas_id, 0,
294 &parent_hwpt_id_not_work);
296 /* Negative nested tests */
297 test_err_hwpt_alloc_nested(EINVAL, self->device_id,
300 IOMMU_HWPT_DATA_NONE, &data,
302 test_err_hwpt_alloc_nested(EOPNOTSUPP, self->device_id,
305 IOMMU_HWPT_DATA_SELFTEST + 1, &data,
307 test_err_hwpt_alloc_nested(EINVAL, self->device_id,
310 IOMMU_HWPT_DATA_SELFTEST, &data,
312 test_err_hwpt_alloc_nested(EFAULT, self->device_id,
315 IOMMU_HWPT_DATA_SELFTEST, NULL,
317 test_err_hwpt_alloc_nested(
318 EOPNOTSUPP, self->device_id, parent_hwpt_id,
319 IOMMU_HWPT_ALLOC_NEST_PARENT, &nested_hwpt_id[0],
320 IOMMU_HWPT_DATA_SELFTEST, &data, sizeof(data));
321 test_err_hwpt_alloc_nested(EINVAL, self->device_id,
322 parent_hwpt_id_not_work, 0,
324 IOMMU_HWPT_DATA_SELFTEST, &data,
327 /* Allocate two nested hwpts sharing one common parent hwpt */
328 test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
330 IOMMU_HWPT_DATA_SELFTEST, &data,
332 test_cmd_hwpt_alloc_nested(self->device_id, parent_hwpt_id, 0,
334 IOMMU_HWPT_DATA_SELFTEST, &data,
336 test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0],
337 IOMMU_TEST_IOTLB_DEFAULT);
338 test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1],
339 IOMMU_TEST_IOTLB_DEFAULT);
341 /* Negative test: a nested hwpt on top of a nested hwpt */
342 test_err_hwpt_alloc_nested(EINVAL, self->device_id,
343 nested_hwpt_id[0], 0, &test_hwpt_id,
344 IOMMU_HWPT_DATA_SELFTEST, &data,
346 /* Negative test: parent hwpt now cannot be freed */
348 _test_ioctl_destroy(self->fd, parent_hwpt_id));
350 /* hwpt_invalidate only supports a user-managed hwpt (nested) */
352 test_err_hwpt_invalidate(ENOENT, parent_hwpt_id, inv_reqs,
353 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
354 sizeof(*inv_reqs), &num_inv);
357 /* Check data_type by passing zero-length array */
359 test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
360 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
361 sizeof(*inv_reqs), &num_inv);
364 /* Negative test: Invalid data_type */
366 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
367 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST_INVALID,
368 sizeof(*inv_reqs), &num_inv);
371 /* Negative test: structure size sanity */
373 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
374 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
375 sizeof(*inv_reqs) + 1, &num_inv);
379 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
380 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
384 /* Negative test: invalid flag is passed */
386 inv_reqs[0].flags = 0xffffffff;
387 test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
388 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
389 sizeof(*inv_reqs), &num_inv);
392 /* Negative test: invalid data_uptr when array is not empty */
394 inv_reqs[0].flags = 0;
395 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], NULL,
396 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
397 sizeof(*inv_reqs), &num_inv);
400 /* Negative test: invalid entry_len when array is not empty */
402 inv_reqs[0].flags = 0;
403 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
404 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
408 /* Negative test: invalid iotlb_id */
410 inv_reqs[0].flags = 0;
411 inv_reqs[0].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
412 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
413 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
414 sizeof(*inv_reqs), &num_inv);
418 * Invalidate the 1st iotlb entry but fail the 2nd request
419 * due to invalid flags configuration in the 2nd request.
422 inv_reqs[0].flags = 0;
423 inv_reqs[0].iotlb_id = 0;
424 inv_reqs[1].flags = 0xffffffff;
425 inv_reqs[1].iotlb_id = 1;
426 test_err_hwpt_invalidate(EOPNOTSUPP, nested_hwpt_id[0], inv_reqs,
427 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
428 sizeof(*inv_reqs), &num_inv);
429 assert(num_inv == 1);
430 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
431 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
432 IOMMU_TEST_IOTLB_DEFAULT);
433 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
434 IOMMU_TEST_IOTLB_DEFAULT);
435 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
436 IOMMU_TEST_IOTLB_DEFAULT);
439 * Invalidate the 1st iotlb entry but fail the 2nd request
440 * due to invalid iotlb_id configuration in the 2nd request.
443 inv_reqs[0].flags = 0;
444 inv_reqs[0].iotlb_id = 0;
445 inv_reqs[1].flags = 0;
446 inv_reqs[1].iotlb_id = MOCK_NESTED_DOMAIN_IOTLB_ID_MAX + 1;
447 test_err_hwpt_invalidate(EINVAL, nested_hwpt_id[0], inv_reqs,
448 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
449 sizeof(*inv_reqs), &num_inv);
450 assert(num_inv == 1);
451 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
452 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1,
453 IOMMU_TEST_IOTLB_DEFAULT);
454 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
455 IOMMU_TEST_IOTLB_DEFAULT);
456 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
457 IOMMU_TEST_IOTLB_DEFAULT);
459 /* Invalidate the 2nd iotlb entry and verify */
461 inv_reqs[0].flags = 0;
462 inv_reqs[0].iotlb_id = 1;
463 test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
464 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
465 sizeof(*inv_reqs), &num_inv);
466 assert(num_inv == 1);
467 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 0, 0);
468 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 1, 0);
469 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 2,
470 IOMMU_TEST_IOTLB_DEFAULT);
471 test_cmd_hwpt_check_iotlb(nested_hwpt_id[0], 3,
472 IOMMU_TEST_IOTLB_DEFAULT);
474 /* Invalidate the 3rd and 4th iotlb entries and verify */
476 inv_reqs[0].flags = 0;
477 inv_reqs[0].iotlb_id = 2;
478 inv_reqs[1].flags = 0;
479 inv_reqs[1].iotlb_id = 3;
480 test_cmd_hwpt_invalidate(nested_hwpt_id[0], inv_reqs,
481 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
482 sizeof(*inv_reqs), &num_inv);
483 assert(num_inv == 2);
484 test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[0], 0);
486 /* Invalidate all iotlb entries for nested_hwpt_id[1] and verify */
488 inv_reqs[0].flags = IOMMU_TEST_INVALIDATE_FLAG_ALL;
489 test_cmd_hwpt_invalidate(nested_hwpt_id[1], inv_reqs,
490 IOMMU_HWPT_INVALIDATE_DATA_SELFTEST,
491 sizeof(*inv_reqs), &num_inv);
492 assert(num_inv == 1);
493 test_cmd_hwpt_check_iotlb_all(nested_hwpt_id[1], 0);
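/*
 * The mock nested domain models four iotlb entries (ids 0..3), all starting
 * at IOMMU_TEST_IOTLB_DEFAULT. For a batched invalidation, num_inv is
 * written back with the number of requests the kernel consumed, so a
 * failure in the 2nd request leaves num_inv == 1 with only the 1st entry
 * cleared, as the checks above verify.
 */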
495 /* Attach the device to nested_hwpt_id[0], which will then be busy */
496 test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[0]);
498 _test_ioctl_destroy(self->fd, nested_hwpt_id[0]));
500 /* Switch from nested_hwpt_id[0] to nested_hwpt_id[1] */
501 test_cmd_mock_domain_replace(self->stdev_id, nested_hwpt_id[1]);
503 _test_ioctl_destroy(self->fd, nested_hwpt_id[1]));
504 test_ioctl_destroy(nested_hwpt_id[0]);
506 /* Detach from nested_hwpt_id[1] and destroy it */
507 test_cmd_mock_domain_replace(self->stdev_id, parent_hwpt_id);
508 test_ioctl_destroy(nested_hwpt_id[1]);
510 /* Detach from the parent hw_pagetable and destroy it */
511 test_cmd_mock_domain_replace(self->stdev_id, self->ioas_id);
512 test_ioctl_destroy(parent_hwpt_id);
513 test_ioctl_destroy(parent_hwpt_id_not_work);
515 test_err_hwpt_alloc(ENOENT, self->device_id, self->ioas_id, 0,
517 test_err_hwpt_alloc_nested(ENOENT, self->device_id,
520 IOMMU_HWPT_DATA_SELFTEST, &data,
522 test_err_hwpt_alloc_nested(ENOENT, self->device_id,
525 IOMMU_HWPT_DATA_SELFTEST, &data,
527 test_err_mock_domain_replace(ENOENT, self->stdev_id,
529 test_err_mock_domain_replace(ENOENT, self->stdev_id,
534 TEST_F(iommufd_ioas, hwpt_attach)
536 /* Create a device attached directly to a hwpt */
537 if (self->stdev_id) {
538 test_cmd_mock_domain(self->hwpt_id, NULL, NULL, NULL);
540 test_err_mock_domain(ENOENT, self->hwpt_id, NULL, NULL);
544 TEST_F(iommufd_ioas, ioas_area_destroy)
546 /* Adding an area does not change ability to destroy */
547 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
550 _test_ioctl_destroy(self->fd, self->ioas_id));
552 test_ioctl_destroy(self->ioas_id);
555 TEST_F(iommufd_ioas, ioas_area_auto_destroy)
559 /* Can allocate and automatically free an IOAS table with many areas */
560 for (i = 0; i != 10; i++) {
561 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
562 self->base_iova + i * PAGE_SIZE);
566 TEST_F(iommufd_ioas, get_hw_info)
568 struct iommu_test_hw_info buffer_exact;
569 struct iommu_test_hw_info_buffer_larger {
570 struct iommu_test_hw_info info;
571 uint64_t trailing_bytes;
573 struct iommu_test_hw_info_buffer_smaller {
577 if (self->device_id) {
578 /* Provide a zero-size user_buffer */
579 test_cmd_get_hw_info(self->device_id, NULL, 0);
580 /* Provide a user_buffer with exact size */
581 test_cmd_get_hw_info(self->device_id, &buffer_exact, sizeof(buffer_exact));
583 * Provide a user_buffer with size larger than the exact size to check if
584 * the kernel zeroes the trailing bytes.
586 test_cmd_get_hw_info(self->device_id, &buffer_larger, sizeof(buffer_larger));
588 * Provide a user_buffer with size smaller than the exact size to check if
589 * the fields within the size range still get updated.
591 test_cmd_get_hw_info(self->device_id, &buffer_smaller, sizeof(buffer_smaller));
593 test_err_get_hw_info(ENOENT, self->device_id,
594 &buffer_exact, sizeof(buffer_exact));
595 test_err_get_hw_info(ENOENT, self->device_id,
596 &buffer_larger, sizeof(buffer_larger));
600 TEST_F(iommufd_ioas, area)
604 /* Unmap fails if nothing is mapped */
605 for (i = 0; i != 10; i++)
606 test_err_ioctl_ioas_unmap(ENOENT, i * PAGE_SIZE, PAGE_SIZE);
609 for (i = 0; i != 10; i++)
610 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE,
611 self->base_iova + i * PAGE_SIZE);
612 for (i = 0; i != 10; i++)
613 test_ioctl_ioas_unmap(self->base_iova + i * PAGE_SIZE,
617 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE * 2,
618 self->base_iova + 16 * PAGE_SIZE);
619 test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 16 * PAGE_SIZE,
621 test_err_ioctl_ioas_unmap(ENOENT, self->base_iova + 17 * PAGE_SIZE,
625 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
626 self->base_iova + 16 * PAGE_SIZE);
627 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
628 self->base_iova + 16 * PAGE_SIZE);
629 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE,
630 self->base_iova + 17 * PAGE_SIZE);
631 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 2,
632 self->base_iova + 15 * PAGE_SIZE);
633 test_err_ioctl_ioas_map_fixed(EEXIST, buffer, PAGE_SIZE * 3,
634 self->base_iova + 15 * PAGE_SIZE);
636 /* unmap all works */
637 test_ioctl_ioas_unmap(0, UINT64_MAX);
639 /* Unmap all succeeds on an empty IOAS */
640 test_ioctl_ioas_unmap(0, UINT64_MAX);
643 TEST_F(iommufd_ioas, unmap_fully_contained_areas)
648 /* Give no_domain some space to rewind base_iova */
649 self->base_iova += 4 * PAGE_SIZE;
651 for (i = 0; i != 4; i++)
652 test_ioctl_ioas_map_fixed(buffer, 8 * PAGE_SIZE,
653 self->base_iova + i * 16 * PAGE_SIZE);
655 /* Unmapping an area that is not fully contained doesn't work */
656 test_err_ioctl_ioas_unmap(ENOENT, self->base_iova - 4 * PAGE_SIZE,
658 test_err_ioctl_ioas_unmap(ENOENT,
659 self->base_iova + 3 * 16 * PAGE_SIZE +
660 8 * PAGE_SIZE - 4 * PAGE_SIZE,
663 /* Unmap fully contained areas works */
664 ASSERT_EQ(0, _test_ioctl_ioas_unmap(self->fd, self->ioas_id,
665 self->base_iova - 4 * PAGE_SIZE,
666 3 * 16 * PAGE_SIZE + 8 * PAGE_SIZE +
669 ASSERT_EQ(32 * PAGE_SIZE, unmap_len);
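/*
 * Four areas of 8 pages each were mapped, so the wide unmap spanning all of
 * them (plus slack on both ends) reports exactly 32 pages unmapped; ranges
 * that only partially cover an area were rejected with ENOENT above.
 */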
672 TEST_F(iommufd_ioas, area_auto_iova)
674 struct iommu_test_cmd test_cmd = {
675 .size = sizeof(test_cmd),
676 .op = IOMMU_TEST_OP_ADD_RESERVED,
678 .add_reserved = { .start = PAGE_SIZE * 4,
679 .length = PAGE_SIZE * 100 },
681 struct iommu_iova_range ranges[1] = {};
682 struct iommu_ioas_allow_iovas allow_cmd = {
683 .size = sizeof(allow_cmd),
684 .ioas_id = self->ioas_id,
686 .allowed_iovas = (uintptr_t)ranges,
691 /* Simple 4k pages */
692 for (i = 0; i != 10; i++)
693 test_ioctl_ioas_map(buffer, PAGE_SIZE, &iovas[i]);
694 for (i = 0; i != 10; i++)
695 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE);
697 /* Kernel automatically aligns IOVAs properly */
698 for (i = 0; i != 10; i++) {
699 size_t length = PAGE_SIZE * (i + 1);
701 if (self->stdev_id) {
702 test_ioctl_ioas_map(buffer, length, &iovas[i]);
704 test_ioctl_ioas_map((void *)(1UL << 31), length,
707 EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
709 for (i = 0; i != 10; i++)
710 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
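/*
 * The alignment check above uses 1UL << (ffs(length) - 1), i.e. the lowest
 * set bit of the length: a 3-page mapping only has to be page aligned
 * (lowest set bit of 3 pages is 1 page), while a 4-page mapping must start
 * on a 4-page boundary.
 */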
712 /* Avoids a reserved region */
714 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
716 for (i = 0; i != 10; i++) {
717 size_t length = PAGE_SIZE * (i + 1);
719 test_ioctl_ioas_map(buffer, length, &iovas[i]);
720 EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
722 iovas[i] > test_cmd.add_reserved.start &&
724 test_cmd.add_reserved.start +
725 test_cmd.add_reserved.length);
727 for (i = 0; i != 10; i++)
728 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
730 /* Allowed region intersects with a reserved region */
731 ranges[0].start = PAGE_SIZE;
732 ranges[0].last = PAGE_SIZE * 600;
733 EXPECT_ERRNO(EADDRINUSE,
734 ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
736 /* Allocate from an allowed region */
737 if (self->stdev_id) {
738 ranges[0].start = MOCK_APERTURE_START + PAGE_SIZE;
739 ranges[0].last = MOCK_APERTURE_START + PAGE_SIZE * 600 - 1;
741 ranges[0].start = PAGE_SIZE * 200;
742 ranges[0].last = PAGE_SIZE * 600 - 1;
744 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
745 for (i = 0; i != 10; i++) {
746 size_t length = PAGE_SIZE * (i + 1);
748 test_ioctl_ioas_map(buffer, length, &iovas[i]);
749 EXPECT_EQ(0, iovas[i] % (1UL << (ffs(length) - 1)));
750 EXPECT_EQ(true, iovas[i] >= ranges[0].start);
751 EXPECT_EQ(true, iovas[i] <= ranges[0].last);
752 EXPECT_EQ(true, iovas[i] + length > ranges[0].start);
753 EXPECT_EQ(true, iovas[i] + length <= ranges[0].last + 1);
755 for (i = 0; i != 10; i++)
756 test_ioctl_ioas_unmap(iovas[i], PAGE_SIZE * (i + 1));
759 TEST_F(iommufd_ioas, area_allowed)
761 struct iommu_test_cmd test_cmd = {
762 .size = sizeof(test_cmd),
763 .op = IOMMU_TEST_OP_ADD_RESERVED,
765 .add_reserved = { .start = PAGE_SIZE * 4,
766 .length = PAGE_SIZE * 100 },
768 struct iommu_iova_range ranges[1] = {};
769 struct iommu_ioas_allow_iovas allow_cmd = {
770 .size = sizeof(allow_cmd),
771 .ioas_id = self->ioas_id,
773 .allowed_iovas = (uintptr_t)ranges,
776 /* Reserved intersects an allowed */
777 allow_cmd.num_iovas = 1;
778 ranges[0].start = self->base_iova;
779 ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
780 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
781 test_cmd.add_reserved.start = ranges[0].start + PAGE_SIZE;
782 test_cmd.add_reserved.length = PAGE_SIZE;
783 EXPECT_ERRNO(EADDRINUSE,
785 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
787 allow_cmd.num_iovas = 0;
788 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
790 /* Allowed intersects a reserved */
792 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
794 allow_cmd.num_iovas = 1;
795 ranges[0].start = self->base_iova;
796 ranges[0].last = ranges[0].start + PAGE_SIZE * 600;
797 EXPECT_ERRNO(EADDRINUSE,
798 ioctl(self->fd, IOMMU_IOAS_ALLOW_IOVAS, &allow_cmd));
801 TEST_F(iommufd_ioas, copy_area)
803 struct iommu_ioas_copy copy_cmd = {
804 .size = sizeof(copy_cmd),
805 .flags = IOMMU_IOAS_MAP_FIXED_IOVA,
806 .dst_ioas_id = self->ioas_id,
807 .src_ioas_id = self->ioas_id,
811 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, self->base_iova);
813 /* Copy inside a single IOAS */
814 copy_cmd.src_iova = self->base_iova;
815 copy_cmd.dst_iova = self->base_iova + PAGE_SIZE;
816 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
818 /* Copy between IOAS's */
819 copy_cmd.src_iova = self->base_iova;
820 copy_cmd.dst_iova = 0;
821 test_ioctl_ioas_alloc(&copy_cmd.dst_ioas_id);
822 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
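/*
 * IOMMU_IOAS_COPY duplicates an existing mapping rather than pinning the
 * user memory again: the same buffer ends up mapped at a second IOVA in
 * this IOAS and then at IOVA 0 of a freshly allocated IOAS, reusing the
 * pages already pinned for the source mapping.
 */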
825 TEST_F(iommufd_ioas, iova_ranges)
827 struct iommu_test_cmd test_cmd = {
828 .size = sizeof(test_cmd),
829 .op = IOMMU_TEST_OP_ADD_RESERVED,
831 .add_reserved = { .start = PAGE_SIZE, .length = PAGE_SIZE },
833 struct iommu_iova_range *ranges = buffer;
834 struct iommu_ioas_iova_ranges ranges_cmd = {
835 .size = sizeof(ranges_cmd),
836 .ioas_id = self->ioas_id,
837 .num_iovas = BUFFER_SIZE / sizeof(*ranges),
838 .allowed_iovas = (uintptr_t)ranges,
841 /* Range can be read */
842 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
843 EXPECT_EQ(1, ranges_cmd.num_iovas);
844 if (!self->stdev_id) {
845 EXPECT_EQ(0, ranges[0].start);
846 EXPECT_EQ(SIZE_MAX, ranges[0].last);
847 EXPECT_EQ(1, ranges_cmd.out_iova_alignment);
849 EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
850 EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
851 EXPECT_EQ(MOCK_PAGE_SIZE, ranges_cmd.out_iova_alignment);
854 /* Buffer too small */
855 memset(ranges, 0, BUFFER_SIZE);
856 ranges_cmd.num_iovas = 0;
857 EXPECT_ERRNO(EMSGSIZE,
858 ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
859 EXPECT_EQ(1, ranges_cmd.num_iovas);
860 EXPECT_EQ(0, ranges[0].start);
861 EXPECT_EQ(0, ranges[0].last);
865 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ADD_RESERVED),
867 ranges_cmd.num_iovas = BUFFER_SIZE / sizeof(*ranges);
868 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
869 if (!self->stdev_id) {
870 EXPECT_EQ(2, ranges_cmd.num_iovas);
871 EXPECT_EQ(0, ranges[0].start);
872 EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
873 EXPECT_EQ(PAGE_SIZE * 2, ranges[1].start);
874 EXPECT_EQ(SIZE_MAX, ranges[1].last);
876 EXPECT_EQ(1, ranges_cmd.num_iovas);
877 EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
878 EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
881 /* Buffer too small */
882 memset(ranges, 0, BUFFER_SIZE);
883 ranges_cmd.num_iovas = 1;
884 if (!self->stdev_id) {
885 EXPECT_ERRNO(EMSGSIZE, ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES,
887 EXPECT_EQ(2, ranges_cmd.num_iovas);
888 EXPECT_EQ(0, ranges[0].start);
889 EXPECT_EQ(PAGE_SIZE - 1, ranges[0].last);
892 ioctl(self->fd, IOMMU_IOAS_IOVA_RANGES, &ranges_cmd));
893 EXPECT_EQ(1, ranges_cmd.num_iovas);
894 EXPECT_EQ(MOCK_APERTURE_START, ranges[0].start);
895 EXPECT_EQ(MOCK_APERTURE_LAST, ranges[0].last);
897 EXPECT_EQ(0, ranges[1].start);
898 EXPECT_EQ(0, ranges[1].last);
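/*
 * IOMMU_IOAS_IOVA_RANGES truncates its output to the caller-supplied
 * num_iovas: when the array is too small it fails with EMSGSIZE but still
 * writes back the number of ranges that exist, and only the entries that
 * fit are filled, which is why ranges[1] stays zeroed here.
 */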
901 TEST_F(iommufd_ioas, access_domain_destroy)
903 struct iommu_test_cmd access_cmd = {
904 .size = sizeof(access_cmd),
905 .op = IOMMU_TEST_OP_ACCESS_PAGES,
906 .access_pages = { .iova = self->base_iova + PAGE_SIZE,
907 .length = PAGE_SIZE},
909 size_t buf_size = 2 * HUGEPAGE_SIZE;
912 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
913 MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
915 ASSERT_NE(MAP_FAILED, buf);
916 test_ioctl_ioas_map_fixed(buf, buf_size, self->base_iova);
918 test_cmd_create_access(self->ioas_id, &access_cmd.id,
919 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
920 access_cmd.access_pages.uptr = (uintptr_t)buf + PAGE_SIZE;
922 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
925 /* Causes a complicated unpin across a huge page boundary */
927 test_ioctl_destroy(self->stdev_id);
929 test_cmd_destroy_access_pages(
930 access_cmd.id, access_cmd.access_pages.out_access_pages_id);
931 test_cmd_destroy_access(access_cmd.id);
932 ASSERT_EQ(0, munmap(buf, buf_size));
935 TEST_F(iommufd_ioas, access_pin)
937 struct iommu_test_cmd access_cmd = {
938 .size = sizeof(access_cmd),
939 .op = IOMMU_TEST_OP_ACCESS_PAGES,
940 .access_pages = { .iova = MOCK_APERTURE_START,
941 .length = BUFFER_SIZE,
942 .uptr = (uintptr_t)buffer },
944 struct iommu_test_cmd check_map_cmd = {
945 .size = sizeof(check_map_cmd),
946 .op = IOMMU_TEST_OP_MD_CHECK_MAP,
947 .check_map = { .iova = MOCK_APERTURE_START,
948 .length = BUFFER_SIZE,
949 .uptr = (uintptr_t)buffer },
951 uint32_t access_pages_id;
954 test_cmd_create_access(self->ioas_id, &access_cmd.id,
955 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
957 for (npages = 1; npages < BUFFER_SIZE / PAGE_SIZE; npages++) {
958 uint32_t mock_stdev_id;
959 uint32_t mock_hwpt_id;
961 access_cmd.access_pages.length = npages * PAGE_SIZE;
963 /* Single map/unmap */
964 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
965 MOCK_APERTURE_START);
966 ASSERT_EQ(0, ioctl(self->fd,
967 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
969 test_cmd_destroy_access_pages(
971 access_cmd.access_pages.out_access_pages_id);
974 ASSERT_EQ(0, ioctl(self->fd,
975 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
977 access_pages_id = access_cmd.access_pages.out_access_pages_id;
978 ASSERT_EQ(0, ioctl(self->fd,
979 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
981 test_cmd_destroy_access_pages(
983 access_cmd.access_pages.out_access_pages_id);
984 test_cmd_destroy_access_pages(access_cmd.id, access_pages_id);
986 /* Add/remove a domain with a user */
987 ASSERT_EQ(0, ioctl(self->fd,
988 _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
990 test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
991 &mock_hwpt_id, NULL);
992 check_map_cmd.id = mock_hwpt_id;
993 ASSERT_EQ(0, ioctl(self->fd,
994 _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP),
997 test_ioctl_destroy(mock_stdev_id);
998 test_cmd_destroy_access_pages(
1000 access_cmd.access_pages.out_access_pages_id);
1002 test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
1004 test_cmd_destroy_access(access_cmd.id);
1007 TEST_F(iommufd_ioas, access_pin_unmap)
1009 struct iommu_test_cmd access_pages_cmd = {
1010 .size = sizeof(access_pages_cmd),
1011 .op = IOMMU_TEST_OP_ACCESS_PAGES,
1012 .access_pages = { .iova = MOCK_APERTURE_START,
1013 .length = BUFFER_SIZE,
1014 .uptr = (uintptr_t)buffer },
1017 test_cmd_create_access(self->ioas_id, &access_pages_cmd.id,
1018 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1019 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, MOCK_APERTURE_START);
1021 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1022 &access_pages_cmd));
1024 /* Trigger the unmap op */
1025 test_ioctl_ioas_unmap(MOCK_APERTURE_START, BUFFER_SIZE);
1027 /* kernel removed the item for us */
1028 test_err_destroy_access_pages(
1029 ENOENT, access_pages_cmd.id,
1030 access_pages_cmd.access_pages.out_access_pages_id);
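/*
 * check_access_rw() below sweeps IOVAs straddling the first page boundary
 * with every length from 1 to sizeof(tmp), reading through the access and
 * comparing against the source buffer, then writing through the access with
 * MOCK_ACCESS_RW_WRITE and comparing again, and finally does one
 * BUFFER_SIZE transfer to cover the multi-page path.
 */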
1033 static void check_access_rw(struct __test_metadata *_metadata, int fd,
1034 unsigned int access_id, uint64_t iova,
1035 unsigned int def_flags)
1038 struct iommu_test_cmd access_cmd = {
1039 .size = sizeof(access_cmd),
1040 .op = IOMMU_TEST_OP_ACCESS_RW,
1042 .access_rw = { .uptr = (uintptr_t)tmp },
1044 uint16_t *buffer16 = buffer;
1048 for (i = 0; i != BUFFER_SIZE / sizeof(*buffer16); i++)
1049 buffer16[i] = rand();
1051 for (access_cmd.access_rw.iova = iova + PAGE_SIZE - 50;
1052 access_cmd.access_rw.iova < iova + PAGE_SIZE + 50;
1053 access_cmd.access_rw.iova++) {
1054 for (access_cmd.access_rw.length = 1;
1055 access_cmd.access_rw.length < sizeof(tmp);
1056 access_cmd.access_rw.length++) {
1057 access_cmd.access_rw.flags = def_flags;
1058 ASSERT_EQ(0, ioctl(fd,
1060 IOMMU_TEST_OP_ACCESS_RW),
1063 memcmp(buffer + (access_cmd.access_rw.iova -
1065 tmp, access_cmd.access_rw.length));
1067 for (i = 0; i != ARRAY_SIZE(tmp); i++)
1069 access_cmd.access_rw.flags = def_flags |
1070 MOCK_ACCESS_RW_WRITE;
1071 ASSERT_EQ(0, ioctl(fd,
1073 IOMMU_TEST_OP_ACCESS_RW),
1076 memcmp(buffer + (access_cmd.access_rw.iova -
1078 tmp, access_cmd.access_rw.length));
1082 /* Multi-page test */
1083 tmp2 = malloc(BUFFER_SIZE);
1084 ASSERT_NE(NULL, tmp2);
1085 access_cmd.access_rw.iova = iova;
1086 access_cmd.access_rw.length = BUFFER_SIZE;
1087 access_cmd.access_rw.flags = def_flags;
1088 access_cmd.access_rw.uptr = (uintptr_t)tmp2;
1089 ASSERT_EQ(0, ioctl(fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_RW),
1091 ASSERT_EQ(0, memcmp(buffer, tmp2, access_cmd.access_rw.length));
1095 TEST_F(iommufd_ioas, access_rw)
1100 test_cmd_create_access(self->ioas_id, &access_id, 0);
1101 test_ioctl_ioas_map(buffer, BUFFER_SIZE, &iova);
1102 check_access_rw(_metadata, self->fd, access_id, iova, 0);
1103 check_access_rw(_metadata, self->fd, access_id, iova,
1104 MOCK_ACCESS_RW_SLOW_PATH);
1105 test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
1106 test_cmd_destroy_access(access_id);
1109 TEST_F(iommufd_ioas, access_rw_unaligned)
1114 test_cmd_create_access(self->ioas_id, &access_id, 0);
1116 /* Unaligned pages */
1117 iova = self->base_iova + MOCK_PAGE_SIZE;
1118 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE, iova);
1119 check_access_rw(_metadata, self->fd, access_id, iova, 0);
1120 test_ioctl_ioas_unmap(iova, BUFFER_SIZE);
1121 test_cmd_destroy_access(access_id);
1124 TEST_F(iommufd_ioas, fork_gone)
1129 test_cmd_create_access(self->ioas_id, &access_id, 0);
1131 /* Create a mapping with a different mm */
1134 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
1135 MOCK_APERTURE_START);
1138 ASSERT_NE(-1, child);
1139 ASSERT_EQ(child, waitpid(child, NULL, 0));
1141 if (self->stdev_id) {
1143 * If a domain already existed then everything was pinned within
1144 * the fork, so this copies from one domain to another.
1146 test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
1147 check_access_rw(_metadata, self->fd, access_id,
1148 MOCK_APERTURE_START, 0);
1152 * Otherwise we need to actually pin pages which can't happen
1153 * since the fork is gone.
1155 test_err_mock_domain(EFAULT, self->ioas_id, NULL, NULL);
1158 test_cmd_destroy_access(access_id);
1161 TEST_F(iommufd_ioas, fork_present)
1169 test_cmd_create_access(self->ioas_id, &access_id, 0);
1171 ASSERT_EQ(0, pipe2(pipefds, O_CLOEXEC));
1172 efd = eventfd(0, EFD_CLOEXEC);
1175 /* Create a mapping with a different mm */
1182 test_ioctl_ioas_map_fixed(buffer, BUFFER_SIZE,
1183 MOCK_APERTURE_START);
1184 if (write(efd, &one, sizeof(one)) != sizeof(one))
1186 if (read(pipefds[0], &iova, 1) != 1)
1191 ASSERT_NE(-1, child);
1192 ASSERT_EQ(8, read(efd, &tmp, sizeof(tmp)));
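/*
 * The eventfd/pipe pair is only for synchronization: the child maps the
 * buffer, signals the eventfd, then blocks reading the pipe so its mm stays
 * alive while the parent pins and accesses the pages; closing the write end
 * of the pipe below lets the child exit.
 */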
1194 /* Read pages from the remote process */
1195 test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
1196 check_access_rw(_metadata, self->fd, access_id, MOCK_APERTURE_START, 0);
1198 ASSERT_EQ(0, close(pipefds[1]));
1199 ASSERT_EQ(child, waitpid(child, NULL, 0));
1201 test_cmd_destroy_access(access_id);
1204 TEST_F(iommufd_ioas, ioas_option_huge_pages)
1206 struct iommu_option cmd = {
1207 .size = sizeof(cmd),
1208 .option_id = IOMMU_OPTION_HUGE_PAGES,
1209 .op = IOMMU_OPTION_OP_GET,
1211 .object_id = self->ioas_id,
1214 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1215 ASSERT_EQ(1, cmd.val64);
1217 cmd.op = IOMMU_OPTION_OP_SET;
1219 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1221 cmd.op = IOMMU_OPTION_OP_GET;
1223 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1224 ASSERT_EQ(0, cmd.val64);
1226 cmd.op = IOMMU_OPTION_OP_SET;
1228 EXPECT_ERRNO(EINVAL, ioctl(self->fd, IOMMU_OPTION, &cmd));
1230 cmd.op = IOMMU_OPTION_OP_SET;
1232 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1235 TEST_F(iommufd_ioas, ioas_iova_alloc)
1237 unsigned int length;
1240 for (length = 1; length != PAGE_SIZE * 2; length++) {
1241 if (variant->mock_domains && (length % MOCK_PAGE_SIZE)) {
1242 test_err_ioctl_ioas_map(EINVAL, buffer, length, &iova);
1244 test_ioctl_ioas_map(buffer, length, &iova);
1245 test_ioctl_ioas_unmap(iova, length);
1250 TEST_F(iommufd_ioas, ioas_align_change)
1252 struct iommu_option cmd = {
1253 .size = sizeof(cmd),
1254 .option_id = IOMMU_OPTION_HUGE_PAGES,
1255 .op = IOMMU_OPTION_OP_SET,
1256 .object_id = self->ioas_id,
1257 /* 0 means everything must be aligned to PAGE_SIZE */
1262 * We cannot upgrade the alignment using OPTION_HUGE_PAGES when a domain
1263 * and map are present.
1265 if (variant->mock_domains)
1269 * We can upgrade to PAGE_SIZE alignment when things are aligned right
1271 test_ioctl_ioas_map_fixed(buffer, PAGE_SIZE, MOCK_APERTURE_START);
1272 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1274 /* Misalignment is rejected at map time */
1275 test_err_ioctl_ioas_map_fixed(EINVAL, buffer + MOCK_PAGE_SIZE,
1277 MOCK_APERTURE_START + PAGE_SIZE);
1278 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1280 /* Reduce alignment */
1282 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
1284 /* Confirm misalignment is rejected during alignment upgrade */
1285 test_ioctl_ioas_map_fixed(buffer + MOCK_PAGE_SIZE, PAGE_SIZE,
1286 MOCK_APERTURE_START + PAGE_SIZE);
1288 EXPECT_ERRNO(EADDRINUSE, ioctl(self->fd, IOMMU_OPTION, &cmd));
1290 test_ioctl_ioas_unmap(MOCK_APERTURE_START + PAGE_SIZE, PAGE_SIZE);
1291 test_ioctl_ioas_unmap(MOCK_APERTURE_START, PAGE_SIZE);
1294 TEST_F(iommufd_ioas, copy_sweep)
1296 struct iommu_ioas_copy copy_cmd = {
1297 .size = sizeof(copy_cmd),
1298 .flags = IOMMU_IOAS_MAP_FIXED_IOVA,
1299 .src_ioas_id = self->ioas_id,
1300 .dst_iova = MOCK_APERTURE_START,
1301 .length = MOCK_PAGE_SIZE,
1303 unsigned int dst_ioas_id;
1307 test_ioctl_ioas_alloc(&dst_ioas_id);
1308 copy_cmd.dst_ioas_id = dst_ioas_id;
1310 if (variant->mock_domains)
1311 last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 1;
1313 last_iova = MOCK_APERTURE_START + BUFFER_SIZE - 2;
1315 test_ioctl_ioas_map_fixed(buffer, last_iova - MOCK_APERTURE_START + 1,
1316 MOCK_APERTURE_START);
1318 for (iova = MOCK_APERTURE_START - PAGE_SIZE; iova <= last_iova;
1320 copy_cmd.src_iova = iova;
1321 if (iova < MOCK_APERTURE_START ||
1322 iova + copy_cmd.length - 1 > last_iova) {
1323 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_IOAS_COPY,
1327 ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1328 test_ioctl_ioas_unmap_id(dst_ioas_id, copy_cmd.dst_iova,
1333 test_ioctl_destroy(dst_ioas_id);
1336 FIXTURE(iommufd_mock_domain)
1341 uint32_t hwpt_ids[2];
1342 uint32_t stdev_ids[2];
1343 uint32_t idev_ids[2];
1345 size_t mmap_buf_size;
1348 FIXTURE_VARIANT(iommufd_mock_domain)
1350 unsigned int mock_domains;
1354 FIXTURE_SETUP(iommufd_mock_domain)
1358 self->fd = open("/dev/iommu", O_RDWR);
1359 ASSERT_NE(-1, self->fd);
1360 test_ioctl_ioas_alloc(&self->ioas_id);
1362 ASSERT_GE(ARRAY_SIZE(self->hwpt_ids), variant->mock_domains);
1364 for (i = 0; i != variant->mock_domains; i++)
1365 test_cmd_mock_domain(self->ioas_id, &self->stdev_ids[i],
1366 &self->hwpt_ids[i], &self->idev_ids[i]);
1367 self->hwpt_id = self->hwpt_ids[0];
1369 self->mmap_flags = MAP_SHARED | MAP_ANONYMOUS;
1370 self->mmap_buf_size = PAGE_SIZE * 8;
1371 if (variant->hugepages) {
1373 * MAP_POPULATE will cause the kernel to fail mmap if THPs are
1376 self->mmap_flags |= MAP_HUGETLB | MAP_POPULATE;
1377 self->mmap_buf_size = HUGEPAGE_SIZE * 2;
1381 FIXTURE_TEARDOWN(iommufd_mock_domain)
1383 teardown_iommufd(self->fd, _metadata);
1386 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain)
1392 FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains)
1398 FIXTURE_VARIANT_ADD(iommufd_mock_domain, one_domain_hugepage)
1404 FIXTURE_VARIANT_ADD(iommufd_mock_domain, two_domains_hugepage)
1410 /* Have the kernel check that the user pages made it to the iommu_domain */
1411 #define check_mock_iova(_ptr, _iova, _length) \
1413 struct iommu_test_cmd check_map_cmd = { \
1414 .size = sizeof(check_map_cmd), \
1415 .op = IOMMU_TEST_OP_MD_CHECK_MAP, \
1416 .id = self->hwpt_id, \
1417 .check_map = { .iova = _iova, \
1418 .length = _length, \
1419 .uptr = (uintptr_t)(_ptr) }, \
1423 _IOMMU_TEST_CMD(IOMMU_TEST_OP_MD_CHECK_MAP), \
1425 if (self->hwpt_ids[1]) { \
1426 check_map_cmd.id = self->hwpt_ids[1]; \
1430 IOMMU_TEST_OP_MD_CHECK_MAP), \
1435 TEST_F(iommufd_mock_domain, basic)
1437 size_t buf_size = self->mmap_buf_size;
1441 /* Simple one page map */
1442 test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
1443 check_mock_iova(buffer, iova, PAGE_SIZE);
1445 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1447 ASSERT_NE(MAP_FAILED, buf);
1449 /* EFAULT halfway through the mapping */
1450 ASSERT_EQ(0, munmap(buf + buf_size / 2, buf_size / 2));
1451 test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
1453 /* EFAULT on first page */
1454 ASSERT_EQ(0, munmap(buf, buf_size / 2));
1455 test_err_ioctl_ioas_map(EFAULT, buf, buf_size, &iova);
1458 TEST_F(iommufd_mock_domain, ro_unshare)
1464 fd = open("/proc/self/exe", O_RDONLY);
1467 buf = mmap(0, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
1468 ASSERT_NE(MAP_FAILED, buf);
1472 * There have been lots of changes to the "unshare" mechanism in
1473 * get_user_pages(); make sure it works right. The write to the page
1474 * after we map it for reading should not change the assigned PFN.
1477 _test_ioctl_ioas_map(self->fd, self->ioas_id, buf, PAGE_SIZE,
1478 &iova, IOMMU_IOAS_MAP_READABLE));
1479 check_mock_iova(buf, iova, PAGE_SIZE);
1480 memset(buf, 1, PAGE_SIZE);
1481 check_mock_iova(buf, iova, PAGE_SIZE);
1482 ASSERT_EQ(0, munmap(buf, PAGE_SIZE));
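/*
 * If the write had triggered a COW copy (i.e. GUP had not unshared the page
 * when it was pinned for reading), the user address would now resolve to a
 * different PFN than the one the mock domain recorded and the second
 * check_mock_iova() would fail.
 */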
1485 TEST_F(iommufd_mock_domain, all_aligns)
1487 size_t test_step = variant->hugepages ? (self->mmap_buf_size / 16) :
1489 size_t buf_size = self->mmap_buf_size;
1494 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1496 ASSERT_NE(MAP_FAILED, buf);
1497 check_refs(buf, buf_size, 0);
1500 * Map every combination of page size and alignment within a big region,
1501 * fewer combinations for the hugepage case as it takes so long to finish.
1503 for (start = 0; start < buf_size; start += test_step) {
1504 if (variant->hugepages)
1507 end = start + MOCK_PAGE_SIZE;
1508 for (; end < buf_size; end += MOCK_PAGE_SIZE) {
1509 size_t length = end - start;
1512 test_ioctl_ioas_map(buf + start, length, &iova);
1513 check_mock_iova(buf + start, iova, length);
1514 check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1515 end / PAGE_SIZE * PAGE_SIZE -
1516 start / PAGE_SIZE * PAGE_SIZE,
1519 test_ioctl_ioas_unmap(iova, length);
1522 check_refs(buf, buf_size, 0);
1523 ASSERT_EQ(0, munmap(buf, buf_size));
1526 TEST_F(iommufd_mock_domain, all_aligns_copy)
1528 size_t test_step = variant->hugepages ? self->mmap_buf_size / 16 :
1530 size_t buf_size = self->mmap_buf_size;
1535 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE, self->mmap_flags, -1,
1537 ASSERT_NE(MAP_FAILED, buf);
1538 check_refs(buf, buf_size, 0);
1541 * Map every combination of page size and alignment within a big region,
1542 * fewer combinations for the hugepage case as it takes so long to finish.
1544 for (start = 0; start < buf_size; start += test_step) {
1545 if (variant->hugepages)
1548 end = start + MOCK_PAGE_SIZE;
1549 for (; end < buf_size; end += MOCK_PAGE_SIZE) {
1550 size_t length = end - start;
1551 unsigned int old_id;
1552 uint32_t mock_stdev_id;
1555 test_ioctl_ioas_map(buf + start, length, &iova);
1557 /* Add and destroy a domain while the area exists */
1558 old_id = self->hwpt_ids[1];
1559 test_cmd_mock_domain(self->ioas_id, &mock_stdev_id,
1560 &self->hwpt_ids[1], NULL);
1562 check_mock_iova(buf + start, iova, length);
1563 check_refs(buf + start / PAGE_SIZE * PAGE_SIZE,
1564 end / PAGE_SIZE * PAGE_SIZE -
1565 start / PAGE_SIZE * PAGE_SIZE,
1568 test_ioctl_destroy(mock_stdev_id);
1569 self->hwpt_ids[1] = old_id;
1571 test_ioctl_ioas_unmap(iova, length);
1574 check_refs(buf, buf_size, 0);
1575 ASSERT_EQ(0, munmap(buf, buf_size));
1578 TEST_F(iommufd_mock_domain, user_copy)
1580 struct iommu_test_cmd access_cmd = {
1581 .size = sizeof(access_cmd),
1582 .op = IOMMU_TEST_OP_ACCESS_PAGES,
1583 .access_pages = { .length = BUFFER_SIZE,
1584 .uptr = (uintptr_t)buffer },
1586 struct iommu_ioas_copy copy_cmd = {
1587 .size = sizeof(copy_cmd),
1588 .flags = IOMMU_IOAS_MAP_FIXED_IOVA,
1589 .dst_ioas_id = self->ioas_id,
1590 .dst_iova = MOCK_APERTURE_START,
1591 .length = BUFFER_SIZE,
1593 struct iommu_ioas_unmap unmap_cmd = {
1594 .size = sizeof(unmap_cmd),
1595 .ioas_id = self->ioas_id,
1596 .iova = MOCK_APERTURE_START,
1597 .length = BUFFER_SIZE,
1599 unsigned int new_ioas_id, ioas_id;
1601 /* Pin the pages in an IOAS with no domains then copy to an IOAS with domains */
1602 test_ioctl_ioas_alloc(&ioas_id);
1603 test_ioctl_ioas_map_id(ioas_id, buffer, BUFFER_SIZE,
1604 &copy_cmd.src_iova);
1606 test_cmd_create_access(ioas_id, &access_cmd.id,
1607 MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES);
1609 access_cmd.access_pages.iova = copy_cmd.src_iova;
1611 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1613 copy_cmd.src_ioas_id = ioas_id;
1614 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1615 check_mock_iova(buffer, MOCK_APERTURE_START, BUFFER_SIZE);
1617 /* Now replace the ioas with a new one */
1618 test_ioctl_ioas_alloc(&new_ioas_id);
1619 test_ioctl_ioas_map_id(new_ioas_id, buffer, BUFFER_SIZE,
1620 &copy_cmd.src_iova);
1621 test_cmd_access_replace_ioas(access_cmd.id, new_ioas_id);
1623 /* Destroy the old ioas and clean up the copied mapping */
1624 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_UNMAP, &unmap_cmd));
1625 test_ioctl_destroy(ioas_id);
1627 /* Then run the same test again with the new ioas */
1628 access_cmd.access_pages.iova = copy_cmd.src_iova;
1630 ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_ACCESS_PAGES),
1632 copy_cmd.src_ioas_id = new_ioas_id;
1633 ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_COPY, &copy_cmd));
1634 check_mock_iova(buffer, MOCK_APERTURE_START, BUFFER_SIZE);
1636 test_cmd_destroy_access_pages(
1637 access_cmd.id, access_cmd.access_pages.out_access_pages_id);
1638 test_cmd_destroy_access(access_cmd.id);
1640 test_ioctl_destroy(new_ioas_id);
1643 TEST_F(iommufd_mock_domain, replace)
1647 test_ioctl_ioas_alloc(&ioas_id);
1649 test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
1652 * Replacing the IOAS causes the prior HWPT to be deallocated, thus we
1653 * should get ENOENT when we try to use it.
1655 if (variant->mock_domains == 1)
1656 test_err_mock_domain_replace(ENOENT, self->stdev_ids[0],
1659 test_cmd_mock_domain_replace(self->stdev_ids[0], ioas_id);
1660 if (variant->mock_domains >= 2) {
1661 test_cmd_mock_domain_replace(self->stdev_ids[0],
1663 test_cmd_mock_domain_replace(self->stdev_ids[0],
1665 test_cmd_mock_domain_replace(self->stdev_ids[0],
1669 test_cmd_mock_domain_replace(self->stdev_ids[0], self->ioas_id);
1670 test_ioctl_destroy(ioas_id);
1673 TEST_F(iommufd_mock_domain, alloc_hwpt)
1677 for (i = 0; i != variant->mock_domains; i++) {
1678 uint32_t hwpt_id[2];
1681 test_err_hwpt_alloc(EOPNOTSUPP,
1682 self->idev_ids[i], self->ioas_id,
1683 ~IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[0]);
1684 test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
1686 test_cmd_hwpt_alloc(self->idev_ids[i], self->ioas_id,
1687 IOMMU_HWPT_ALLOC_NEST_PARENT, &hwpt_id[1]);
1689 /* Do a hw_pagetable rotation test */
1690 test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[0]);
1691 EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[0]));
1692 test_cmd_mock_domain_replace(self->stdev_ids[i], hwpt_id[1]);
1693 EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hwpt_id[1]));
1694 test_cmd_mock_domain_replace(self->stdev_ids[i], self->ioas_id);
1695 test_ioctl_destroy(hwpt_id[1]);
1697 test_cmd_mock_domain(hwpt_id[0], &stddev_id, NULL, NULL);
1698 test_ioctl_destroy(stddev_id);
1699 test_ioctl_destroy(hwpt_id[0]);
1703 FIXTURE(iommufd_dirty_tracking)
1710 unsigned long page_size;
1711 unsigned long bitmap_size;
1716 FIXTURE_VARIANT(iommufd_dirty_tracking)
1718 unsigned long buffer_size;
1721 FIXTURE_SETUP(iommufd_dirty_tracking)
1726 self->fd = open("/dev/iommu", O_RDWR);
1727 ASSERT_NE(-1, self->fd);
1729 rc = posix_memalign(&self->buffer, HUGEPAGE_SIZE, variant->buffer_size);
1730 if (rc || !self->buffer) {
1731 SKIP(return, "Skipping buffer_size=%lu due to errno=%d",
1732 variant->buffer_size, rc);
1735 assert((uintptr_t)self->buffer % HUGEPAGE_SIZE == 0);
1736 vrc = mmap(self->buffer, variant->buffer_size, PROT_READ | PROT_WRITE,
1737 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
1738 assert(vrc == self->buffer);
1740 self->page_size = MOCK_PAGE_SIZE;
1742 variant->buffer_size / self->page_size / BITS_PER_BYTE;
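/*
 * One bit of bitmap per MOCK_PAGE_SIZE page of buffer. As a rough worked
 * example, assuming a 4KiB PAGE_SIZE (so MOCK_PAGE_SIZE is 2KiB), the 128k
 * variant comes out to 128K / 2K / 8 = 8 bytes of bitmap. (The variant
 * comments below appear to describe the ranges in PAGE_SIZE terms instead.)
 */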
1744 /* Provision with an extra (MOCK_PAGE_SIZE) for the unaligned case */
1745 rc = posix_memalign(&self->bitmap, PAGE_SIZE,
1746 self->bitmap_size + MOCK_PAGE_SIZE);
1748 assert(self->bitmap);
1749 assert((uintptr_t)self->bitmap % PAGE_SIZE == 0);
1751 test_ioctl_ioas_alloc(&self->ioas_id);
1752 test_cmd_mock_domain(self->ioas_id, &self->stdev_id, &self->hwpt_id,
1756 FIXTURE_TEARDOWN(iommufd_dirty_tracking)
1758 munmap(self->buffer, variant->buffer_size);
1759 munmap(self->bitmap, self->bitmap_size);
1760 teardown_iommufd(self->fd, _metadata);
1763 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128k)
1765 /* one u32 index bitmap */
1766 .buffer_size = 128UL * 1024UL,
1769 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256k)
1771 /* one u64 index bitmap */
1772 .buffer_size = 256UL * 1024UL,
1775 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty640k)
1777 /* two u64 index and trailing end bitmap */
1778 .buffer_size = 640UL * 1024UL,
1781 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty128M)
1783 /* 4K bitmap (128M IOVA range) */
1784 .buffer_size = 128UL * 1024UL * 1024UL,
1787 FIXTURE_VARIANT_ADD(iommufd_dirty_tracking, domain_dirty256M)
1789 /* 8K bitmap (256M IOVA range) */
1790 .buffer_size = 256UL * 1024UL * 1024UL,
1793 TEST_F(iommufd_dirty_tracking, enforce_dirty)
1795 uint32_t ioas_id, stddev_id, idev_id;
1796 uint32_t hwpt_id, _hwpt_id;
1800 dev_flags = MOCK_FLAGS_DEVICE_NO_DIRTY;
1801 test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
1802 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
1803 test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
1804 test_err_mock_domain_flags(EINVAL, hwpt_id, dev_flags, &stddev_id,
1806 test_ioctl_destroy(stddev_id);
1807 test_ioctl_destroy(hwpt_id);
1809 /* IOMMU device does not support dirty tracking */
1810 test_ioctl_ioas_alloc(&ioas_id);
1811 test_cmd_mock_domain_flags(ioas_id, dev_flags, &stddev_id, &_hwpt_id,
1813 test_err_hwpt_alloc(EOPNOTSUPP, idev_id, ioas_id,
1814 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
1815 test_ioctl_destroy(stddev_id);
1818 TEST_F(iommufd_dirty_tracking, set_dirty_tracking)
1823 test_cmd_hwpt_alloc(self->idev_id, self->ioas_id,
1824 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
1825 test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
1826 test_cmd_set_dirty_tracking(hwpt_id, true);
1827 test_cmd_set_dirty_tracking(hwpt_id, false);
1829 test_ioctl_destroy(stddev_id);
1830 test_ioctl_destroy(hwpt_id);
1833 TEST_F(iommufd_dirty_tracking, device_dirty_capability)
1839 test_cmd_hwpt_alloc(self->idev_id, self->ioas_id, 0, &hwpt_id);
1840 test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
1841 test_cmd_get_hw_capabilities(self->idev_id, caps,
1842 IOMMU_HW_CAP_DIRTY_TRACKING);
1843 ASSERT_EQ(IOMMU_HW_CAP_DIRTY_TRACKING,
1844 caps & IOMMU_HW_CAP_DIRTY_TRACKING);
1846 test_ioctl_destroy(stddev_id);
1847 test_ioctl_destroy(hwpt_id);
1850 TEST_F(iommufd_dirty_tracking, get_dirty_bitmap)
1856 test_ioctl_ioas_alloc(&ioas_id);
1857 test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
1858 variant->buffer_size, MOCK_APERTURE_START);
1860 test_cmd_hwpt_alloc(self->idev_id, ioas_id,
1861 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
1862 test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
1864 test_cmd_set_dirty_tracking(hwpt_id, true);
1866 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
1867 MOCK_APERTURE_START, self->page_size,
1868 self->bitmap, self->bitmap_size, 0, _metadata);
1870 /* PAGE_SIZE unaligned bitmap */
1871 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
1872 MOCK_APERTURE_START, self->page_size,
1873 self->bitmap + MOCK_PAGE_SIZE,
1874 self->bitmap_size, 0, _metadata);
1876 test_ioctl_destroy(stddev_id);
1877 test_ioctl_destroy(hwpt_id);
1880 TEST_F(iommufd_dirty_tracking, get_dirty_bitmap_no_clear)
1886 test_ioctl_ioas_alloc(&ioas_id);
1887 test_ioctl_ioas_map_fixed_id(ioas_id, self->buffer,
1888 variant->buffer_size, MOCK_APERTURE_START);
1890 test_cmd_hwpt_alloc(self->idev_id, ioas_id,
1891 IOMMU_HWPT_ALLOC_DIRTY_TRACKING, &hwpt_id);
1892 test_cmd_mock_domain(hwpt_id, &stddev_id, NULL, NULL);
1894 test_cmd_set_dirty_tracking(hwpt_id, true);
1896 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
1897 MOCK_APERTURE_START, self->page_size,
1898 self->bitmap, self->bitmap_size,
1899 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
1902 /* Unaligned bitmap */
1903 test_mock_dirty_bitmaps(hwpt_id, variant->buffer_size,
1904 MOCK_APERTURE_START, self->page_size,
1905 self->bitmap + MOCK_PAGE_SIZE,
1907 IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR,
1910 test_ioctl_destroy(stddev_id);
1911 test_ioctl_destroy(hwpt_id);
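/*
 * With IOMMU_HWPT_GET_DIRTY_BITMAP_NO_CLEAR the dirty bits are reported but
 * left set, unlike the default read-and-clear behaviour exercised in
 * get_dirty_bitmap above.
 */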
1914 /* VFIO compatibility IOCTLs */
1916 TEST_F(iommufd, simple_ioctls)
1918 ASSERT_EQ(VFIO_API_VERSION, ioctl(self->fd, VFIO_GET_API_VERSION));
1919 ASSERT_EQ(1, ioctl(self->fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU));
1922 TEST_F(iommufd, unmap_cmd)
1924 struct vfio_iommu_type1_dma_unmap unmap_cmd = {
1925 .iova = MOCK_APERTURE_START,
1929 unmap_cmd.argsz = 1;
1930 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
1932 unmap_cmd.argsz = sizeof(unmap_cmd);
1933 unmap_cmd.flags = 1 << 31;
1934 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
1936 unmap_cmd.flags = 0;
1937 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
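/*
 * For the VFIO compat ioctls: an argsz smaller than the structure or an
 * unknown flag is EINVAL, and until a compat IOAS exists (VFIO_SET_IOMMU /
 * IOMMU_VFIO_IOAS_SET, see the vfio_compat_mock_domain fixture below) a
 * well-formed call fails with ENODEV.
 */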
1940 TEST_F(iommufd, map_cmd)
1942 struct vfio_iommu_type1_dma_map map_cmd = {
1943 .iova = MOCK_APERTURE_START,
1945 .vaddr = (__u64)buffer,
1949 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
1951 map_cmd.argsz = sizeof(map_cmd);
1952 map_cmd.flags = 1 << 31;
1953 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
1955 /* Requires a domain to be attached */
1956 map_cmd.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
1957 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
1960 TEST_F(iommufd, info_cmd)
1962 struct vfio_iommu_type1_info info_cmd = {};
1966 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
1968 info_cmd.argsz = sizeof(info_cmd);
1969 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_IOMMU_GET_INFO, &info_cmd));
1972 TEST_F(iommufd, set_iommu_cmd)
1974 /* Requires a domain to be attached */
1975 EXPECT_ERRNO(ENODEV,
1976 ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1v2_IOMMU));
1977 EXPECT_ERRNO(ENODEV, ioctl(self->fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU));
1980 TEST_F(iommufd, vfio_ioas)
1982 struct iommu_vfio_ioas vfio_ioas_cmd = {
1983 .size = sizeof(vfio_ioas_cmd),
1984 .op = IOMMU_VFIO_IOAS_GET,
1988 /* ENODEV if there is no compat ioas */
1989 EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
1991 /* Invalid id for set */
1992 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_SET;
1993 EXPECT_ERRNO(ENOENT, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
1995 /* Valid id for set */
1996 test_ioctl_ioas_alloc(&ioas_id);
1997 vfio_ioas_cmd.ioas_id = ioas_id;
1998 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2000 /* Same id comes back from get */
2001 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
2002 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2003 ASSERT_EQ(ioas_id, vfio_ioas_cmd.ioas_id);
2006 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_CLEAR;
2007 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2008 vfio_ioas_cmd.op = IOMMU_VFIO_IOAS_GET;
2009 EXPECT_ERRNO(ENODEV, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2012 FIXTURE(vfio_compat_mock_domain)
2018 FIXTURE_VARIANT(vfio_compat_mock_domain)
2020 unsigned int version;
2023 FIXTURE_SETUP(vfio_compat_mock_domain)
2025 struct iommu_vfio_ioas vfio_ioas_cmd = {
2026 .size = sizeof(vfio_ioas_cmd),
2027 .op = IOMMU_VFIO_IOAS_SET,
2030 self->fd = open("/dev/iommu", O_RDWR);
2031 ASSERT_NE(-1, self->fd);
2033 /* Create what VFIO would consider a group */
2034 test_ioctl_ioas_alloc(&self->ioas_id);
2035 test_cmd_mock_domain(self->ioas_id, NULL, NULL, NULL);
2037 /* Attach it to the vfio compat */
2038 vfio_ioas_cmd.ioas_id = self->ioas_id;
2039 ASSERT_EQ(0, ioctl(self->fd, IOMMU_VFIO_IOAS, &vfio_ioas_cmd));
2040 ASSERT_EQ(0, ioctl(self->fd, VFIO_SET_IOMMU, variant->version));
2043 FIXTURE_TEARDOWN(vfio_compat_mock_domain)
2045 teardown_iommufd(self->fd, _metadata);
2048 FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v2)
2050 .version = VFIO_TYPE1v2_IOMMU,
2053 FIXTURE_VARIANT_ADD(vfio_compat_mock_domain, Ver1v0)
2055 .version = VFIO_TYPE1_IOMMU,
2058 TEST_F(vfio_compat_mock_domain, simple_close)
2062 TEST_F(vfio_compat_mock_domain, option_huge_pages)
2064 struct iommu_option cmd = {
2065 .size = sizeof(cmd),
2066 .option_id = IOMMU_OPTION_HUGE_PAGES,
2067 .op = IOMMU_OPTION_OP_GET,
2069 .object_id = self->ioas_id,
2072 ASSERT_EQ(0, ioctl(self->fd, IOMMU_OPTION, &cmd));
2073 if (variant->version == VFIO_TYPE1_IOMMU) {
2074 ASSERT_EQ(0, cmd.val64);
2076 ASSERT_EQ(1, cmd.val64);
2081 * Execute an ioctl command stored in buffer and check that the result does not
2084 static bool is_filled(const void *buf, uint8_t c, size_t len)
2086 const uint8_t *cbuf = buf;
2088 for (; len; cbuf++, len--)
2094 #define ioctl_check_buf(fd, cmd) \
2096 size_t _cmd_len = *(__u32 *)buffer; \
2098 memset(buffer + _cmd_len, 0xAA, BUFFER_SIZE - _cmd_len); \
2099 ASSERT_EQ(0, ioctl(fd, cmd, buffer)); \
2100 ASSERT_EQ(true, is_filled(buffer + _cmd_len, 0xAA, \
2101 BUFFER_SIZE - _cmd_len)); \
2104 static void check_vfio_info_cap_chain(struct __test_metadata *_metadata,
2105 struct vfio_iommu_type1_info *info_cmd)
2107 const struct vfio_info_cap_header *cap;
2109 ASSERT_GE(info_cmd->argsz, info_cmd->cap_offset + sizeof(*cap));
2110 cap = buffer + info_cmd->cap_offset;
2115 cap_size = (buffer + cap->next) - (void *)cap;
2117 cap_size = (buffer + info_cmd->argsz) - (void *)cap;
2120 case VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE: {
2121 struct vfio_iommu_type1_info_cap_iova_range *data =
2124 ASSERT_EQ(1, data->header.version);
2125 ASSERT_EQ(1, data->nr_iovas);
2126 EXPECT_EQ(MOCK_APERTURE_START,
2127 data->iova_ranges[0].start);
2128 EXPECT_EQ(MOCK_APERTURE_LAST, data->iova_ranges[0].end);
2131 case VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL: {
2132 struct vfio_iommu_type1_info_dma_avail *data =
2135 ASSERT_EQ(1, data->header.version);
2136 ASSERT_EQ(sizeof(*data), cap_size);
2140 ASSERT_EQ(false, true);
2146 ASSERT_GE(info_cmd->argsz, cap->next + sizeof(*cap));
2147 ASSERT_GE(buffer + cap->next, (void *)cap);
2148 cap = buffer + cap->next;
2152 TEST_F(vfio_compat_mock_domain, get_info)
2154 struct vfio_iommu_type1_info *info_cmd = buffer;
2159 *info_cmd = (struct vfio_iommu_type1_info){
2160 .argsz = offsetof(struct vfio_iommu_type1_info, cap_offset),
2162 ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2163 ASSERT_NE(0, info_cmd->iova_pgsizes);
2164 ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2167 /* Read the cap chain size */
2168 *info_cmd = (struct vfio_iommu_type1_info){
2169 .argsz = sizeof(*info_cmd),
2171 ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2172 ASSERT_NE(0, info_cmd->iova_pgsizes);
2173 ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2175 ASSERT_EQ(0, info_cmd->cap_offset);
2176 ASSERT_LT(sizeof(*info_cmd), info_cmd->argsz);
2178 /* Read the caps; the kernel should never create corrupted caps */
2179 caplen = info_cmd->argsz;
2180 for (i = sizeof(*info_cmd); i < caplen; i++) {
2181 *info_cmd = (struct vfio_iommu_type1_info){
2184 ioctl_check_buf(self->fd, VFIO_IOMMU_GET_INFO);
2185 ASSERT_EQ(VFIO_IOMMU_INFO_PGSIZES | VFIO_IOMMU_INFO_CAPS,
2187 if (!info_cmd->cap_offset)
2189 check_vfio_info_cap_chain(_metadata, info_cmd);
2193 static void shuffle_array(unsigned long *array, size_t nelms)
2198 for (i = 0; i != nelms; i++) {
2199 unsigned long tmp = array[i];
2200 unsigned int other = rand() % (nelms - i);
2202 array[i] = array[other];
2207 TEST_F(vfio_compat_mock_domain, map)
2209 struct vfio_iommu_type1_dma_map map_cmd = {
2210 .argsz = sizeof(map_cmd),
2211 .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
2212 .vaddr = (uintptr_t)buffer,
2213 .size = BUFFER_SIZE,
2214 .iova = MOCK_APERTURE_START,
2216 struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2217 .argsz = sizeof(unmap_cmd),
2218 .size = BUFFER_SIZE,
2219 .iova = MOCK_APERTURE_START,
2221 unsigned long pages_iova[BUFFER_SIZE / PAGE_SIZE];
2224 /* Simple map/unmap */
2225 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2226 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2227 ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
2229 /* UNMAP_FLAG_ALL requires 0 iova/size */
2230 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2231 unmap_cmd.flags = VFIO_DMA_UNMAP_FLAG_ALL;
2232 EXPECT_ERRNO(EINVAL, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2236 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2237 ASSERT_EQ(BUFFER_SIZE, unmap_cmd.size);
2240 for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2241 map_cmd.iova = pages_iova[i] =
2242 MOCK_APERTURE_START + i * PAGE_SIZE;
2243 map_cmd.vaddr = (uintptr_t)buffer + i * PAGE_SIZE;
2244 map_cmd.size = PAGE_SIZE;
2245 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2247 shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));
2249 unmap_cmd.flags = 0;
2250 unmap_cmd.size = PAGE_SIZE;
2251 for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2252 unmap_cmd.iova = pages_iova[i];
2253 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA, &unmap_cmd));
2257 TEST_F(vfio_compat_mock_domain, huge_map)
2259 size_t buf_size = HUGEPAGE_SIZE * 2;
2260 struct vfio_iommu_type1_dma_map map_cmd = {
2261 .argsz = sizeof(map_cmd),
2262 .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
2264 .iova = MOCK_APERTURE_START,
2266 struct vfio_iommu_type1_dma_unmap unmap_cmd = {
2267 .argsz = sizeof(unmap_cmd),
2269 unsigned long pages_iova[16];
2273 /* Test huge pages and splitting */
2274 buf = mmap(0, buf_size, PROT_READ | PROT_WRITE,
2275 MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE, -1,
2277 ASSERT_NE(MAP_FAILED, buf);
2278 map_cmd.vaddr = (uintptr_t)buf;
2279 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_MAP_DMA, &map_cmd));
2281 unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
2282 for (i = 0; i != ARRAY_SIZE(pages_iova); i++)
2283 pages_iova[i] = MOCK_APERTURE_START + (i * unmap_cmd.size);
2284 shuffle_array(pages_iova, ARRAY_SIZE(pages_iova));
2286 /* type1 mode can cut up larger mappings, type1v2 always fails */
2287 for (i = 0; i != ARRAY_SIZE(pages_iova); i++) {
2288 unmap_cmd.iova = pages_iova[i];
2289 unmap_cmd.size = buf_size / ARRAY_SIZE(pages_iova);
2290 if (variant->version == VFIO_TYPE1_IOMMU) {
2291 ASSERT_EQ(0, ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,
2294 EXPECT_ERRNO(ENOENT,
2295 ioctl(self->fd, VFIO_IOMMU_UNMAP_DMA,