// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES.
 *
 * Kernel side components to support tools/testing/selftests/iommu
 */
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/xarray.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/fault-inject.h>
#include <linux/platform_device.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"
#include "io_pagetable.h"
#include "iommufd_private.h"
#include "iommufd_test.h"
static DECLARE_FAULT_ATTR(fail_iommufd);
static struct dentry *dbgfs_root;
static struct platform_device *selftest_iommu_dev;
static const struct iommu_ops mock_ops;
static struct iommu_domain_ops domain_nested_ops;

size_t iommufd_test_memory_limit = 65536;

enum {
	MOCK_DIRTY_TRACK = 1,
	MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,

	/*
	 * Like a real page table, alignment requires the low bits of the
	 * address to be zero. xarray also requires the high bit to be zero,
	 * so we store the pfns shifted. The upper bits are used for metadata.
	 */
	MOCK_PFN_MASK = ULONG_MAX / MOCK_IO_PAGE_SIZE,

	_MOCK_PFN_START = MOCK_PFN_MASK + 1,
	MOCK_PFN_START_IOVA = _MOCK_PFN_START,
	MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
	MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1,
};
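
/*
 * Worked example (editorial, for illustration only): mock_domain_map_pages()
 * below stores an entry as xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) | flags),
 * so with 4K pages MOCK_IO_PAGE_SIZE is 2048 and mapping paddr 0x2800 at the
 * start of an area yields the value (0x5 | MOCK_PFN_START_IOVA); the pfn is
 * recovered with (xa_to_value(ent) & MOCK_PFN_MASK).
 */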

/*
 * Syzkaller has trouble randomizing the correct iova to use since it is linked
 * to the map ioctl's output, and it has no idea about that. So, simplify
 * things. In syzkaller mode the 64 bit IOVA is converted into an nth area and
 * offset value. This has a much smaller randomization space and syzkaller can
 * hit it.
 */
static unsigned long iommufd_test_syz_conv_iova(struct io_pagetable *iopt,
						u64 *iova)
{
	struct syz_layout {
		__u32 nth_area;
		__u32 offset;
	};
	struct syz_layout *syz = (void *)iova;
	unsigned int nth = syz->nth_area;
	struct iopt_area *area;

	down_read(&iopt->iova_rwsem);
	for (area = iopt_area_iter_first(iopt, 0, ULONG_MAX); area;
	     area = iopt_area_iter_next(area, 0, ULONG_MAX)) {
		if (nth == 0) {
			up_read(&iopt->iova_rwsem);
			return iopt_area_iova(area) + syz->offset;
		}
		nth--;
	}
	up_read(&iopt->iova_rwsem);

	return 0;
}
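
/*
 * Illustrative decode (editorial note, not part of the uAPI): on a
 * little-endian machine the u64 0x0000000800000002 overlays syz_layout as
 * { .nth_area = 2, .offset = 8 }, i.e. 8 bytes into the third mapped area.
 */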

void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
				   unsigned int ioas_id, u64 *iova, u32 *flags)
{
	struct iommufd_ioas *ioas;

	if (!(*flags & MOCK_FLAGS_ACCESS_SYZ))
		return;
	*flags &= ~(u32)MOCK_FLAGS_ACCESS_SYZ;

	ioas = iommufd_get_ioas(ucmd->ictx, ioas_id);
	if (IS_ERR(ioas))
		return;
	*iova = iommufd_test_syz_conv_iova(&ioas->iopt, iova);
	iommufd_put_object(&ioas->obj);
}

struct mock_iommu_domain {
	unsigned long flags;
	struct iommu_domain domain;
	struct xarray pfns;
};

struct mock_iommu_domain_nested {
	struct iommu_domain domain;
	struct mock_iommu_domain *parent;
	u32 iotlb[MOCK_NESTED_DOMAIN_IOTLB_NUM];
};

enum selftest_obj_type {
	TYPE_IDEV,
};

struct mock_dev {
	struct device dev;
	unsigned long flags;
};

struct selftest_obj {
	struct iommufd_object obj;
	enum selftest_obj_type type;

	union {
		struct {
			struct iommufd_device *idev;
			struct iommufd_ctx *ictx;
			struct mock_dev *mock_dev;
		} idev;
	};
};

static int mock_domain_nop_attach(struct iommu_domain *domain,
				  struct device *dev)
{
	struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);

	if (domain->dirty_ops && (mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY))
		return -EINVAL;

	return 0;
}

static const struct iommu_domain_ops mock_blocking_ops = {
	.attach_dev = mock_domain_nop_attach,
};

static struct iommu_domain mock_blocking_domain = {
	.type = IOMMU_DOMAIN_BLOCKED,
	.ops = &mock_blocking_ops,
};

static void *mock_domain_hw_info(struct device *dev, u32 *length, u32 *type)
{
	struct iommu_test_hw_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	info->test_reg = IOMMU_HW_INFO_SELFTEST_REGVAL;
	*length = sizeof(*info);
	*type = IOMMU_HW_INFO_TYPE_SELFTEST;

	return info;
}

static int mock_domain_set_dirty_tracking(struct iommu_domain *domain,
					  bool enable)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	unsigned long flags = mock->flags;

	if (enable && !domain->dirty_ops)
		return -EINVAL;

	/* No change? */
	if (!(enable ^ !!(flags & MOCK_DIRTY_TRACK)))
		return 0;

	flags = (enable ? flags | MOCK_DIRTY_TRACK : flags & ~MOCK_DIRTY_TRACK);

	mock->flags = flags;
	return 0;
}

static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
					    unsigned long iova, size_t size,
					    unsigned long flags,
					    struct iommu_dirty_bitmap *dirty)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	unsigned long i, max = size / MOCK_IO_PAGE_SIZE;
	void *ent, *old;

	if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap)
		return -EINVAL;

	for (i = 0; i < max; i++) {
		unsigned long cur = iova + i * MOCK_IO_PAGE_SIZE;

		ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
		if (ent && (xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA)) {
			/* Clear dirty */
			if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
				unsigned long val;

				val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
				old = xa_store(&mock->pfns,
					       cur / MOCK_IO_PAGE_SIZE,
					       xa_mk_value(val), GFP_KERNEL);
				WARN_ON_ONCE(ent != old);
			}
			iommu_dirty_bitmap_record(dirty, cur,
						  MOCK_IO_PAGE_SIZE);
		}
	}

	return 0;
}
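
/*
 * Editorial note: set_dirty_tracking() arms MOCK_DIRTY_TRACK on the domain,
 * then read_and_clear_dirty() reports pages whose xarray entries carry
 * MOCK_PFN_DIRTY_IOVA, clearing that bit unless IOMMU_DIRTY_NO_CLEAR is
 * passed, so callers can sample the dirty state without resetting it.
 */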

static const struct iommu_dirty_ops dirty_ops = {
	.set_dirty_tracking = mock_domain_set_dirty_tracking,
	.read_and_clear_dirty = mock_domain_read_and_clear_dirty,
};

static struct iommu_domain *mock_domain_alloc_paging(struct device *dev)
{
	struct mock_iommu_domain *mock;

	mock = kzalloc(sizeof(*mock), GFP_KERNEL);
	if (!mock)
		return NULL;
	mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
	mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
	mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
	mock->domain.ops = mock_ops.default_domain_ops;
	mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
	xa_init(&mock->pfns);
	return &mock->domain;
}

static struct iommu_domain *
__mock_domain_alloc_nested(struct mock_iommu_domain *mock_parent,
			   const struct iommu_hwpt_selftest *user_cfg)
{
	struct mock_iommu_domain_nested *mock_nested;
	int i;

	mock_nested = kzalloc(sizeof(*mock_nested), GFP_KERNEL);
	if (!mock_nested)
		return ERR_PTR(-ENOMEM);
	mock_nested->parent = mock_parent;
	mock_nested->domain.ops = &domain_nested_ops;
	mock_nested->domain.type = IOMMU_DOMAIN_NESTED;
	for (i = 0; i < MOCK_NESTED_DOMAIN_IOTLB_NUM; i++)
		mock_nested->iotlb[i] = user_cfg->iotlb;
	return &mock_nested->domain;
}

static struct iommu_domain *
mock_domain_alloc_user(struct device *dev, u32 flags,
		       struct iommu_domain *parent,
		       const struct iommu_user_data *user_data)
{
	struct mock_iommu_domain *mock_parent;
	struct iommu_hwpt_selftest user_cfg;
	int rc;

	/* must be mock_domain */
	if (!parent) {
		struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);
		bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
		bool no_dirty_ops = mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY;
		struct iommu_domain *domain;

		if (flags & (~(IOMMU_HWPT_ALLOC_NEST_PARENT |
			       IOMMU_HWPT_ALLOC_DIRTY_TRACKING)))
			return ERR_PTR(-EOPNOTSUPP);
		if (user_data || (has_dirty_flag && no_dirty_ops))
			return ERR_PTR(-EOPNOTSUPP);
		domain = mock_domain_alloc_paging(NULL);
		if (!domain)
			return ERR_PTR(-ENOMEM);
		if (has_dirty_flag)
			container_of(domain, struct mock_iommu_domain, domain)
				->domain.dirty_ops = &dirty_ops;
		return domain;
	}

	/* must be mock_domain_nested */
	if (user_data->type != IOMMU_HWPT_DATA_SELFTEST || flags)
		return ERR_PTR(-EOPNOTSUPP);
	if (!parent || parent->ops != mock_ops.default_domain_ops)
		return ERR_PTR(-EINVAL);

	mock_parent = container_of(parent, struct mock_iommu_domain, domain);
	if (!mock_parent)
		return ERR_PTR(-EINVAL);

	rc = iommu_copy_struct_from_user(&user_cfg, user_data,
					 IOMMU_HWPT_DATA_SELFTEST, iotlb);
	if (rc)
		return ERR_PTR(rc);

	return __mock_domain_alloc_nested(mock_parent, &user_cfg);
}

static void mock_domain_free(struct iommu_domain *domain)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);

	WARN_ON(!xa_empty(&mock->pfns));
	kfree(mock);
}

static int mock_domain_map_pages(struct iommu_domain *domain,
				 unsigned long iova, phys_addr_t paddr,
				 size_t pgsize, size_t pgcount, int prot,
				 gfp_t gfp, size_t *mapped)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	unsigned long flags = MOCK_PFN_START_IOVA;
	unsigned long start_iova = iova;

	/*
	 * xarray does not reliably work with fault injection because it does a
	 * retry allocation, so put our own failure point.
	 */
	if (iommufd_should_fail())
		return -ENOENT;

	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
	WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);
	for (; pgcount; pgcount--) {
		size_t cur;

		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
			void *old;

			if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
				flags = MOCK_PFN_LAST_IOVA;
			old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,
				       xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) |
						   flags),
				       gfp);
			if (xa_is_err(old)) {
				for (; start_iova != iova;
				     start_iova += MOCK_IO_PAGE_SIZE)
					xa_erase(&mock->pfns,
						 start_iova /
							 MOCK_IO_PAGE_SIZE);
				return xa_err(old);
			}
			WARN_ON(old);
			iova += MOCK_IO_PAGE_SIZE;
			paddr += MOCK_IO_PAGE_SIZE;
			*mapped += MOCK_IO_PAGE_SIZE;
		}
	}

	return 0;
}

static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
				      unsigned long iova, size_t pgsize,
				      size_t pgcount,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	bool first = true;
	size_t ret = 0;
	void *ent;

	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
	WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);

	for (; pgcount; pgcount--) {
		size_t cur;

		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
			ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);

			/*
			 * iommufd generates unmaps that must be a strict
			 * superset of the maps performed. So every starting
			 * IOVA should have been an IOVA passed to map.
			 *
			 * The first IOVA must be present and must have been
			 * the first IOVA passed to map_pages.
			 */
			if (first) {
				WARN_ON(ent && !(xa_to_value(ent) &
						 MOCK_PFN_START_IOVA));
				first = false;
			}
			if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
				WARN_ON(ent && !(xa_to_value(ent) &
						 MOCK_PFN_LAST_IOVA));

			iova += MOCK_IO_PAGE_SIZE;
			ret += MOCK_IO_PAGE_SIZE;
		}
	}
	return ret;
}

static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain,
					    dma_addr_t iova)
{
	struct mock_iommu_domain *mock =
		container_of(domain, struct mock_iommu_domain, domain);
	void *ent;

	WARN_ON(iova % MOCK_IO_PAGE_SIZE);
	ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
	WARN_ON(!ent);
	return (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE;
}

static bool mock_domain_capable(struct device *dev, enum iommu_cap cap)
{
	struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);

	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_DIRTY_TRACKING:
		return !(mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY);
	default:
		break;
	}

	return false;
}

static struct iommu_device mock_iommu_device = {
};

static struct iommu_device *mock_probe_device(struct device *dev)
{
	return &mock_iommu_device;
}

static const struct iommu_ops mock_ops = {
	/*
	 * IOMMU_DOMAIN_BLOCKED cannot be returned from def_domain_type()
	 * because it is zero.
	 */
	.default_domain = &mock_blocking_domain,
	.blocked_domain = &mock_blocking_domain,
	.owner = THIS_MODULE,
	.pgsize_bitmap = MOCK_IO_PAGE_SIZE,
	.hw_info = mock_domain_hw_info,
	.domain_alloc_paging = mock_domain_alloc_paging,
	.domain_alloc_user = mock_domain_alloc_user,
	.capable = mock_domain_capable,
	.device_group = generic_device_group,
	.probe_device = mock_probe_device,
	.default_domain_ops =
		&(struct iommu_domain_ops){
			.free = mock_domain_free,
			.attach_dev = mock_domain_nop_attach,
			.map_pages = mock_domain_map_pages,
			.unmap_pages = mock_domain_unmap_pages,
			.iova_to_phys = mock_domain_iova_to_phys,
		},
};

static void mock_domain_free_nested(struct iommu_domain *domain)
{
	struct mock_iommu_domain_nested *mock_nested =
		container_of(domain, struct mock_iommu_domain_nested, domain);

	kfree(mock_nested);
}

static struct iommu_domain_ops domain_nested_ops = {
	.free = mock_domain_free_nested,
	.attach_dev = mock_domain_nop_attach,
};

static inline struct iommufd_hw_pagetable *
__get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id, u32 hwpt_type)
{
	struct iommufd_object *obj;

	obj = iommufd_get_object(ucmd->ictx, mockpt_id, hwpt_type);
	if (IS_ERR(obj))
		return ERR_CAST(obj);
	return container_of(obj, struct iommufd_hw_pagetable, obj);
}

static inline struct iommufd_hw_pagetable *
get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
		 struct mock_iommu_domain **mock)
{
	struct iommufd_hw_pagetable *hwpt;

	hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_PAGING);
	if (IS_ERR(hwpt))
		return hwpt;
	if (hwpt->domain->type != IOMMU_DOMAIN_UNMANAGED ||
	    hwpt->domain->ops != mock_ops.default_domain_ops) {
		iommufd_put_object(&hwpt->obj);
		return ERR_PTR(-EINVAL);
	}
	*mock = container_of(hwpt->domain, struct mock_iommu_domain, domain);
	return hwpt;
}

static inline struct iommufd_hw_pagetable *
get_md_pagetable_nested(struct iommufd_ucmd *ucmd, u32 mockpt_id,
			struct mock_iommu_domain_nested **mock_nested)
{
	struct iommufd_hw_pagetable *hwpt;

	hwpt = __get_md_pagetable(ucmd, mockpt_id, IOMMUFD_OBJ_HWPT_NESTED);
	if (IS_ERR(hwpt))
		return hwpt;
	if (hwpt->domain->type != IOMMU_DOMAIN_NESTED ||
	    hwpt->domain->ops != &domain_nested_ops) {
		iommufd_put_object(&hwpt->obj);
		return ERR_PTR(-EINVAL);
	}
	*mock_nested = container_of(hwpt->domain,
				    struct mock_iommu_domain_nested, domain);
	return hwpt;
}

struct mock_bus_type {
	struct bus_type bus;
	struct notifier_block nb;
};

static struct mock_bus_type iommufd_mock_bus_type = {
	.bus = {
		.name = "iommufd_mock",
	},
};

static atomic_t mock_dev_num;

static void mock_dev_release(struct device *dev)
{
	struct mock_dev *mdev = container_of(dev, struct mock_dev, dev);

	atomic_dec(&mock_dev_num);
	kfree(mdev);
}

static struct mock_dev *mock_dev_create(unsigned long dev_flags)
{
	struct mock_dev *mdev;
	int rc;

	if (dev_flags & ~(MOCK_FLAGS_DEVICE_NO_DIRTY))
		return ERR_PTR(-EINVAL);

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return ERR_PTR(-ENOMEM);

	device_initialize(&mdev->dev);
	mdev->flags = dev_flags;
	mdev->dev.release = mock_dev_release;
	mdev->dev.bus = &iommufd_mock_bus_type.bus;

	rc = dev_set_name(&mdev->dev, "iommufd_mock%u",
			  atomic_inc_return(&mock_dev_num));
	if (rc)
		goto err_put;

	rc = device_add(&mdev->dev);
	if (rc)
		goto err_put;
	return mdev;

err_put:
	put_device(&mdev->dev);
	return ERR_PTR(rc);
}

static void mock_dev_destroy(struct mock_dev *mdev)
{
	device_unregister(&mdev->dev);
}

bool iommufd_selftest_is_mock_dev(struct device *dev)
{
	return dev->release == mock_dev_release;
}

/* Create a hw_pagetable with the mock domain so we can test the domain ops */
static int iommufd_test_mock_domain(struct iommufd_ucmd *ucmd,
				    struct iommu_test_cmd *cmd)
{
	struct iommufd_device *idev;
	struct selftest_obj *sobj;
	u32 pt_id = cmd->id;
	u32 dev_flags = 0;
	u32 idev_id;
	int rc;

	sobj = iommufd_object_alloc(ucmd->ictx, sobj, IOMMUFD_OBJ_SELFTEST);
	if (IS_ERR(sobj))
		return PTR_ERR(sobj);

	sobj->idev.ictx = ucmd->ictx;
	sobj->type = TYPE_IDEV;

	if (cmd->op == IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS)
		dev_flags = cmd->mock_domain_flags.dev_flags;

	sobj->idev.mock_dev = mock_dev_create(dev_flags);
	if (IS_ERR(sobj->idev.mock_dev)) {
		rc = PTR_ERR(sobj->idev.mock_dev);
		goto out_sobj;
	}

	idev = iommufd_device_bind(ucmd->ictx, &sobj->idev.mock_dev->dev,
				   &idev_id);
	if (IS_ERR(idev)) {
		rc = PTR_ERR(idev);
		goto out_mdev;
	}
	sobj->idev.idev = idev;

	rc = iommufd_device_attach(idev, &pt_id);
	if (rc)
		goto out_unbind;

	/* Userspace must destroy the device_id to destroy the object */
	cmd->mock_domain.out_hwpt_id = pt_id;
	cmd->mock_domain.out_stdev_id = sobj->obj.id;
	cmd->mock_domain.out_idev_id = idev_id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_detach;
	iommufd_object_finalize(ucmd->ictx, &sobj->obj);
	return 0;

out_detach:
	iommufd_device_detach(idev);
out_unbind:
	iommufd_device_unbind(idev);
out_mdev:
	mock_dev_destroy(sobj->idev.mock_dev);
out_sobj:
	iommufd_object_abort(ucmd->ictx, &sobj->obj);
	return rc;
}

/* Replace the mock domain with a manually allocated hw_pagetable */
static int iommufd_test_mock_domain_replace(struct iommufd_ucmd *ucmd,
					    unsigned int device_id, u32 pt_id,
					    struct iommu_test_cmd *cmd)
{
	struct iommufd_object *dev_obj;
	struct selftest_obj *sobj;
	int rc;

	/*
	 * Prefer to use the OBJ_SELFTEST because the destroy_rwsem will ensure
	 * it doesn't race with detach, which is not allowed.
	 */
	dev_obj =
		iommufd_get_object(ucmd->ictx, device_id, IOMMUFD_OBJ_SELFTEST);
	if (IS_ERR(dev_obj))
		return PTR_ERR(dev_obj);

	sobj = container_of(dev_obj, struct selftest_obj, obj);
	if (sobj->type != TYPE_IDEV) {
		rc = -EINVAL;
		goto out_dev_obj;
	}

	rc = iommufd_device_replace(sobj->idev.idev, &pt_id);
	if (rc)
		goto out_dev_obj;

	cmd->mock_domain_replace.pt_id = pt_id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));

out_dev_obj:
	iommufd_put_object(dev_obj);
	return rc;
}

/* Add an additional reserved IOVA to the IOAS */
static int iommufd_test_add_reserved(struct iommufd_ucmd *ucmd,
				     unsigned int mockpt_id,
				     unsigned long start, size_t length)
{
	struct iommufd_ioas *ioas;
	int rc;

	ioas = iommufd_get_ioas(ucmd->ictx, mockpt_id);
	if (IS_ERR(ioas))
		return PTR_ERR(ioas);
	down_write(&ioas->iopt.iova_rwsem);
	rc = iopt_reserve_iova(&ioas->iopt, start, start + length - 1, NULL);
	up_write(&ioas->iopt.iova_rwsem);
	iommufd_put_object(&ioas->obj);
	return rc;
}

/* Check that every pfn under each iova matches the pfn under a user VA */
static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
				    unsigned int mockpt_id, unsigned long iova,
				    size_t length, void __user *uptr)
{
	struct iommufd_hw_pagetable *hwpt;
	struct mock_iommu_domain *mock;
	uintptr_t end;
	int rc;

	if (iova % MOCK_IO_PAGE_SIZE || length % MOCK_IO_PAGE_SIZE ||
	    (uintptr_t)uptr % MOCK_IO_PAGE_SIZE ||
	    check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
		return -EINVAL;

	hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
	if (IS_ERR(hwpt))
		return PTR_ERR(hwpt);

	for (; length; length -= MOCK_IO_PAGE_SIZE) {
		struct page *pages[1];
		unsigned long pfn;
		long npages;
		void *ent;

		npages = get_user_pages_fast((uintptr_t)uptr & PAGE_MASK, 1, 0,
					     pages);
		if (npages < 0) {
			rc = npages;
			goto out_put;
		}
		if (WARN_ON(npages != 1)) {
			rc = -EFAULT;
			goto out_put;
		}
		pfn = page_to_pfn(pages[0]);
		put_page(pages[0]);

		ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
		if (!ent ||
		    (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE !=
			    pfn * PAGE_SIZE + ((uintptr_t)uptr % PAGE_SIZE)) {
			rc = -EINVAL;
			goto out_put;
		}
		iova += MOCK_IO_PAGE_SIZE;
		uptr += MOCK_IO_PAGE_SIZE;
	}
	rc = 0;

out_put:
	iommufd_put_object(&hwpt->obj);
	return rc;
}

/* Check that the page ref count matches, to look for missing pin/unpins */
static int iommufd_test_md_check_refs(struct iommufd_ucmd *ucmd,
				      void __user *uptr, size_t length,
				      unsigned int refs)
{
	uintptr_t end;

	if (length % PAGE_SIZE || (uintptr_t)uptr % PAGE_SIZE ||
	    check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
		return -EINVAL;

	for (; length; length -= PAGE_SIZE) {
		struct page *pages[1];
		long npages;

		npages = get_user_pages_fast((uintptr_t)uptr, 1, 0, pages);
		if (npages < 0)
			return npages;
		if (WARN_ON(npages != 1))
			return -EFAULT;
		if (!PageCompound(pages[0])) {
			unsigned int count;

			count = page_ref_count(pages[0]);
			if (count / GUP_PIN_COUNTING_BIAS != refs) {
				put_page(pages[0]);
				return -EIO;
			}
		}
		put_page(pages[0]);
		uptr += PAGE_SIZE;
	}
	return 0;
}

struct selftest_access {
	struct iommufd_access *access;
	struct file *file;
	struct mutex lock;
	struct list_head items;
	unsigned int next_id;
	bool destroying;
};

struct selftest_access_item {
	struct list_head items_elm;
	unsigned long iova;
	size_t length;
	unsigned int id;
};

static const struct file_operations iommfd_test_staccess_fops;

static struct selftest_access *iommufd_access_get(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADFD);

	if (file->f_op != &iommfd_test_staccess_fops) {
		fput(file);
		return ERR_PTR(-EBADFD);
	}
	return file->private_data;
}

static void iommufd_test_access_unmap(void *data, unsigned long iova,
				      unsigned long length)
{
	unsigned long iova_last = iova + length - 1;
	struct selftest_access *staccess = data;
	struct selftest_access_item *item;
	struct selftest_access_item *tmp;

	mutex_lock(&staccess->lock);
	list_for_each_entry_safe(item, tmp, &staccess->items, items_elm) {
		if (iova > item->iova + item->length - 1 ||
		    iova_last < item->iova)
			continue;
		list_del(&item->items_elm);
		iommufd_access_unpin_pages(staccess->access, item->iova,
					   item->length);
		kfree(item);
	}
	mutex_unlock(&staccess->lock);
}

static int iommufd_test_access_item_destroy(struct iommufd_ucmd *ucmd,
					    unsigned int access_id,
					    unsigned int item_id)
{
	struct selftest_access_item *item;
	struct selftest_access *staccess;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	mutex_lock(&staccess->lock);
	list_for_each_entry(item, &staccess->items, items_elm) {
		if (item->id == item_id) {
			list_del(&item->items_elm);
			iommufd_access_unpin_pages(staccess->access, item->iova,
						   item->length);
			mutex_unlock(&staccess->lock);
			kfree(item);
			fput(staccess->file);
			return 0;
		}
	}
	mutex_unlock(&staccess->lock);
	fput(staccess->file);
	return -ENOENT;
}

static int iommufd_test_staccess_release(struct inode *inode,
					 struct file *filep)
{
	struct selftest_access *staccess = filep->private_data;

	if (staccess->access) {
		iommufd_test_access_unmap(staccess, 0, ULONG_MAX);
		iommufd_access_destroy(staccess->access);
	}
	mutex_destroy(&staccess->lock);
	kfree(staccess);
	return 0;
}

static const struct iommufd_access_ops selftest_access_ops_pin = {
	.needs_pin_pages = 1,
	.unmap = iommufd_test_access_unmap,
};

static const struct iommufd_access_ops selftest_access_ops = {
	.unmap = iommufd_test_access_unmap,
};

static const struct file_operations iommfd_test_staccess_fops = {
	.release = iommufd_test_staccess_release,
};

static struct selftest_access *iommufd_test_alloc_access(void)
{
	struct selftest_access *staccess;
	struct file *filep;

	staccess = kzalloc(sizeof(*staccess), GFP_KERNEL_ACCOUNT);
	if (!staccess)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&staccess->items);
	mutex_init(&staccess->lock);

	filep = anon_inode_getfile("[iommufd_test_staccess]",
				   &iommfd_test_staccess_fops, staccess,
				   O_RDWR);
	if (IS_ERR(filep)) {
		kfree(staccess);
		return ERR_CAST(filep);
	}
	staccess->file = filep;
	return staccess;
}

static int iommufd_test_create_access(struct iommufd_ucmd *ucmd,
				      unsigned int ioas_id, unsigned int flags)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct selftest_access *staccess;
	struct iommufd_access *access;
	u32 id;
	int fdno;
	int rc;

	if (flags & ~MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES)
		return -EOPNOTSUPP;

	staccess = iommufd_test_alloc_access();
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	fdno = get_unused_fd_flags(O_CLOEXEC);
	if (fdno < 0) {
		rc = -ENOMEM;
		goto out_free_staccess;
	}

	access = iommufd_access_create(
		ucmd->ictx,
		(flags & MOCK_FLAGS_ACCESS_CREATE_NEEDS_PIN_PAGES) ?
			&selftest_access_ops_pin :
			&selftest_access_ops,
		staccess, &id);
	if (IS_ERR(access)) {
		rc = PTR_ERR(access);
		goto out_put_fdno;
	}
	rc = iommufd_access_attach(access, ioas_id);
	if (rc)
		goto out_destroy;
	cmd->create_access.out_access_fd = fdno;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_destroy;

	staccess->access = access;
	fd_install(fdno, staccess->file);
	return 0;

out_destroy:
	iommufd_access_destroy(access);
out_put_fdno:
	put_unused_fd(fdno);
out_free_staccess:
	fput(staccess->file);
	return rc;
}

static int iommufd_test_access_replace_ioas(struct iommufd_ucmd *ucmd,
					    unsigned int access_id,
					    unsigned int ioas_id)
{
	struct selftest_access *staccess;
	int rc;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	rc = iommufd_access_replace(staccess->access, ioas_id);
	fput(staccess->file);
	return rc;
}

/* Check that the pages in a page array match the pages in the user VA */
static int iommufd_test_check_pages(void __user *uptr, struct page **pages,
				    size_t npages)
{
	for (; npages; npages--) {
		struct page *tmp_pages[1];
		long rc;

		rc = get_user_pages_fast((uintptr_t)uptr, 1, 0, tmp_pages);
		if (rc < 0)
			return rc;
		if (WARN_ON(rc != 1))
			return -EFAULT;
		put_page(tmp_pages[0]);
		if (tmp_pages[0] != *pages)
			return -EBADE;
		pages++;
		uptr += PAGE_SIZE;
	}
	return 0;
}

static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
				     unsigned int access_id, unsigned long iova,
				     size_t length, void __user *uptr,
				     u32 flags)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct selftest_access_item *item;
	struct selftest_access *staccess;
	struct page **pages;
	size_t npages;
	int rc;

	/* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
	if (length > 16*1024*1024)
		return -ENOMEM;

	if (flags & ~(MOCK_FLAGS_ACCESS_WRITE | MOCK_FLAGS_ACCESS_SYZ))
		return -EOPNOTSUPP;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	if (staccess->access->ops != &selftest_access_ops_pin) {
		rc = -EOPNOTSUPP;
		goto out_put;
	}

	if (flags & MOCK_FLAGS_ACCESS_SYZ)
		iova = iommufd_test_syz_conv_iova(&staccess->access->ioas->iopt,
						  &cmd->access_pages.iova);

	npages = (ALIGN(iova + length, PAGE_SIZE) -
		  ALIGN_DOWN(iova, PAGE_SIZE)) /
		 PAGE_SIZE;
	pages = kvcalloc(npages, sizeof(*pages), GFP_KERNEL_ACCOUNT);
	if (!pages) {
		rc = -ENOMEM;
		goto out_put;
	}

	/*
	 * Drivers will need to think very carefully about this locking. The
	 * core code can do multiple unmaps instantaneously after
	 * iommufd_access_pin_pages() and *all* the unmaps must not return until
	 * the range is unpinned. This simple implementation puts a global lock
	 * around the pin, which may not suit drivers that want this to be a
	 * performance path. Drivers that get this wrong will trigger WARN_ON
	 * races and cause EDEADLOCK failures to userspace.
	 */
	mutex_lock(&staccess->lock);
	rc = iommufd_access_pin_pages(staccess->access, iova, length, pages,
				      flags & MOCK_FLAGS_ACCESS_WRITE);
	if (rc)
		goto out_unlock;

	/* For syzkaller allow uptr to be NULL to skip this check */
	if (uptr) {
		rc = iommufd_test_check_pages(
			uptr - (iova - ALIGN_DOWN(iova, PAGE_SIZE)), pages,
			npages);
		if (rc)
			goto out_unaccess;
	}

	item = kzalloc(sizeof(*item), GFP_KERNEL_ACCOUNT);
	if (!item) {
		rc = -ENOMEM;
		goto out_unaccess;
	}

	item->iova = iova;
	item->length = length;
	item->id = staccess->next_id++;
	list_add_tail(&item->items_elm, &staccess->items);

	cmd->access_pages.out_access_pages_id = item->id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_free_item;
	goto out_unlock;

out_free_item:
	list_del(&item->items_elm);
	kfree(item);
out_unaccess:
	iommufd_access_unpin_pages(staccess->access, iova, length);
out_unlock:
	mutex_unlock(&staccess->lock);
	kvfree(pages);
out_put:
	fput(staccess->file);
	return rc;
}

static int iommufd_test_access_rw(struct iommufd_ucmd *ucmd,
				  unsigned int access_id, unsigned long iova,
				  size_t length, void __user *ubuf,
				  unsigned int flags)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct selftest_access *staccess;
	void *tmp;
	int rc;

	/* Prevent syzkaller from triggering a WARN_ON in kvzalloc() */
	if (length > 16*1024*1024)
		return -ENOMEM;

	if (flags & ~(MOCK_ACCESS_RW_WRITE | MOCK_ACCESS_RW_SLOW_PATH |
		      MOCK_FLAGS_ACCESS_SYZ))
		return -EOPNOTSUPP;

	staccess = iommufd_access_get(access_id);
	if (IS_ERR(staccess))
		return PTR_ERR(staccess);

	tmp = kvzalloc(length, GFP_KERNEL_ACCOUNT);
	if (!tmp) {
		rc = -ENOMEM;
		goto out_put;
	}

	if (flags & MOCK_ACCESS_RW_WRITE) {
		if (copy_from_user(tmp, ubuf, length)) {
			rc = -EFAULT;
			goto out_free;
		}
	}

	if (flags & MOCK_FLAGS_ACCESS_SYZ)
		iova = iommufd_test_syz_conv_iova(&staccess->access->ioas->iopt,
						  &cmd->access_rw.iova);

	rc = iommufd_access_rw(staccess->access, iova, tmp, length, flags);
	if (rc)
		goto out_free;
	if (!(flags & MOCK_ACCESS_RW_WRITE)) {
		if (copy_to_user(ubuf, tmp, length)) {
			rc = -EFAULT;
			goto out_free;
		}
	}

out_free:
	kvfree(tmp);
out_put:
	fput(staccess->file);
	return rc;
}

static_assert((unsigned int)MOCK_ACCESS_RW_WRITE == IOMMUFD_ACCESS_RW_WRITE);
static_assert((unsigned int)MOCK_ACCESS_RW_SLOW_PATH ==
	      __IOMMUFD_ACCESS_RW_SLOW_PATH);
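
/*
 * Editorial note: the asserts above let the MOCK_ACCESS_RW_* flags be passed
 * straight through to iommufd_access_rw() without translation; if the uAPI
 * values ever diverge, the build fails here instead of silently misbehaving.
 */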

static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id,
			      unsigned long iova, size_t length,
			      unsigned long page_size, void __user *uptr,
			      u32 flags)
{
	unsigned long bitmap_size, i, max;
	struct iommu_test_cmd *cmd = ucmd->cmd;
	struct iommufd_hw_pagetable *hwpt;
	struct mock_iommu_domain *mock;
	int rc, count = 0;
	void *tmp;

	if (!page_size || !length || iova % page_size || length % page_size ||
	    !uptr)
		return -EINVAL;

	hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
	if (IS_ERR(hwpt))
		return PTR_ERR(hwpt);

	if (!(mock->flags & MOCK_DIRTY_TRACK)) {
		rc = -EINVAL;
		goto out_put;
	}

	max = length / page_size;
	bitmap_size = max / BITS_PER_BYTE;

	tmp = kvzalloc(bitmap_size, GFP_KERNEL_ACCOUNT);
	if (!tmp) {
		rc = -ENOMEM;
		goto out_put;
	}

	if (copy_from_user(tmp, uptr, bitmap_size)) {
		rc = -EFAULT;
		goto out_free;
	}

	for (i = 0; i < max; i++) {
		unsigned long cur = iova + i * page_size;
		void *ent, *old;

		if (!test_bit(i, (unsigned long *)tmp))
			continue;

		ent = xa_load(&mock->pfns, cur / page_size);
		if (ent) {
			unsigned long val;

			val = xa_to_value(ent) | MOCK_PFN_DIRTY_IOVA;
			old = xa_store(&mock->pfns, cur / page_size,
				       xa_mk_value(val), GFP_KERNEL);
			WARN_ON_ONCE(ent != old);
			count++;
		}
	}

	cmd->dirty.out_nr_dirty = count;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
out_free:
	kvfree(tmp);
out_put:
	iommufd_put_object(&hwpt->obj);
	return rc;
}

void iommufd_selftest_destroy(struct iommufd_object *obj)
{
	struct selftest_obj *sobj = container_of(obj, struct selftest_obj, obj);

	switch (sobj->type) {
	case TYPE_IDEV:
		iommufd_device_detach(sobj->idev.idev);
		iommufd_device_unbind(sobj->idev.idev);
		mock_dev_destroy(sobj->idev.mock_dev);
		break;
	}
}

int iommufd_test(struct iommufd_ucmd *ucmd)
{
	struct iommu_test_cmd *cmd = ucmd->cmd;

	switch (cmd->op) {
	case IOMMU_TEST_OP_ADD_RESERVED:
		return iommufd_test_add_reserved(ucmd, cmd->id,
						 cmd->add_reserved.start,
						 cmd->add_reserved.length);
	case IOMMU_TEST_OP_MOCK_DOMAIN:
	case IOMMU_TEST_OP_MOCK_DOMAIN_FLAGS:
		return iommufd_test_mock_domain(ucmd, cmd);
	case IOMMU_TEST_OP_MOCK_DOMAIN_REPLACE:
		return iommufd_test_mock_domain_replace(
			ucmd, cmd->id, cmd->mock_domain_replace.pt_id, cmd);
	case IOMMU_TEST_OP_MD_CHECK_MAP:
		return iommufd_test_md_check_pa(
			ucmd, cmd->id, cmd->check_map.iova,
			cmd->check_map.length,
			u64_to_user_ptr(cmd->check_map.uptr));
	case IOMMU_TEST_OP_MD_CHECK_REFS:
		return iommufd_test_md_check_refs(
			ucmd, u64_to_user_ptr(cmd->check_refs.uptr),
			cmd->check_refs.length, cmd->check_refs.refs);
	case IOMMU_TEST_OP_CREATE_ACCESS:
		return iommufd_test_create_access(ucmd, cmd->id,
						  cmd->create_access.flags);
	case IOMMU_TEST_OP_ACCESS_REPLACE_IOAS:
		return iommufd_test_access_replace_ioas(
			ucmd, cmd->id, cmd->access_replace_ioas.ioas_id);
	case IOMMU_TEST_OP_ACCESS_PAGES:
		return iommufd_test_access_pages(
			ucmd, cmd->id, cmd->access_pages.iova,
			cmd->access_pages.length,
			u64_to_user_ptr(cmd->access_pages.uptr),
			cmd->access_pages.flags);
	case IOMMU_TEST_OP_ACCESS_RW:
		return iommufd_test_access_rw(
			ucmd, cmd->id, cmd->access_rw.iova,
			cmd->access_rw.length,
			u64_to_user_ptr(cmd->access_rw.uptr),
			cmd->access_rw.flags);
	case IOMMU_TEST_OP_DESTROY_ACCESS_PAGES:
		return iommufd_test_access_item_destroy(
			ucmd, cmd->id, cmd->destroy_access_pages.access_pages_id);
	case IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT:
		/* Protect _batch_init(), cannot be less than elmsz */
		if (cmd->memory_limit.limit <
		    sizeof(unsigned long) + sizeof(u32))
			return -EINVAL;
		iommufd_test_memory_limit = cmd->memory_limit.limit;
		return 0;
	case IOMMU_TEST_OP_DIRTY:
		return iommufd_test_dirty(ucmd, cmd->id, cmd->dirty.iova,
					  cmd->dirty.length,
					  cmd->dirty.page_size,
					  u64_to_user_ptr(cmd->dirty.uptr),
					  cmd->dirty.flags);
	default:
		return -EOPNOTSUPP;
	}
}

bool iommufd_should_fail(void)
{
	return should_fail(&fail_iommufd, 1);
}
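
/*
 * Usage note (editorial; paths follow the generic fault-injection framework
 * and assume CONFIG_FAULT_INJECTION_DEBUG_FS): fail_iommufd is registered via
 * fault_create_debugfs_attr() in iommufd_test_init() below, so knobs such as
 * /sys/kernel/debug/fail_iommufd/probability and .../times control when
 * iommufd_should_fail() returns true.
 */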

int __init iommufd_test_init(void)
{
	struct platform_device_info pdevinfo = {
		.name = "iommufd_selftest_iommu",
	};
	int rc;

	dbgfs_root =
		fault_create_debugfs_attr("fail_iommufd", NULL, &fail_iommufd);

	selftest_iommu_dev = platform_device_register_full(&pdevinfo);
	if (IS_ERR(selftest_iommu_dev)) {
		rc = PTR_ERR(selftest_iommu_dev);
		goto err_dbgfs;
	}

	rc = bus_register(&iommufd_mock_bus_type.bus);
	if (rc)
		goto err_platform;

	rc = iommu_device_sysfs_add(&mock_iommu_device,
				    &selftest_iommu_dev->dev, NULL, "%s",
				    dev_name(&selftest_iommu_dev->dev));
	if (rc)
		goto err_bus;

	rc = iommu_device_register_bus(&mock_iommu_device, &mock_ops,
				       &iommufd_mock_bus_type.bus,
				       &iommufd_mock_bus_type.nb);
	if (rc)
		goto err_sysfs;
	return 0;

err_sysfs:
	iommu_device_sysfs_remove(&mock_iommu_device);
err_bus:
	bus_unregister(&iommufd_mock_bus_type.bus);
err_platform:
	platform_device_unregister(selftest_iommu_dev);
err_dbgfs:
	debugfs_remove_recursive(dbgfs_root);
	return rc;
}

void iommufd_test_exit(void)
{
	iommu_device_sysfs_remove(&mock_iommu_device);
	iommu_device_unregister_bus(&mock_iommu_device,
				    &iommufd_mock_bus_type.bus,
				    &iommufd_mock_bus_type.nb);
	bus_unregister(&iommufd_mock_bus_type.bus);
	platform_device_unregister(selftest_iommu_dev);
	debugfs_remove_recursive(dbgfs_root);
}