iommu/dma: Use for_each_sg in iommu_dma_alloc
author Christoph Hellwig <hch@lst.de>
Mon, 20 May 2019 07:29:28 +0000 (09:29 +0200)
committer Joerg Roedel <jroedel@suse.de>
Mon, 27 May 2019 15:31:10 +0000 (17:31 +0200)
arch_dma_prep_coherent can handle physically contiguous ranges larger
than PAGE_SIZE just fine, which means we don't need a page-based
iterator.
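
For context, for_each_sg() visits one scatterlist entry per iteration,
following chained tables through sg_next(); its definition in
include/linux/scatterlist.h is roughly:

	#define for_each_sg(sglist, sg, nr, __i)			\
		for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))

Each entry describes one physically contiguous segment whose length
(sg->length) may span many pages, so the loop in the diff below issues one
arch_dma_prep_coherent() call per segment rather than one per page.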

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/dma-iommu.c

index aac12433ffef247f1ee50069d809e0e462cab2a1..9b7f120d738101ea6721555771be55ef2564b8e3 100644 (file)
@@ -606,15 +606,11 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
                goto out_free_iova;
 
        if (!(prot & IOMMU_CACHE)) {
-               struct sg_mapping_iter miter;
-               /*
-                * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
-                * sufficient here, so skip it by using the "wrong" direction.
-                */
-               sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
-               while (sg_miter_next(&miter))
-                       arch_dma_prep_coherent(miter.page, PAGE_SIZE);
-               sg_miter_stop(&miter);
+               struct scatterlist *sg;
+               int i;
+
+               for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
+                       arch_dma_prep_coherent(sg_page(sg), sg->length);
        }
 
        if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
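
arch_dma_prep_coherent() (then declared in include/linux/dma-noncoherent.h)
has the prototype:

	void arch_dma_prep_coherent(struct page *page, size_t size);

and each architecture implements it as cache writeback/invalidation over a
whole physically contiguous range. As an illustrative example (numbers not
from the commit): with 4 KiB pages, flushing a 64 KiB contiguous segment
previously took sixteen PAGE_SIZE-sized calls through the sg_mapping_iter
(whose SG_MITER_FROM_SG "wrong direction" trick existed only to suppress the
iterator's own, insufficient flushing), whereas the for_each_sg loop issues a
single call covering sg->length bytes.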