Revert "scatterlist: use sg_phys()"
author     Dan Williams <dan.j.williams@intel.com>
           Tue, 15 Dec 2015 20:54:06 +0000 (12:54 -0800)
committer  Dan Williams <dan.j.williams@intel.com>
           Tue, 15 Dec 2015 20:54:06 +0000 (12:54 -0800)
commit db0fa0cb0157 "scatterlist: use sg_phys()" did replacements of
the form:

    -   phys_addr_t phys = page_to_phys(sg_page(s));
    +   phys_addr_t phys = sg_phys(s) & PAGE_MASK;

However, this breaks platforms where sizeof(phys_addr_t) >
sizeof(unsigned long): PAGE_MASK has type unsigned long, so the AND
zero-extends the mask and silently clears the upper bits of a 64-bit
physical address on 32-bit configurations such as ARM LPAE.  Revert
for 4.3 and 4.4 to make room for a combined helper in 4.5.
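
The truncation can be seen in a minimal userspace sketch (an
illustration, not kernel code: phys_addr_t, PAGE_SHIFT, PAGE_SIZE and
PAGE_MASK are re-defined locally, and it assumes a build where
unsigned long is 32 bits, e.g. cc -m32):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t phys_addr_t;            /* 64-bit physical addresses */

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))    /* an unsigned long mask */

    int main(void)
    {
            /* a page above 4 GiB plus an in-page offset */
            phys_addr_t phys = 0x100002345ULL;

            /* With a 32-bit unsigned long, PAGE_MASK is 0xfffff000 and
             * zero-extends to 0x00000000fffff000 in the AND, so the
             * result is 0x2000 rather than the intended 0x100002000. */
            printf("masked: %#llx\n", (unsigned long long)(phys & PAGE_MASK));
            return 0;
    }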

Cc: <stable@vger.kernel.org>
Cc: Jens Axboe <axboe@fb.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Fixes: db0fa0cb0157 ("scatterlist: use sg_phys()")
Suggested-by: Joerg Roedel <joro@8bytes.org>
Reported-by: Vitaly Lavrov <vel21ripn@gmail.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
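
For context, sg_phys() was at the time defined in
include/linux/scatterlist.h approximately as:

    static inline dma_addr_t sg_phys(struct scatterlist *sg)
    {
            return page_to_phys(sg_page(sg)) + sg->offset;
    }

so "sg_phys(s) & PAGE_MASK" was an attempt to mask the offset back off
and recover the page-aligned address that page_to_phys(sg_page(s))
yields directly; the hunks below restore the direct form.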
arch/arm/mm/dma-mapping.c
arch/microblaze/kernel/dma.c
drivers/iommu/intel-iommu.c
drivers/iommu/iommu.c
drivers/staging/android/ion/ion_chunk_heap.c

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e62400e5fb99fdbf864af966e718a98decf85e29..534a60ae282e702d3b06e8dad289d8f109cd80ef 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1521,7 +1521,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
                return -ENOMEM;
 
        for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
-               phys_addr_t phys = sg_phys(s) & PAGE_MASK;
+               phys_addr_t phys = page_to_phys(sg_page(s));
                unsigned int len = PAGE_ALIGN(s->offset + s->length);
 
                if (!is_coherent &&
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index c89da63129545a9c300e61062e9d958d4c786790..bf4dec229437a836ee1829504be7bf603ce73b37 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -61,7 +61,8 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
        /* FIXME this part of code is untested */
        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg);
-               __dma_sync(sg_phys(sg), sg->length, direction);
+               __dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
+                                                       sg->length, direction);
        }
 
        return nents;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f1042daef9ada83e931ae450623ce491ebd55959..ac7387686ddc7b2a7c7757f2cb3fbd003c8a23af 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2159,7 +2159,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                        sg_res = aligned_nrpages(sg->offset, sg->length);
                        sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
                        sg->dma_length = sg->length;
-                       pteval = (sg_phys(sg) & PAGE_MASK) | prot;
+                       pteval = page_to_phys(sg_page(sg)) | prot;
                        phys_pfn = pteval >> VTD_PAGE_SHIFT;
                }
 
@@ -3704,7 +3704,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 
        for_each_sg(sglist, sg, nelems, i) {
                BUG_ON(!sg_page(sg));
-               sg->dma_address = sg_phys(sg);
+               sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
                sg->dma_length = sg->length;
        }
        return nelems;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index abae363c7b9bd8932066e760beea7f76f22e7b8d..0e3b0092ec92c9d4d1dcb06e713edcf99c4a9293 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1430,7 +1430,7 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
        min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
 
        for_each_sg(sg, s, nents, i) {
-               phys_addr_t phys = sg_phys(s);
+               phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
 
                /*
                 * We are mapping on IOMMU page boundaries, so offset within
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 195c41d7bd53b3b6926507cdfea2a8de42d45578..0813163f962f09e328d6fc448238201f84cc55cd 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -81,7 +81,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
 err:
        sg = table->sgl;
        for (i -= 1; i >= 0; i--) {
-               gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
+               gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
                              sg->length);
                sg = sg_next(sg);
        }
@@ -109,7 +109,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
                                                        DMA_BIDIRECTIONAL);
 
        for_each_sg(table->sgl, sg, table->nents, i) {
-               gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
+               gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
                              sg->length);
        }
        chunk_heap->allocated -= allocated_size;
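
A combined helper of the sort the changelog anticipates for 4.5 could
look like the sketch below.  The name sg_phys_aligned() is
hypothetical, for illustration only, and is not the eventual upstream
implementation:

    #include <linux/scatterlist.h>

    /* Hypothetical helper (illustration only): the page-aligned
     * physical address of an sg entry.  Alignment comes from the
     * page itself rather than from an unsigned-long mask, so no high
     * bits are lost when phys_addr_t is wider than unsigned long. */
    static inline phys_addr_t sg_phys_aligned(struct scatterlist *sg)
    {
            return page_to_phys(sg_page(sg));
    }

Call sites such as __map_sg_chunk() above could then use
sg_phys_aligned(s) in place of the open-coded
page_to_phys(sg_page(s)).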