Merge branch 'stable/for-linus-4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb
author     Linus Torvalds <torvalds@linux-foundation.org>
           Tue, 7 Mar 2017 18:23:17 +0000 (10:23 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Tue, 7 Mar 2017 18:23:17 +0000 (10:23 -0800)
Pull swiotlb updates from Konrad Rzeszutek Wilk:
 "Two tiny implementations of the DMA API for callback in ARM (for Xen)"

* 'stable/for-linus-4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb:
  swiotlb-xen: implement xen_swiotlb_get_sgtable callback
  swiotlb-xen: implement xen_swiotlb_dma_mmap callback

arch/arm/xen/mm.c
drivers/xen/swiotlb-xen.c
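
Neither of the two new callbacks is invoked by drivers directly; both sit behind the generic DMA API. As a minimal sketch (hypothetical code, not part of this series; foo_drv_mmap and its parameter list are made up, and the buffer is assumed to come from dma_alloc_coherent() in the current domain), the mmap path that ends up in xen_swiotlb_dma_mmap() on a Xen/ARM guest looks roughly like this:

  #include <linux/dma-mapping.h>
  #include <linux/mm.h>

  /* Hypothetical driver helper -- illustrative only. */
  static int foo_drv_mmap(struct device *dev, struct vm_area_struct *vma,
                          void *cpu_addr, dma_addr_t dma_handle, size_t size)
  {
          /*
           * dma_mmap_coherent() dispatches through the device's dma_map_ops;
           * with this series applied, on Xen/ARM that is xen_swiotlb_dma_mmap().
           */
          return dma_mmap_coherent(dev, vma, cpu_addr, dma_handle, size);
  }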

diff --combined arch/arm/xen/mm.c
index ce18c91b50a1cbac3fb6d38af60c63af9b031185,76ea48a614e1631c1a0bab9e3913c133eaefd9ea..f0325d96b97aed734f86deb3a9f5b3266b5a16ed
@@@ -182,10 -182,10 +182,10 @@@ void xen_destroy_contiguous_region(phys
  }
  EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
  
 -struct dma_map_ops *xen_dma_ops;
 +const struct dma_map_ops *xen_dma_ops;
  EXPORT_SYMBOL(xen_dma_ops);
  
 -static struct dma_map_ops xen_swiotlb_dma_ops = {
 +static const struct dma_map_ops xen_swiotlb_dma_ops = {
        .alloc = xen_swiotlb_alloc_coherent,
        .free = xen_swiotlb_free_coherent,
        .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
        .unmap_page = xen_swiotlb_unmap_page,
        .dma_supported = xen_swiotlb_dma_supported,
        .set_dma_mask = xen_swiotlb_set_dma_mask,
+       .mmap = xen_swiotlb_dma_mmap,
+       .get_sgtable = xen_swiotlb_get_sgtable,
  };
  
  int __init xen_mm_init(void)
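
For context, the ops table patched above is what the generic DMA glue consults when a driver calls dma_mmap_coherent() or dma_get_sgtable(). A simplified, non-verbatim sketch of that dispatch, assuming the usual get_dma_ops() helper (the real code lives in include/linux/dma-mapping.h and the arch glue):

  #include <linux/dma-mapping.h>
  #include <linux/errno.h>

  /* Simplified sketch of the generic dispatch -- not verbatim kernel code. */
  static inline int sketch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                                    void *cpu_addr, dma_addr_t dma_addr, size_t size)
  {
          const struct dma_map_ops *ops = get_dma_ops(dev); /* xen_dma_ops on Xen/ARM */

          if (ops && ops->mmap)   /* now populated: xen_swiotlb_dma_mmap */
                  return ops->mmap(dev, vma, cpu_addr, dma_addr, size, 0);
          return -ENXIO;
  }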
diff --combined drivers/xen/swiotlb-xen.c
index f8afc6dcc29f2769694308092a4b543e5e0bed49,23e30b4e1fb6004c92ba4ff0e4e15611966e5ef3..e8cef1ad0fe31e0139903399d70730c7eafdc399
@@@ -414,9 -414,9 +414,9 @@@ dma_addr_t xen_swiotlb_map_page(struct 
        if (map == SWIOTLB_MAP_ERROR)
                return DMA_ERROR_CODE;
  
 +      dev_addr = xen_phys_to_bus(map);
        xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
                                        dev_addr, map & ~PAGE_MASK, size, dir, attrs);
 -      dev_addr = xen_phys_to_bus(map);
  
        /*
         * Ensure that the address returned is DMA'ble
@@@ -575,14 -575,13 +575,14 @@@ xen_swiotlb_map_sg_attrs(struct device 
                                sg_dma_len(sgl) = 0;
                                return 0;
                        }
 +                      dev_addr = xen_phys_to_bus(map);
                        xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
                                                dev_addr,
                                                map & ~PAGE_MASK,
                                                sg->length,
                                                dir,
                                                attrs);
 -                      sg->dma_address = xen_phys_to_bus(map);
 +                      sg->dma_address = dev_addr;
                } else {
                        /* we are not interested in the dma_addr returned by
                         * xen_dma_map_page, only in the potential cache flushes executed
@@@ -681,3 -680,50 +681,50 @@@ xen_swiotlb_set_dma_mask(struct device 
        return 0;
  }
  EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
+ /*
+  * Create userspace mapping for the DMA-coherent memory.
+  * This function should be called with the pages from the current domain only,
+  * passing pages mapped from other domains would lead to memory corruption.
+  */
+ int
+ xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+                    void *cpu_addr, dma_addr_t dma_addr, size_t size,
+                    unsigned long attrs)
+ {
+ #if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+       if (__generic_dma_ops(dev)->mmap)
+               return __generic_dma_ops(dev)->mmap(dev, vma, cpu_addr,
+                                                   dma_addr, size, attrs);
+ #endif
+       return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+ }
+ EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap);
+ /*
+  * This function should be called with the pages from the current domain only,
+  * passing pages mapped from other domains would lead to memory corruption.
+  */
+ int
+ xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
+                       void *cpu_addr, dma_addr_t handle, size_t size,
+                       unsigned long attrs)
+ {
+ #if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+       if (__generic_dma_ops(dev)->get_sgtable) {
+ #if 0
+       /*
+        * This check verifies that the page belongs to the current domain and
+        * is not one mapped from another domain.
+        * This check is for debug only, and should not go to production build
+        */
+               unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
+               BUG_ON (!page_is_ram(bfn));
+ #endif
+               return __generic_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
+                                                          handle, size, attrs);
+       }
+ #endif
+       return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
+ }
+ EXPORT_SYMBOL_GPL(xen_swiotlb_get_sgtable);
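
A hedged usage sketch of the second callback: a hypothetical in-domain driver (foo_export_sgt and its debug print are made up) allocating a coherent buffer and walking the sg_table that dma_get_sgtable() builds, which on Xen/ARM now goes through xen_swiotlb_get_sgtable():

  #include <linux/dma-mapping.h>
  #include <linux/scatterlist.h>

  /* Hypothetical consumer -- illustrative only. */
  static int foo_export_sgt(struct device *dev, size_t size)
  {
          struct sg_table sgt;
          struct scatterlist *sg;
          dma_addr_t dma_handle;
          void *cpu_addr;
          int i, ret;

          cpu_addr = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
          if (!cpu_addr)
                  return -ENOMEM;

          /* Dispatches to xen_swiotlb_get_sgtable() on Xen/ARM. */
          ret = dma_get_sgtable(dev, &sgt, cpu_addr, dma_handle, size);
          if (!ret) {
                  for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
                          pr_debug("seg %d: page %p len %u\n",
                                   i, sg_page(sg), sg->length);
                  sg_free_table(&sgt);
          }

          dma_free_coherent(dev, size, cpu_addr, dma_handle);
          return ret;
  }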