powerpc/dma: use the dma-direct allocator for coherent platforms
author: Christoph Hellwig <hch@lst.de>
Wed, 13 Feb 2019 07:01:28 +0000 (08:01 +0100)
committer: Michael Ellerman <mpe@ellerman.id.au>
Mon, 18 Feb 2019 11:41:04 +0000 (22:41 +1100)
The generic code allows a few nice things such as node local allocations
and dipping into the CMA area.  The lookup of the right zone for a given
dma mask works a little different, but the results should be the same.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Christian Zigotzky <chzigotzky@xenosoft.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/pgtable.h
arch/powerpc/kernel/dma-iommu.c
arch/powerpc/kernel/dma-swiotlb.c
arch/powerpc/kernel/dma.c
arch/powerpc/mm/mem.c

index dad1d27e196d92f0e1a5ca5a2194e91af70197ed..505550fb293566d76c0fddebcc2ecf0c5ed0fc05 100644 (file)
@@ -66,7 +66,6 @@ extern unsigned long empty_zero_page[];
 
 extern pgd_t swapper_pg_dir[];
 
-int dma_pfn_limit_to_zone(u64 pfn_limit);
 extern void paging_init(void);
 
 /*
index 67fbfaa4e3b25765a2f25f926adfa85d12fab9dd..c75ba4e3a50c0bcd1bbf5ab8aae2d4adfb27c41a 100644 (file)
@@ -40,8 +40,7 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
                                      unsigned long attrs)
 {
        if (dma_iommu_alloc_bypass(dev))
-               return __dma_nommu_alloc_coherent(dev, size, dma_handle, flag,
-                               attrs);
+               return dma_direct_alloc(dev, size, dma_handle, flag, attrs);
        return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
                                    dma_handle, dev->coherent_dma_mask, flag,
                                    dev_to_node(dev));
@@ -52,7 +51,7 @@ static void dma_iommu_free_coherent(struct device *dev, size_t size,
                                    unsigned long attrs)
 {
        if (dma_iommu_alloc_bypass(dev))
-               __dma_nommu_free_coherent(dev, size, vaddr, dma_handle, attrs);
+               dma_direct_free(dev, size, vaddr, dma_handle, attrs);
        else
                iommu_free_coherent(get_iommu_table_base(dev), size, vaddr,
                                dma_handle);
index 6d2677b2daa6bae4f3946f820e12f9a5b82ed1a4..3a15a7d945e91a6f8441ffa1f560d038015169ee 100644 (file)
@@ -32,8 +32,8 @@ unsigned int ppc_swiotlb_enable;
  * for everything else.
  */
 const struct dma_map_ops powerpc_swiotlb_dma_ops = {
-       .alloc = __dma_nommu_alloc_coherent,
-       .free = __dma_nommu_free_coherent,
+       .alloc = dma_direct_alloc,
+       .free = dma_direct_free,
        .map_sg = dma_direct_map_sg,
        .unmap_sg = dma_direct_unmap_sg,
        .dma_supported = dma_direct_supported,
index a3546a82f6d771c2b026da5b37f9078a267f7113..f983f8d435a6b39685c57b296e8554c77885f718 100644 (file)
  * default the offset is PCI_DRAM_OFFSET.
  */
 
-static u64 __maybe_unused get_pfn_limit(struct device *dev)
-{
-       u64 pfn = (dev->coherent_dma_mask >> PAGE_SHIFT) + 1;
-
-#ifdef CONFIG_SWIOTLB
-       if (dev->bus_dma_mask && dev->dma_ops == &powerpc_swiotlb_dma_ops)
-               pfn = min_t(u64, pfn, dev->bus_dma_mask >> PAGE_SHIFT);
-#endif
-
-       return pfn;
-}
-
-#ifndef CONFIG_NOT_COHERENT_CACHE
-void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
-                                 dma_addr_t *dma_handle, gfp_t flag,
-                                 unsigned long attrs)
-{
-       void *ret;
-       struct page *page;
-       int node = dev_to_node(dev);
-#ifdef CONFIG_FSL_SOC
-       u64 pfn = get_pfn_limit(dev);
-       int zone;
-
-       /*
-        * This code should be OK on other platforms, but we have drivers that
-        * don't set coherent_dma_mask. As a workaround we just ifdef it. This
-        * whole routine needs some serious cleanup.
-        */
-
-       zone = dma_pfn_limit_to_zone(pfn);
-       if (zone < 0) {
-               dev_err(dev, "%s: No suitable zone for pfn %#llx\n",
-                       __func__, pfn);
-               return NULL;
-       }
-
-       switch (zone) {
-#ifdef CONFIG_ZONE_DMA
-       case ZONE_DMA:
-               flag |= GFP_DMA;
-               break;
-#endif
-       };
-#endif /* CONFIG_FSL_SOC */
-
-       page = alloc_pages_node(node, flag, get_order(size));
-       if (page == NULL)
-               return NULL;
-       ret = page_address(page);
-       memset(ret, 0, size);
-       *dma_handle = phys_to_dma(dev,__pa(ret));
-
-       return ret;
-}
-
-void __dma_nommu_free_coherent(struct device *dev, size_t size,
-                               void *vaddr, dma_addr_t dma_handle,
-                               unsigned long attrs)
-{
-       free_pages((unsigned long)vaddr, get_order(size));
-}
-#endif /* !CONFIG_NOT_COHERENT_CACHE */
-
 int dma_nommu_map_sg(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction direction,
                unsigned long attrs)
@@ -163,8 +99,13 @@ static inline void dma_nommu_sync_single(struct device *dev,
 #endif
 
 const struct dma_map_ops dma_nommu_ops = {
+#ifdef CONFIG_NOT_COHERENT_CACHE
        .alloc                          = __dma_nommu_alloc_coherent,
        .free                           = __dma_nommu_free_coherent,
+#else
+       .alloc                          = dma_direct_alloc,
+       .free                           = dma_direct_free,
+#endif
        .map_sg                         = dma_nommu_map_sg,
        .unmap_sg                       = dma_nommu_unmap_sg,
        .dma_supported                  = dma_direct_supported,
index 33cc6f676fa6224b76fe0f37399a30b2d852b449..a10ee3645a6c8520f50a4d3b45e0a9fac5529a23 100644 (file)
@@ -69,15 +69,12 @@ pte_t *kmap_pte;
 EXPORT_SYMBOL(kmap_pte);
 pgprot_t kmap_prot;
 EXPORT_SYMBOL(kmap_prot);
-#define TOP_ZONE ZONE_HIGHMEM
 
 static inline pte_t *virt_to_kpte(unsigned long vaddr)
 {
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
 }
-#else
-#define TOP_ZONE ZONE_NORMAL
 #endif
 
 int page_is_ram(unsigned long pfn)
@@ -261,25 +258,6 @@ static int __init mark_nonram_nosave(void)
  */
 static unsigned long max_zone_pfns[MAX_NR_ZONES];
 
-/*
- * Find the least restrictive zone that is entirely below the
- * specified pfn limit.  Returns < 0 if no suitable zone is found.
- *
- * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit
- * systems -- the DMA limit can be higher than any possible real pfn.
- */
-int dma_pfn_limit_to_zone(u64 pfn_limit)
-{
-       int i;
-
-       for (i = TOP_ZONE; i >= 0; i--) {
-               if (max_zone_pfns[i] <= pfn_limit)
-                       return i;
-       }
-
-       return -EPERM;
-}
-
 /*
  * paging_init() sets up the page tables - in fact we've already done this.
  */