dma-mapping: make the get_required_mask method available unconditionally
author Christoph Hellwig <hch@lst.de>
Thu, 6 Sep 2018 23:27:24 +0000 (19:27 -0400)
committer Christoph Hellwig <hch@lst.de>
Mon, 1 Oct 2018 14:27:00 +0000 (07:27 -0700)
This saves some duplication for ia64, and makes the interface more
general.  In the long run we want each dma_map_ops instance to fill this
out, but this will take a little more prep work.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
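
For context, a minimal sketch of the pattern this change enables; the ops
instance and callback names below are illustrative and not part of this
commit. With the #ifdef around ->get_required_mask gone, any dma_map_ops
instance may fill in the method (as sn_dma_ops now does), and the generic
dma_get_required_mask() dispatches to it, falling back to a default based
on max_pfn:

        #include <linux/dma-mapping.h>

        /*
         * Illustrative only: a bus whose DMA engine can reach all of
         * physical memory reports a full 64-bit required mask.
         */
        static u64 example_get_required_mask(struct device *dev)
        {
                return DMA_BIT_MASK(64);
        }

        static const struct dma_map_ops example_dma_ops = {
                /* ... map/unmap callbacks elided ... */
                .get_required_mask      = example_get_required_mask,
        };
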
arch/ia64/include/asm/dma-mapping.h
arch/ia64/include/asm/machvec.h
arch/ia64/include/asm/machvec_init.h
arch/ia64/include/asm/machvec_sn2.h
arch/ia64/pci/pci.c
arch/ia64/sn/pci/pci_dma.c
drivers/base/platform.c
drivers/pci/controller/vmd.c
include/linux/dma-mapping.h

diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 76e4d6632d68f68d57a83b6d5e44ac855b44c092..522745ae67bb652858563e3175e97c49a0cb64c7 100644
@@ -10,8 +10,6 @@
 #include <linux/scatterlist.h>
 #include <linux/dma-debug.h>
 
-#define ARCH_HAS_DMA_GET_REQUIRED_MASK
-
 extern const struct dma_map_ops *dma_ops;
 extern struct ia64_machine_vector ia64_mv;
 extern void set_iommu_machvec(void);
diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
index 267f4f17019166111a969e8666ce89230e8165a0..5133739966bcfa00570aca667c88d96fe71e771a 100644
@@ -44,7 +44,6 @@ typedef void ia64_mv_kernel_launch_event_t(void);
 
 /* DMA-mapping interface: */
 typedef void ia64_mv_dma_init (void);
-typedef u64 ia64_mv_dma_get_required_mask (struct device *);
 typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
 
 /*
@@ -127,7 +126,6 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 #  define platform_global_tlb_purge    ia64_mv.global_tlb_purge
 #  define platform_tlb_migrate_finish  ia64_mv.tlb_migrate_finish
 #  define platform_dma_init            ia64_mv.dma_init
-#  define platform_dma_get_required_mask ia64_mv.dma_get_required_mask
 #  define platform_dma_get_ops         ia64_mv.dma_get_ops
 #  define platform_irq_to_vector       ia64_mv.irq_to_vector
 #  define platform_local_vector_to_irq ia64_mv.local_vector_to_irq
@@ -171,7 +169,6 @@ struct ia64_machine_vector {
        ia64_mv_global_tlb_purge_t *global_tlb_purge;
        ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
        ia64_mv_dma_init *dma_init;
-       ia64_mv_dma_get_required_mask *dma_get_required_mask;
        ia64_mv_dma_get_ops *dma_get_ops;
        ia64_mv_irq_to_vector *irq_to_vector;
        ia64_mv_local_vector_to_irq *local_vector_to_irq;
@@ -211,7 +208,6 @@ struct ia64_machine_vector {
        platform_global_tlb_purge,              \
        platform_tlb_migrate_finish,            \
        platform_dma_init,                      \
-       platform_dma_get_required_mask,         \
        platform_dma_get_ops,                   \
        platform_irq_to_vector,                 \
        platform_local_vector_to_irq,           \
@@ -286,9 +282,6 @@ extern const struct dma_map_ops *dma_get_ops(struct device *);
 #ifndef platform_dma_get_ops
 # define platform_dma_get_ops          dma_get_ops
 #endif
-#ifndef platform_dma_get_required_mask
-# define  platform_dma_get_required_mask       ia64_dma_get_required_mask
-#endif
 #ifndef platform_irq_to_vector
 # define platform_irq_to_vector                __ia64_irq_to_vector
 #endif
diff --git a/arch/ia64/include/asm/machvec_init.h b/arch/ia64/include/asm/machvec_init.h
index 2b32fd06b7c64c2ad83a00cab46bfbbfa99f7f9b..2aafb69a37874dd5a7f0d0a098a28ed2b0ef8aba 100644
@@ -4,7 +4,6 @@
 
 extern ia64_mv_send_ipi_t ia64_send_ipi;
 extern ia64_mv_global_tlb_purge_t ia64_global_tlb_purge;
-extern ia64_mv_dma_get_required_mask ia64_dma_get_required_mask;
 extern ia64_mv_irq_to_vector __ia64_irq_to_vector;
 extern ia64_mv_local_vector_to_irq __ia64_local_vector_to_irq;
 extern ia64_mv_pci_get_legacy_mem_t ia64_pci_get_legacy_mem;
diff --git a/arch/ia64/include/asm/machvec_sn2.h b/arch/ia64/include/asm/machvec_sn2.h
index ece9fa85be8864330b783ff37985f58370e68c43..b5153d300289724622ae936d560b40a94e471500 100644
@@ -55,7 +55,6 @@ extern ia64_mv_readb_t __sn_readb_relaxed;
 extern ia64_mv_readw_t __sn_readw_relaxed;
 extern ia64_mv_readl_t __sn_readl_relaxed;
 extern ia64_mv_readq_t __sn_readq_relaxed;
-extern ia64_mv_dma_get_required_mask   sn_dma_get_required_mask;
 extern ia64_mv_dma_init                        sn_dma_init;
 extern ia64_mv_migrate_t               sn_migrate;
 extern ia64_mv_kernel_launch_event_t   sn_kernel_launch_event;
@@ -100,7 +99,6 @@ extern ia64_mv_pci_fixup_bus_t               sn_pci_fixup_bus;
 #define platform_pci_get_legacy_mem    sn_pci_get_legacy_mem
 #define platform_pci_legacy_read       sn_pci_legacy_read
 #define platform_pci_legacy_write      sn_pci_legacy_write
-#define platform_dma_get_required_mask sn_dma_get_required_mask
 #define platform_dma_init              sn_dma_init
 #define platform_migrate               sn_migrate
 #define platform_kernel_launch_event    sn_kernel_launch_event
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 7ccc64d5fe3ee09e3243cd54c9f7fc81985ef539..5d71800df4313a13970aa30d311740a118fe466b 100644
@@ -568,32 +568,6 @@ static void __init set_pci_dfl_cacheline_size(void)
        pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
 }
 
-u64 ia64_dma_get_required_mask(struct device *dev)
-{
-       u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
-       u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
-       u64 mask;
-
-       if (!high_totalram) {
-               /* convert to mask just covering totalram */
-               low_totalram = (1 << (fls(low_totalram) - 1));
-               low_totalram += low_totalram - 1;
-               mask = low_totalram;
-       } else {
-               high_totalram = (1 << (fls(high_totalram) - 1));
-               high_totalram += high_totalram - 1;
-               mask = (((u64)high_totalram) << 32) + 0xffffffff;
-       }
-       return mask;
-}
-EXPORT_SYMBOL_GPL(ia64_dma_get_required_mask);
-
-u64 dma_get_required_mask(struct device *dev)
-{
-       return platform_dma_get_required_mask(dev);
-}
-EXPORT_SYMBOL_GPL(dma_get_required_mask);
-
 static int __init pcibios_init(void)
 {
        set_pci_dfl_cacheline_size();
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 74c934a997bb45d5a462feea7c25d0711f2f0159..96eb2567718a29e71dbcf9e91bf01ed1e4ff80d8 100644
@@ -344,11 +344,10 @@ static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
        return 0;
 }
 
-u64 sn_dma_get_required_mask(struct device *dev)
+static u64 sn_dma_get_required_mask(struct device *dev)
 {
        return DMA_BIT_MASK(64);
 }
-EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);
 
 char *sn_pci_get_legacy_mem(struct pci_bus *bus)
 {
@@ -473,6 +472,7 @@ static struct dma_map_ops sn_dma_ops = {
        .sync_sg_for_device     = sn_dma_sync_sg_for_device,
        .mapping_error          = sn_dma_mapping_error,
        .dma_supported          = sn_dma_supported,
+       .get_required_mask      = sn_dma_get_required_mask,
 };
 
 void sn_dma_init(void)
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index dff82a3c2caa90162076a56c7911554d6fb2d09f..cfe22fded980b104ef8f09670901bbe1175667be 100644
@@ -1179,8 +1179,7 @@ int __init platform_bus_init(void)
        return error;
 }
 
-#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
-u64 dma_get_required_mask(struct device *dev)
+static u64 dma_default_get_required_mask(struct device *dev)
 {
        u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
        u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
@@ -1198,6 +1197,16 @@ u64 dma_get_required_mask(struct device *dev)
        }
        return mask;
 }
+
+#ifndef ARCH_HAS_DMA_GET_REQUIRED_MASK
+u64 dma_get_required_mask(struct device *dev)
+{
+       const struct dma_map_ops *ops = get_dma_ops(dev);
+
+       if (ops->get_required_mask)
+               return ops->get_required_mask(dev);
+       return dma_default_get_required_mask(dev);
+}
 EXPORT_SYMBOL_GPL(dma_get_required_mask);
 #endif
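
(Aside, not part of the patch: as a quick sanity check of the fallback
above, assume a machine with 8 GiB of RAM and 16 KB pages, so PAGE_SHIFT
is 14 and max_pfn - 1 is 0x7ffff. Then high_totalram = 0x7ffff >> 18 = 1,
and the computed mask is ((u64)1 << 32) + 0xffffffff = 0x1ffffffff, a
33-bit mask, exactly enough to cover all 8 GiB of memory.)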
 
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index fd2dbd7eed7bca808f44470ba060725acc1ec061..f31ed62d518c0f79c5eeeea081b2df70271e839b 100644
@@ -404,12 +404,10 @@ static int vmd_dma_supported(struct device *dev, u64 mask)
        return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask);
 }
 
-#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
 static u64 vmd_get_required_mask(struct device *dev)
 {
        return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev));
 }
-#endif
 
 static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
 {
@@ -450,9 +448,7 @@ static void vmd_setup_dma_ops(struct vmd_dev *vmd)
        ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device);
        ASSIGN_VMD_DMA_OPS(source, dest, mapping_error);
        ASSIGN_VMD_DMA_OPS(source, dest, dma_supported);
-#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
        ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask);
-#endif
        add_dma_domain(domain);
 }
 #undef ASSIGN_VMD_DMA_OPS
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index d23fc45c820831aa9a214ab696a822b64dfe4d41..562af6b45f230bb27d89e009be2707007c3eeb34 100644
@@ -130,9 +130,7 @@ struct dma_map_ops {
                        enum dma_data_direction direction);
        int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
        int (*dma_supported)(struct device *dev, u64 mask);
-#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
        u64 (*get_required_mask)(struct device *dev);
-#endif
 };
 
 extern const struct dma_map_ops dma_direct_ops;
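
A hedged usage sketch, not taken from this commit (the driver fragment and
names below are hypothetical): dma_get_required_mask() reports the
smallest mask that covers all of system memory, so a driver supporting
both 32-bit and 64-bit descriptor formats can skip the more expensive
wide format on machines where all of RAM sits below 4 GiB.

        #include <linux/dma-mapping.h>
        #include <linux/pci.h>

        /*
         * Hypothetical probe fragment: only pay for 64-bit DMA
         * descriptors when RAM actually extends above 4 GiB.
         */
        static int example_probe(struct pci_dev *pdev)
        {
                u64 mask = DMA_BIT_MASK(32);

                if (dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32))
                        mask = DMA_BIT_MASK(64);

                return dma_set_mask_and_coherent(&pdev->dev, mask);
        }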