drm: Implement drm_need_swiotlb() in drm_cache.c
author: Thomas Zimmermann <tzimmermann@suse.de>
Tue, 12 Jan 2021 08:10:31 +0000 (09:10 +0100)
committer: Thomas Zimmermann <tzimmermann@suse.de>
Wed, 13 Jan 2021 13:22:29 +0000 (14:22 +0100)
The function is declared in drm_cache.h. I also removed the curly
braces from the for loop to adhere to kernel coding style.

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210112081035.6882-3-tzimmermann@suse.de
drivers/gpu/drm/drm_cache.c
drivers/gpu/drm/drm_memory.c

index 0fe3c496002a904aa9bd95e19593812dcd16c833..49551a7fa22f1909db33a67499293caeef898dbf 100644 (file)
@@ -30,6 +30,7 @@
 
 #include <linux/export.h>
 #include <linux/highmem.h>
+#include <xen/xen.h>
 
 #include <drm/drm_cache.h>
 
@@ -176,3 +177,34 @@ drm_clflush_virt_range(void *addr, unsigned long length)
 #endif
 }
 EXPORT_SYMBOL(drm_clflush_virt_range);
+
+bool drm_need_swiotlb(int dma_bits)
+{
+       struct resource *tmp;
+       resource_size_t max_iomem = 0;
+
+       /*
+        * Xen paravirtual hosts require swiotlb regardless of requested dma
+        * transfer size.
+        *
+        * NOTE: Really, what it requires is use of the dma_alloc_coherent
+        *       allocator used in ttm_dma_populate() instead of
+        *       ttm_populate_and_map_pages(), which bounce buffers so much in
+        *       Xen it leads to swiotlb buffer exhaustion.
+        */
+       if (xen_pv_domain())
+               return true;
+
+       /*
+        * Enforce dma_alloc_coherent when memory encryption is active as well
+        * for the same reasons as for Xen paravirtual hosts.
+        */
+       if (mem_encrypt_active())
+               return true;
+
+       for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling)
+               max_iomem = max(max_iomem,  tmp->end);
+
+       return max_iomem > ((u64)1 << dma_bits);
+}
+EXPORT_SYMBOL(drm_need_swiotlb);
index f4f2bffdd5bdbb52fc1f6b3cab15331cff5c0b1e..e4f20a2eb6e793163756a22c1666b5ee894f9f78 100644 (file)
@@ -37,7 +37,6 @@
 #include <linux/highmem.h>
 #include <linux/pci.h>
 #include <linux/vmalloc.h>
-#include <xen/xen.h>
 
 #include <drm/drm_agpsupport.h>
 #include <drm/drm_cache.h>
@@ -138,35 +137,3 @@ void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
                iounmap(map->handle);
 }
 EXPORT_SYMBOL(drm_legacy_ioremapfree);
-
-bool drm_need_swiotlb(int dma_bits)
-{
-       struct resource *tmp;
-       resource_size_t max_iomem = 0;
-
-       /*
-        * Xen paravirtual hosts require swiotlb regardless of requested dma
-        * transfer size.
-        *
-        * NOTE: Really, what it requires is use of the dma_alloc_coherent
-        *       allocator used in ttm_dma_populate() instead of
-        *       ttm_populate_and_map_pages(), which bounce buffers so much in
-        *       Xen it leads to swiotlb buffer exhaustion.
-        */
-       if (xen_pv_domain())
-               return true;
-
-       /*
-        * Enforce dma_alloc_coherent when memory encryption is active as well
-        * for the same reasons as for Xen paravirtual hosts.
-        */
-       if (mem_encrypt_active())
-               return true;
-
-       for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) {
-               max_iomem = max(max_iomem,  tmp->end);
-       }
-
-       return max_iomem > ((u64)1 << dma_bits);
-}
-EXPORT_SYMBOL(drm_need_swiotlb);