arch/arm/xen/mm.c
#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

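/*
 * Allocate pages for the swiotlb bounce buffer.  If any memory region
 * starts below the 32-bit boundary, request __GFP_DMA so the
 * allocation is reachable by devices limited to 32-bit DMA.
 */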
unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
        struct memblock_region *reg;
        gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;

        for_each_memblock(memory, reg) {
                if (reg->base < (phys_addr_t)0xffffffff) {
                        flags |= __GFP_DMA;
                        break;
                }
        }
        return __get_free_pages(flags, order);
}

enum dma_cache_op {
        DMA_UNMAP,
        DMA_MAP,
};
static bool hypercall_cflush = false;

/* functions called by SWIOTLB */

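/*
 * Ask Xen to perform the cache maintenance for us: walk the buffer in
 * XEN_PAGE_SIZE chunks and issue a GNTTABOP_cache_flush hypercall
 * (clean or invalidate, depending on the direction) for each chunk.
 */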
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
        size_t size, enum dma_data_direction dir, enum dma_cache_op op)
{
        struct gnttab_cache_flush cflush;
        unsigned long xen_pfn;
        size_t left = size;

        xen_pfn = (handle >> XEN_PAGE_SHIFT) + offset / XEN_PAGE_SIZE;
        offset %= XEN_PAGE_SIZE;

        do {
                size_t len = left;

                /* buffers in highmem or foreign pages cannot cross page
                 * boundaries */
                if (len + offset > XEN_PAGE_SIZE)
                        len = XEN_PAGE_SIZE - offset;

                cflush.op = 0;
                cflush.a.dev_bus_addr = xen_pfn << XEN_PAGE_SHIFT;
                cflush.offset = offset;
                cflush.length = len;

                if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
                        cflush.op = GNTTAB_CACHE_INVAL;
                if (op == DMA_MAP) {
                        if (dir == DMA_FROM_DEVICE)
                                cflush.op = GNTTAB_CACHE_INVAL;
                        else
                                cflush.op = GNTTAB_CACHE_CLEAN;
                }
                if (cflush.op)
                        HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

                offset = 0;
                xen_pfn++;
                left -= len;
        } while (left);
}

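/*
 * Helpers that split the DMA handle into a page-aligned address and an
 * in-page offset before handing it to dma_cache_maint().
 */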
static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
}

static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
}

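/*
 * Cache maintenance is needed only for non-coherent devices, and is
 * skipped when the caller passes DMA_ATTR_SKIP_CPU_SYNC.
 */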
void __xen_dma_map_page(struct device *hwdev, struct page *page,
             dma_addr_t dev_addr, unsigned long offset, size_t size,
             enum dma_data_direction dir, unsigned long attrs)
{
        if (is_device_dma_coherent(hwdev))
                return;
        if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                return;

        __xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
}

void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        if (is_device_dma_coherent(hwdev))
                return;
        if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                return;

        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_cpu(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        if (is_device_dma_coherent(hwdev))
                return;
        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_device(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        if (is_device_dma_coherent(hwdev))
                return;
        __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}

bool xen_arch_need_swiotlb(struct device *dev,
                           phys_addr_t phys,
                           dma_addr_t dev_addr)
{
        unsigned int xen_pfn = XEN_PFN_DOWN(phys);
        unsigned int bfn = XEN_PFN_DOWN(dev_addr);

        /*
         * The swiotlb buffer should be used if
         *      - Xen doesn't have the cache flush hypercall
         *      - The Linux page refers to foreign memory
         *      - The device doesn't support coherent DMA requests
         *
         * The Linux page may span multiple Xen pages, although it's not
         * possible to have a mix of local and foreign Xen pages.
         * Furthermore, range_straddles_page_boundary() already checks
         * whether the buffer is physically contiguous in host RAM.
         *
         * Therefore we only need to check the first Xen page to know if we
         * require a bounce buffer because the device doesn't support coherent
         * memory and we are not able to flush the cache.
         */
        return (!hypercall_cflush && (xen_pfn != bfn) &&
                !is_device_dma_coherent(dev));
}

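/*
 * Dom0 is assumed to be mapped 1:1, so the region is already
 * contiguous from the device's point of view; just hand back the
 * physical address as the DMA handle.
 */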
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
                                 unsigned int address_bits,
                                 dma_addr_t *dma_handle)
{
        if (!xen_initial_domain())
                return -EINVAL;

        /* we assume that dom0 is mapped 1:1 for now */
        *dma_handle = pstart;
        return 0;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);

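/* Nothing was rearranged on creation, so there is nothing to undo here. */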
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

const struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL(xen_dma_ops);

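/*
 * For dom0, install the swiotlb-xen DMA ops and probe for the
 * GNTTABOP_cache_flush hypercall with a zero-length flush: any result
 * other than -ENOSYS means the hypercall is implemented.
 */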
int __init xen_mm_init(void)
{
        struct gnttab_cache_flush cflush;

        if (!xen_initial_domain())
                return 0;
        xen_swiotlb_init(1, false);
        xen_dma_ops = &xen_swiotlb_dma_ops;

        cflush.op = 0;
        cflush.a.dev_bus_addr = 0;
        cflush.offset = 0;
        cflush.length = 0;
        if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
                hypercall_cflush = true;
        return 0;
}
arch_initcall(xen_mm_init);