arch/arm64/mm/dma-mapping.c
/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/memblock.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/pci.h>

#include <asm/cacheflush.h>

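/*
 * Non-coherent devices, and any mapping explicitly requested as
 * write-combine, get a Normal non-cacheable pgprot for userspace
 * mappings; coherent devices keep the cacheable protection bits.
 */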
pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
                unsigned long attrs)
{
        if (!dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_WRITE_COMBINE))
                return pgprot_writecombine(prot);
        return prot;
}

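/*
 * Cache maintenance for streaming DMA: clean/invalidate the CPU-side alias
 * of the buffer around device accesses, according to the transfer direction.
 */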
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
{
        __dma_map_area(phys_to_virt(paddr), size, dir);
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
{
        __dma_unmap_area(phys_to_virt(paddr), size, dir);
}

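/*
 * Flush a freshly allocated coherent buffer out of the CPU caches before it
 * is handed out through a non-cacheable alias.
 */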
void arch_dma_prep_coherent(struct page *page, size_t size)
{
        __dma_flush_area(page_address(page), size);
}

#ifdef CONFIG_IOMMU_DMA
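/*
 * Helpers used by the IOMMU mmap/get_sgtable paths below for buffers that
 * are physically contiguous (e.g. DMA_ATTR_FORCE_CONTIGUOUS allocations).
 */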
static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
                                      struct page *page, size_t size)
{
        int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

        if (!ret)
                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);

        return ret;
}

static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
                              unsigned long pfn, size_t size)
{
        int ret = -ENXIO;
        unsigned long nr_vma_pages = vma_pages(vma);
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;

        if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      pfn + off,
                                      vma->vm_end - vma->vm_start,
                                      vma->vm_page_prot);
        }

        return ret;
}
#endif /* CONFIG_IOMMU_DMA */

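/*
 * Sanity-check the compile-time DMA alignment against the cache writeback
 * granule reported by CTR_EL0, and set up the atomic pool used for
 * non-blocking allocations by non-coherent devices.
 */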
static int __init arm64_dma_init(void)
{
        WARN_TAINT(ARCH_DMA_MINALIGN < cache_line_size(),
                   TAINT_CPU_OUT_OF_SPEC,
                   "ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
                   ARCH_DMA_MINALIGN, cache_line_size());
        return dma_atomic_pool_init(GFP_DMA32, __pgprot(PROT_NORMAL_NC));
}
arch_initcall(arm64_dma_init);

#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
        __dma_flush_area(virt, PAGE_SIZE);
}

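/*
 * Coherent allocation via the IOMMU: atomic callers get a physically
 * contiguous buffer (lowmem pages or the atomic pool), FORCE_CONTIGUOUS
 * callers get CMA memory remapped with the right attributes, and everyone
 * else gets an IOVA-contiguous scatter of pages from iommu_dma_alloc().
 */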
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                                 dma_addr_t *handle, gfp_t gfp,
                                 unsigned long attrs)
{
        bool coherent = dev_is_dma_coherent(dev);
        int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
        size_t iosize = size;
        void *addr;

        if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
                return NULL;

        size = PAGE_ALIGN(size);

        /*
         * Some drivers rely on this, and we probably don't want the
         * possibility of stale kernel data being read by devices anyway.
         */
        gfp |= __GFP_ZERO;

        if (!gfpflags_allow_blocking(gfp)) {
                struct page *page;
                /*
                 * In atomic context we can't remap anything, so we'll only
                 * get the virtually contiguous buffer we need by way of a
                 * physically contiguous allocation.
                 */
                if (coherent) {
                        page = alloc_pages(gfp, get_order(size));
                        addr = page ? page_address(page) : NULL;
                } else {
                        addr = dma_alloc_from_pool(size, &page, gfp);
                }
                if (!addr)
                        return NULL;

                *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
                if (*handle == DMA_MAPPING_ERROR) {
                        if (coherent)
                                __free_pages(page, get_order(size));
                        else
                                dma_free_from_pool(addr, size);
                        addr = NULL;
                }
        } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
                struct page *page;

                page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
                                        get_order(size), gfp & __GFP_NOWARN);
                if (!page)
                        return NULL;

                *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
                if (*handle == DMA_MAPPING_ERROR) {
                        dma_release_from_contiguous(dev, page,
                                                    size >> PAGE_SHIFT);
                        return NULL;
                }
                addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
                                                   prot,
                                                   __builtin_return_address(0));
                if (addr) {
                        if (!coherent)
                                __dma_flush_area(page_to_virt(page), iosize);
                        memset(addr, 0, size);
                } else {
                        iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
                        dma_release_from_contiguous(dev, page,
                                                    size >> PAGE_SHIFT);
                }
        } else {
                pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
                struct page **pages;

                pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
                                        handle, flush_page);
                if (!pages)
                        return NULL;

                addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
                                              __builtin_return_address(0));
                if (!addr)
                        iommu_dma_free(dev, pages, iosize, handle);
        }
        return addr;
}

static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                               dma_addr_t handle, unsigned long attrs)
{
        size_t iosize = size;

        size = PAGE_ALIGN(size);
        /*
         * @cpu_addr will be one of 4 things depending on how it was allocated:
         * - A remapped array of pages for contiguous allocations.
         * - A remapped array of pages from iommu_dma_alloc(), for all
         *   non-atomic allocations.
         * - A non-cacheable alias from the atomic pool, for atomic
         *   allocations by non-coherent devices.
         * - A normal lowmem address, for atomic allocations by
         *   coherent devices.
         * Hence how dodgy the below logic looks...
         */
        if (dma_in_atomic_pool(cpu_addr, size)) {
                iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
                dma_free_from_pool(cpu_addr, size);
        } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                struct page *page = vmalloc_to_page(cpu_addr);

                iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
                dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else if (is_vmalloc_addr(cpu_addr)) {
                struct vm_struct *area = find_vm_area(cpu_addr);

                if (WARN_ON(!area || !area->pages))
                        return;
                iommu_dma_free(dev, area->pages, iosize, &handle);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else {
                iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
                __free_pages(virt_to_page(cpu_addr), get_order(size));
        }
}

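/*
 * Map a coherent allocation into userspace: FORCE_CONTIGUOUS buffers are
 * remapped by pfn, everything else goes through the vmalloc-area page array.
 */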
static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                              void *cpu_addr, dma_addr_t dma_addr, size_t size,
                              unsigned long attrs)
{
        struct vm_struct *area;
        int ret;

        vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                /*
                 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
                 * hence in the vmalloc space.
                 */
                unsigned long pfn = vmalloc_to_pfn(cpu_addr);
                return __swiotlb_mmap_pfn(vma, pfn, size);
        }

        area = find_vm_area(cpu_addr);
        if (WARN_ON(!area || !area->pages))
                return -ENXIO;

        return iommu_dma_mmap(area->pages, size, vma);
}

static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
                               void *cpu_addr, dma_addr_t dma_addr,
                               size_t size, unsigned long attrs)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct vm_struct *area = find_vm_area(cpu_addr);

        if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                /*
                 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
                 * hence in the vmalloc space.
                 */
                struct page *page = vmalloc_to_page(cpu_addr);
                return __swiotlb_get_sgtable_page(sgt, page, size);
        }

        if (WARN_ON(!area || !area->pages))
                return -ENXIO;

        return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
                                         GFP_KERNEL);
}

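/*
 * Streaming DMA sync for single mappings: coherent devices need no cache
 * maintenance; otherwise translate the IOVA back to a physical address and
 * perform the appropriate clean/invalidate.
 */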
static void __iommu_sync_single_for_cpu(struct device *dev,
                                        dma_addr_t dev_addr, size_t size,
                                        enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (dev_is_dma_coherent(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
        arch_sync_dma_for_cpu(dev, phys, size, dir);
}

static void __iommu_sync_single_for_device(struct device *dev,
                                           dma_addr_t dev_addr, size_t size,
                                           enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (dev_is_dma_coherent(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
        arch_sync_dma_for_device(dev, phys, size, dir);
}

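/*
 * Map/unmap a single page through the IOMMU, with CPU cache maintenance for
 * non-coherent devices unless the caller asked to skip it.
 */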
static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        bool coherent = dev_is_dma_coherent(dev);
        int prot = dma_info_to_prot(dir, coherent, attrs);
        dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

        if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
            dev_addr != DMA_MAPPING_ERROR)
                __dma_map_area(page_address(page) + offset, size, dir);

        return dev_addr;
}

static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir,
                               unsigned long attrs)
{
        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

        iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}

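/*
 * Scatter-gather variants: walk the list and apply the same per-buffer cache
 * maintenance, then hand the list to the dma-iommu layer for IOVA mapping.
 */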
static void __iommu_sync_sg_for_cpu(struct device *dev,
                                    struct scatterlist *sgl, int nelems,
                                    enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (dev_is_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
}

static void __iommu_sync_sg_for_device(struct device *dev,
                                       struct scatterlist *sgl, int nelems,
                                       enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (dev_is_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}

static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                int nelems, enum dma_data_direction dir,
                                unsigned long attrs)
{
        bool coherent = dev_is_dma_coherent(dev);

        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_sg_for_device(dev, sgl, nelems, dir);

        return iommu_dma_map_sg(dev, sgl, nelems,
                                dma_info_to_prot(dir, coherent, attrs));
}

static void __iommu_unmap_sg_attrs(struct device *dev,
                                   struct scatterlist *sgl, int nelems,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

        iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}

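/*
 * dma_map_ops installed for devices that end up with an IOMMU DMA domain.
 * Illustrative call flow (not from this file): once arch_setup_dma_ops()
 * has selected these ops for a device, a driver's ordinary
 * dma_alloc_coherent(dev, size, &handle, GFP_KERNEL) call is dispatched
 * through them to __iommu_alloc_attrs() above.
 */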
static const struct dma_map_ops iommu_dma_ops = {
        .alloc = __iommu_alloc_attrs,
        .free = __iommu_free_attrs,
        .mmap = __iommu_mmap_attrs,
        .get_sgtable = __iommu_get_sgtable,
        .map_page = __iommu_map_page,
        .unmap_page = __iommu_unmap_page,
        .map_sg = __iommu_map_sg_attrs,
        .unmap_sg = __iommu_unmap_sg_attrs,
        .sync_single_for_cpu = __iommu_sync_single_for_cpu,
        .sync_single_for_device = __iommu_sync_single_for_device,
        .sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
        .sync_sg_for_device = __iommu_sync_sg_for_device,
        .map_resource = iommu_dma_map_resource,
        .unmap_resource = iommu_dma_unmap_resource,
};

static int __init __iommu_dma_init(void)
{
        return iommu_dma_init();
}
arch_initcall(__iommu_dma_init);

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                                  const struct iommu_ops *ops)
{
        struct iommu_domain *domain;

        if (!ops)
                return;

        /*
         * The IOMMU core code allocates the default DMA domain, which the
         * underlying IOMMU driver needs to support via the dma-iommu layer.
         */
        domain = iommu_get_domain_for_dev(dev);

        if (!domain)
                goto out_err;

        if (domain->type == IOMMU_DOMAIN_DMA) {
                if (iommu_dma_init_domain(domain, dma_base, size, dev))
                        goto out_err;

                dev->dma_ops = &iommu_dma_ops;
        }

        return;

out_err:
        pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
                dev_name(dev));
}

void arch_teardown_dma_ops(struct device *dev)
{
        dev->dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                                  const struct iommu_ops *iommu)
{ }

#endif  /* CONFIG_IOMMU_DMA */

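/*
 * Called when a device is added: record whether it is cache-coherent,
 * install the IOMMU DMA ops if it sits behind an IOMMU, and let Xen
 * override the ops when running as the initial domain.
 */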
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        const struct iommu_ops *iommu, bool coherent)
{
        dev->dma_coherent = coherent;
        __iommu_setup_dma_ops(dev, dma_base, size, iommu);

#ifdef CONFIG_XEN
        if (xen_initial_domain())
                dev->dma_ops = xen_dma_ops;
#endif
}