arch/mips/mm/dma-default.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/dma-contiguous.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>

#include <dma-coherence.h>

#if defined(CONFIG_DMA_MAYBE_COHERENT) && !defined(CONFIG_DMA_PERDEV_COHERENT)
/* User defined DMA coherency from command line. */
enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio = 0;  /* Actual hardware supported DMA coherency setting. */

static int __init setcoherentio(char *str)
{
        coherentio = IO_COHERENCE_ENABLED;
        pr_info("Hardware DMA cache coherency (command line)\n");
        return 0;
}
early_param("coherentio", setcoherentio);

static int __init setnocoherentio(char *str)
{
        coherentio = IO_COHERENCE_DISABLED;
        pr_info("Software DMA cache coherency (command line)\n");
        return 0;
}
early_param("nocoherentio", setnocoherentio);
#endif
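
/*
 * Example (illustrative): on a CONFIG_DMA_MAYBE_COHERENT platform the
 * coherency mode can be forced from the kernel command line, e.g.
 *
 *      coherentio      - assume hardware-maintained DMA coherency
 *      nocoherentio    - use software cache maintenance for DMA
 *
 * With neither parameter, coherentio stays at IO_COHERENCE_DEFAULT and
 * the decision is left to platform code.
 */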

static inline struct page *dma_addr_to_page(struct device *dev,
        dma_addr_t dma_addr)
{
        return pfn_to_page(
                plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
}

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
 * speculatively fill random cachelines with stale data at any time,
 * requiring an extra flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this
 * condition.  However this function is only called on non-I/O-coherent
 * systems and only the R10000 and R12000 are used in such systems, the
 * SGI IP28 Indigo² and the SGI IP32 aka O2 respectively.
 */
static inline bool cpu_needs_post_dma_flush(struct device *dev)
{
        if (plat_device_is_coherent(dev))
                return false;

        switch (boot_cpu_type()) {
        case CPU_R10000:
        case CPU_R12000:
        case CPU_BMIPS5000:
                return true;

        default:
                /*
                 * Presence of MAARs suggests that the CPU supports
                 * speculatively prefetching data, and therefore requires
                 * the post-DMA flush/invalidate.
                 */
                return cpu_has_maar;
        }
}
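
/*
 * Concretely: for a DMA_FROM_DEVICE transfer on one of the CPUs above, a
 * speculative fill between map and unmap can drag stale cachelines
 * covering the buffer back into the cache, so the unmap/sync-for-cpu
 * paths below must invalidate those lines again before the CPU reads
 * what the device wrote.
 */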

static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
        gfp_t dma_flag;

#ifdef CONFIG_ISA
        if (dev == NULL)
                dma_flag = __GFP_DMA;
        else
#endif
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
             if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(32))
                        dma_flag = __GFP_DMA;
        else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
                        dma_flag = __GFP_DMA32;
        else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
             if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(64))
                dma_flag = __GFP_DMA32;
        else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
             if (dev == NULL ||
                 dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
                dma_flag = __GFP_DMA;
        else
#endif
                dma_flag = 0;

        /* Don't invoke OOM killer */
        gfp |= __GFP_NORETRY;

        return gfp | dma_flag;
}
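
/*
 * Example (illustrative): with both CONFIG_ZONE_DMA and CONFIG_ZONE_DMA32
 * enabled, a device whose coherent_dma_mask is DMA_BIT_MASK(32) - a
 * typical 32-bit PCI device - gets __GFP_DMA32 and is served from
 * ZONE_DMA32, while a device that can address the full 64 bits gets no
 * zone modifier at all.
 */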

static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        void *ret;
        struct page *page = NULL;
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

        gfp = massage_gfp_flags(dev, gfp);

        if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
                page = dma_alloc_from_contiguous(dev, count, get_order(size),
                                                 gfp);
        if (!page)
                page = alloc_pages(gfp, get_order(size));

        if (!page)
                return NULL;

        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = plat_map_dma_mem(dev, ret, size);
        if (!(attrs & DMA_ATTR_NON_CONSISTENT) &&
            !plat_device_is_coherent(dev)) {
                dma_cache_wback_inv((unsigned long) ret, size);
                ret = UNCAC_ADDR(ret);
        }

        return ret;
}
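
/*
 * Drivers reach this through the generic DMA API; a hypothetical driver
 * calling
 *
 *      buf = dma_alloc_coherent(&pdev->dev, size, &dma_handle, GFP_KERNEL);
 *
 * ends up here via the dma_map_ops below.  For a non-coherent device the
 * buffer is written back and invalidated once and the returned pointer
 * is the uncached (UNCAC_ADDR) alias of the allocation.
 */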

static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle, unsigned long attrs)
{
        unsigned long addr = (unsigned long) vaddr;
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page *page = NULL;

        plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

        if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !plat_device_is_coherent(dev))
                addr = CAC_ADDR(addr);

        page = virt_to_page((void *) addr);

        if (!dma_release_from_contiguous(dev, page, count))
                __free_pages(page, get_order(size));
}

static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
        void *cpu_addr, dma_addr_t dma_addr, size_t size,
        unsigned long attrs)
{
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long addr = (unsigned long)cpu_addr;
        unsigned long off = vma->vm_pgoff;
        unsigned long pfn;
        int ret = -ENXIO;

        if (!plat_device_is_coherent(dev))
                addr = CAC_ADDR(addr);

        pfn = page_to_pfn(virt_to_page((void *)addr));

        if (attrs & DMA_ATTR_WRITE_COMBINE)
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        else
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (off < count && user_count <= (count - off)) {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      pfn + off,
                                      user_count << PAGE_SHIFT,
                                      vma->vm_page_prot);
        }

        return ret;
}
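
/*
 * A driver's mmap file operation that calls (illustrative)
 *
 *      dma_mmap_coherent(dev, vma, cpu_addr, dma_handle, size);
 *
 * lands here: the buffer is mapped into userspace uncached, or
 * write-combined when DMA_ATTR_WRITE_COMBINE is requested (e.g. via
 * dma_mmap_wc()).
 */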

static inline void __dma_sync_virtual(void *addr, size_t size,
        enum dma_data_direction direction)
{
        switch (direction) {
        case DMA_TO_DEVICE:
                dma_cache_wback((unsigned long)addr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv((unsigned long)addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv((unsigned long)addr, size);
                break;

        default:
                BUG();
        }
}

/*
 * A single sg entry may refer to multiple physically contiguous
 * pages. But we still need to process highmem pages individually.
 * If highmem is not configured then the bulk of this loop gets
 * optimized out.
 */
static inline void __dma_sync(struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        size_t left = size;

        do {
                size_t len = left;

                if (PageHighMem(page)) {
                        void *addr;

                        if (offset + len > PAGE_SIZE) {
                                if (offset >= PAGE_SIZE) {
                                        page += offset >> PAGE_SHIFT;
                                        offset &= ~PAGE_MASK;
                                }
                                len = PAGE_SIZE - offset;
                        }

                        addr = kmap_atomic(page);
                        __dma_sync_virtual(addr + offset, len, direction);
                        kunmap_atomic(addr);
                } else
                        /* Lowmem is virtually contiguous: sync what is left in one go. */
                        __dma_sync_virtual(page_address(page) + offset,
                                           left, direction);
                offset = 0;
                page++;
                left -= len;
        } while (left);
}

static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
        size_t size, enum dma_data_direction direction, unsigned long attrs)
{
        if (cpu_needs_post_dma_flush(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                __dma_sync(dma_addr_to_page(dev, dma_addr),
                           dma_addr & ~PAGE_MASK, size, direction);
        plat_post_dma_flush(dev);
        plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
        int nents, enum dma_data_direction direction, unsigned long attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nents, i) {
                if (!plat_device_is_coherent(dev) &&
                    !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
                sg->dma_length = sg->length;
#endif
                sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
                                  sg->offset;
        }

        return nents;
}

static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction,
        unsigned long attrs)
{
        if (!plat_device_is_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                __dma_sync(page, offset, size, direction);

        return plat_map_dma_mem_page(dev, page) + offset;
}
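
/*
 * This is the streaming-DMA path; a hypothetical driver doing
 *
 *      dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *      ...device DMA...
 *      dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 *
 * goes through mips_dma_map_page()/mips_dma_unmap_page() above: the map
 * side performs the cache maintenance matching the direction (writeback
 * for DMA_TO_DEVICE, invalidate for DMA_FROM_DEVICE), and the unmap side
 * adds the extra post-DMA invalidation where cpu_needs_post_dma_flush()
 * says it is required.
 */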

static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
        int nhwentries, enum dma_data_direction direction,
        unsigned long attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nhwentries, i) {
                if (!plat_device_is_coherent(dev) &&
                    !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
                    direction != DMA_TO_DEVICE)
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
                plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
        }
}

static void mips_dma_sync_single_for_cpu(struct device *dev,
        dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
        if (cpu_needs_post_dma_flush(dev))
                __dma_sync(dma_addr_to_page(dev, dma_handle),
                           dma_handle & ~PAGE_MASK, size, direction);
        plat_post_dma_flush(dev);
}

static void mips_dma_sync_single_for_device(struct device *dev,
        dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
        if (!plat_device_is_coherent(dev))
                __dma_sync(dma_addr_to_page(dev, dma_handle),
                           dma_handle & ~PAGE_MASK, size, direction);
}

static void mips_dma_sync_sg_for_cpu(struct device *dev,
        struct scatterlist *sglist, int nelems,
        enum dma_data_direction direction)
{
        int i;
        struct scatterlist *sg;

        if (cpu_needs_post_dma_flush(dev)) {
                for_each_sg(sglist, sg, nelems, i) {
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
                }
        }
        plat_post_dma_flush(dev);
}

static void mips_dma_sync_sg_for_device(struct device *dev,
        struct scatterlist *sglist, int nelems,
        enum dma_data_direction direction)
{
        int i;
        struct scatterlist *sg;

        if (!plat_device_is_coherent(dev)) {
                for_each_sg(sglist, sg, nelems, i) {
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
                }
        }
}
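
/*
 * The sync_* hooks above back dma_sync_single_for_cpu() and friends,
 * which a driver uses when it re-reads or refills a long-lived streaming
 * mapping without unmapping it, e.g. (illustrative)
 *
 *      dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *      ...CPU inspects the buffer...
 *      dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */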

static int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}

static int mips_dma_supported(struct device *dev, u64 mask)
{
        return plat_dma_supported(dev, mask);
}

static void mips_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                         enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev))
                __dma_sync_virtual(vaddr, size, direction);
}

static const struct dma_map_ops mips_default_dma_map_ops = {
        .alloc = mips_dma_alloc_coherent,
        .free = mips_dma_free_coherent,
        .mmap = mips_dma_mmap,
        .map_page = mips_dma_map_page,
        .unmap_page = mips_dma_unmap_page,
        .map_sg = mips_dma_map_sg,
        .unmap_sg = mips_dma_unmap_sg,
        .sync_single_for_cpu = mips_dma_sync_single_for_cpu,
        .sync_single_for_device = mips_dma_sync_single_for_device,
        .sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
        .sync_sg_for_device = mips_dma_sync_sg_for_device,
        .mapping_error = mips_dma_mapping_error,
        .dma_supported = mips_dma_supported,
        .cache_sync = mips_dma_cache_sync,
};

const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
EXPORT_SYMBOL(mips_dma_map_ops);
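
/*
 * The generic DMA API dispatches to these ops through get_dma_ops().
 * mips_dma_map_ops is a writable pointer so that platform code with its
 * own coherence scheme can, if needed, point it at a platform-specific
 * struct dma_map_ops at boot instead of this default implementation.
 */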

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init mips_dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

        return 0;
}
fs_initcall(mips_dma_init);