arch/mips/mm/dma-default.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

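/*
 * Translate a DMA address back to the kernel virtual address it was
 * mapped from, via the platform's plat_dma_addr_to_phys() hook.  Only
 * meaningful for addresses that were produced by plat_map_dma_mem() in
 * the first place.
 */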
static inline unsigned long dma_addr_to_virt(struct device *dev,
        dma_addr_t dma_addr)
{
        unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr);

        return (unsigned long)phys_to_virt(addr);
}

/*
 * A warning on terminology: Linux calls an uncached area coherent, while
 * MIPS terminology reserves "coherent" for memory areas whose coherency
 * is maintained by hardware.
 */

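/*
 * The R10000 and R12000 execute loads speculatively, which can pull
 * lines back into the caches behind the kernel's back; on non-coherent
 * systems built around these CPUs the caches therefore need attention
 * again when a buffer is handed back to the CPU, not just at map time.
 */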
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
        return !plat_device_is_coherent(dev) &&
               (current_cpu_type() == CPU_R10000 ||
               current_cpu_type() == CPU_R12000);
}

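/*
 * Adjust the caller's gfp mask so the allocation lands in a zone the
 * device can actually address: region specifiers passed in are discarded
 * and re-derived from the device's coherent_dma_mask, and the OOM killer
 * is kept out of the picture with __GFP_NORETRY.
 */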
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

        /* The two #ifdef blocks below form a single if/else ladder. */
#ifdef CONFIG_ZONE_DMA
        if (dev == NULL)
                gfp |= __GFP_DMA;
        else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
                gfp |= __GFP_DMA;
        else
#endif
#ifdef CONFIG_ZONE_DMA32
             if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(32))
                gfp |= __GFP_DMA32;
        else
#endif
                ;

        /* Don't invoke OOM killer */
        gfp |= __GFP_NORETRY;

        return gfp;
}

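/*
 * Allocate zeroed pages for DMA and hand back both the kernel virtual
 * address and the device-visible DMA address.  "Noncoherent" here means
 * the caller gets a normal cached mapping and is responsible for cache
 * maintenance, e.g. via dma_cache_sync().
 */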
void *dma_alloc_noncoherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t gfp)
{
        void *ret;

        gfp = massage_gfp_flags(dev, gfp);

        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret != NULL) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);
        }

        return ret;
}
EXPORT_SYMBOL(dma_alloc_noncoherent);

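/*
 * As dma_alloc_noncoherent(), but the buffer handed back is safe for
 * concurrent CPU/device access: a per-device coherent pool is tried
 * first, and on non-coherent platforms the buffer is flushed and then
 * accessed through the uncached segment (UNCAC_ADDR).
 *
 * A typical driver-side usage sketch; "mydev" and "MY_BUF_SIZE" are
 * illustrative names, not part of this file:
 *
 *      dma_addr_t handle;
 *      void *buf = dma_alloc_coherent(mydev, MY_BUF_SIZE, &handle,
 *                                     GFP_KERNEL);
 *      if (buf) {
 *              ... program the device with "handle" ...
 *              dma_free_coherent(mydev, MY_BUF_SIZE, buf, handle);
 *      }
 */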
void *dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t gfp)
{
        void *ret;

        if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
                return ret;

        gfp = massage_gfp_flags(dev, gfp);

        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);

                if (!plat_device_is_coherent(dev)) {
                        dma_cache_wback_inv((unsigned long) ret, size);
                        ret = UNCAC_ADDR(ret);
                }
        }

        return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);

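/*
 * Undo dma_alloc_noncoherent(): tear down the platform mapping and
 * return the pages to the allocator.
 */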
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
        free_pages((unsigned long) vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_noncoherent);

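/*
 * Undo dma_alloc_coherent().  Buffers that came from a per-device
 * coherent pool are released there; otherwise the uncached address is
 * converted back to its cached counterpart (CAC_ADDR) before the pages
 * are freed.
 */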
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        unsigned long addr = (unsigned long) vaddr;
        int order = get_order(size);

        if (dma_release_from_coherent(dev, order, vaddr))
                return;

        plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

        if (!plat_device_is_coherent(dev))
                addr = CAC_ADDR(addr);

        free_pages(addr, order);
}
EXPORT_SYMBOL(dma_free_coherent);

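/*
 * Perform the cache maintenance a transfer direction requires: write
 * back before the device reads (DMA_TO_DEVICE), invalidate before the
 * CPU reads what the device wrote (DMA_FROM_DEVICE), and both for
 * bidirectional buffers.
 */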
static inline void __dma_sync(unsigned long addr, size_t size,
        enum dma_data_direction direction)
{
        switch (direction) {
        case DMA_TO_DEVICE:
                dma_cache_wback(addr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv(addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv(addr, size);
                break;

        default:
                BUG();
        }
}

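/*
 * Map a kernel buffer for streaming DMA.  On non-coherent platforms the
 * buffer is synchronized up front; the returned handle is what the
 * device should be programmed with.
 */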
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
        enum dma_data_direction direction)
{
        unsigned long addr = (unsigned long) ptr;

        if (!plat_device_is_coherent(dev))
                __dma_sync(addr, size, direction);

        return plat_map_dma_mem(dev, ptr, size);
}
EXPORT_SYMBOL(dma_map_single);

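/*
 * Tear down a dma_map_single() mapping.  Only the speculating R10000/
 * R12000 need another cache operation here; other CPUs cannot have
 * refilled the lines behind our back since the map-time flush.
 */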
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
        enum dma_data_direction direction)
{
        if (cpu_is_noncoherent_r10000(dev))
                __dma_sync(dma_addr_to_virt(dev, dma_addr), size,
                           direction);

        plat_unmap_dma_mem(dev, dma_addr, size, direction);
}
EXPORT_SYMBOL(dma_unmap_single);

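/*
 * Scatterlist counterpart of dma_map_single(): each entry is
 * synchronized if necessary and given its own dma_address.  Returns the
 * number of entries mapped.
 */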
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                unsigned long addr;

                addr = (unsigned long) sg_virt(sg);
                if (!plat_device_is_coherent(dev) && addr)
                        __dma_sync(addr, sg->length, direction);
                sg->dma_address = plat_map_dma_mem(dev,
                                                   (void *)addr, sg->length);
        }

        return nents;
}
EXPORT_SYMBOL(dma_map_sg);

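/*
 * Map part of a page for streaming DMA.  On non-coherent platforms the
 * code implicitly assumes the page has a kernel mapping, since
 * page_address() is used unconditionally for the cache maintenance.
 */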
dma_addr_t dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = (unsigned long) page_address(page) + offset;
                __dma_sync(addr, size, direction);
        }

        return plat_map_dma_mem_page(dev, page) + offset;
}
EXPORT_SYMBOL(dma_map_page);

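/*
 * Tear down a dma_map_sg() mapping.  For anything but DMA_TO_DEVICE the
 * entries have to be synchronized again on non-coherent platforms, since
 * the device may have written to them.
 */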
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
        enum dma_data_direction direction)
{
        unsigned long addr;
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nhwentries; i++, sg++) {
                if (!plat_device_is_coherent(dev) &&
                    direction != DMA_TO_DEVICE) {
                        addr = (unsigned long) sg_virt(sg);
                        if (addr)
                                __dma_sync(addr, sg->length, direction);
                }
                plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
        }
}
EXPORT_SYMBOL(dma_unmap_sg);

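/*
 * The sync_*_for_cpu operations only matter where the CPU may have
 * speculated lines back into the cache (the R10000/R12000 case); the
 * sync_*_for_device operations must write back and/or invalidate on any
 * non-coherent platform before the device touches memory again.
 */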
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dev, dma_handle);
                __dma_sync(addr, size, direction);
        }
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
        size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        plat_extra_sync_for_device(dev);
        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dev, dma_handle);
                __dma_sync(addr, size, direction);
        }
}
EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (cpu_is_noncoherent_r10000(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dev, dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}
EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        plat_extra_sync_for_device(dev);
        if (!plat_device_is_coherent(dev)) {
                unsigned long addr;

                addr = dma_addr_to_virt(dev, dma_handle);
                __dma_sync(addr + offset, size, direction);
        }
}
EXPORT_SYMBOL(dma_sync_single_range_for_device);

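/*
 * Scatterlist counterparts of the single-buffer sync operations above.
 * Note that sg->length is used, i.e. each entry is synchronized in full
 * regardless of how much of it the last transfer actually touched.
 */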
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
                if (cpu_is_noncoherent_r10000(dev))
                        __dma_sync((unsigned long)page_address(sg_page(sg)),
                                   sg->length, direction);
        }
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
        enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        /* Make sure that gcc doesn't leave the empty loop body.  */
        for (i = 0; i < nelems; i++, sg++) {
                if (!plat_device_is_coherent(dev))
                        __dma_sync((unsigned long)page_address(sg_page(sg)),
                                   sg->length, direction);
        }
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return plat_dma_mapping_error(dev, dma_addr);
}
EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
        return plat_dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
        return plat_device_is_coherent(dev);
}
EXPORT_SYMBOL(dma_is_consistent);

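/*
 * Explicit cache maintenance for buffers from dma_alloc_noncoherent().
 * Beyond the platform hook, this is a no-op on coherent systems.
 */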
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        plat_extra_sync_for_device(dev);
        if (!plat_device_is_coherent(dev))
                __dma_sync((unsigned long)vaddr, size, direction);
}
EXPORT_SYMBOL(dma_cache_sync);