arch/powerpc/kernel/dma.c
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/lmb.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
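
/*
 * Example (hypothetical platform code, a sketch only): a board file
 * could publish a fixed bus offset for a device before drivers start
 * mapping memory.  MYBOARD_DMA_OFFSET and myboard_fixup_dev() are
 * illustrative names, not part of this file.
 *
 *        static void myboard_fixup_dev(struct device *dev)
 *        {
 *                dev->archdata.dma_data = (void *)MYBOARD_DMA_OFFSET;
 *        }
 */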

unsigned long get_dma_direct_offset(struct device *dev)
{
        if (dev)
                return (unsigned long)dev->archdata.dma_data;

        return PCI_DRAM_OFFSET;
}

void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t flag)
{
        void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
        ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
        if (ret == NULL)
                return NULL;
        *dma_handle += get_dma_direct_offset(dev);
        return ret;
#else
        struct page *page;
        int node = dev_to_node(dev);

        /* ignore region specifiers */
        flag  &= ~(__GFP_HIGHMEM);

        page = alloc_pages_node(node, flag, get_order(size));
        if (page == NULL)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);

        return ret;
#endif
}
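
/*
 * Example (hypothetical driver code, a sketch only): a driver would not
 * call the function above directly but go through dma_alloc_coherent(),
 * which dispatches here on direct-mapped busses.  "mydev" and RING_BYTES
 * are illustrative names.
 *
 *        dma_addr_t ring_dma;
 *        void *ring = dma_alloc_coherent(&mydev->dev, RING_BYTES,
 *                                        &ring_dma, GFP_KERNEL);
 *        if (!ring)
 *                return -ENOMEM;
 *        ...
 *        dma_free_coherent(&mydev->dev, RING_BYTES, ring, ring_dma);
 */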

void dma_direct_free_coherent(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
        __dma_free_coherent(size, vaddr);
#else
        free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                             int nents, enum dma_data_direction direction,
                             struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
                sg->dma_length = sg->length;
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
        }

        return nents;
}
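
/*
 * Example (hypothetical driver code, a sketch only): mapping a
 * scatterlist through the generic dma_map_sg() wrapper, which lands in
 * dma_direct_map_sg() above.  "sgl" and "nents" are assumed to have
 * been prepared with sg_init_table()/sg_set_page().
 *
 *        int n = dma_map_sg(&mydev->dev, sgl, nents, DMA_TO_DEVICE);
 *        if (n == 0)
 *                return -EIO;
 *        ...
 *        dma_unmap_sg(&mydev->dev, sgl, nents, DMA_TO_DEVICE);
 */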

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction direction,
                                struct dma_attrs *attrs)
{
        /* Nothing to tear down: a direct mapping keeps no per-mapping
         * state. */
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
        /* Could be improved so platforms can set the limit in case
         * they have limited DMA windows
         */
        return mask >= (lmb_end_of_DRAM() - 1);
#else
        return 1;
#endif
}
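
/*
 * Example (hypothetical driver code, a sketch only): a driver requests
 * a 32-bit DMA mask; on 64-bit this only succeeds when all of DRAM
 * lies below the mask, per the check above.
 *
 *        if (dma_set_mask(&mydev->dev, DMA_BIT_MASK(32)))
 *                return -EIO;
 */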

static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                             struct page *page,
                                             unsigned long offset,
                                             size_t size,
                                             enum dma_data_direction dir,
                                             struct dma_attrs *attrs)
{
        BUG_ON(dir == DMA_NONE);
        __dma_sync_page(page, offset, size, dir);
        return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}
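
/*
 * Example (hypothetical driver code, a sketch only): a streaming
 * mapping of a single kernel buffer via dma_map_single(), which
 * resolves to dma_direct_map_page() through the ops table below.
 * "buf" and "len" are illustrative names.
 *
 *        dma_addr_t buf_dma = dma_map_single(&mydev->dev, buf, len,
 *                                            DMA_FROM_DEVICE);
 *        ...
 *        dma_unmap_single(&mydev->dev, buf_dma, len, DMA_FROM_DEVICE);
 */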

static inline void dma_direct_unmap_page(struct device *dev,
                                         dma_addr_t dma_address,
                                         size_t size,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
        /* Nothing to tear down for a direct mapping. */
}

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
                struct scatterlist *sgl, int nents,
                enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single_range(struct device *dev,
                dma_addr_t dma_handle, unsigned long offset, size_t size,
                enum dma_data_direction direction)
{
        __dma_sync(bus_to_virt(dma_handle + offset), size, direction);
}
#endif
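
/*
 * Example (hypothetical driver code, a sketch only): on a non-coherent
 * platform, CPU accesses to a live streaming buffer must be bracketed
 * with sync calls, which land in the helpers above.
 *
 *        dma_sync_single_range_for_cpu(&mydev->dev, buf_dma, 0, len,
 *                                      DMA_FROM_DEVICE);
 *        (CPU reads the buffer here)
 *        dma_sync_single_range_for_device(&mydev->dev, buf_dma, 0, len,
 *                                         DMA_FROM_DEVICE);
 */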

struct dma_map_ops dma_direct_ops = {
        .alloc_coherent = dma_direct_alloc_coherent,
        .free_coherent  = dma_direct_free_coherent,
        .map_sg         = dma_direct_map_sg,
        .unmap_sg       = dma_direct_unmap_sg,
        .dma_supported  = dma_direct_dma_supported,
        .map_page       = dma_direct_map_page,
        .unmap_page     = dma_direct_unmap_page,
#ifdef CONFIG_NOT_COHERENT_CACHE
        .sync_single_range_for_cpu      = dma_direct_sync_single_range,
        .sync_single_range_for_device   = dma_direct_sync_single_range,
        .sync_sg_for_cpu                = dma_direct_sync_sg,
        .sync_sg_for_device             = dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);
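
/*
 * Example (hypothetical platform code, a sketch only, assuming the
 * powerpc set_dma_ops() helper): bus setup code can attach these ops
 * to a device before it is registered.
 *
 *        set_dma_ops(&mydev->dev, &dma_direct_ops);
 */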

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

        return 0;
}
fs_initcall(dma_init);