#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
 * Documentation/DMA-API.txt for documentation.
 */
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>
extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

extern struct dma_map_ops *dma_ops;
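
/*
 * A device may carry its own dma_map_ops in dev->archdata; anything
 * without one falls back to the global dma_ops set up at boot
 * (nommu, swiotlb, GART, etc.).
 */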
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif
}
/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
}
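
/*
 * x86 DMA is cache-coherent, so the "noncoherent" allocators are just
 * aliases for the coherent ones and every mapping is consistent.
 */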
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)
extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag);
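
/*
 * There is no map_single hook: dma_map_single() decomposes the buffer
 * into a page plus offset and routes it through ->map_page.
 */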
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	return ops->map_page(hwdev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, NULL);
}
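
/*
 * Typical streaming use (illustrative sketch only; "mydev" and "buf"
 * are made-up names):
 *
 *	dma_addr_t bus = dma_map_single(&mydev->dev, buf, len,
 *					DMA_TO_DEVICE);
 *	if (dma_mapping_error(&mydev->dev, bus))
 *		return -EIO;
 *	... start the device DMA and wait for it to finish ...
 *	dma_unmap_single(&mydev->dev, bus, len, DMA_TO_DEVICE);
 */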
static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
}
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	return ops->map_sg(hwdev, sg, nents, dir, NULL);
}
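
/*
 * ->map_sg returns the number of DMA segments actually used, which an
 * IOMMU may make smaller than nents by merging entries; 0 means the
 * mapping failed.
 */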
static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_sg)
		ops->unmap_sg(hwdev, sg, nents, dir, NULL);
}
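
/*
 * The dma_sync_* helpers hand ownership of a streaming mapping back to
 * the CPU or the device; implementations that bounce (e.g. swiotlb)
 * copy the data here.  flush_write_buffers() then orders pending CPU
 * stores before the device next looks at the buffer.
 */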
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
	flush_write_buffers();
}
static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(hwdev, dma_handle, size, dir);
	flush_write_buffers();
}
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_range_for_cpu)
		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
					       size, dir);
	flush_write_buffers();
}
static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_range_for_device)
		ops->sync_single_range_for_device(hwdev, dma_handle,
						  offset, size, dir);
	flush_write_buffers();
}
static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
	flush_write_buffers();
}
static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(hwdev, sg, nelems, dir);

	flush_write_buffers();
}
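
/*
 * Map one page directly.  Unlike dma_map_single() this also works for
 * highmem pages, which need not have a kernel virtual address.
 */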
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	return ops->map_page(dev, page, offset, size, dir, NULL);
}
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, addr, size, dir);
}
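
/*
 * Caches are coherent on x86, so syncing a dma_alloc_noncoherent()
 * buffer only requires flushing the CPU write buffers.
 */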
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}
static inline int dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe */
	return boot_cpu_data.x86_clflush_size;
}
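
/*
 * Pick the mask a coherent allocation has to satisfy: the device's
 * coherent_dma_mask when set, otherwise a default derived from the gfp
 * flags (24 bits for the ISA DMA zone, else 32 bits).
 */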
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
						    gfp_t gfp)
{
	unsigned long dma_mask = 0;

	dma_mask = dev->coherent_dma_mask;
	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

	return dma_mask;
}
static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

	if (dma_mask <= DMA_24BIT_MASK)
		gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;
#endif
	return gfp;
}
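
/*
 * dma_alloc_coherent(): try a per-device coherent pool first, substitute
 * x86_dma_fallback_dev for deviceless callers, bail out for devices no
 * allocation could satisfy, then hand off to the active ->alloc_coherent
 * with gfp flags matched to the mask computed above.
 */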
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *memory;

	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &x86_dma_fallback_dev;
		gfp |= GFP_DMA;
	}

	if (!is_device_dma_capable(dev))
		return NULL;

	if (!ops->alloc_coherent)
		return NULL;

	return ops->alloc_coherent(dev, size, dma_handle,
				   dma_alloc_coherent_gfp_flags(dev, gfp));
}
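
/*
 * Illustrative coherent-allocation sketch ("mydev" is a made-up device
 * pointer):
 *
 *	dma_addr_t bus;
 *	void *desc = dma_alloc_coherent(&mydev->dev, PAGE_SIZE, &bus,
 *					GFP_KERNEL);
 *	if (!desc)
 *		return -ENOMEM;
 *	... use desc / bus ...
 *	dma_free_coherent(&mydev->dev, PAGE_SIZE, desc, bus);
 */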
static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}

#endif /* _ASM_X86_DMA_MAPPING_H */