// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "debug.h"
#include "direct.h"

bool dma_default_coherent;

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);

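/*
 * Example (hypothetical driver code, not part of this file): a probe()
 * routine can use dmam_alloc_attrs() and rely on devres to release the
 * buffer on driver detach, so no explicit dmam_free_coherent() is needed
 * in the error or remove paths.  foo_probe() and the ring names below are
 * made up for illustration:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		dma_addr_t ring_dma;
 *		void *ring;
 *
 *		ring = dmam_alloc_attrs(&pdev->dev, SZ_4K, &ring_dma,
 *					GFP_KERNEL, 0);
 *		if (!ring)
 *			return -ENOMEM;
 *		return 0;	(ring is freed automatically on detach)
 *	}
 */
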
static bool dma_go_direct(struct device *dev, dma_addr_t mask,
		const struct dma_map_ops *ops)
{
	if (likely(!ops))
		return true;
#ifdef CONFIG_DMA_OPS_BYPASS
	if (dev->dma_ops_bypass)
		return min_not_zero(mask, dev->bus_dma_limit) >=
			    dma_direct_get_required_mask(dev);
#endif
	return false;
}

/*
 * Check if the device uses a direct mapping for streaming DMA operations.
 * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
 * enough.
 */
static inline bool dma_alloc_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
}

static inline bool dma_map_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, *dev->dma_mask, ops);
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);

	return addr;
}
EXPORT_SYMBOL(dma_map_page_attrs);

void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_page_direct(dev, addr + size))
		dma_direct_unmap_page(dev, addr, size, dir, attrs);
	else if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page_attrs);

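/*
 * Example (hypothetical driver code, not part of this file): mapping a
 * kmalloc'ed buffer for one device-to-memory transfer via the
 * dma_map_single() wrapper, which resolves to dma_map_page_attrs().
 * foo_dev, buf, len and foo_start_rx() are assumptions:
 *
 *	dma_addr_t dma = dma_map_single(foo_dev, buf, len, DMA_FROM_DEVICE);
 *
 *	if (dma_mapping_error(foo_dev, dma))
 *		return -ENOMEM;
 *	foo_start_rx(foo_dev, dma, len);
 *	... wait for the transfer to complete ...
 *	dma_unmap_single(foo_dev, dma, len, DMA_FROM_DEVICE);
 */
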
static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
	 int nents, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return 0;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_sg_direct(dev, sg, nents))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);

	if (ents > 0)
		debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
	else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
			      ents != -EIO && ents != -EREMOTEIO))
		return -EIO;

	return ents;
}

/**
 * dma_map_sg_attrs - Map the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sg:		The scatterlist describing the buffer
 * @nents:	Number of entries to map
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist passed in the @sg argument with
 * @nents segments for the @dir DMA operation by the @dev device.
 *
 * Returns the number of mapped entries (which can be less than @nents)
 * on success. Zero is returned for any error.
 *
 * dma_unmap_sg_attrs() should be used to unmap the buffer with the
 * original sg and original nents (not the value returned by this function).
 */
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int ret;

	ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs);
	if (ret < 0)
		return 0;
	return ret;
}
EXPORT_SYMBOL(dma_map_sg_attrs);

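/*
 * Example (hypothetical driver code, not part of this file): mapping a
 * scatterlist and unmapping it with the original nents, not the value
 * returned by dma_map_sg().  foo_dev, sgl and nents are assumptions:
 *
 *	int mapped = dma_map_sg(foo_dev, sgl, nents, DMA_TO_DEVICE);
 *
 *	if (mapped == 0)
 *		return -EIO;
 *	... program the device with the mapped segments ...
 *	dma_unmap_sg(foo_dev, sgl, nents, DMA_TO_DEVICE);
 */
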
/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success, the
 * ownership for the buffer is transferred to the DMA domain.  One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before touching the
 * buffer by the CPU.
 *
 * Returns 0 on success or a negative error code on error. The following
 * error codes are supported with the given meaning:
 *
 *   -EINVAL		An invalid argument, unaligned access or other error
 *			in usage. Will not succeed if retried.
 *   -ENOMEM		Insufficient resources (like memory or IOVA space) to
 *			complete the mapping. Should succeed if retried later.
 *   -EIO		Legacy error code with an unknown meaning, e.g. this is
 *			returned if a lower level call returned
 *			DMA_MAPPING_ERROR.
 *   -EREMOTEIO		The DMA device cannot access P2PDMA memory specified
 *			in the sg_table. This will not succeed if retried.
 */
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents < 0)
		return nents;
	sgt->nents = nents;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_map_sgtable);

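/*
 * Example (hypothetical driver code, not part of this file):
 * dma_map_sgtable() pairs naturally with sg_alloc_table_from_pages();
 * per the error table above, only -ENOMEM is worth retrying.  foo_dev,
 * pages and n_pages are assumptions:
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = sg_alloc_table_from_pages(&sgt, pages, n_pages, 0,
 *					n_pages << PAGE_SHIFT, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	ret = dma_map_sgtable(foo_dev, &sgt, DMA_BIDIRECTIONAL, 0);
 *	if (ret) {
 *		sg_free_table(&sgt);
 *		return ret;
 *	}
 *	... program the device using for_each_sgtable_dma_sg() ...
 *	dma_unmap_sgtable(foo_dev, &sgt, DMA_BIDIRECTIONAL, 0);
 */
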
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_sg_direct(dev, sg, nents))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_sg_attrs);

dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr = DMA_MAPPING_ERROR;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops))
		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
	else if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr, attrs);
	return addr;
}
EXPORT_SYMBOL(dma_map_resource);

void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (!dma_map_direct(dev, ops) && ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_resource);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_device);

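/*
 * Example (hypothetical driver code, not part of this file): a streaming
 * buffer that stays mapped across many transfers must be handed back and
 * forth with the sync calls.  foo_dev, dma and len are assumptions:
 *
 *	dma_sync_single_for_cpu(foo_dev, dma, len, DMA_FROM_DEVICE);
 *	... the CPU reads the received data ...
 *	dma_sync_single_for_device(foo_dev, dma, len, DMA_FROM_DEVICE);
 *	... the device may DMA into the buffer again ...
 */
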
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts an
 * sg_table.  This presents a couple of problems:
 * 1. Not all memory allocated via the coherent DMA APIs is backed by
 *    a struct page
 * 2. Passing coherent DMA memory into the streaming APIs is not allowed
 *    as we will try to flush the memory through a different alias to that
 *    actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (dev_is_dma_coherent(dev))
		return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
#endif
	return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */

/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_can_mmap(dev);
	return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);

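/*
 * Example (hypothetical driver code, not part of this file): exposing a
 * coherent buffer to user space from a driver's mmap file operation.
 * foo_dev, foo_cpu_addr and foo_dma_addr are assumptions:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		size_t size = vma->vm_end - vma->vm_start;
 *
 *		return dma_mmap_attrs(foo_dev, vma, foo_cpu_addr,
 *				      foo_dma_addr, size, 0);
 *	}
 */
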
u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);

	/*
	 * We require every DMA ops implementation to at least support a 32-bit
	 * DMA mask (and use bounce buffering if that isn't supported in
	 * hardware).  As the direct mapping code has its own routine to
	 * actually report an optimal mask we default to 32-bit here as that
	 * is the right thing for most IOMMUs, and at least not actively
	 * harmful in general.
	 */
	return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_alloc_direct(dev, ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);

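/*
 * Example (hypothetical driver code, not part of this file): a typical
 * coherent allocation for a descriptor ring via the dma_alloc_coherent()
 * wrapper around dma_alloc_attrs().  foo_dev and ring_size are assumptions:
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(foo_dev, ring_size, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... tell the device about ring_dma, access ring from the CPU ...
 *	dma_free_coherent(foo_dev, ring_size, ring, ring_dma);
 */
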
static struct page *__dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (WARN_ON_ONCE(!dev->coherent_dma_mask))
		return NULL;
	if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
		return NULL;

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
	if (!ops->alloc_pages)
		return NULL;
	return ops->alloc_pages(dev, size, dma_handle, dir, gfp);
}

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);

	if (page)
		debug_dma_map_page(dev, page, 0, size, dir, *dma_handle, 0);
	return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);

static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free_pages(dev, size, page, dma_handle, dir);
	else if (ops->free_pages)
		ops->free_pages(dev, size, page, dma_handle, dir);
}

void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, dma_handle, size, dir);
	__dma_free_pages(dev, size, page, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_pages);

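/*
 * Example (hypothetical driver code, not part of this file): a non-coherent
 * page allocation; the caller owns the pages and must bracket CPU accesses
 * with the dma_sync_single_* calls.  foo_dev is an assumption:
 *
 *	dma_addr_t dma;
 *	struct page *page;
 *
 *	page = dma_alloc_pages(foo_dev, PAGE_SIZE, &dma, DMA_BIDIRECTIONAL,
 *			       GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	... use page_address(page) on the CPU, dma on the device ...
 *	dma_free_pages(foo_dev, PAGE_SIZE, page, dma, DMA_BIDIRECTIONAL);
 */
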
int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start,
			page_to_pfn(page) + vma->vm_pgoff,
			vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_pages);

static struct sg_table *alloc_single_sgt(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp)
{
	struct sg_table *sgt;
	struct page *page;

	sgt = kmalloc(sizeof(*sgt), gfp);
	if (!sgt)
		return NULL;
	if (sg_alloc_table(sgt, 1, gfp))
		goto out_free_sgt;
	page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp);
	if (!page)
		goto out_free_table;
	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	sg_dma_len(sgt->sgl) = sgt->sgl->length;
	return sgt;
out_free_table:
	sg_free_table(sgt);
out_free_sgt:
	kfree(sgt);
	return NULL;
}

struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct sg_table *sgt;

	if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
		return NULL;

	if (ops && ops->alloc_noncontiguous)
		sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
	else
		sgt = alloc_single_sgt(dev, size, dir, gfp);

	if (sgt) {
		sgt->nents = 1;
		debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
	}
	return sgt;
}
EXPORT_SYMBOL_GPL(dma_alloc_noncontiguous);

static void free_single_sgt(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	__dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address,
			dir);
	sg_free_table(sgt);
	kfree(sgt);
}

void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
	if (ops && ops->free_noncontiguous)
		ops->free_noncontiguous(dev, size, sgt, dir);
	else
		free_single_sgt(dev, size, sgt, dir);
}
EXPORT_SYMBOL_GPL(dma_free_noncontiguous);

void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (ops && ops->alloc_noncontiguous)
		return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
	return page_address(sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);

void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops && ops->alloc_noncontiguous)
		vunmap(vaddr);
}
EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);

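/*
 * Example (hypothetical driver code, not part of this file): a
 * noncontiguous allocation mapped once for the device, with an optional
 * kernel mapping for CPU access.  foo_dev and buf_size are assumptions:
 *
 *	struct sg_table *sgt;
 *	void *vaddr;
 *
 *	sgt = dma_alloc_noncontiguous(foo_dev, buf_size, DMA_BIDIRECTIONAL,
 *				      GFP_KERNEL, 0);
 *	if (!sgt)
 *		return -ENOMEM;
 *	vaddr = dma_vmap_noncontiguous(foo_dev, buf_size, sgt);
 *	... CPU access through vaddr, device access through sgt ...
 *	dma_vunmap_noncontiguous(foo_dev, vaddr);
 *	dma_free_noncontiguous(foo_dev, buf_size, sgt, DMA_BIDIRECTIONAL);
 */
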
int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops && ops->alloc_noncontiguous) {
		unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		if (vma->vm_pgoff >= count ||
		    vma_pages(vma) > count - vma->vm_pgoff)
			return -ENXIO;
		return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
	}
	return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);

static int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/*
	 * ->dma_supported sets the bypass flag, so we must always call
	 * into the method here unless the device is truly direct mapped.
	 */
	if (!ops)
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}

bool dma_pci_p2pdma_supported(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/* if ops is not set, dma direct will be used which supports P2PDMA */
	if (!ops)
		return true;

	/*
	 * Note: dma_ops_bypass is not checked here because P2PDMA should
	 * not be used with dma mapping ops that do not have support even
	 * if the specific device is bypassing them.
	 */
	return ops->flags & DMA_F_PCI_P2PDMA_SUPPORTED;
}
EXPORT_SYMBOL_GPL(dma_pci_p2pdma_supported);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif

int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);

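/*
 * Example (hypothetical driver code, not part of this file): the common
 * probe-time pattern of requesting a wide mask and falling back to 32 bits,
 * using the dma_set_mask_and_coherent() wrapper that sets both masks:
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */
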
size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_map_direct(dev, ops))
		size = dma_direct_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);

size_t dma_opt_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (ops && ops->opt_mapping_size)
		size = ops->opt_mapping_size();

	return min(dma_max_mapping_size(dev), size);
}
EXPORT_SYMBOL_GPL(dma_opt_mapping_size);

bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))
		return dma_direct_need_sync(dev, dma_addr);
	return ops->sync_single_for_cpu || ops->sync_single_for_device;
}
EXPORT_SYMBOL_GPL(dma_need_sync);

unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops || !ops->get_merge_boundary)
		return 0;	/* can't merge */

	return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);