// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <linux/version.h>

#include <asm/cache.h>
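
/*
 * Small pool of pre-mapped, uncached memory used to back coherent
 * allocations requested from atomic context, where pages cannot be
 * remapped.  The default of 256 KiB can be overridden at boot with the
 * "coherent_pool=<size>" command-line parameter.
 */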
static struct gen_pool *atomic_pool;
static size_t atomic_pool_size __initdata = SZ_256K;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);
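
/*
 * Build the atomic pool at postcore time: allocate contiguous lowmem
 * pages, remap them uncached, and hand the range to a genpool so that
 * csky_dma_alloc_atomic() can carve pieces out of it without sleeping.
 * Any failure here is fatal, hence the BUG_ON()s.
 */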
static int __init atomic_pool_init(void)
{
	struct page *page;
	void *ptr;
	int ret;
	size_t size = atomic_pool_size;

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	BUG_ON(!atomic_pool);

	page = alloc_pages(GFP_KERNEL | GFP_DMA, get_order(size));
	BUG_ON(!page);

	ptr = dma_common_contiguous_remap(page, size, VM_ALLOC,
					  pgprot_noncached(PAGE_KERNEL),
					  __builtin_return_address(0));
	BUG_ON(!ptr);

	ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
				page_to_phys(page), atomic_pool_size, -1);
	BUG_ON(ret);

	gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL);

	pr_info("DMA: preallocated %zu KiB pool for atomic coherent pool\n",
		atomic_pool_size / 1024);
	pr_info("DMA: vaddr: 0x%lx phy: 0x%lx\n", (unsigned long)ptr,
		(unsigned long)page_to_phys(page));

	return 0;
}
postcore_initcall(atomic_pool_init);
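
/*
 * Atomic-context allocator: take an uncached chunk straight from the
 * pre-built genpool.  gen_pool_alloc() never sleeps, so this is safe
 * under GFP_ATOMIC.  Returns NULL when the pool is exhausted.
 */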
static void *csky_dma_alloc_atomic(struct device *dev, size_t size,
				   dma_addr_t *dma_handle)
{
	unsigned long addr = gen_pool_alloc(atomic_pool, size);

	if (addr)
		*dma_handle = gen_pool_virt_to_phys(atomic_pool, addr);

	return (void *)addr;
}

static void csky_dma_free_atomic(struct device *dev, size_t size, void *vaddr,
				 dma_addr_t dma_handle, unsigned long attrs)
{
	gen_pool_free(atomic_pool, (unsigned long)vaddr, size);
}
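
/*
 * Zero a freshly allocated buffer and push the zeros past the cache
 * with dma_wbinv_range(), so the device never observes stale lines.
 * Highmem pages have no permanent kernel mapping and must be handled
 * one page at a time through kmap_atomic().
 */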
static void __dma_clear_buffer(struct page *page, size_t size)
{
	if (PageHighMem(page)) {
		unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		do {
			void *ptr = kmap_atomic(page);
			size_t _size = (size < PAGE_SIZE) ? size : PAGE_SIZE;

			memset(ptr, 0, _size);
			dma_wbinv_range((unsigned long)ptr,
					(unsigned long)ptr + _size);
			kunmap_atomic(ptr);

			page++;
			size -= PAGE_SIZE;
			count--;
		} while (count);
	} else {
		void *ptr = page_address(page);

		memset(ptr, 0, size);
		dma_wbinv_range((unsigned long)ptr,
				(unsigned long)ptr + size);
	}
}
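
/*
 * Blocking-context allocator: grab pages from CMA when it is enabled
 * (falling back to the buddy allocator otherwise), zero and flush them,
 * then remap them uncached so CPU and device agree on the contents.
 */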
static void *csky_dma_alloc_nonatomic(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t gfp,
				      unsigned long attrs)
{
	void *vaddr;
	struct page *page;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (attrs & DMA_ATTR_NON_CONSISTENT) {
		pr_err("csky %s can't support DMA_ATTR_NON_CONSISTENT.\n",
		       __func__);
		return NULL;
	}

	if (IS_ENABLED(CONFIG_DMA_CMA))
		page = dma_alloc_from_contiguous(dev, count, get_order(size),
						 gfp);
	else
		page = alloc_pages(gfp, get_order(size));

	if (!page) {
		pr_err("csky %s no more free pages.\n", __func__);
		return NULL;
	}

	*dma_handle = page_to_phys(page);
	__dma_clear_buffer(page, size);

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return page;

	vaddr = dma_common_contiguous_remap(page, PAGE_ALIGN(size), VM_USERMAP,
					    pgprot_noncached(PAGE_KERNEL),
					    __builtin_return_address(0));
	BUG_ON(!vaddr);

	return vaddr;
}
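
/*
 * Undo csky_dma_alloc_nonatomic(): remapped buffers live above
 * VMALLOC_START, so only those need dma_common_free_remap(); the pages
 * themselves go back to CMA or the buddy allocator.
 */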
static void csky_dma_free_nonatomic(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle,
				    unsigned long attrs)
{
	struct page *page = phys_to_page(dma_handle);
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if ((unsigned long)vaddr >= VMALLOC_START)
		dma_common_free_remap(vaddr, size, VM_USERMAP);

	if (IS_ENABLED(CONFIG_DMA_CMA))
		dma_release_from_contiguous(dev, page, count);
	else
		__free_pages(page, get_order(size));
}
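
/*
 * arch_dma_alloc()/arch_dma_free() are the hooks called by the generic
 * dma-noncoherent code.  If the gfp flags allow sleeping we take the
 * remapping path; otherwise we fall back to the pre-mapped atomic pool.
 */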
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		     gfp_t gfp, unsigned long attrs)
{
	if (gfpflags_allow_blocking(gfp))
		return csky_dma_alloc_nonatomic(dev, size, dma_handle, gfp,
						attrs);

	return csky_dma_alloc_atomic(dev, size, dma_handle);
}

void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		   dma_addr_t dma_handle, unsigned long attrs)
{
	if (addr_in_gen_pool(atomic_pool, (unsigned long)vaddr, size))
		csky_dma_free_atomic(dev, size, vaddr, dma_handle, attrs);
	else
		csky_dma_free_nonatomic(dev, size, vaddr, dma_handle, attrs);
}
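
/*
 * Apply a cache maintenance routine over a physical address range,
 * page by page.  Highmem pages get a temporary kernel mapping via
 * kmap_atomic(); lowmem is linearly mapped, so the whole range can be
 * handled in one call through phys_to_virt().
 */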
static inline void cache_op(phys_addr_t paddr, size_t size,
			    void (*fn)(unsigned long start, unsigned long end))
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned int offset = paddr & ~PAGE_MASK;
	size_t left = size;
	unsigned long start;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			start = (unsigned long)(addr + offset);
			fn(start, start + len);
			kunmap_atomic(addr);
		} else {
			start = (unsigned long)phys_to_virt(paddr);
			fn(start, start + size);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}
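
/*
 * Streaming DMA sync hooks.  Before the device reads (DMA_TO_DEVICE),
 * dirty lines only need to be written back; when the device may write
 * (DMA_FROM_DEVICE/DMA_BIDIRECTIONAL), lines must also be invalidated
 * so the CPU re-reads fresh data afterwards.
 */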
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
			      size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		cache_op(paddr, size, dma_wb_range);
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_op(paddr, size, dma_wbinv_range);
		break;
	default:
		BUG();
	}
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
			   size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		cache_op(paddr, size, dma_wb_range);
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_op(paddr, size, dma_wbinv_range);
		break;
	default:
		BUG();
	}
}