// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <linux/version.h>
#include <asm/cache.h>

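/*
 * Walk the physical range [paddr, paddr + size) page by page and apply
 * the cache maintenance callback @fn to each page's kernel mapping.
 * Lowmem pages are reached through the linear mapping; highmem pages
 * are mapped temporarily with kmap_atomic().
 */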
static inline void cache_op(phys_addr_t paddr, size_t size,
                            void (*fn)(unsigned long start, unsigned long end))
{
        struct page *page    = phys_to_page(paddr);
        void *start          = __va(page_to_phys(page));
        unsigned long offset = offset_in_page(paddr);
        size_t left          = size;

        do {
                size_t len = left;

                /* Never operate past the end of the current page. */
                if (offset + len > PAGE_SIZE)
                        len = PAGE_SIZE - offset;

                if (PageHighMem(page)) {
                        /* No permanent mapping; map the page briefly. */
                        start = kmap_atomic(page);

                        fn((unsigned long)start + offset,
                                        (unsigned long)start + offset + len);

                        kunmap_atomic(start);
                } else {
                        fn((unsigned long)start + offset,
                                        (unsigned long)start + offset + len);
                }
                /* Only the first page can start at a non-zero offset. */
                offset = 0;

                page++;
                start += PAGE_SIZE;
                left -= len;
        } while (left);
}

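/* Zero the range, then write back and invalidate it in the dcache. */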
static void dma_wbinv_set_zero_range(unsigned long start, unsigned long end)
{
        memset((void *)start, 0, end - start);
        dma_wbinv_range(start, end);
}

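/*
 * Prepare a freshly allocated page for use as a non-coherent DMA
 * buffer: zero it and write back + invalidate any cached lines so
 * nothing dirty can later be evicted over data the device writes.
 */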
void arch_dma_prep_coherent(struct page *page, size_t size)
{
        cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range);
}

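/*
 * Hand a buffer over to the device: CPU writes must reach memory
 * first.  DMA_TO_DEVICE only needs a write back; directions where the
 * device may write also invalidate, so no dirty line can be evicted
 * on top of the incoming data.
 */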
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
                              size_t size, enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_TO_DEVICE:
                cache_op(paddr, size, dma_wb_range);
                break;
        case DMA_FROM_DEVICE:
        case DMA_BIDIRECTIONAL:
                cache_op(paddr, size, dma_wbinv_range);
                break;
        default:
                BUG();
        }
}

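/*
 * Take a buffer back from the device: nothing to do for DMA_TO_DEVICE,
 * otherwise invalidate any lines the CPU may have (speculatively)
 * fetched while the device owned the buffer.
 */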
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
                           size_t size, enum dma_data_direction dir)
{
        switch (dir) {
        case DMA_TO_DEVICE:
                return;
        case DMA_FROM_DEVICE:
        case DMA_BIDIRECTIONAL:
                cache_op(paddr, size, dma_inv_range);
                break;
        default:
                BUG();
        }
}
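
/*
 * Typical call path (a sketch, via the generic dma-direct code): a
 * driver's dma_map_single(dev, buf, len, dir) on a non-coherent device
 * reaches arch_sync_dma_for_device(), and the matching
 * dma_unmap_single() or dma_sync_single_for_cpu() reaches
 * arch_sync_dma_for_cpu().
 */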