arch/mips/mm/cache.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/highmem.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/setup.h>

/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
EXPORT_SYMBOL_GPL(__flush_cache_all);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);
void (*__flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__flush_icache_user_range);
void (*__local_flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__local_flush_icache_user_range);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void * addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);
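
/*
 * Note (added for clarity): none of the function pointers above are set
 * here.  They are installed at boot by cpu_cache_init() further down,
 * which calls the cache-family init routine matching the detected CPU
 * (r3k/r4k/tx39/octeon, ...), and each of those routines points them at
 * its own implementations.
 */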
#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * We could optimize the case where the cache argument is not BCACHE but
 * that seems very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	__flush_icache_user_range(addr, addr + bytes);

	return 0;
}
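
/*
 * Illustrative use (not part of the original file): user space typically
 * reaches this syscall through the libc wrapper declared in
 * <sys/cachectl.h>, e.g. after generating code at runtime:
 *
 *	cacheflush(buf, len, BCACHE);
 *
 * where buf/len describe the freshly written instructions (the names are
 * purely illustrative).  ICACHE, DCACHE and BCACHE come from
 * <asm/cachectl.h>; as the comment above notes, the implementation
 * currently handles all of them the same way, flushing the user icache
 * range.
 */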
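
/*
 * Added note: if the page belongs to a file mapping that is not currently
 * mapped into any user address space, the flush is deferred by marking the
 * page dcache-dirty; __update_cache() below performs the flush once the
 * page is actually mapped.  Otherwise the page is flushed through its
 * kernel virtual address right away.
 */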
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);
	unsigned long addr;

	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to get
	 * faulted into the tlb (and thus flushed) anyway.
	 */
	if (PageHighMem(page))
		addr = (unsigned long)kmap_atomic(page);
	else
		addr = (unsigned long)page_address(page);

	flush_data_cache_page(addr);

	if (PageHighMem(page))
		__kunmap_atomic((void *)addr);
}

EXPORT_SYMBOL(__flush_dcache_page);
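
/*
 * Added note: anonymous pages have no address_space to defer against, so
 * when the user-space address may alias the page's kernel mapping it is
 * flushed immediately.  If the page is currently mapped (and not already
 * marked dcache-dirty), flush through a temporary coherent mapping set up
 * by kmap_coherent(); otherwise the kernel address itself is flushed.
 */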
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapcount(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}

EXPORT_SYMBOL(__flush_anon_page);
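
/*
 * Added note: called when a PTE is installed for the given address
 * (typically via update_mmu_cache()) to complete a flush that
 * __flush_dcache_page() above deferred: if the page is marked dcache-dirty
 * and is either executable or aliases the new user mapping, write it back
 * now and clear the dirty mark.
 */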
void __update_cache(unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (Page_dcache_dirty(page)) {
		if (PageHighMem(page))
			addr = (unsigned long)kmap_atomic(page);
		else
			addr = (unsigned long)page_address(page);

		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);

		if (PageHighMem(page))
			__kunmap_atomic((void *)addr);

		ClearPageDcacheDirty(page);
	}
}

unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);
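
/*
 * Added note: protection_map[] is indexed by the low vm_flags bits
 * (VM_READ = 1, VM_WRITE = 2, VM_EXEC = 4, VM_SHARED = 8), so entries 0-7
 * cover private mappings and 8-15 shared ones.  On CPUs with RIXI the
 * table spells out "no exec" and "no read" explicitly via _PAGE_NO_EXEC /
 * _PAGE_NO_READ rather than relying on the generic PAGE_* protections;
 * e.g. entry 11 (shared | write | read) gets _PAGE_WRITE and _PAGE_NO_EXEC
 * but not _PAGE_NO_READ.
 */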
static inline void setup_protection_map(void)
{
	if (cpu_has_rixi) {
		protection_map[0]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[1]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[2]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[3]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[4]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[5]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[6]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[7]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);

		protection_map[8]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[9]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
		protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
		protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);

	} else {
		protection_map[0] = PAGE_NONE;
		protection_map[1] = PAGE_READONLY;
		protection_map[2] = PAGE_COPY;
		protection_map[3] = PAGE_COPY;
		protection_map[4] = PAGE_READONLY;
		protection_map[5] = PAGE_READONLY;
		protection_map[6] = PAGE_COPY;
		protection_map[7] = PAGE_COPY;
		protection_map[8] = PAGE_NONE;
		protection_map[9] = PAGE_READONLY;
		protection_map[10] = PAGE_SHARED;
		protection_map[11] = PAGE_SHARED;
		protection_map[12] = PAGE_READONLY;
		protection_map[13] = PAGE_READONLY;
		protection_map[14] = PAGE_SHARED;
		protection_map[15] = PAGE_SHARED;
	}
}
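
/*
 * Added note: at most one of the cpu_has_*_cache tests is expected to be
 * true for a given CPU; the chosen *_cache_init() routine fills in the
 * function pointers declared at the top of this file.  The init routines
 * are declared __weak so this file still links in configurations where a
 * particular cache family is not built.
 */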
void cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
	}
	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		r6k_cache_init();
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
	}
	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		r8k_cache_init();
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
	}

	if (cpu_has_octeon_cache) {
		extern void __weak octeon_cache_init(void);

		octeon_cache_init();
	}

	setup_protection_map();
}
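
/*
 * Added note: default policy for whether a mapping of physical memory
 * (e.g. via /dev/mem-style drivers) must bypass the cache; platforms can
 * override this __weak version.  O_DSYNC requests uncached access
 * explicitly, and anything beyond high_memory is not covered by the
 * kernel's cached lowmem mapping, so it is treated as uncached as well.
 */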
int __weak __uncached_access(struct file *file, unsigned long addr)
{
	if (file->f_flags & O_DSYNC)
		return 1;

	return addr >= __pa(high_memory);
}