[ARM] 5545/2: add flush_kernel_dcache_page() for ARM
author Nicolas Pitre <nico@cam.org>
Fri, 12 Jun 2009 02:09:29 +0000 (03:09 +0100)
committer Russell King <rmk+kernel@arm.linux.org.uk>
Sun, 14 Jun 2009 10:20:37 +0000 (11:20 +0100)
Without this, the default implementation is a no-op, which is completely
wrong with a VIVT cache, and usage of sg_copy_buffer() produces
unpredictable results.
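
For context, the copy-through-a-kernel-mapping pattern this helper serves
looks roughly like the sketch below. copy_to_page() and its parameters are
hypothetical, but the kmap/memcpy/flush/kunmap sequence mirrors what
sg_copy_buffer() does for each scatterlist entry:

	#include <linux/highmem.h>
	#include <linux/mm.h>
	#include <linux/string.h>

	/* Hypothetical helper: copy a buffer into a page that is also
	 * visible through another mapping (e.g. a user mapping). */
	static void copy_to_page(struct page *page, const void *buf,
				 size_t len)
	{
		void *vaddr = kmap(page);	/* kernel-side alias */

		memcpy(vaddr, buf, len);	/* dirties the kernel alias */
		/* On VIVT/aliasing-VIPT caches the written data sits in
		 * cache lines indexed by the kernel virtual address; flush
		 * them so other mappings of the page observe the data.
		 * With a no-op flush the copy may never become visible. */
		flush_kernel_dcache_page(page);
		kunmap(page);
	}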

Tested-by: Sebastian Andrzej Siewior <bigeasy@breakpoint.cc>
Cc: stable@kernel.org
Signed-off-by: Nicolas Pitre <nico@marvell.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
arch/arm/include/asm/cacheflush.h

index bb7d695f3900f70d635a3597cd19d7cb68c0d732..1a711ea8418b6045c581a576caa3f85496ee2673 100644
@@ -429,6 +429,14 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
                __flush_anon_page(vma, page, vmaddr);
 }
 
+#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+static inline void flush_kernel_dcache_page(struct page *page)
+{
+       /* highmem pages are always flushed upon kunmap already */
+       if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
+               __cpuc_flush_dcache_page(page_address(page));
+}
+
 #define flush_dcache_mmap_lock(mapping) \
        spin_lock_irq(&(mapping)->tree_lock)
 #define flush_dcache_mmap_unlock(mapping) \
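
The guard in the new helper reflects two design points: non-aliasing VIPT
and PIPT caches are excluded because kernel and user mappings of a page hit
the same cache lines there, so no extra flush is needed; and highmem pages
are skipped because, as the in-code comment notes, they are already flushed
when kunmap'ed, which also means page_address() is only dereferenced for
lowmem pages where it is always valid.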