ARM: Fix ptrace accesses
author    Russell King <rmk+kernel@arm.linux.org.uk>
Thu, 5 Nov 2009 13:29:36 +0000
committer Russell King <rmk+kernel@arm.linux.org.uk>
Mon, 14 Dec 2009 14:54:28 +0000
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
arch/arm/include/asm/cacheflush.h
arch/arm/include/asm/smp_plat.h
arch/arm/mm/flush.c

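This commit converts copy_to_user_page() from a header macro into an out-of-line function so that, on SMP, the memcpy() and the cache maintenance that must follow it are pinned to a single CPU, and so the I-cache flush can be broadcast by IPI on systems whose hardware does not broadcast cache operations.

For context, copy_to_user_page() is what the generic ptrace write path calls once it has pinned and kmapped the target task's page. A simplified sketch of that caller, modelled on access_process_vm() in mm/memory.c of this era (get_user_pages() and error handling are omitted, and sketch_remote_write is an illustrative name, not kernel code):

    static int sketch_remote_write(struct vm_area_struct *vma,
                                   struct page *page, unsigned long addr,
                                   const void *buf, int len)
    {
            void *maddr = kmap(page);       /* kernel alias of the page */

            /* copy in the data, then let the arch fix up its caches */
            copy_to_user_page(vma, page, addr,
                              maddr + (addr & ~PAGE_MASK), buf, len);
            set_page_dirty_lock(page);
            kunmap(page);
            return len;
    }

Because the whole copy-plus-flush now sits behind one call, the architecture can make it atomic with respect to CPU migration, which a macro expanding to memcpy() plus a flush in the caller could not guarantee.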
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 730aefcfbee3eb8e0c46ea0a6dc0b9ba0fd1e27f..3d2ef54c7cb99a44105212ba31ab9a8fbd0b732b 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -316,12 +316,8 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
  * processes address space.  Really, we want to allow our "user
  * space" model to handle this.
  */
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-       do {                                                    \
-               memcpy(dst, src, len);                          \
-               flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
-       } while (0)
-
+extern void copy_to_user_page(struct vm_area_struct *, struct page *,
+       unsigned long, void *, const void *, unsigned long);
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
        do {                                                    \
                memcpy(dst, src, len);                          \
@@ -355,17 +351,6 @@ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
        }
 }
 
-static inline void
-vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-                        unsigned long uaddr, void *kaddr,
-                        unsigned long len, int write)
-{
-       if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-               unsigned long addr = (unsigned long)kaddr;
-               __cpuc_coherent_kern_range(addr, addr + len);
-       }
-}
-
 #ifndef CONFIG_CPU_CACHE_VIPT
 #define flush_cache_mm(mm) \
                vivt_flush_cache_mm(mm)
@@ -373,15 +358,10 @@ vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                vivt_flush_cache_range(vma,start,end)
 #define flush_cache_page(vma,addr,pfn) \
                vivt_flush_cache_page(vma,addr,pfn)
-#define flush_ptrace_access(vma,page,ua,ka,len,write) \
-               vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
 #else
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
-extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-                               unsigned long uaddr, void *kaddr,
-                               unsigned long len, int write);
 #endif
 
 #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
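
With the macro gone, flush_ptrace_access() also disappears from the arch-visible API: its VIVT inline and VIPT declaration are removed here, and it becomes static in arch/arm/mm/flush.c below. copy_from_user_page() stays a macro because a read leaves the target's I-cache alone. A usage sketch of the new prototype, planting a breakpoint in a traced task (illustrative: vma, page, addr, kaddr and offset would come from the ptrace machinery; 0xe1200070 encodes the ARM BKPT #0 instruction):

    u32 insn = 0xe1200070;                  /* ARM BKPT #0 */
    copy_to_user_page(vma, page, addr,
                      kaddr + offset,       /* kernel alias of addr */
                      &insn, sizeof(insn));

When this returns, the D-cache line holding the new instruction has been cleaned and every CPU's I-cache made coherent, so the tracee hits the breakpoint wherever it is scheduled next.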
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 59303e2008457763334a6e2306de382280030ab7..e6215305544aa9c63db768c53a7c194fe38c6747 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -13,4 +13,9 @@ static inline int tlb_ops_need_broadcast(void)
        return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
 }
 
+static inline int cache_ops_need_broadcast(void)
+{
+       return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1;
+}
+
 #endif
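
The new helper mirrors tlb_ops_need_broadcast() above it: both decode ID_MMFR3 bits [15:12], the "maintenance broadcast" field. Per the ARM ARM, 0 means no CP15 maintenance operation is broadcast in hardware, 1 means cache and branch predictor maintenance is broadcast but TLB maintenance is not, and 2 means all of it is; hence "< 1" for cache operations and "< 2" for TLB operations. Its single user in this commit is the IPI decision in flush.c (repeated from the hunk below):

    /* if the hardware won't propagate the I-cache flush to the
     * other cores, send them an IPI so each flushes its own */
    if (cache_ops_need_broadcast())
            smp_call_function(flush_ptrace_access_other, NULL, 1);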
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 6f3a4b7a3b8276e5c442e4bbfc8273c8e9745f54..e34f095e2090517b8f968f4af6703e60dd371c8b 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -13,6 +13,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
+#include <asm/smp_plat.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
 
@@ -87,13 +88,26 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
        if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
                __flush_icache_all();
 }
+#else
+#define flush_pfn_alias(pfn,vaddr)     do { } while (0)
+#endif
 
+#ifdef CONFIG_SMP
+static void flush_ptrace_access_other(void *args)
+{
+       __flush_icache_all();
+}
+#endif
+
+static
 void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
-                        unsigned long uaddr, void *kaddr,
-                        unsigned long len, int write)
+                        unsigned long uaddr, void *kaddr, unsigned long len)
 {
        if (cache_is_vivt()) {
-               vivt_flush_ptrace_access(vma, page, uaddr, kaddr, len, write);
+               if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
+                       unsigned long addr = (unsigned long)kaddr;
+                       __cpuc_coherent_kern_range(addr, addr + len);
+               }
                return;
        }
 
@@ -104,16 +118,37 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
        }
 
        /* VIPT non-aliasing cache */
-       if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) &&
-           vma->vm_flags & VM_EXEC) {
+       if (vma->vm_flags & VM_EXEC) {
                unsigned long addr = (unsigned long)kaddr;
-               /* only flushing the kernel mapping on non-aliasing VIPT */
                __cpuc_coherent_kern_range(addr, addr + len);
+#ifdef CONFIG_SMP
+               if (cache_ops_need_broadcast())
+                       smp_call_function(flush_ptrace_access_other,
+                                         NULL, 1);
+#endif
        }
 }
-#else
-#define flush_pfn_alias(pfn,vaddr)     do { } while (0)
+
+/*
+ * Copy user data from/to a page which is mapped into a different
+ * processes address space.  Really, we want to allow our "user
+ * space" model to handle this.
+ *
+ * Note that this code needs to run on the current CPU.
+ */
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+                      unsigned long uaddr, void *dst, const void *src,
+                      unsigned long len)
+{
+#ifdef CONFIG_SMP
+       preempt_disable();
 #endif
+       memcpy(dst, src, len);
+       flush_ptrace_access(vma, page, uaddr, dst, len);
+#ifdef CONFIG_SMP
+       preempt_enable();
+#endif
+}
 
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
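
The rewritten flush_ptrace_access() drops the old mm_cpumask()/smp_processor_id() test for the VIPT non-aliasing case: the page may be executed next on a CPU that never appeared in the mm's cpumask but still holds stale I-cache lines, so flushing only the current CPU was not enough. copy_to_user_page() wraps the copy and the flush in preempt_disable()/preempt_enable() on SMP so that both run on the same CPU, and where the hardware does not broadcast cache maintenance the remaining CPUs are told, via IPI, to flush their own I-caches. The same pattern in isolation, as a minimal sketch assuming this kernel's smp_call_function(func, info, wait) signature (both function names are illustrative):

    static void ipi_flush_icache(void *unused)
    {
            __flush_icache_all();           /* runs on each other CPU */
    }

    static void flush_icache_all_cpus(void)
    {
            preempt_disable();              /* stay on this CPU while IPIs fly */
            __flush_icache_all();           /* local I-cache first */
            if (cache_ops_need_broadcast()) /* hardware won't do it for us */
                    smp_call_function(ipi_flush_icache, NULL, 1);
            preempt_enable();
    }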