x86/mm/cpa: Optimize cpa_flush_array() TLB invalidation
author	Peter Zijlstra <peterz@infradead.org>
	Mon, 3 Dec 2018 17:03:49 +0000 (18:03 +0100)
committer	Ingo Molnar <mingo@kernel.org>
	Mon, 17 Dec 2018 17:54:26 +0000 (18:54 +0100)
Instead of punting and doing flush_tlb_all(), do the same as
flush_tlb_kernel_range() does and use single-page invalidations,
falling back to a full flush only when the number of pages exceeds
tlb_single_page_flush_ceiling.
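
In short, the flush path becomes (condensed from the patch below;
__flush_tlb_one_kernel() only invalidates on the local CPU, hence the
on_each_cpu() round trip):

	void __cpa_flush_array(void *data)
	{
		struct cpa_data *cpa = data;
		unsigned int i;

		for (i = 0; i < cpa->numpages; i++)
			__flush_tlb_one_kernel(__cpa_addr(cpa, i));
	}

	/* in cpa_flush_array(): */
	if (cpa->numpages <= tlb_single_page_flush_ceiling)
		on_each_cpu(__cpa_flush_array, cpa, 1);
	else
		flush_tlb_all();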

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tom.StDenis@amd.com
Cc: dave.hansen@intel.com
Link: http://lkml.kernel.org/r/20181203171043.430001980@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/mm/mm_internal.h
arch/x86/mm/pageattr.c
arch/x86/mm/tlb.c

diff --git a/arch/x86/mm/mm_internal.h b/arch/x86/mm/mm_internal.h
index 4e1f6e1b8159e7ddc78d46c8ebb6a4e8d05f4964..319bde386d5f4a9402f695ffb6948b76b58f47bd 100644
--- a/arch/x86/mm/mm_internal.h
+++ b/arch/x86/mm/mm_internal.h
@@ -19,4 +19,6 @@ extern int after_bootmem;
 
 void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache);
 
+extern unsigned long tlb_single_page_flush_ceiling;
+
 #endif /* __X86_MM_INTERNAL_H */
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index afa98b7b60505e0503b392ef8ba5005fb8af9294..351874259a71391acf60834a5246455f1be6f37d 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -26,6 +26,8 @@
 #include <asm/pat.h>
 #include <asm/set_memory.h>
 
+#include "mm_internal.h"
+
 /*
  * The current flushing context - we pass it instead of 5 arguments:
  */
@@ -346,16 +348,26 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
        }
 }
 
-static void cpa_flush_array(unsigned long baddr, unsigned long *start,
-                           int numpages, int cache,
-                           int in_flags, struct page **pages)
+void __cpa_flush_array(void *data)
 {
-       unsigned int i, level;
+       struct cpa_data *cpa = data;
+       unsigned int i;
 
-       if (__inv_flush_all(cache))
+       for (i = 0; i < cpa->numpages; i++)
+               __flush_tlb_one_kernel(__cpa_addr(cpa, i));
+}
+
+static void cpa_flush_array(struct cpa_data *cpa, int cache)
+{
+       unsigned int i;
+
+       if (__inv_flush_all(cache))
                return;
 
-       flush_tlb_all();
+       if (cpa->numpages <= tlb_single_page_flush_ceiling)
+               on_each_cpu(__cpa_flush_array, cpa, 1);
+       else
+               flush_tlb_all();
 
        if (!cache)
                return;
@@ -366,15 +378,11 @@ static void cpa_flush_array(unsigned long baddr, unsigned long *start,
         * will cause all other CPUs to flush the same
         * cachelines:
         */
-       for (i = 0; i < numpages; i++) {
-               unsigned long addr;
+       for (i = 0; i < cpa->numpages; i++) {
+               unsigned long addr = __cpa_addr(cpa, i);
+               unsigned int level;
                pte_t *pte;
 
-               if (in_flags & CPA_PAGES_ARRAY)
-                       addr = (unsigned long)page_address(pages[i]);
-               else
-                       addr = start[i];
-
                pte = lookup_address(addr, &level);
 
                /*
@@ -1771,12 +1779,10 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
                goto out;
        }
 
-       if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
-               cpa_flush_array(baddr, addr, numpages, cache,
-                               cpa.flags, pages);
-       } else {
+       if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
+               cpa_flush_array(&cpa, cache);
+       else
                cpa_flush_range(baddr, numpages, cache);
-       }
 
 out:
        return ret;
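
Note: __cpa_addr() is not introduced by this patch; it comes from the
earlier "x86/mm/cpa: Make cpa_data::vaddr invariant" patch in this
series. For reference, it resolves the i-th address along these lines
(a sketch, not the verbatim helper):

	static inline unsigned long __cpa_addr(struct cpa_data *cpa,
					       unsigned long idx)
	{
		if (cpa->flags & CPA_PAGES_ARRAY)
			return (unsigned long)page_address(cpa->pages[idx]);

		if (cpa->flags & CPA_ARRAY)
			return cpa->vaddr[idx];

		/* Plain range: a linear region starting at *cpa->vaddr. */
		return *cpa->vaddr + idx * PAGE_SIZE;
	}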
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 03b6b4c2238daa6c5a97d6eb10c32fb825a9a077..999d6d8f0beff218a9e0a8dd720d0f5607ff9165 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -15,6 +15,8 @@
 #include <asm/apic.h>
 #include <asm/uv/uv.h>
 
+#include "mm_internal.h"
+
 /*
  *     TLB flushing, formerly SMP-only
  *             c/o Linus Torvalds.
@@ -721,7 +723,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
  *
  * This is in units of pages.
  */
-static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
+unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
 
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                                unsigned long end, unsigned int stride_shift,
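
For comparison, flush_tlb_kernel_range() in this same file already
applies the ceiling heuristic the commit message refers to; around
this time it reads roughly as follows (paraphrased, not part of this
patch):

	void flush_tlb_kernel_range(unsigned long start, unsigned long end)
	{
		/* Balance as user space task's flush, a bit conservative */
		if (end == TLB_FLUSH_ALL ||
		    (end - start) >> PAGE_SHIFT > tlb_single_page_flush_ceiling) {
			on_each_cpu(do_flush_tlb_all, NULL, 1);
		} else {
			struct flush_tlb_info info;

			info.start = start;
			info.end = end;
			on_each_cpu(do_kernel_range_flush, &info, 1);
		}
	}

The ceiling itself (default 33 pages) is tunable at runtime through
debugfs: /sys/kernel/debug/x86/tlb_single_page_flush_ceiling.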