x86/tlb: Uninline nmi_uaccess_okay()
author    Thomas Gleixner <tglx@linutronix.de>
          Tue, 21 Apr 2020 09:20:40 +0000 (11:20 +0200)
committer Borislav Petkov <bp@suse.de>
          Sun, 26 Apr 2020 16:47:05 +0000 (18:47 +0200)
cpu_tlbstate is exported because various TLB-related functions need
access to it, but cpu_tlbstate contains sensitive state which should
only be accessed by well-contained kernel functions and not be directly
exposed to modules.

nmi_uaccess_okay() is the last inline function which requires access to
cpu_tlbstate. Move it into the TLB code.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200421092600.052543007@linutronix.de
arch/x86/include/asm/tlbflush.h
arch/x86/mm/tlb.c
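
The pattern applied here (and by the rest of this series) is the usual
way to hide per-CPU state from modules: keep the data definition private
to core code and expose only an out-of-line accessor, so the variable
itself no longer needs a per-CPU export. A minimal sketch of the idea
with hypothetical names (stat_count and stat_inc() are illustrative
only, not kernel APIs):

	/* header visible to modules: declare only the accessor */
	void stat_inc(void);

	/* core kernel .c file: the per-CPU data itself stays private */
	static DEFINE_PER_CPU(unsigned long, stat_count);

	void stat_inc(void)
	{
		this_cpu_inc(stat_count);
	}
	EXPORT_SYMBOL_GPL(stat_inc);	/* export the function, not the data */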

index 917deea058d50d1d542fec604596cff3ac3cf521..1c17f5a6cb5385a239bcf444793cb7332a991c6e 100644
@@ -247,38 +247,7 @@ struct tlb_state {
 };
 DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
-/*
- * Blindly accessing user memory from NMI context can be dangerous
- * if we're in the middle of switching the current user task or
- * switching the loaded mm.  It can also be dangerous if we
- * interrupted some kernel code that was temporarily using a
- * different mm.
- */
-static inline bool nmi_uaccess_okay(void)
-{
-       struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
-       struct mm_struct *current_mm = current->mm;
-
-       VM_WARN_ON_ONCE(!loaded_mm);
-
-       /*
-        * The condition we want to check is
-        * current_mm->pgd == __va(read_cr3_pa()).  This may be slow, though,
-        * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
-        * is supposed to be reasonably fast.
-        *
-        * Instead, we check the almost equivalent but somewhat conservative
-        * condition below, and we rely on the fact that switch_mm_irqs_off()
-        * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
-        */
-       if (loaded_mm != current_mm)
-               return false;
-
-       VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
-
-       return true;
-}
-
+bool nmi_uaccess_okay(void);
 #define nmi_uaccess_okay nmi_uaccess_okay
 
 void cr4_update_irqsoff(unsigned long set, unsigned long clear);
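
The self-referential define is kept so that generic code can tell that
the architecture supplies its own implementation; the generic fallback
in include/linux/uaccess.h reads roughly:

	#ifndef nmi_uaccess_okay
	# define nmi_uaccess_okay() true
	#endif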
index aabf8c7377e3e51533ba7fbcc4dfe0043fceb4f9..45426ae8e7d73a286be752137b3f9eee3510177b 100644
@@ -1094,6 +1094,38 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
        put_cpu();
 }
 
+/*
+ * Blindly accessing user memory from NMI context can be dangerous
+ * if we're in the middle of switching the current user task or
+ * switching the loaded mm.  It can also be dangerous if we
+ * interrupted some kernel code that was temporarily using a
+ * different mm.
+ */
+bool nmi_uaccess_okay(void)
+{
+       struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
+       struct mm_struct *current_mm = current->mm;
+
+       VM_WARN_ON_ONCE(!loaded_mm);
+
+       /*
+        * The condition we want to check is
+        * current_mm->pgd == __va(read_cr3_pa()).  This may be slow, though,
+        * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
+        * is supposed to be reasonably fast.
+        *
+        * Instead, we check the almost equivalent but somewhat conservative
+        * condition below, and we rely on the fact that switch_mm_irqs_off()
+        * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
+        */
+       if (loaded_mm != current_mm)
+               return false;
+
+       VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
+
+       return true;
+}
+
 static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
 {
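
Out of line or not, the calling convention is unchanged: NMI-context
code checks the predicate before touching user memory. A simplified
sketch of such a caller, modeled on copy_from_user_nmi() in
arch/x86/lib/usercopy.c (not part of this patch):

	unsigned long copy_from_user_nmi(void *to, const void __user *from,
					 unsigned long n)
	{
		unsigned long ret;

		/* Refuse the access if CR3 may not match current->mm. */
		if (!nmi_uaccess_okay())
			return n;

		pagefault_disable();
		ret = __copy_from_user_inatomic(to, from, n);
		pagefault_enable();

		return ret;	/* bytes not copied; 0 on success */
	}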