x86/reboot: Harden virtualization hooks for emergency reboot
author Sean Christopherson <seanjc@google.com>
Fri, 21 Jul 2023 20:18:42 +0000 (13:18 -0700)
committer Sean Christopherson <seanjc@google.com>
Thu, 3 Aug 2023 22:37:14 +0000 (15:37 -0700)
Provide dedicated helpers to (un)register virt hooks used during an
emergency crash/reboot, and WARN on any attempt to overwrite an
already-registered callback or to do an unpaired unregister.

Opportunistically use rcu_assign_pointer() instead of RCU_INIT_POINTER(),
mainly so that the set/unset paths are more symmetrical, but also because
any performance gains from using RCU_INIT_POINTER() are meaningless for
this code.
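
For illustration, a minimal sketch of how a client of the new helpers is
expected to pair the calls, modeled on the VMX hunks below; the
my_emergency_disable()/my_init()/my_exit() names are hypothetical and not
part of this patch:

  #include <asm/reboot.h>

  /* Hypothetical callback; the real one in this patch is VMX's
   * crash_vmclear_local_loaded_vmcss(). */
  static void my_emergency_disable(void)
  {
          /* e.g. VMCLEAR loaded VMCSs before the emergency reboot. */
  }

  static int __init my_init(void)
  {
          /* WARNs and bails if a callback is already registered. */
          cpu_emergency_register_virt_callback(my_emergency_disable);
          return 0;
  }

  static void __exit my_exit(void)
  {
          /* Must pass the same callback; an unpaired unregister WARNs and
           * is ignored.  The helper's synchronize_rcu() ensures the callback
           * is no longer running on the emergency path once this returns. */
          cpu_emergency_unregister_virt_callback(my_emergency_disable);
  }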

Reviewed-by: Kai Huang <kai.huang@intel.com>
Link: https://lore.kernel.org/r/20230721201859.2307736-3-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/include/asm/reboot.h
arch/x86/kernel/reboot.c
arch/x86/kvm/vmx/vmx.c

index dc201724a64336cbd852c74897d886f6f62dd302..74c6a624d1669f02b2dc5350b2b55854d39297d2 100644 (file)
@@ -25,8 +25,9 @@ void __noreturn machine_real_restart(unsigned int type);
 #define MRR_BIOS       0
 #define MRR_APM                1
 
-typedef void crash_vmclear_fn(void);
-extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
+typedef void (cpu_emergency_virt_cb)(void);
+void cpu_emergency_register_virt_callback(cpu_emergency_virt_cb *callback);
+void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback);
 void cpu_emergency_disable_virtualization(void);
 
 typedef void (*nmi_shootdown_cb)(int, struct pt_regs*);
index 3fa4c6717a1dbd54e83a7d474598022c0b64f25d..62ccedeb5e2b8bb5cbccfa1d7e9ea97346b7dd72 100644 (file)
@@ -794,17 +794,35 @@ void machine_crash_shutdown(struct pt_regs *regs)
  *
  * protected by rcu.
  */
-crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
-EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
+static cpu_emergency_virt_cb __rcu *cpu_emergency_virt_callback;
+
+void cpu_emergency_register_virt_callback(cpu_emergency_virt_cb *callback)
+{
+       if (WARN_ON_ONCE(rcu_access_pointer(cpu_emergency_virt_callback)))
+               return;
+
+       rcu_assign_pointer(cpu_emergency_virt_callback, callback);
+}
+EXPORT_SYMBOL_GPL(cpu_emergency_register_virt_callback);
+
+void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback)
+{
+       if (WARN_ON_ONCE(rcu_access_pointer(cpu_emergency_virt_callback) != callback))
+               return;
+
+       rcu_assign_pointer(cpu_emergency_virt_callback, NULL);
+       synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(cpu_emergency_unregister_virt_callback);
 
 static inline void cpu_crash_vmclear_loaded_vmcss(void)
 {
-       crash_vmclear_fn *do_vmclear_operation = NULL;
+       cpu_emergency_virt_cb *callback;
 
        rcu_read_lock();
-       do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
-       if (do_vmclear_operation)
-               do_vmclear_operation();
+       callback = rcu_dereference(cpu_emergency_virt_callback);
+       if (callback)
+               callback();
        rcu_read_unlock();
 }
 
index 75351477f090ca3f4f79544afdb12651f89da0c7..661ba09685b8a38f4e7d8b7d73f1a4a23b955c42 100644 (file)
@@ -8571,8 +8571,7 @@ static void __vmx_exit(void)
 {
        allow_smaller_maxphyaddr = false;
 
-       RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
-       synchronize_rcu();
+       cpu_emergency_unregister_virt_callback(crash_vmclear_local_loaded_vmcss);
 
        vmx_cleanup_l1d_flush();
 }
@@ -8620,8 +8619,7 @@ static int __init vmx_init(void)
                pi_init_cpu(cpu);
        }
 
-       rcu_assign_pointer(crash_vmclear_loaded_vmcss,
-                          crash_vmclear_local_loaded_vmcss);
+       cpu_emergency_register_virt_callback(crash_vmclear_local_loaded_vmcss);
 
        vmx_check_vmcs12_offsets();