Merge tag 'x86_cpu_for_v6.0_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
[sfrench/cifs-2.6.git] / arch / x86 / kernel / cpu / intel.c
index a00dd3e2ab558d66cf72df411dce32b9ec328dec..663f6e6dd288650b198ebf89c95f4257e862f715 100644 (file)
@@ -7,10 +7,13 @@
 #include <linux/smp.h>
 #include <linux/sched.h>
 #include <linux/sched/clock.h>
+#include <linux/semaphore.h>
 #include <linux/thread_info.h>
 #include <linux/init.h>
 #include <linux/uaccess.h>
+#include <linux/workqueue.h>
 #include <linux/delay.h>
+#include <linux/cpuhotplug.h>
 
 #include <asm/cpufeature.h>
 #include <asm/msr.h>
@@ -91,7 +94,7 @@ static bool ring3mwait_disabled __read_mostly;
 static int __init ring3mwait_disable(char *__unused)
 {
        ring3mwait_disabled = true;
-       return 0;
+       return 1;
 }
 __setup("ring3mwait=disable", ring3mwait_disable);
 
@@ -181,6 +184,38 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
        return false;
 }
 
+int intel_cpu_collect_info(struct ucode_cpu_info *uci)
+{
+       unsigned int val[2];
+       unsigned int family, model;
+       struct cpu_signature csig = { 0 };
+       unsigned int eax, ebx, ecx, edx;
+
+       memset(uci, 0, sizeof(*uci));
+
+       eax = 0x00000001;
+       ecx = 0;
+       native_cpuid(&eax, &ebx, &ecx, &edx);
+       csig.sig = eax;
+
+       family = x86_family(eax);
+       model  = x86_model(eax);
+
+       if (model >= 5 || family > 6) {
+               /* get processor flags from MSR 0x17 */
+               native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
+               csig.pf = 1 << ((val[1] >> 18) & 7);
+       }
+
+       csig.rev = intel_get_microcode_revision();
+
+       uci->cpu_sig = csig;
+       uci->valid = 1;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(intel_cpu_collect_info);
+
 static void early_init_intel(struct cpuinfo_x86 *c)
 {
        u64 misc_enable;
@@ -717,13 +752,6 @@ static void init_intel(struct cpuinfo_x86 *c)
 
        init_intel_misc_features(c);
 
-       if (tsx_ctrl_state == TSX_CTRL_ENABLE)
-               tsx_enable();
-       else if (tsx_ctrl_state == TSX_CTRL_DISABLE)
-               tsx_disable();
-       else if (tsx_ctrl_state == TSX_CTRL_RTM_ALWAYS_ABORT)
-               tsx_clear_cpuid();
-
        split_lock_init();
        bus_lock_init();
 
@@ -1006,6 +1034,8 @@ static const struct {
 
 static struct ratelimit_state bld_ratelimit;
 
+static DEFINE_SEMAPHORE(buslock_sem);
+
 static inline bool match_option(const char *arg, int arglen, const char *opt)
 {
        int len = strlen(opt), ratelimit;
@@ -1116,18 +1146,52 @@ static void split_lock_init(void)
                split_lock_verify_msr(sld_state != sld_off);
 }
 
+static void __split_lock_reenable(struct work_struct *work)
+{
+       sld_update_msr(true);
+       up(&buslock_sem);
+}
+
+/*
+ * If a CPU goes offline with pending delayed work to re-enable split lock
+ * detection then the delayed work will be executed on some other CPU. That
+ * handles releasing the buslock_sem, but because it executes on a
+ * different CPU probably won't re-enable split lock detection. This is a
+ * problem on HT systems since the sibling CPU on the same core may then be
+ * left running with split lock detection disabled.
+ *
+ * Unconditionally re-enable detection here.
+ */
+static int splitlock_cpu_offline(unsigned int cpu)
+{
+       sld_update_msr(true);
+
+       return 0;
+}
+
+static DECLARE_DELAYED_WORK(split_lock_reenable, __split_lock_reenable);
+
 static void split_lock_warn(unsigned long ip)
 {
-       pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
-                           current->comm, current->pid, ip);
+       int cpu;
 
-       /*
-        * Disable the split lock detection for this task so it can make
-        * progress and set TIF_SLD so the detection is re-enabled via
-        * switch_to_sld() when the task is scheduled out.
-        */
+       if (!current->reported_split_lock)
+               pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
+                                   current->comm, current->pid, ip);
+       current->reported_split_lock = 1;
+
+       /* misery factor #1, sleep 10ms before trying to execute split lock */
+       if (msleep_interruptible(10) > 0)
+               return;
+       /* Misery factor #2, only allow one buslocked disabled core at a time */
+       if (down_interruptible(&buslock_sem) == -EINTR)
+               return;
+       cpu = get_cpu();
+       schedule_delayed_work_on(cpu, &split_lock_reenable, 2);
+
+       /* Disable split lock detection on this CPU to make progress */
        sld_update_msr(false);
-       set_tsk_thread_flag(current, TIF_SLD);
+       put_cpu();
 }
 
 bool handle_guest_split_lock(unsigned long ip)
@@ -1200,18 +1264,6 @@ void handle_bus_lock(struct pt_regs *regs)
        }
 }
 
-/*
- * This function is called only when switching between tasks with
- * different split-lock detection modes. It sets the MSR for the
- * mode of the new task. This is right most of the time, but since
- * the MSR is shared by hyperthreads on a physical core there can
- * be glitches when the two threads need different modes.
- */
-void switch_to_sld(unsigned long tifn)
-{
-       sld_update_msr(!(tifn & _TIF_SLD));
-}
-
 /*
  * Bits in the IA32_CORE_CAPABILITIES are not architectural, so they should
  * only be trusted if it is confirmed that a CPU model implements a
@@ -1237,6 +1289,7 @@ static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,    1),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,           1),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,         1),
+       X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,          1),
        {}
 };
 
@@ -1281,10 +1334,14 @@ static void sld_state_show(void)
                pr_info("disabled\n");
                break;
        case sld_warn:
-               if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
+               if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {
                        pr_info("#AC: crashing the kernel on kernel split_locks and warning on user-space split_locks\n");
-               else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
+                       if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+                                             "x86/splitlock", NULL, splitlock_cpu_offline) < 0)
+                               pr_warn("No splitlock CPU offline handler\n");
+               } else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) {
                        pr_info("#DB: warning on user-space bus_locks\n");
+               }
                break;
        case sld_fatal:
                if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {