diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index de776b2e60467b0f7d8bcbfbe0912cb6213d6554..8ca66af96a547ddc6606e3783ec33950f7716e93 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
 #include <asm/hw_irq.h>
 #include <asm/stackprotector.h>
 
+#ifdef CONFIG_ACPI_CPPC_LIB
+#include <acpi/cppc_acpi.h>
+#endif
+
 /* representing HT siblings of each logical CPU */
 DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
@@ -148,7 +152,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
        *((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
 }
 
-static void init_freq_invariance(bool secondary);
+static void init_freq_invariance(bool secondary, bool cppc_ready);
 
 /*
  * Report back to the Boot Processor during boot time or to the caller processor
@@ -186,7 +190,7 @@ static void smp_callin(void)
         */
        set_cpu_sibling_map(raw_smp_processor_id());
 
-       init_freq_invariance(true);
+       init_freq_invariance(true, false);
 
        /*
         * Get our bogomips.
@@ -229,6 +233,7 @@ static void notrace start_secondary(void *unused)
 #endif
        cpu_init_exception_handling();
        cpu_init();
+       rcu_cpu_starting(raw_smp_processor_id());
        x86_cpuinit.early_percpu_clock_init();
        preempt_disable();
        smp_callin();
@@ -747,13 +752,14 @@ static void __init smp_quirk_init_udelay(void)
 int
 wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
 {
+       u32 dm = apic->dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
        unsigned long send_status, accept_status = 0;
        int maxlvt;
 
        /* Target chip */
        /* Boot on the stack */
        /* Kick the second */
-       apic_icr_write(APIC_DM_NMI | apic->dest_logical, apicid);
+       apic_icr_write(APIC_DM_NMI | dm, apicid);
 
        pr_debug("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();
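
For context on the conversion above: the old apic->dest_logical field held the raw ICR
destination-mode value, whereas the new apic->dest_mode_logical is a plain boolean, so
call sites now rebuild the register bits themselves. A minimal standalone sketch of that
mapping, with the APIC_DEST_* encodings reproduced from arch/x86/include/asm/apicdef.h
for illustration:

    /* ICR destination-mode encodings, as in <asm/apicdef.h> */
    #define APIC_DEST_PHYSICAL 0x00000
    #define APIC_DEST_LOGICAL  0x00800

    /*
     * The boolean field selects which destination-mode pattern gets
     * OR'd into the ICR, exactly as the ternary in the hunk above does.
     */
    static inline unsigned int icr_dest_mode(bool dest_mode_logical)
    {
            return dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
    }
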
@@ -980,10 +986,7 @@ wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
        if (!boot_error) {
                enable_start_cpu0 = 1;
                *cpu0_nmi_registered = 1;
-               if (apic->dest_logical == APIC_DEST_LOGICAL)
-                       id = cpu0_logical_apicid;
-               else
-                       id = apicid;
+               id = apic->dest_mode_logical ? cpu0_logical_apicid : apicid;
                boot_error = wakeup_secondary_cpu_via_nmi(id, start_ip);
        }
 
@@ -1340,7 +1343,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
        set_sched_topology(x86_topology);
 
        set_cpu_sibling_map(0);
-       init_freq_invariance(false);
+       init_freq_invariance(false, false);
        smp_sanity_check();
 
        switch (apic_intr_mode) {
@@ -2027,6 +2030,48 @@ out:
        return true;
 }
 
+#ifdef CONFIG_ACPI_CPPC_LIB
+static bool amd_set_max_freq_ratio(void)
+{
+       struct cppc_perf_caps perf_caps;
+       u64 highest_perf, nominal_perf;
+       u64 perf_ratio;
+       int rc;
+
+       rc = cppc_get_perf_caps(0, &perf_caps);
+       if (rc) {
+               pr_debug("Could not retrieve perf counters (%d)\n", rc);
+               return false;
+       }
+
+       highest_perf = perf_caps.highest_perf;
+       nominal_perf = perf_caps.nominal_perf;
+
+       if (!highest_perf || !nominal_perf) {
+               pr_debug("Could not retrieve highest or nominal performance\n");
+               return false;
+       }
+
+       perf_ratio = div_u64(highest_perf * SCHED_CAPACITY_SCALE, nominal_perf);
+       /* midpoint between max_boost and max_P */
+       perf_ratio = (perf_ratio + SCHED_CAPACITY_SCALE) >> 1;
+       if (!perf_ratio) {
+               pr_debug("Non-zero highest/nominal perf values led to a 0 ratio\n");
+               return false;
+       }
+
+       arch_turbo_freq_ratio = perf_ratio;
+       arch_set_max_freq_ratio(false);
+
+       return true;
+}
+#else
+static bool amd_set_max_freq_ratio(void)
+{
+       return false;
+}
+#endif
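
To make the midpoint arithmetic in amd_set_max_freq_ratio() concrete, here is a small
userspace sketch. The CPPC readings (highest_perf = 200, nominal_perf = 100) are made-up
values for illustration; SCHED_CAPACITY_SCALE is 1024, as in the kernel:

    #include <stdint.h>
    #include <stdio.h>

    #define SCHED_CAPACITY_SCALE 1024ULL   /* matches the kernel's scale */

    int main(void)
    {
            /* hypothetical CPPC caps: boost perf is twice nominal */
            uint64_t highest_perf = 200, nominal_perf = 100;

            /* max_boost relative to nominal, scaled: 200 * 1024 / 100 = 2048 */
            uint64_t perf_ratio = highest_perf * SCHED_CAPACITY_SCALE / nominal_perf;

            /* midpoint between max_boost (2048) and max_P (1024): 1536 */
            perf_ratio = (perf_ratio + SCHED_CAPACITY_SCALE) >> 1;

            printf("arch_turbo_freq_ratio = %llu\n",
                   (unsigned long long)perf_ratio);
            return 0;
    }

A result of 1536 means the assumed sustainable turbo-to-nominal frequency ratio is 1.5,
expressed in the same times-1024 fixed-point form the pr_info() below reports.
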
+
 static void init_counter_refs(void)
 {
        u64 aperf, mperf;
@@ -2038,7 +2083,7 @@ static void init_counter_refs(void)
        this_cpu_write(arch_prev_mperf, mperf);
 }
 
-static void init_freq_invariance(bool secondary)
+static void init_freq_invariance(bool secondary, bool cppc_ready)
 {
        bool ret = false;
 
@@ -2054,15 +2099,38 @@ static void init_freq_invariance(bool secondary)
 
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
                ret = intel_set_max_freq_ratio();
+       else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+               if (!cppc_ready) {
+                       return;
+               }
+               ret = amd_set_max_freq_ratio();
+       }
 
        if (ret) {
                init_counter_refs();
                static_branch_enable(&arch_scale_freq_key);
+               pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio);
        } else {
                pr_debug("Couldn't determine max cpu frequency, necessary for scale-invariant accounting.\n");
        }
 }
 
+#ifdef CONFIG_ACPI_CPPC_LIB
+static DEFINE_MUTEX(freq_invariance_lock);
+
+void init_freq_invariance_cppc(void)
+{
+       static bool secondary;
+
+       mutex_lock(&freq_invariance_lock);
+
+       init_freq_invariance(secondary, true);
+       secondary = true;
+
+       mutex_unlock(&freq_invariance_lock);
+}
+#endif
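
Because CPPC tables are not parsed until well after the APs come up, the AMD path has to
defer: the calls from native_smp_prepare_cpus() and smp_callin() pass cppc_ready=false and
bail out, and the ACPI CPPC probe re-enters later through init_freq_invariance_cppc(). A
rough timeline of the intended call order (a sketch; the acpi_cppc_processor_probe() hook
is wired up in the companion drivers/acpi/cppc_acpi.c change, not shown in this hunk):

    /*
     * Boot-time call order on an AMD system (illustrative):
     *
     *   native_smp_prepare_cpus()
     *     init_freq_invariance(false, false)      // CPPC not ready, returns
     *   start_secondary() -> smp_callin()         // each AP
     *     init_freq_invariance(true, false)       // likewise returns early
     *   acpi_cppc_processor_probe()               // per CPU, later in boot
     *     init_freq_invariance_cppc()
     *       init_freq_invariance(secondary, true) // first caller does BP setup
     */

The mutex plus the static `secondary` flag ensure exactly one probe performs the boot-CPU
setup while every later caller takes the cheap secondary path.
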
+
 static void disable_freq_invariance_workfn(struct work_struct *work)
 {
        static_branch_disable(&arch_scale_freq_key);
@@ -2112,7 +2180,7 @@ error:
        schedule_work(&disable_freq_invariance_work);
 }
 #else
-static inline void init_freq_invariance(bool secondary)
+static inline void init_freq_invariance(bool secondary, bool cppc_ready)
 {
 }
 #endif /* CONFIG_X86_64 */