Merge branch 'x86-tsx-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index dadb8f7e5a0dd9999dcd90c4c54e39b9784e6f5c..35102ecdfc8d5debd884581bb1d40e2a9029a59f 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -18,6 +18,7 @@
 #include <asm/hardirq.h>
 #include <asm/intel-family.h>
 #include <asm/apic.h>
+#include <asm/cpu_device_id.h>
 
 #include "../perf_event.h"
 
@@ -3248,16 +3249,27 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
        arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
        arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
        arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
-       /*
-        * If PMU counter has PEBS enabled it is not enough to disable counter
-        * on a guest entry since PEBS memory write can overshoot guest entry
-        * and corrupt guest memory. Disabling PEBS solves the problem.
-        */
-       arr[1].msr = MSR_IA32_PEBS_ENABLE;
-       arr[1].host = cpuc->pebs_enabled;
-       arr[1].guest = 0;
+       if (x86_pmu.flags & PMU_FL_PEBS_ALL)
+               arr[0].guest &= ~cpuc->pebs_enabled;
+       else
+               arr[0].guest &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
+       *nr = 1;
+
+       if (x86_pmu.pebs && x86_pmu.pebs_no_isolation) {
+               /*
+                * If PMU counter has PEBS enabled it is not enough to
+                * disable counter on a guest entry since PEBS memory
+                * write can overshoot guest entry and corrupt guest
+                * memory. Disabling PEBS solves the problem.
+                *
+                * Don't do this if the CPU already enforces it.
+                */
+               arr[1].msr = MSR_IA32_PEBS_ENABLE;
+               arr[1].host = cpuc->pebs_enabled;
+               arr[1].guest = 0;
+               *nr = 2;
+       }
 
-       *nr = 2;
        return arr;
 }
 
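The first hunk changes how intel_guest_get_msrs() builds the guest image of MSR_CORE_PERF_GLOBAL_CTRL: any counter with PEBS enabled is masked out of the guest's enable bits, and the extra MSR_IA32_PEBS_ENABLE switch entry is emitted only on parts without hardware PEBS isolation. A minimal userspace sketch of just the masking step follows; the constants are illustrative assumptions (the kernel's PEBS_COUNTER_MASK actually covers MAX_PEBS_EVENTS general-purpose counters, and the fixed-counter bit position below is assumed):

#include <stdint.h>
#include <stdio.h>

#define PEBS_COUNTER_MASK_SKETCH 0xffULL	/* assumed: 8 GP counters */

int main(void)
{
	uint64_t intel_ctrl   = 0x70000000fULL;	/* 4 GP + 3 fixed enabled */
	/* assumed encoding: fixed counter 0's PEBS enable at bit 32 */
	uint64_t pebs_enabled = (1ULL << 32) | 0x5ULL;

	/* PMU_FL_PEBS_ALL: every PEBS-enabled counter is masked */
	uint64_t guest_all = intel_ctrl & ~pebs_enabled;
	/* legacy PEBS: only general-purpose counter bits are masked */
	uint64_t guest_gp  = intel_ctrl &
			     ~(pebs_enabled & PEBS_COUNTER_MASK_SKETCH);

	printf("PEBS_ALL guest ctrl: %#llx\n", (unsigned long long)guest_all);
	printf("legacy   guest ctrl: %#llx\n", (unsigned long long)guest_gp);
	return 0;
}

Either way, the PEBS-enabled counters never count while the guest runs, so a late PEBS memory write cannot overshoot the guest entry and land in guest memory.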
@@ -3820,36 +3832,62 @@ static __init void intel_clovertown_quirk(void)
        x86_pmu.pebs_constraints = NULL;
 }
 
-static int intel_snb_pebs_broken(int cpu)
+static const struct x86_cpu_desc isolation_ucodes[] = {
+       INTEL_CPU_DESC(INTEL_FAM6_HASWELL_CORE,          3, 0x0000001f),
+       INTEL_CPU_DESC(INTEL_FAM6_HASWELL_ULT,           1, 0x0000001e),
+       INTEL_CPU_DESC(INTEL_FAM6_HASWELL_GT3E,          1, 0x00000015),
+       INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X,             2, 0x00000037),
+       INTEL_CPU_DESC(INTEL_FAM6_HASWELL_X,             4, 0x0000000a),
+       INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_CORE,        4, 0x00000023),
+       INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_GT3E,        1, 0x00000014),
+       INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D,      2, 0x00000010),
+       INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D,      3, 0x07000009),
+       INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D,      4, 0x0f000009),
+       INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_XEON_D,      5, 0x0e000002),
+       INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X,           2, 0x0b000014),
+       INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,             3, 0x00000021),
+       INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X,             4, 0x00000000),
+       INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_MOBILE,        3, 0x0000007c),
+       INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_DESKTOP,       3, 0x0000007c),
+       INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP,      9, 0x0000004e),
+       INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE,       9, 0x0000004e),
+       INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE,      10, 0x0000004e),
+       INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE,      11, 0x0000004e),
+       INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_MOBILE,      12, 0x0000004e),
+       INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP,     10, 0x0000004e),
+       INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP,     11, 0x0000004e),
+       INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP,     12, 0x0000004e),
+       INTEL_CPU_DESC(INTEL_FAM6_KABYLAKE_DESKTOP,     13, 0x0000004e),
+       {}
+};
+
+static void intel_check_pebs_isolation(void)
 {
-       u32 rev = UINT_MAX; /* default to broken for unknown models */
+       x86_pmu.pebs_no_isolation = !x86_cpu_has_min_microcode_rev(isolation_ucodes);
+}
 
-       switch (cpu_data(cpu).x86_model) {
-       case INTEL_FAM6_SANDYBRIDGE:
-               rev = 0x28;
-               break;
+static __init void intel_pebs_isolation_quirk(void)
+{
+       WARN_ON_ONCE(x86_pmu.check_microcode);
+       x86_pmu.check_microcode = intel_check_pebs_isolation;
+       intel_check_pebs_isolation();
+}
 
-       case INTEL_FAM6_SANDYBRIDGE_X:
-               switch (cpu_data(cpu).x86_stepping) {
-               case 6: rev = 0x618; break;
-               case 7: rev = 0x70c; break;
-               }
-       }
+static const struct x86_cpu_desc pebs_ucodes[] = {
+       INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE,          7, 0x00000028),
+       INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X,        6, 0x00000618),
+       INTEL_CPU_DESC(INTEL_FAM6_SANDYBRIDGE_X,        7, 0x0000070c),
+       {}
+};
 
-       return (cpu_data(cpu).microcode < rev);
+static bool intel_snb_pebs_broken(void)
+{
+       return !x86_cpu_has_min_microcode_rev(pebs_ucodes);
 }
 
 static void intel_snb_check_microcode(void)
 {
-       int pebs_broken = 0;
-       int cpu;
-
-       for_each_online_cpu(cpu) {
-               if ((pebs_broken = intel_snb_pebs_broken(cpu)))
-                       break;
-       }
-
-       if (pebs_broken == x86_pmu.pebs_broken)
+       if (intel_snb_pebs_broken() == x86_pmu.pebs_broken)
                return;
 
        /*
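The INTEL_CPU_DESC tables introduced above are consumed by x86_cpu_has_min_microcode_rev(), which replaces the open-coded per-CPU switch statements. A hedged userspace model of that lookup (the real helper lives in arch/x86/kernel/cpu/match.c and checks the boot CPU; the struct layout here is a simplified assumption):

#include <stdbool.h>
#include <stdint.h>

struct cpu_desc_sketch {
	uint8_t  family, model, stepping;
	uint32_t min_ucode;
};

static bool has_min_ucode(const struct cpu_desc_sketch *t,
			  uint8_t family, uint8_t model,
			  uint8_t stepping, uint32_t ucode)
{
	/* the table ends with an all-zero entry, like the {} above */
	for (; t->family | t->model; t++)
		if (t->family == family && t->model == model &&
		    t->stepping == stepping)
			return ucode >= t->min_ucode;
	return false;
}

A table miss reports false, which the callers above treat conservatively: intel_snb_pebs_broken() and intel_check_pebs_isolation() both regard an unlisted stepping as lacking the microcode fix.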
@@ -3966,23 +4004,22 @@ static __init void intel_nehalem_quirk(void)
        }
 }
 
-static bool intel_glp_counter_freezing_broken(int cpu)
-{
-       u32 rev = UINT_MAX; /* default to broken for unknown stepping */
-
-       switch (cpu_data(cpu).x86_stepping) {
-       case 1:
-               rev = 0x28;
-               break;
-       case 8:
-               rev = 0x6;
-               break;
-       }
+static const struct x86_cpu_desc counter_freezing_ucodes[] = {
+       INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT,         2, 0x0000000e),
+       INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT,         9, 0x0000002e),
+       INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT,        10, 0x00000008),
+       INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_X,       1, 0x00000028),
+       INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS,    1, 0x00000028),
+       INTEL_CPU_DESC(INTEL_FAM6_ATOM_GOLDMONT_PLUS,    8, 0x00000006),
+       {}
+};
 
-       return (cpu_data(cpu).microcode < rev);
+static bool intel_counter_freezing_broken(void)
+{
+       return !x86_cpu_has_min_microcode_rev(counter_freezing_ucodes);
 }
 
-static __init void intel_glp_counter_freezing_quirk(void)
+static __init void intel_counter_freezing_quirk(void)
 {
        /* Check if it's already disabled */
        if (disable_counter_freezing)
@@ -3992,7 +4029,7 @@ static __init void intel_glp_counter_freezing_quirk(void)
         * If the system starts with the wrong ucode, leave the
         * counter-freezing feature permanently disabled.
         */
-       if (intel_glp_counter_freezing_broken(raw_smp_processor_id())) {
+       if (intel_counter_freezing_broken()) {
                pr_info("PMU counter freezing disabled due to CPU errata,"
                        "please upgrade microcode\n");
                x86_pmu.counter_freezing = false;
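Tracing the renamed quirk with concrete steppings and revisions from counter_freezing_ucodes, using the has_min_ucode() model from the previous sketch (the Goldmont Plus model number 0x7a is an assumption):

#include <assert.h>

static const struct cpu_desc_sketch counter_freezing_sketch[] = {
	{ 6, 0x7a, 1, 0x00000028 },	/* GOLDMONT_PLUS stepping 1 */
	{ 6, 0x7a, 8, 0x00000006 },	/* GOLDMONT_PLUS stepping 8 */
	{ 0, 0, 0, 0 }
};

int main(void)
{
	/* stepping 1 at revision 0x26: broken, freezing stays disabled */
	assert(!has_min_ucode(counter_freezing_sketch, 6, 0x7a, 1, 0x26));
	/* stepping 1 at revision 0x28 or newer: feature is left enabled */
	assert(has_min_ucode(counter_freezing_sketch, 6, 0x7a, 1, 0x28));
	/* unlisted stepping: conservatively treated as broken */
	assert(!has_min_ucode(counter_freezing_sketch, 6, 0x7a, 5, ~0u));
	return 0;
}

Because the quirk runs once at boot, a too-old revision disables counter freezing permanently for that boot, even if newer microcode is loaded later.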
@@ -4258,6 +4295,8 @@ __init int intel_pmu_init(void)
 
        case INTEL_FAM6_CORE2_MEROM:
                x86_add_quirk(intel_clovertown_quirk);
+               /* fall through */
+
        case INTEL_FAM6_CORE2_MEROM_L:
        case INTEL_FAM6_CORE2_PENRYN:
        case INTEL_FAM6_CORE2_DUNNINGTON:
@@ -4346,6 +4385,7 @@ __init int intel_pmu_init(void)
 
        case INTEL_FAM6_ATOM_GOLDMONT:
        case INTEL_FAM6_ATOM_GOLDMONT_X:
+               x86_add_quirk(intel_counter_freezing_quirk);
                memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
@@ -4372,7 +4412,7 @@ __init int intel_pmu_init(void)
                break;
 
        case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
-               x86_add_quirk(intel_glp_counter_freezing_quirk);
+               x86_add_quirk(intel_counter_freezing_quirk);
                memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
@@ -4515,6 +4555,7 @@ __init int intel_pmu_init(void)
        case INTEL_FAM6_HASWELL_ULT:
        case INTEL_FAM6_HASWELL_GT3E:
                x86_add_quirk(intel_ht_bug);
+               x86_add_quirk(intel_pebs_isolation_quirk);
                x86_pmu.late_ack = true;
                memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
@@ -4546,6 +4587,7 @@ __init int intel_pmu_init(void)
        case INTEL_FAM6_BROADWELL_XEON_D:
        case INTEL_FAM6_BROADWELL_GT3E:
        case INTEL_FAM6_BROADWELL_X:
+               x86_add_quirk(intel_pebs_isolation_quirk);
                x86_pmu.late_ack = true;
                memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
@@ -4608,6 +4650,7 @@ __init int intel_pmu_init(void)
        case INTEL_FAM6_SKYLAKE_X:
        case INTEL_FAM6_KABYLAKE_MOBILE:
        case INTEL_FAM6_KABYLAKE_DESKTOP:
+               x86_add_quirk(intel_pebs_isolation_quirk);
                x86_pmu.late_ack = true;
                memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
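All of the new call sites rely on the same registration pattern: x86_add_quirk() queues a callback that the PMU init path runs once after the per-model setup completes. A simplified stand-in for that mechanism (modeled on the x86_pmu_quirk list in arch/x86/events/perf_event.h; the names here are illustrative, not the kernel's):

struct pmu_quirk_sketch {
	void (*func)(void);
	struct pmu_quirk_sketch *next;
};

static struct pmu_quirk_sketch *quirks_sketch;

/* prepend a quirk; the kernel macro allocates the node statically */
#define add_quirk_sketch(q)			\
do {						\
	(q)->next = quirks_sketch;		\
	quirks_sketch = (q);			\
} while (0)

static void run_quirks_sketch(void)
{
	for (struct pmu_quirk_sketch *q = quirks_sketch; q; q = q->next)
		q->func();
}

Note that intel_pebs_isolation_quirk() additionally installs itself as the x86_pmu.check_microcode callback (see the second hunk), so a late microcode load re-evaluates pebs_no_isolation at runtime rather than only at boot.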