perf/x86/intel: Support PEBS on fixed counters
authorKan Liang <kan.liang@linux.intel.com>
Fri, 9 Mar 2018 02:15:40 +0000 (18:15 -0800)
committerIngo Molnar <mingo@kernel.org>
Wed, 25 Jul 2018 09:50:50 +0000 (11:50 +0200)
The Extended PEBS feature supports PEBS on fixed-function performance
counters as well as all four general purpose counters.

The order of PEBS and fixed counter enabling has to be changed to make
sure PEBS is enabled for the fixed counters.

The change of the order doesn't impact the behavior of current code on
other platforms which don't support extended PEBS, because there is no
dependency among those enable/disable functions.

Don't enable IRQ generation (0x8) for MSR_ARCH_PERFMON_FIXED_CTR_CTRL.
The PEBS ucode will handle the interrupt generation.

Based-on-code-from: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: acme@kernel.org
Link: http://lkml.kernel.org/r/20180309021542.11374-2-kan.liang@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/events/intel/core.c

index 86f0c15dcc2db2fbc63a3ee6d81e349cd5351e48..d5a3124605f5d14fbfb1debbe6a35e48929160c4 100644 (file)
@@ -2041,15 +2041,15 @@ static void intel_pmu_disable_event(struct perf_event *event)
        cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
        cpuc->intel_cp_status &= ~(1ull << hwc->idx);
 
+       if (unlikely(event->attr.precise_ip))
+               intel_pmu_pebs_disable(event);
+
        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
                intel_pmu_disable_fixed(hwc);
                return;
        }
 
        x86_pmu_disable_event(event);
-
-       if (unlikely(event->attr.precise_ip))
-               intel_pmu_pebs_disable(event);
 }
 
 static void intel_pmu_del_event(struct perf_event *event)
@@ -2068,17 +2068,19 @@ static void intel_pmu_read_event(struct perf_event *event)
                x86_perf_event_update(event);
 }
 
-static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
+static void intel_pmu_enable_fixed(struct perf_event *event)
 {
+       struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
-       u64 ctrl_val, bits, mask;
+       u64 ctrl_val, mask, bits = 0;
 
        /*
-        * Enable IRQ generation (0x8),
+        * Enable IRQ generation (0x8), if not PEBS,
         * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
         * if requested:
         */
-       bits = 0x8ULL;
+       if (!event->attr.precise_ip)
+               bits |= 0x8;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
                bits |= 0x2;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
@@ -2120,14 +2122,14 @@ static void intel_pmu_enable_event(struct perf_event *event)
        if (unlikely(event_is_checkpointed(event)))
                cpuc->intel_cp_status |= (1ull << hwc->idx);
 
+       if (unlikely(event->attr.precise_ip))
+               intel_pmu_pebs_enable(event);
+
        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
-               intel_pmu_enable_fixed(hwc);
+               intel_pmu_enable_fixed(event);
                return;
        }
 
-       if (unlikely(event->attr.precise_ip))
-               intel_pmu_pebs_enable(event);
-
        __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
 }