Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index b87e0b6970cbc363d2a6c4b233796416176ba09e..611df11ba15e7f9145e49f2fa2c7e61b1371d77c 100644
@@ -2,7 +2,7 @@
 
 static DEFINE_RAW_SPINLOCK(amd_nb_lock);
 
-static __initconst u64 amd_hw_cache_event_ids
+static __initconst const u64 amd_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
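
This hunk is part of a tree-wide constification pass: __initconst places the table in the .init.rodata section so it can be discarded after boot, and since that section is read-only the object itself must also be declared const, otherwise the compiler can report a section type conflict when const and non-const objects end up in the same section. A minimal sketch of the pattern (the macro expansion shown is approximate):

	/* include/linux/init.h, roughly: */
	#define __initconst	__section(.init.rodata)

	/* data in a read-only init section must itself be const */
	static __initconst const u64 example_table[2] = { 1, 2 };
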
@@ -111,22 +111,19 @@ static u64 amd_pmu_event_map(int hw_event)
        return amd_perfmon_event_map[hw_event];
 }
 
-static u64 amd_pmu_raw_event(u64 hw_event)
+static int amd_pmu_hw_config(struct perf_event *event)
 {
-#define K7_EVNTSEL_EVENT_MASK  0xF000000FFULL
-#define K7_EVNTSEL_UNIT_MASK   0x00000FF00ULL
-#define K7_EVNTSEL_EDGE_MASK   0x000040000ULL
-#define K7_EVNTSEL_INV_MASK    0x000800000ULL
-#define K7_EVNTSEL_REG_MASK    0x0FF000000ULL
-
-#define K7_EVNTSEL_MASK                        \
-       (K7_EVNTSEL_EVENT_MASK |        \
-        K7_EVNTSEL_UNIT_MASK  |        \
-        K7_EVNTSEL_EDGE_MASK  |        \
-        K7_EVNTSEL_INV_MASK   |        \
-        K7_EVNTSEL_REG_MASK)
-
-       return hw_event & K7_EVNTSEL_MASK;
+       int ret = x86_pmu_hw_config(event);
+
+       if (ret)
+               return ret;
+
+       if (event->attr.type != PERF_TYPE_RAW)
+               return 0;
+
+       event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
+
+       return 0;
 }
 
 /*
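
The per-vendor raw_event() filter is folded into the new hw_config() callback: x86_pmu_hw_config() first performs the generic attribute checks and sets the common control bits, and only for PERF_TYPE_RAW events are the caller-supplied config bits then OR-ed in, filtered through AMD64_RAW_EVENT_MASK. That shared mask replaces the local K7_EVNTSEL_* defines and, like them, passes through AMD's extended event-select bits 35:32. As defined in the perf headers of this series, roughly:

	#define AMD64_EVENTSEL_EVENT \
		(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))

	#define AMD64_RAW_EVENT_MASK \
		(X86_RAW_EVENT_MASK | AMD64_EVENTSEL_EVENT)
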
@@ -137,6 +134,13 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc)
        return (hwc->config & 0xe0) == 0xe0;
 }
 
+static inline int amd_has_nb(struct cpu_hw_events *cpuc)
+{
+       struct amd_nb *nb = cpuc->amd_nb;
+
+       return nb && nb->nb_id != -1;
+}
+
 static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
                                      struct perf_event *event)
 {
@@ -147,7 +151,7 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
        /*
         * only care about NB events
         */
-       if (!(nb && amd_is_nb_event(hwc)))
+       if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
                return;
 
        /*
@@ -158,7 +162,7 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
         * be removed on one CPU at a time AND PMU is disabled
         * when we come here
         */
-       for (i = 0; i < x86_pmu.num_events; i++) {
+       for (i = 0; i < x86_pmu.num_counters; i++) {
                if (nb->owners[i] == event) {
                        cmpxchg(nb->owners+i, event, NULL);
                        break;
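
The two hunks above are mostly mechanical: the open-coded null check becomes amd_has_nb(), and num_events follows the tree-wide rename to num_counters. The cmpxchg() in the loop is the release half of the lock-free ownership protocol on nb->owners[]: a slot is claimed by swapping NULL for the event pointer, so handing it back by swapping the event pointer for NULL can never race with a concurrent claim. A self-contained userspace analogue of that claim/release pair, using C11 atomics rather than the kernel's cmpxchg():

	#include <stdatomic.h>
	#include <stdio.h>

	#define NUM_COUNTERS 4

	/* one slot per counter; NULL means the slot is free */
	static _Atomic(void *) owners[NUM_COUNTERS];

	static int claim(void *event)
	{
		for (int i = 0; i < NUM_COUNTERS; i++) {
			void *expected = NULL;

			/* claim: NULL -> event, as in the scheduling path */
			if (atomic_compare_exchange_strong(&owners[i], &expected, event))
				return i;
		}
		return -1;	/* no free slot */
	}

	static void release(void *event)
	{
		for (int i = 0; i < NUM_COUNTERS; i++) {
			void *expected = event;

			/* release: event -> NULL, only the owner's slot matches */
			if (atomic_compare_exchange_strong(&owners[i], &expected, NULL))
				break;
		}
	}

	int main(void)
	{
		int dummy;

		printf("claimed slot %d\n", claim(&dummy));
		release(&dummy);
		return 0;
	}
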
@@ -208,13 +212,13 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
        struct hw_perf_event *hwc = &event->hw;
        struct amd_nb *nb = cpuc->amd_nb;
        struct perf_event *old = NULL;
-       int max = x86_pmu.num_events;
+       int max = x86_pmu.num_counters;
        int i, j, k = -1;
 
        /*
         * if not NB event or no NB, then no constraints
         */
-       if (!(nb && amd_is_nb_event(hwc)))
+       if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
                return &unconstrained;
 
        /*
@@ -286,58 +290,62 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
        /*
         * initialize all possible NB constraints
         */
-       for (i = 0; i < x86_pmu.num_events; i++) {
+       for (i = 0; i < x86_pmu.num_counters; i++) {
                __set_bit(i, nb->event_constraints[i].idxmsk);
                nb->event_constraints[i].weight = 1;
        }
        return nb;
 }
 
-static void amd_pmu_cpu_online(int cpu)
+static int amd_pmu_cpu_prepare(int cpu)
 {
-       struct cpu_hw_events *cpu1, *cpu2;
-       struct amd_nb *nb = NULL;
+       struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+
+       WARN_ON_ONCE(cpuc->amd_nb);
+
+       if (boot_cpu_data.x86_max_cores < 2)
+               return NOTIFY_OK;
+
+       cpuc->amd_nb = amd_alloc_nb(cpu, -1);
+       if (!cpuc->amd_nb)
+               return NOTIFY_BAD;
+
+       return NOTIFY_OK;
+}
+
+static void amd_pmu_cpu_starting(int cpu)
+{
+       struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+       struct amd_nb *nb;
        int i, nb_id;
 
        if (boot_cpu_data.x86_max_cores < 2)
                return;
 
-       /*
-        * function may be called too early in the
-        * boot process, in which case nb_id is bogus
-        */
        nb_id = amd_get_nb_id(cpu);
-       if (nb_id == BAD_APICID)
-               return;
-
-       cpu1 = &per_cpu(cpu_hw_events, cpu);
-       cpu1->amd_nb = NULL;
+       WARN_ON_ONCE(nb_id == BAD_APICID);
 
        raw_spin_lock(&amd_nb_lock);
 
        for_each_online_cpu(i) {
-               cpu2 = &per_cpu(cpu_hw_events, i);
-               nb = cpu2->amd_nb;
-               if (!nb)
+               nb = per_cpu(cpu_hw_events, i).amd_nb;
+               if (WARN_ON_ONCE(!nb))
                        continue;
-               if (nb->nb_id == nb_id)
-                       goto found;
-       }
 
-       nb = amd_alloc_nb(cpu, nb_id);
-       if (!nb) {
-               pr_err("perf_events: failed NB allocation for CPU%d\n", cpu);
-               raw_spin_unlock(&amd_nb_lock);
-               return;
+               if (nb->nb_id == nb_id) {
+                       kfree(cpuc->amd_nb);
+                       cpuc->amd_nb = nb;
+                       break;
+               }
        }
-found:
-       nb->refcnt++;
-       cpu1->amd_nb = nb;
+
+       cpuc->amd_nb->nb_id = nb_id;
+       cpuc->amd_nb->refcnt++;
 
        raw_spin_unlock(&amd_nb_lock);
 }
 
-static void amd_pmu_cpu_offline(int cpu)
+static void amd_pmu_cpu_dead(int cpu)
 {
        struct cpu_hw_events *cpuhw;
 
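
This is the heart of the rework. The old amd_pmu_cpu_online() could run before amd_get_nb_id() returned anything useful and simply bailed out on BAD_APICID. It is now split along the CPU-hotplug notifier stages: amd_pmu_cpu_prepare() runs at CPU_UP_PREPARE, where allocation may sleep and a failure can still veto the bring-up via NOTIFY_BAD, so the descriptor is always allocated there under the placeholder id -1; amd_pmu_cpu_starting() then runs on the new CPU itself, where the northbridge id is valid, and either adopts an already-shared descriptor (freeing its own placeholder) or promotes the placeholder by stamping it with the real nb_id. A sketch of the resulting lifecycle, condensed from the hunks above and below:

	/*
	 * CPU_UP_PREPARE  -- amd_pmu_cpu_prepare(), may sleep, may fail:
	 *	cpuc->amd_nb = amd_alloc_nb(cpu, -1);	   placeholder id
	 *
	 * CPU_STARTING    -- amd_pmu_cpu_starting(), on the new CPU:
	 *	nb_id = amd_get_nb_id(cpu);		   now valid
	 *	if another online CPU already shares nb_id:
	 *		kfree(placeholder), adopt the shared descriptor
	 *	cpuc->amd_nb->nb_id = nb_id;		   promote/confirm
	 *	cpuc->amd_nb->refcnt++;
	 *
	 * CPU_DEAD        -- amd_pmu_cpu_dead():
	 *	free if nb_id is still -1 (never promoted) or if
	 *	the refcount drops to zero; see the next hunk
	 */
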
@@ -349,8 +357,10 @@ static void amd_pmu_cpu_offline(int cpu)
        raw_spin_lock(&amd_nb_lock);
 
        if (cpuhw->amd_nb) {
-               if (--cpuhw->amd_nb->refcnt == 0)
-                       kfree(cpuhw->amd_nb);
+               struct amd_nb *nb = cpuhw->amd_nb;
+
+               if (nb->nb_id == -1 || --nb->refcnt == 0)
+                       kfree(nb);
 
                cpuhw->amd_nb = NULL;
        }
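
Note the evaluation order in the freeing test: nb_id == -1 is checked before the refcount is decremented. A placeholder that never reached amd_pmu_cpu_starting() (because the bring-up failed in between) still has a refcount of zero, so decrementing it would both underflow the count and leak the allocation; the short-circuit frees it outright instead. The rule, restated as a hypothetical helper:

	/* hypothetical helper, same rule as the test above */
	static bool amd_nb_should_free(struct amd_nb *nb)
	{
		/* an unpromoted placeholder is never shared: free it
		 * without touching its (zero) refcount */
		return nb->nb_id == -1 || --nb->refcnt == 0;
	}
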
@@ -358,29 +368,31 @@ static void amd_pmu_cpu_offline(int cpu)
        raw_spin_unlock(&amd_nb_lock);
 }
 
-static __initconst struct x86_pmu amd_pmu = {
+static __initconst const struct x86_pmu amd_pmu = {
        .name                   = "AMD",
        .handle_irq             = x86_pmu_handle_irq,
        .disable_all            = x86_pmu_disable_all,
        .enable_all             = x86_pmu_enable_all,
        .enable                 = x86_pmu_enable_event,
        .disable                = x86_pmu_disable_event,
+       .hw_config              = amd_pmu_hw_config,
+       .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_K7_EVNTSEL0,
        .perfctr                = MSR_K7_PERFCTR0,
        .event_map              = amd_pmu_event_map,
-       .raw_event              = amd_pmu_raw_event,
        .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
-       .num_events             = 4,
-       .event_bits             = 48,
-       .event_mask             = (1ULL << 48) - 1,
+       .num_counters           = 4,
+       .cntval_bits            = 48,
+       .cntval_mask            = (1ULL << 48) - 1,
        .apic                   = 1,
        /* use highest bit to detect overflow */
        .max_period             = (1ULL << 47) - 1,
        .get_event_constraints  = amd_get_event_constraints,
        .put_event_constraints  = amd_put_event_constraints,
 
-       .cpu_prepare            = amd_pmu_cpu_online,
-       .cpu_dead               = amd_pmu_cpu_offline,
+       .cpu_prepare            = amd_pmu_cpu_prepare,
+       .cpu_starting           = amd_pmu_cpu_starting,
+       .cpu_dead               = amd_pmu_cpu_dead,
 };
 
 static __init int amd_pmu_init(void)
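
The struct update collects the renames from the rest of the series: raw_event gives way to the hw_config callback, schedule_events and cpu_starting are wired up, and the counter-geometry fields move from num_events/event_bits/event_mask to num_counters/cntval_bits/cntval_mask. The geometry fields are mutually consistent: the mask covers all 48 counter bits, while max_period keeps the top bit clear so an overflow shows up as that bit flipping. A quick self-contained check of the relationship:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		const unsigned bits   = 48;
		const uint64_t mask   = (1ULL << bits) - 1;	   /* cntval_mask */
		const uint64_t period = (1ULL << (bits - 1)) - 1;  /* max_period */

		/* max_period is the largest count with the top bit clear */
		assert(period == (mask >> 1));
		return 0;
	}
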