perf, powerpc: Implement group scheduling transactional APIs
authorLin Ming <ming.m.lin@intel.com>
Sat, 8 May 2010 10:28:41 +0000 (20:28 +1000)
committerIngo Molnar <mingo@elte.hu>
Tue, 11 May 2010 15:08:24 +0000 (17:08 +0200)
[paulus@samba.org: Set cpuhw->event[i]->hw.config in
power_pmu_commit_txn.]

Signed-off-by: Lin Ming <ming.m.lin@intel.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20100508102841.GA10650@brick.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/powerpc/kernel/perf_event.c

index 08460a2e9f4126228367bfaefaac76baeb5d09a7..43b83c35cf54b0dfb07bc5abef8ee558d5f3cd9c 100644 (file)
@@ -35,6 +35,9 @@ struct cpu_hw_events {
        u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
        unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
        unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
+
+       unsigned int group_flag;
+       int n_txn_start;
 };
 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
 
@@ -718,66 +721,6 @@ static int collect_events(struct perf_event *group, int max_count,
        return n;
 }
 
-static void event_sched_in(struct perf_event *event)
-{
-       event->state = PERF_EVENT_STATE_ACTIVE;
-       event->oncpu = smp_processor_id();
-       event->tstamp_running += event->ctx->time - event->tstamp_stopped;
-       if (is_software_event(event))
-               event->pmu->enable(event);
-}
-
-/*
- * Called to enable a whole group of events.
- * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
- * Assumes the caller has disabled interrupts and has
- * frozen the PMU with hw_perf_save_disable.
- */
-int hw_perf_group_sched_in(struct perf_event *group_leader,
-              struct perf_cpu_context *cpuctx,
-              struct perf_event_context *ctx)
-{
-       struct cpu_hw_events *cpuhw;
-       long i, n, n0;
-       struct perf_event *sub;
-
-       if (!ppmu)
-               return 0;
-       cpuhw = &__get_cpu_var(cpu_hw_events);
-       n0 = cpuhw->n_events;
-       n = collect_events(group_leader, ppmu->n_counter - n0,
-                          &cpuhw->event[n0], &cpuhw->events[n0],
-                          &cpuhw->flags[n0]);
-       if (n < 0)
-               return -EAGAIN;
-       if (check_excludes(cpuhw->event, cpuhw->flags, n0, n))
-               return -EAGAIN;
-       i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0);
-       if (i < 0)
-               return -EAGAIN;
-       cpuhw->n_events = n0 + n;
-       cpuhw->n_added += n;
-
-       /*
-        * OK, this group can go on; update event states etc.,
-        * and enable any software events
-        */
-       for (i = n0; i < n0 + n; ++i)
-               cpuhw->event[i]->hw.config = cpuhw->events[i];
-       cpuctx->active_oncpu += n;
-       n = 1;
-       event_sched_in(group_leader);
-       list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
-               if (sub->state != PERF_EVENT_STATE_OFF) {
-                       event_sched_in(sub);
-                       ++n;
-               }
-       }
-       ctx->nr_active += n;
-
-       return 1;
-}
-
 /*
  * Add a event to the PMU.
  * If all events are not already frozen, then we disable and
@@ -805,12 +748,22 @@ static int power_pmu_enable(struct perf_event *event)
        cpuhw->event[n0] = event;
        cpuhw->events[n0] = event->hw.config;
        cpuhw->flags[n0] = event->hw.event_base;
+
+       /*
+        * If a group event scheduling transaction was started,
+        * skip the schedulability test here; it will be performed
+        * at commit time (->commit_txn) as a whole
+        */
+       if (cpuhw->group_flag & PERF_EVENT_TXN_STARTED)
+               goto nocheck;
+
        if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
                goto out;
        if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
                goto out;
-
        event->hw.config = cpuhw->events[n0];
+
+nocheck:
        ++cpuhw->n_events;
        ++cpuhw->n_added;
 
@@ -896,11 +849,65 @@ static void power_pmu_unthrottle(struct perf_event *event)
        local_irq_restore(flags);
 }
 
+/*
+ * Start group events scheduling transaction
+ * Set the flag to make pmu::enable() not perform the
+ * schedulability test, it will be performed at commit time
+ */
+void power_pmu_start_txn(const struct pmu *pmu)
+{
+       struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+       cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
+       cpuhw->n_txn_start = cpuhw->n_events;
+}
+
+/*
+ * Stop group events scheduling transaction
+ * Clear the flag and pmu::enable() will perform the
+ * schedulability test.
+ */
+void power_pmu_cancel_txn(const struct pmu *pmu)
+{
+       struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+
+       cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
+}
+
+/*
+ * Commit group events scheduling transaction
+ * Perform the group schedulability test as a whole
+ * Returns 0 on success
+ */
+int power_pmu_commit_txn(const struct pmu *pmu)
+{
+       struct cpu_hw_events *cpuhw;
+       long i, n;
+
+       if (!ppmu)
+               return -EAGAIN;
+       cpuhw = &__get_cpu_var(cpu_hw_events);
+       n = cpuhw->n_events;
+       if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
+               return -EAGAIN;
+       i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n);
+       if (i < 0)
+               return -EAGAIN;
+
+       for (i = cpuhw->n_txn_start; i < n; ++i)
+               cpuhw->event[i]->hw.config = cpuhw->events[i];
+
+       return 0;
+}
+
 struct pmu power_pmu = {
        .enable         = power_pmu_enable,
        .disable        = power_pmu_disable,
        .read           = power_pmu_read,
        .unthrottle     = power_pmu_unthrottle,
+       .start_txn      = power_pmu_start_txn,
+       .cancel_txn     = power_pmu_cancel_txn,
+       .commit_txn     = power_pmu_commit_txn,
 };
 
 /*