Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 71ed3ce29e12e7d2dbe25d9e4b92e8d46a71fecd..d71a987fd2bf2ba5e698f9b69fccb59db4a1bb30 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -41,6 +41,7 @@
 #include <linux/sched.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/list.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
 
@@ -62,6 +63,9 @@ static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);
 
 static int nr_slots[TYPE_MAX];
 
+/* Keep track of the breakpoints attached to tasks */
+static LIST_HEAD(bp_task_head);
+
 static int constraints_initialized;
 
 /* Gather the number of total pinned and un-pinned bp in a cpuset */
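The bp_task_head list introduced above strings every enabled task-bound breakpoint together, so the per-task count below can be taken with a plain list walk (serialized by the slot-reservation mutex) instead of a locked walk of the perf context. The matching list node is added to struct hw_perf_event outside this file (include/linux/perf_event.h in the full commit); a minimal sketch of the assumed layout, other members elided:

	struct hw_perf_event {
		/* ... */
		struct arch_hw_breakpoint	info;
		struct list_head		bp_list;	/* node in bp_task_head */
	};
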
@@ -103,33 +107,21 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
        return 0;
 }
 
-static int task_bp_pinned(struct task_struct *tsk, enum bp_type_idx type)
+/*
+ * Count the number of breakpoints of the same type and same task.
+ * The given event must not be on the list.
+ */
+static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type)
 {
-       struct perf_event_context *ctx = tsk->perf_event_ctxp;
-       struct list_head *list;
-       struct perf_event *bp;
-       unsigned long flags;
+       struct perf_event_context *ctx = bp->ctx;
+       struct perf_event *iter;
        int count = 0;
 
-       if (WARN_ONCE(!ctx, "No perf context for this task"))
-               return 0;
-
-       list = &ctx->event_list;
-
-       raw_spin_lock_irqsave(&ctx->lock, flags);
-
-       /*
-        * The current breakpoint counter is not included in the list
-        * at the open() callback time
-        */
-       list_for_each_entry(bp, list, event_entry) {
-               if (bp->attr.type == PERF_TYPE_BREAKPOINT)
-                       if (find_slot_idx(bp) == type)
-                               count += hw_breakpoint_weight(bp);
+       list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
+               if (iter->ctx == ctx && find_slot_idx(iter) == type)
+                       count += hw_breakpoint_weight(iter);
        }
 
-       raw_spin_unlock_irqrestore(&ctx->lock, flags);
-
        return count;
 }
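The rewrite replaces the locked walk of ctx->event_list with a walk of the global bp_task_head list, matching events by their ctx pointer. The "must not be on the list" rule is what makes the count usable during reservation: the breakpoint being reserved is linked in only after its slot is granted, so it never counts against itself. For instance:

	/*
	 * Two weight-1 breakpoints of this type already linked for the
	 * same ctx => task_bp_pinned(bp, type) == 2 while a third one
	 * (bp itself) is being reserved, since bp is still off the list.
	 */
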
 
@@ -149,7 +141,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
                if (!tsk)
                        slots->pinned += max_task_bp_pinned(cpu, type);
                else
-                       slots->pinned += task_bp_pinned(tsk, type);
+                       slots->pinned += task_bp_pinned(bp, type);
                slots->flexible = per_cpu(nr_bp_flexible[type], cpu);
 
                return;
@@ -162,7 +154,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
                if (!tsk)
                        nr += max_task_bp_pinned(cpu, type);
                else
-                       nr += task_bp_pinned(tsk, type);
+                       nr += task_bp_pinned(bp, type);
 
                if (nr > slots->pinned)
                        slots->pinned = nr;
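Both callsites now hand task_bp_pinned() the event rather than the task, since the count is keyed on bp->ctx; the surrounding logic is untouched. Note that the task-bound count is not per cpu: for a cpu-wide task breakpoint, each online cpu gets the same task contribution on top of its own nr_cpu_bp_pinned. Worked example: a task already owning 2 breakpoints of this type, with 1 cpu-pinned breakpoint on cpu0 and none elsewhere, yields nr = 3 on cpu0 and nr = 2 on the rest, so slots->pinned becomes 3.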
@@ -188,7 +180,7 @@ fetch_this_slot(struct bp_busy_slots *slots, int weight)
 /*
  * Add a pinned breakpoint for the given task in our constraint table
  */
-static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable,
+static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable,
                                enum bp_type_idx type, int weight)
 {
        unsigned int *tsk_pinned;
@@ -196,10 +188,11 @@ static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable,
        int old_idx = 0;
        int idx = 0;
 
-       old_count = task_bp_pinned(tsk, type);
+       old_count = task_bp_pinned(bp, type);
        old_idx = old_count - 1;
        idx = old_idx + weight;
 
+       /* tsk_pinned[n] is the number of tasks having n+1 breakpoints */
        tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
        if (enable) {
                tsk_pinned[idx]++;
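With the bucket indexing in mind (a task owning n breakpoints is counted in tsk_pinned[n - 1]), enabling or disabling simply moves the task between buckets. A worked trace of the enable path with old_count == 2 and weight == 1:

	/*
	 * old_idx = 2 - 1 = 1, idx = 1 + 1 = 2
	 * tsk_pinned[2]++	-> task now counted as owning 3 bps
	 * tsk_pinned[1]--	-> no longer counted as owning 2 bps
	 */
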
@@ -222,23 +215,30 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
        int cpu = bp->cpu;
        struct task_struct *tsk = bp->ctx->task;
 
+       /* Pinned counter cpu profiling */
+       if (!tsk) {
+               if (enable)
+                       per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
+               else
+                       per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
+               return;
+       }
+
        /* Pinned counter task profiling */
-       if (tsk) {
-               if (cpu >= 0) {
-                       toggle_bp_task_slot(tsk, cpu, enable, type, weight);
-                       return;
-               }
 
+       if (!enable)
+               list_del(&bp->hw.bp_list);
+
+       if (cpu >= 0) {
+               toggle_bp_task_slot(bp, cpu, enable, type, weight);
+       } else {
                for_each_online_cpu(cpu)
-                       toggle_bp_task_slot(tsk, cpu, enable, type, weight);
-               return;
+                       toggle_bp_task_slot(bp, cpu, enable, type, weight);
        }
 
-       /* Pinned counter cpu profiling */
        if (enable)
-               per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
-       else
-               per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
+               list_add_tail(&bp->hw.bp_list, &bp_task_head);
 }
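The ordering around the recount upholds task_bp_pinned()'s "must not be on the list" rule: on disable the event is unlinked before toggle_bp_task_slot() recounts, on enable it is linked only afterwards, so the event under modification is never included in its own count:

	disable:  list_del(&bp->hw.bp_list), then recount (bp excluded)
	enable:   recount (bp excluded), then list_add_tail(&bp->hw.bp_list, &bp_task_head)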
 
 /*
@@ -312,6 +312,10 @@ static int __reserve_bp_slot(struct perf_event *bp)
        weight = hw_breakpoint_weight(bp);
 
        fetch_bp_busy_slots(&slots, bp, type);
+       /*
+        * Simulate the addition of this breakpoint to the constraints
+        * and see the result.
+        */
        fetch_this_slot(&slots, weight);
 
        /* Flexible counters need to keep at least one slot */
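fetch_bp_busy_slots() reports the worst-case slot usage as it currently stands, and fetch_this_slot() adds this breakpoint's weight on top; that sum is the simulated result the new comment refers to. For context, the check consuming it sits just past this hunk and, going by the surrounding upstream code, reads roughly:

	if (slots.pinned + (!!slots.flexible) > nr_slots[type])
		return -ENOSPC;

	toggle_bp_slot(bp, true, type, weight);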