perf: Add pmu callbacks to track event mapping and unmapping
author	Andy Lutomirski <luto@amacapital.net>
	Fri, 24 Oct 2014 22:58:10 +0000 (15:58 -0700)
committer	Ingo Molnar <mingo@kernel.org>
	Wed, 4 Feb 2015 11:10:45 +0000 (12:10 +0100)
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Vince Weaver <vince@deater.net>
Cc: "hillf.zj" <hillf.zj@alibaba-inc.com>
Cc: Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/266afcba1d1f91ea5501e4e16e94bbbc1a9339b6.1414190806.git.luto@amacapital.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/perf_event.h
kernel/events/core.c

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 5cad0e6f35524b454ec691e1787848d322893b3b..33262004c31041c69b0706eb0b07bf37931b83b8 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -202,6 +202,13 @@ struct pmu {
         */
        int (*event_init)               (struct perf_event *event);
 
+       /*
+        * Notification that the event was mapped or unmapped.  Called
+        * in the context of the mapping task.
+        */
+       void (*event_mapped)            (struct perf_event *event); /*optional*/
+       void (*event_unmapped)          (struct perf_event *event); /*optional*/
+
 #define PERF_EF_START  0x01            /* start the counter when adding    */
 #define PERF_EF_RELOAD 0x02            /* reload the counter when starting */
 #define PERF_EF_UPDATE 0x04            /* update the counter when stopping */
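
For illustration only (not part of the patch above): a PMU driver that wants to react to mapping and unmapping could fill in the new hooks when it registers its struct pmu. The driver name, the callbacks and the counter below are hypothetical; the only contract the patch defines is that the hooks, when non-NULL, are called in the context of the task that maps or unmaps the event.

/* Hypothetical PMU driver sketch, assuming this patch is applied. */
#include <linux/perf_event.h>
#include <linux/atomic.h>

static atomic_t example_nr_mapped = ATOMIC_INIT(0);	/* events currently mmap()ed */

static void example_event_mapped(struct perf_event *event)
{
	/* Called from perf_mmap()/perf_mmap_open() in the mapping task. */
	atomic_inc(&example_nr_mapped);
}

static void example_event_unmapped(struct perf_event *event)
{
	/* Called from perf_mmap_close() in the unmapping task. */
	atomic_dec(&example_nr_mapped);
}

static struct pmu example_pmu = {
	/* .event_init, .add, .del, .start, .stop, .read as usual ... */
	.event_mapped	= example_event_mapped,		/* optional */
	.event_unmapped	= example_event_unmapped,	/* optional */
};
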
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7f2fbb8b5069b3258bdd9721c60b850f965953d1..cc1487145d33e213e987c918057358cfe06b6319 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4293,6 +4293,9 @@ static void perf_mmap_open(struct vm_area_struct *vma)
 
        atomic_inc(&event->mmap_count);
        atomic_inc(&event->rb->mmap_count);
+
+       if (event->pmu->event_mapped)
+               event->pmu->event_mapped(event);
 }
 
 /*
@@ -4312,6 +4315,9 @@ static void perf_mmap_close(struct vm_area_struct *vma)
        int mmap_locked = rb->mmap_locked;
        unsigned long size = perf_data_size(rb);
 
+       if (event->pmu->event_unmapped)
+               event->pmu->event_unmapped(event);
+
        atomic_dec(&rb->mmap_count);
 
        if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
@@ -4513,6 +4519,9 @@ unlock:
        vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = &perf_mmap_vmops;
 
+       if (event->pmu->event_mapped)
+               event->pmu->event_mapped(event);
+
        return ret;
 }
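
Taken together, the hunks above mean event_mapped() fires both on the initial mmap() of the ring buffer (perf_mmap) and whenever the VMA is duplicated (perf_mmap_open, e.g. across fork()), while event_unmapped() fires from perf_mmap_close(). A minimal user-space sketch of what exercises these paths, assuming a software counter and omitting error handling:

/* User-space sketch: mapping and unmapping a perf ring buffer.
 * PERF_COUNT_SW_CPU_CLOCK and the 1+1 page buffer size are just
 * convenient choices for illustration. */
#include <linux/perf_event.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

int main(void)
{
	struct perf_event_attr attr;
	long page = sysconf(_SC_PAGESIZE);
	void *buf;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);

	/* mmap() of the buffer reaches perf_mmap(), which now calls
	 * pmu->event_mapped() if the PMU set it. */
	buf = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	/* munmap() reaches perf_mmap_close(), which now calls
	 * pmu->event_unmapped() if the PMU set it. */
	munmap(buf, 2 * page);
	close(fd);
	return 0;
}
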