perf record: Introduce thread affinity and mmap masks
author: Alexey Bayduraev <alexey.v.bayduraev@linux.intel.com>
Mon, 17 Jan 2022 18:34:21 +0000 (21:34 +0300)
committer: Arnaldo Carvalho de Melo <acme@redhat.com>
Thu, 10 Feb 2022 19:21:22 +0000 (16:21 -0300)
Introduce affinity and mmap thread masks. The thread affinity mask
defines the CPUs that a thread is allowed to run on. The thread maps
mask defines the mmap data buffers that the thread serves, streaming
profiling data from them.

Reviewed-by: Riccardo Mancini <rickyman7@gmail.com>
Signed-off-by: Alexey Bayduraev <alexey.v.bayduraev@linux.intel.com>
Tested-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Riccardo Mancini <rickyman7@gmail.com>
Acked-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Namhyung Kim <namhyung@gmail.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Antonov <alexander.antonov@linux.intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexei Budankov <abudankov@huawei.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/9042bf7daf988e17e17e6acbf5d29590bde869cd.1642440724.git.alexey.v.bayduraev@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/perf/builtin-record.c

index bb716c953d02fda883489d6703d40a3e4707bf15..41998f2140cd51199b9c78679c80879d6f246cb9 100644 (file)
@@ -87,6 +87,11 @@ struct switch_output {
        int              cur_file;
 };
 
+/*
+ * Per-thread masks: 'maps' selects which mmap data buffers the thread
+ * streams profiling data from; 'affinity' selects which CPUs the
+ * thread is allowed to run on.
+ */
+struct thread_mask {
+       struct mmap_cpu_mask    maps;
+       struct mmap_cpu_mask    affinity;
+};
+
 struct record {
        struct perf_tool        tool;
        struct record_opts      opts;
@@ -112,6 +117,8 @@ struct record {
        struct mmap_cpu_mask    affinity_mask;
        unsigned long           output_max_size;        /* = 0: unlimited */
        struct perf_debuginfod  debuginfod;
+       int                     nr_threads;
+       struct thread_mask      *thread_masks;
 };
 
 static volatile int done;
@@ -2204,6 +2211,47 @@ static int record__parse_affinity(const struct option *opt, const char *str, int
        return 0;
 }
 
+/*
+ * Allocate a zero-filled bitmap of nr_bits for @mask.
+ * Returns 0 on success, -ENOMEM if the bitmap allocation fails.
+ */
+static int record__mmap_cpu_mask_alloc(struct mmap_cpu_mask *mask, int nr_bits)
+{
+       mask->nbits = nr_bits;
+       mask->bits = bitmap_zalloc(mask->nbits);
+       if (!mask->bits)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/* Release @mask's bitmap and reset its bit count. */
+static void record__mmap_cpu_mask_free(struct mmap_cpu_mask *mask)
+{
+       bitmap_free(mask->bits);
+       mask->nbits = 0;
+}
+
+/*
+ * Allocate both the maps and affinity bitmaps of @mask, each nr_bits wide.
+ * On failure the already-allocated bitmap is freed and both pointers are
+ * left NULL, so record__thread_mask_free() is safe on a partially built
+ * mask. Returns 0 on success or the -ENOMEM from the failed allocation.
+ */
+static int record__thread_mask_alloc(struct thread_mask *mask, int nr_bits)
+{
+       int ret;
+
+       ret = record__mmap_cpu_mask_alloc(&mask->maps, nr_bits);
+       if (ret) {
+               mask->affinity.bits = NULL;
+               return ret;
+       }
+
+       ret = record__mmap_cpu_mask_alloc(&mask->affinity, nr_bits);
+       if (ret) {
+               record__mmap_cpu_mask_free(&mask->maps);
+               mask->maps.bits = NULL;
+       }
+
+       return ret;
+}
+
+/* Free both bitmaps of @mask (bitmap_free tolerates NULL bits). */
+static void record__thread_mask_free(struct thread_mask *mask)
+{
+       record__mmap_cpu_mask_free(&mask->maps);
+       record__mmap_cpu_mask_free(&mask->affinity);
+}
+
 static int parse_output_max_size(const struct option *opt,
                                 const char *str, int unset)
 {
@@ -2683,6 +2731,73 @@ static struct option __record_options[] = {
 
 struct option *record_options = __record_options;
 
+/*
+ * Set a bit in @mask for each CPU present in @cpus.
+ * NOTE(review): assumes every cpu id in @cpus is below mask->nbits
+ * (masks are sized by cpu__max_cpu() at the call site) — confirm.
+ */
+static void record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
+{
+       int c;
+
+       for (c = 0; c < cpus->nr; c++)
+               set_bit(cpus->map[c].cpu, mask->bits);
+}
+
+/*
+ * Free the first nr_threads entries of rec->thread_masks, then the
+ * array itself. Safe to call when thread_masks was never allocated.
+ */
+static void record__free_thread_masks(struct record *rec, int nr_threads)
+{
+       int t;
+
+       if (rec->thread_masks)
+               for (t = 0; t < nr_threads; t++)
+                       record__thread_mask_free(&rec->thread_masks[t]);
+
+       zfree(&rec->thread_masks);
+}
+
+/*
+ * Allocate rec->thread_masks as an array of nr_threads thread_mask
+ * entries, each with nr_bits-wide maps and affinity bitmaps. On any
+ * failure all entries allocated so far are released (goto cleanup)
+ * and the error code is returned; returns 0 on success.
+ */
+static int record__alloc_thread_masks(struct record *rec, int nr_threads, int nr_bits)
+{
+       int t, ret;
+
+       rec->thread_masks = zalloc(nr_threads * sizeof(*(rec->thread_masks)));
+       if (!rec->thread_masks) {
+               pr_err("Failed to allocate thread masks\n");
+               return -ENOMEM;
+       }
+
+       for (t = 0; t < nr_threads; t++) {
+               ret = record__thread_mask_alloc(&rec->thread_masks[t], nr_bits);
+               if (ret) {
+                       pr_err("Failed to allocate thread masks[%d]\n", t);
+                       goto out_free;
+               }
+       }
+
+       return 0;
+
+out_free:
+       record__free_thread_masks(rec, nr_threads);
+
+       return ret;
+}
+
+/*
+ * Default (single-threaded) setup: allocate one thread_mask sized by
+ * the system's max CPU id and mark every CPU from @cpus in its maps
+ * mask. The affinity mask is left all-zero as allocated here.
+ */
+static int record__init_thread_default_masks(struct record *rec, struct perf_cpu_map *cpus)
+{
+       int ret;
+
+       ret = record__alloc_thread_masks(rec, 1, cpu__max_cpu().cpu);
+       if (ret)
+               return ret;
+
+       record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus);
+
+       rec->nr_threads = 1;
+
+       return 0;
+}
+
+/*
+ * Build the per-thread masks from the evlist's CPU map; currently
+ * always takes the single-threaded default path.
+ */
+static int record__init_thread_masks(struct record *rec)
+{
+       struct perf_cpu_map *cpus = rec->evlist->core.cpus;
+
+       return record__init_thread_default_masks(rec, cpus);
+}
+
 int cmd_record(int argc, const char **argv)
 {
        int err;
@@ -2948,6 +3063,12 @@ int cmd_record(int argc, const char **argv)
                goto out;
        }
 
+       err = record__init_thread_masks(rec);
+       if (err) {
+               pr_err("Failed to initialize parallel data streaming masks\n");
+               goto out;
+       }
+
        if (rec->opts.nr_cblocks > nr_cblocks_max)
                rec->opts.nr_cblocks = nr_cblocks_max;
        pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
@@ -2966,6 +3087,8 @@ out:
        symbol__exit();
        auxtrace_record__free(rec->itr);
 out_opts:
+       record__free_thread_masks(rec, rec->nr_threads);
+       rec->nr_threads = 0;
        evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
        return err;
 }