// SPDX-License-Identifier: GPL-2.0
#include "util/cgroup.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/target.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/lock-contention.h"
#include <linux/zalloc.h>
#include <linux/string.h>
#include <bpf/bpf.h>
#include <inttypes.h>

#include "bpf_skel/lock_contention.skel.h"
#include "bpf_skel/lock_data.h"

static struct lock_contention_bpf *skel;
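
/*
 * Open the lock contention BPF skeleton, size its maps according to the
 * requested target and filters, load and attach it.  Returns 0 on success
 * and -1 on error.
 */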
int lock_contention_prepare(struct lock_contention *con)
{
	int i, fd;
	int ncpus = 1, ntasks = 1, ntypes = 1, naddrs = 1, ncgrps = 1;
	struct evlist *evlist = con->evlist;
	struct target *target = con->target;

	skel = lock_contention_bpf__open();
	if (!skel) {
		pr_err("Failed to open lock-contention BPF skeleton\n");
		return -1;
	}
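
	/* map sizes must be fixed before lock_contention_bpf__load() */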
	bpf_map__set_value_size(skel->maps.stacks, con->max_stack * sizeof(u64));
	bpf_map__set_max_entries(skel->maps.lock_stat, con->map_nr_entries);
	bpf_map__set_max_entries(skel->maps.tstamp, con->map_nr_entries);

	if (con->aggr_mode == LOCK_AGGR_TASK)
		bpf_map__set_max_entries(skel->maps.task_data, con->map_nr_entries);
	else
		bpf_map__set_max_entries(skel->maps.task_data, 1);

	if (con->save_callstack)
		bpf_map__set_max_entries(skel->maps.stacks, con->map_nr_entries);
	else
		bpf_map__set_max_entries(skel->maps.stacks, 1);

	if (target__has_cpu(target))
		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
	if (target__has_task(target))
		ntasks = perf_thread_map__nr(evlist->core.threads);
	if (con->filters->nr_types)
		ntypes = con->filters->nr_types;
	if (con->filters->nr_cgrps)
		ncgrps = con->filters->nr_cgrps;

	/* resolve lock name filters to addr */
	if (con->filters->nr_syms) {
		struct symbol *sym;
		struct map *kmap;
		unsigned long *addrs;

		for (i = 0; i < con->filters->nr_syms; i++) {
			sym = machine__find_kernel_symbol_by_name(con->machine,
								  con->filters->syms[i],
								  &kmap);
			if (sym == NULL) {
				pr_warning("ignore unknown symbol: %s\n",
					   con->filters->syms[i]);
				continue;
			}

			addrs = realloc(con->filters->addrs,
					(con->filters->nr_addrs + 1) * sizeof(*addrs));
			if (addrs == NULL) {
				pr_warning("memory allocation failure\n");
				continue;
			}

			addrs[con->filters->nr_addrs++] = map__unmap_ip(kmap, sym->start);
			con->filters->addrs = addrs;
		}
		naddrs = con->filters->nr_addrs;
	}
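
	/* size the filter maps using the counts gathered above */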
	bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	bpf_map__set_max_entries(skel->maps.type_filter, ntypes);
	bpf_map__set_max_entries(skel->maps.addr_filter, naddrs);
	bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);

	if (lock_contention_bpf__load(skel) < 0) {
		pr_err("Failed to load lock-contention BPF skeleton\n");
		return -1;
	}
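
	/*
	 * Populate the filter maps.  The u8 value is a dummy flag; the BPF
	 * side only checks for the presence of the key (cpu, pid, lock type,
	 * lock address or cgroup id).
	 */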
	if (target__has_cpu(target)) {
		u32 cpu;
		u8 val = 1;

		skel->bss->has_cpu = 1;
		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	if (target__has_task(target)) {
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

	if (target__none(target) && evlist->workload.pid > 0) {
		u32 pid = evlist->workload.pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);
		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
	}

	if (con->filters->nr_types) {
		u8 val = 1;

		skel->bss->has_type = 1;
		fd = bpf_map__fd(skel->maps.type_filter);

		for (i = 0; i < con->filters->nr_types; i++)
			bpf_map_update_elem(fd, &con->filters->types[i], &val, BPF_ANY);
	}

	if (con->filters->nr_addrs) {
		u8 val = 1;

		skel->bss->has_addr = 1;
		fd = bpf_map__fd(skel->maps.addr_filter);

		for (i = 0; i < con->filters->nr_addrs; i++)
			bpf_map_update_elem(fd, &con->filters->addrs[i], &val, BPF_ANY);
	}

	if (con->filters->nr_cgrps) {
		u8 val = 1;

		skel->bss->has_cgroup = 1;
		fd = bpf_map__fd(skel->maps.cgroup_filter);

		for (i = 0; i < con->filters->nr_cgrps; i++)
			bpf_map_update_elem(fd, &con->filters->cgrps[i], &val, BPF_ANY);
	}

	/* these don't work well if in the rodata section */
	skel->bss->stack_skip = con->stack_skip;
	skel->bss->aggr_mode = con->aggr_mode;
	skel->bss->needs_callstack = con->save_callstack;
	skel->bss->lock_owner = con->owner;

	if (con->aggr_mode == LOCK_AGGR_CGROUP) {
		if (cgroup_is_v2("perf_event"))
			skel->bss->use_cgroup_v2 = 1;

		read_all_cgroups(&con->cgroups);
	}

	bpf_program__set_autoload(skel->progs.collect_lock_syms, false);

	lock_contention_bpf__attach(skel);
	return 0;
}
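
/* flip the BPF-side switch that gates data collection */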
int lock_contention_start(void)
{
	skel->bss->enabled = 1;
	return 0;
}

int lock_contention_stop(void)
{
	skel->bss->enabled = 0;
	return 0;
}
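
/*
 * Build a display name for the given key according to the aggregation
 * mode: task comm, lock symbol, cgroup name or calling function.
 */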
static const char *lock_contention_get_name(struct lock_contention *con,
					    struct contention_key *key,
					    u64 *stack_trace, u32 flags)
{
	int idx = 0;
	u64 addr;
	const char *name = "";
	static char name_buf[KSYM_NAME_LEN];
	struct symbol *sym;
	struct map *kmap;
	struct machine *machine = con->machine;

	if (con->aggr_mode == LOCK_AGGR_TASK) {
		struct contention_task_data task;
		u32 pid = key->pid;
		int task_fd = bpf_map__fd(skel->maps.task_data);

		/* do not update idle comm which contains CPU number */
		if (pid) {
			struct thread *t = __machine__findnew_thread(machine, /*pid=*/-1, pid);

			if (t == NULL)
				return name;
			if (!bpf_map_lookup_elem(task_fd, &pid, &task) &&
			    thread__set_comm(t, task.comm, /*timestamp=*/0))
				name = task.comm;
		}
		return name;
	}

	if (con->aggr_mode == LOCK_AGGR_ADDR) {
		int lock_fd = bpf_map__fd(skel->maps.lock_syms);

		/* per-process locks set upper bits of the flags */
		if (flags & LCD_F_MMAP_LOCK)
			return "mmap_lock";
		if (flags & LCD_F_SIGHAND_LOCK)
			return "siglock";

		/* global locks with symbols */
		sym = machine__find_kernel_symbol(machine, key->lock_addr_or_cgroup, &kmap);
		if (sym)
			return sym->name;

		/* try semi-global locks collected separately */
		if (!bpf_map_lookup_elem(lock_fd, &key->lock_addr_or_cgroup, &flags)) {
			if (flags == LOCK_CLASS_RQLOCK)
				return "rq_lock";
		}

		return "";
	}

	if (con->aggr_mode == LOCK_AGGR_CGROUP) {
		u64 cgrp_id = key->lock_addr_or_cgroup;
		struct cgroup *cgrp = __cgroup__find(&con->cgroups, cgrp_id);

		if (cgrp)
			return cgrp->name;

		snprintf(name_buf, sizeof(name_buf), "cgroup:%" PRIu64 "", cgrp_id);
		return name_buf;
	}

	/* LOCK_AGGR_CALLER: skip lock internal functions */
	while (machine__is_lock_function(machine, stack_trace[idx]) &&
	       idx < con->max_stack - 1)
		idx++;

	addr = stack_trace[idx];
	sym = machine__find_kernel_symbol(machine, addr, &kmap);

	if (sym) {
		unsigned long offset;

		offset = map__map_ip(kmap, addr) - sym->start;

		if (offset == 0)
			return sym->name;

		snprintf(name_buf, sizeof(name_buf), "%s+%#lx", sym->name, offset);
	} else {
		snprintf(name_buf, sizeof(name_buf), "%#lx", (unsigned long)addr);
	}

	return name_buf;
}
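
/*
 * Drain the BPF lock_stat map into perf's lock_stat list, resolving
 * names and (optionally) callstacks along the way.
 */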
int lock_contention_read(struct lock_contention *con)
{
	int fd, stack, err = 0;
	struct contention_key *prev_key, key = {};
	struct contention_data data = {};
	struct lock_stat *st = NULL;
	struct machine *machine = con->machine;
	u64 *stack_trace;
	size_t stack_size = con->max_stack * sizeof(*stack_trace);

	fd = bpf_map__fd(skel->maps.lock_stat);
	stack = bpf_map__fd(skel->maps.stacks);

	con->fails.task = skel->bss->task_fail;
	con->fails.stack = skel->bss->stack_fail;
	con->fails.time = skel->bss->time_fail;
	con->fails.data = skel->bss->data_fail;

	stack_trace = zalloc(stack_size);
	if (stack_trace == NULL)
		return -1;

	if (con->aggr_mode == LOCK_AGGR_TASK) {
		struct thread *idle = __machine__findnew_thread(machine,
								/*pid=*/0,
								/*tid=*/0);
		thread__set_comm(idle, "swapper", /*timestamp=*/0);
	}
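
	/*
	 * Run collect_lock_syms once to record addresses of locks that
	 * have no global symbol (e.g. per-cpu rq locks) in the lock_syms
	 * map so they can be named later.
	 */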
	if (con->aggr_mode == LOCK_AGGR_ADDR) {
		DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
			.flags = BPF_F_TEST_RUN_ON_CPU,
		);
		int prog_fd = bpf_program__fd(skel->progs.collect_lock_syms);

		bpf_prog_test_run_opts(prog_fd, &opts);
	}

	/* make sure it loads the kernel map */
	maps__load_first(machine->kmaps);

	prev_key = NULL;
	while (!bpf_map_get_next_key(fd, prev_key, &key)) {
		s64 ls_key;
		const char *name;

		/* to handle errors in the loop body */
		err = -1;

		bpf_map_lookup_elem(fd, &key, &data);
		if (con->save_callstack) {
			bpf_map_lookup_elem(stack, &key.stack_id, stack_trace);

			if (!match_callstack_filter(machine, stack_trace)) {
				con->nr_filtered += data.count;
				goto next;
			}
		}

		switch (con->aggr_mode) {
		case LOCK_AGGR_CALLER:
			ls_key = key.stack_id;
			break;
		case LOCK_AGGR_TASK:
			ls_key = key.pid;
			break;
		case LOCK_AGGR_ADDR:
		case LOCK_AGGR_CGROUP:
			ls_key = key.lock_addr_or_cgroup;
			break;
		default:
			goto next;
		}
358 st->wait_time_total += data.total_time;
359 if (st->wait_time_max < data.max_time)
360 st->wait_time_max = data.max_time;
361 if (st->wait_time_min > data.min_time)
362 st->wait_time_min = data.min_time;
364 st->nr_contended += data.count;
365 if (st->nr_contended)
366 st->avg_wait_time = st->wait_time_total / st->nr_contended;

		name = lock_contention_get_name(con, &key, stack_trace, data.flags);
		st = lock_stat_findnew(ls_key, name, data.flags);
		if (st == NULL)
			break;

		st->nr_contended = data.count;
		st->wait_time_total = data.total_time;
		st->wait_time_max = data.max_time;
		st->wait_time_min = data.min_time;

		if (data.count)
			st->avg_wait_time = data.total_time / data.count;

		if (con->aggr_mode == LOCK_AGGR_CALLER && verbose > 0) {
			st->callstack = memdup(stack_trace, stack_size);
			if (st->callstack == NULL)
				break;
		}

next:
		prev_key = &key;

		/* we're fine now, reset the error */
		err = 0;
	}

	free(stack_trace);

	return err;
}
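
/* tear down the skeleton and release the cached cgroup tree */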
int lock_contention_finish(struct lock_contention *con)
{
	if (skel) {
		skel->bss->enabled = 0;
		lock_contention_bpf__destroy(skel);
	}

	while (!RB_EMPTY_ROOT(&con->cgroups)) {
		struct rb_node *node = rb_first(&con->cgroups);
		struct cgroup *cgrp = rb_entry(node, struct cgroup, node);

		rb_erase(node, &con->cgroups);
		cgroup__put(cgrp);
	}

	return 0;
}