// SPDX-License-Identifier: GPL-2.0-only
/*
 * BTS PMU driver for perf
 * Copyright (c) 2013-2014, Intel Corporation.
 */

#undef DEBUG

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/coredump.h>

#include <linux/sizes.h>
#include <asm/perf_event.h>

#include "../perf_event.h"

struct bts_ctx {
        struct perf_output_handle       handle;
        struct debug_store              ds_back;
        int                             state;
};

/* BTS context states: */
enum {
        /* no ongoing AUX transactions */
        BTS_STATE_STOPPED = 0,
        /* AUX transaction is on, BTS tracing is disabled */
        BTS_STATE_INACTIVE,
        /* AUX transaction is on, BTS tracing is running */
        BTS_STATE_ACTIVE,
};

static DEFINE_PER_CPU(struct bts_ctx, bts_ctx);

#define BTS_RECORD_SIZE         24
#define BTS_SAFETY_MARGIN       4080
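
/*
 * Each BTS record is three u64 fields (branch-from linear address,
 * branch-to linear address, flags), hence the 24-byte record size.
 * A sketch of the layout, mirroring struct bts_record in
 * arch/x86/events/intel/ds.c:
 *
 *      struct bts_record {
 *              u64     from;   // branch source
 *              u64     to;     // branch destination
 *              u64     flags;  // hardware flags
 *      };
 *
 * BTS_SAFETY_MARGIN (4080 bytes, i.e. 170 records) is the headroom kept
 * between the interrupt threshold and the end of the usable area, so
 * that records arriving between the threshold PMI and tracing actually
 * stopping still land inside the buffer.
 */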

struct bts_phys {
        struct page     *page;
        unsigned long   size;
        unsigned long   offset;
        unsigned long   displacement;
};

struct bts_buffer {
        size_t          real_size;      /* multiple of BTS_RECORD_SIZE */
        unsigned int    nr_pages;
        unsigned int    nr_bufs;
        unsigned int    cur_buf;
        bool            snapshot;
        local_t         data_size;
        local_t         head;
        unsigned long   end;
        void            **data_pages;
        struct bts_phys buf[0];
};

static struct pmu bts_pmu;
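
/*
 * The AUX ring buffer allocator stores the allocation order of a
 * high-order page in page_private() (see rb_alloc_aux_page() in
 * kernel/events/ring_buffer.c); buf_size() relies on that to recover
 * the size of one physical buffer from its first struct page.
 */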
static size_t buf_size(struct page *page)
{
        return 1 << (PAGE_SHIFT + page_private(page));
}

static void *
bts_buffer_setup_aux(struct perf_event *event, void **pages,
                     int nr_pages, bool overwrite)
{
        struct bts_buffer *buf;
        struct page *page;
        int cpu = event->cpu;
        int node = (cpu == -1) ? cpu : cpu_to_node(cpu);
        unsigned long offset;
        size_t size = nr_pages << PAGE_SHIFT;
        int pg, nbuf, pad;

        /* count all the high order buffers */
        for (pg = 0, nbuf = 0; pg < nr_pages;) {
                page = virt_to_page(pages[pg]);
                if (WARN_ON_ONCE(!PagePrivate(page) && nr_pages > 1))
                        return NULL;
                pg += 1 << page_private(page);
                nbuf++;
        }

        /*
         * to avoid interrupts in overwrite mode, only allow one physical
         * buffer
         */
        if (overwrite && nbuf > 1)
                return NULL;

        buf = kzalloc_node(offsetof(struct bts_buffer, buf[nbuf]), GFP_KERNEL, node);
        if (!buf)
                return NULL;

        buf->nr_pages = nr_pages;
        buf->nr_bufs = nbuf;
        buf->snapshot = overwrite;
        buf->data_pages = pages;
        buf->real_size = size - size % BTS_RECORD_SIZE;
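
        /*
         * Lay out the physical buffers so that no record straddles a
         * buffer boundary. Worked example for one 4 KiB page: 4096 is
         * not a multiple of BTS_RECORD_SIZE (24), so the buffer keeps
         * 170 records (4080 bytes) with 16 bytes of pad, and the next
         * buffer starts at a displacement of 24 - 16 = 8 bytes so that
         * record-sized strides stay contiguous across buffers.
         */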
        for (pg = 0, nbuf = 0, offset = 0, pad = 0; nbuf < buf->nr_bufs; nbuf++) {
                unsigned int __nr_pages;

                page = virt_to_page(pages[pg]);
                __nr_pages = PagePrivate(page) ? 1 << page_private(page) : 1;
                buf->buf[nbuf].page = page;
                buf->buf[nbuf].offset = offset;
                buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0);
                buf->buf[nbuf].size = buf_size(page) - buf->buf[nbuf].displacement;
                pad = buf->buf[nbuf].size % BTS_RECORD_SIZE;
                buf->buf[nbuf].size -= pad;

                pg += __nr_pages;
                offset += __nr_pages << PAGE_SHIFT;
        }

        return buf;
}

static void bts_buffer_free_aux(void *data)
{
        kfree(data);
}

static unsigned long bts_buffer_offset(struct bts_buffer *buf, unsigned int idx)
{
        return buf->buf[idx].offset + buf->buf[idx].displacement;
}

static void
bts_config_buffer(struct bts_buffer *buf)
{
        int cpu = raw_smp_processor_id();
        struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
        struct bts_phys *phys = &buf->buf[buf->cur_buf];
        unsigned long index, thresh = 0, end = phys->size;
        struct page *page = phys->page;

        index = local_read(&buf->head);

        if (!buf->snapshot) {
                if (buf->end < phys->offset + buf_size(page))
                        end = buf->end - phys->offset - phys->displacement;

                index -= phys->offset + phys->displacement;

                if (end - index > BTS_SAFETY_MARGIN)
                        thresh = end - BTS_SAFETY_MARGIN;
                else if (end - index > BTS_RECORD_SIZE)
                        thresh = end - BTS_RECORD_SIZE;
                else
                        thresh = end;
        }

        ds->bts_buffer_base = (u64)(long)page_address(page) + phys->displacement;
        ds->bts_index = ds->bts_buffer_base + index;
        ds->bts_absolute_maximum = ds->bts_buffer_base + end;
        ds->bts_interrupt_threshold = !buf->snapshot
                                      ? ds->bts_buffer_base + thresh
                                      : ds->bts_absolute_maximum + BTS_RECORD_SIZE;
}
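
/*
 * In the snapshot case above, parking the interrupt threshold one record
 * past the absolute maximum means the threshold PMI never fires; the
 * hardware treats the BTS buffer as circular and wraps to bts_buffer_base
 * at the absolute maximum, which is exactly what overwrite mode wants.
 */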

static void bts_buffer_pad_out(struct bts_phys *phys, unsigned long head)
{
        unsigned long index = head - phys->offset;

        memset(page_address(phys->page) + index, 0, phys->size - index);
}

static void bts_update(struct bts_ctx *bts)
{
        int cpu = raw_smp_processor_id();
        struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
        struct bts_buffer *buf = perf_get_aux(&bts->handle);
        unsigned long index = ds->bts_index - ds->bts_buffer_base, old, head;

        if (!buf)
                return;

        head = index + bts_buffer_offset(buf, buf->cur_buf);
        old = local_xchg(&buf->head, head);

        if (!buf->snapshot) {
                if (old == head)
                        return;

                if (ds->bts_index >= ds->bts_absolute_maximum)
                        perf_aux_output_flag(&bts->handle,
                                             PERF_AUX_FLAG_TRUNCATED);

                /*
                 * old and head are always in the same physical buffer, so we
                 * can subtract them to get the data size.
                 */
                local_add(head - old, &buf->data_size);
        } else {
                local_set(&buf->data_size, head);
        }
}

static int
bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle);

/*
 * Ordering PMU callbacks wrt themselves and the PMI is done by means
 * of bts::state, which:
 *  - is set when bts::handle::event is valid, that is, between
 *    perf_aux_output_begin() and perf_aux_output_end();
 *  - is zero otherwise;
 *  - is ordered against bts::handle::event with a compiler barrier.
 */
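
/*
 * A sketch of the transitions implemented by the callbacks below:
 *
 *      STOPPED  ---- bts_event_start() ----------------> ACTIVE
 *      ACTIVE   ---- intel_bts_disable_local() --------> INACTIVE
 *      INACTIVE ---- intel_bts_enable_local() ---------> ACTIVE
 *      ACTIVE   ---- bts_event_stop() -----------------> STOPPED
 *      INACTIVE ---- intel_bts_interrupt() on error ---> STOPPED
 */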

static void __bts_event_start(struct perf_event *event)
{
        struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
        struct bts_buffer *buf = perf_get_aux(&bts->handle);
        u64 config = 0;

        if (!buf->snapshot)
                config |= ARCH_PERFMON_EVENTSEL_INT;
        if (!event->attr.exclude_kernel)
                config |= ARCH_PERFMON_EVENTSEL_OS;
        if (!event->attr.exclude_user)
                config |= ARCH_PERFMON_EVENTSEL_USR;
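
        /*
         * These EVENTSEL bits never reach an event select MSR: BTS is
         * driven through IA32_DEBUGCTL, and intel_pmu_enable_bts()
         * translates them into the DEBUGCTLMSR BTINT, BTS_OFF_OS and
         * BTS_OFF_USR bits.
         */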
        bts_config_buffer(buf);

        /*
         * local barrier to make sure that ds configuration made it
         * before we enable BTS and bts::state goes ACTIVE
         */
        wmb();

        /* INACTIVE/STOPPED -> ACTIVE */
        WRITE_ONCE(bts->state, BTS_STATE_ACTIVE);

        intel_pmu_enable_bts(config);
}

static void bts_event_start(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
        struct bts_buffer *buf;

        buf = perf_aux_output_begin(&bts->handle, event);
        if (!buf)
                goto fail_stop;

        if (bts_buffer_reset(buf, &bts->handle))
                goto fail_end_stop;

        bts->ds_back.bts_buffer_base = cpuc->ds->bts_buffer_base;
        bts->ds_back.bts_absolute_maximum = cpuc->ds->bts_absolute_maximum;
        bts->ds_back.bts_interrupt_threshold = cpuc->ds->bts_interrupt_threshold;

        perf_event_itrace_started(event);
        event->hw.state = 0;

        __bts_event_start(event);

        return;

fail_end_stop:
        perf_aux_output_end(&bts->handle, 0);

fail_stop:
        event->hw.state = PERF_HES_STOPPED;
}

static void __bts_event_stop(struct perf_event *event, int state)
{
        struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);

        /* ACTIVE -> INACTIVE(PMI)/STOPPED(->stop()) */
        WRITE_ONCE(bts->state, state);

        /*
         * No extra synchronization is mandated by the documentation to have
         * BTS data stores globally visible.
         */
        intel_pmu_disable_bts();
}

static void bts_event_stop(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
        struct bts_buffer *buf = NULL;
        int state = READ_ONCE(bts->state);

        if (state == BTS_STATE_ACTIVE)
                __bts_event_stop(event, BTS_STATE_STOPPED);

        if (state != BTS_STATE_STOPPED)
                buf = perf_get_aux(&bts->handle);

        event->hw.state |= PERF_HES_STOPPED;

        if (flags & PERF_EF_UPDATE) {
                bts_update(bts);

                if (buf) {
                        if (buf->snapshot)
                                bts->handle.head =
                                        local_xchg(&buf->data_size,
                                                   buf->nr_pages << PAGE_SHIFT);
                        perf_aux_output_end(&bts->handle,
                                            local_xchg(&buf->data_size, 0));
                }

                cpuc->ds->bts_index = bts->ds_back.bts_buffer_base;
                cpuc->ds->bts_buffer_base = bts->ds_back.bts_buffer_base;
                cpuc->ds->bts_absolute_maximum = bts->ds_back.bts_absolute_maximum;
                cpuc->ds->bts_interrupt_threshold = bts->ds_back.bts_interrupt_threshold;
        }
}

void intel_bts_enable_local(void)
{
        struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
        int state = READ_ONCE(bts->state);

        /*
         * Here we transition from INACTIVE to ACTIVE;
         * if we instead are STOPPED from the interrupt handler,
         * stay that way. Can't be ACTIVE here though.
         */
        if (WARN_ON_ONCE(state == BTS_STATE_ACTIVE))
                return;

        if (state == BTS_STATE_STOPPED)
                return;

        if (bts->handle.event)
                __bts_event_start(bts->handle.event);
}

void intel_bts_disable_local(void)
{
        struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);

        /*
         * Here we transition from ACTIVE to INACTIVE;
         * do nothing for STOPPED or INACTIVE.
         */
        if (READ_ONCE(bts->state) != BTS_STATE_ACTIVE)
                return;

        if (bts->handle.event)
                __bts_event_stop(bts->handle.event, BTS_STATE_INACTIVE);
}

static int
bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
{
        unsigned long head, space, next_space, pad, gap, skip, wakeup;
        unsigned int next_buf;
        struct bts_phys *phys, *next_phys;
        int ret;

        if (buf->snapshot)
                return 0;

        head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

        phys = &buf->buf[buf->cur_buf];
        space = phys->offset + phys->displacement + phys->size - head;
        pad = space;
        if (space > handle->size) {
                space = handle->size;
                space -= space % BTS_RECORD_SIZE;
        }
        if (space <= BTS_SAFETY_MARGIN) {
                /* See if next phys buffer has more space */
                next_buf = buf->cur_buf + 1;
                if (next_buf >= buf->nr_bufs)
                        next_buf = 0;
                next_phys = &buf->buf[next_buf];
                gap = buf_size(phys->page) - phys->displacement - phys->size +
                      next_phys->displacement;
                skip = pad + gap;
                if (handle->size >= skip) {
                        next_space = next_phys->size;
                        if (next_space + skip > handle->size) {
                                next_space = handle->size - skip;
                                next_space -= next_space % BTS_RECORD_SIZE;
                        }
                        if (next_space > space || !space) {
                                if (pad)
                                        bts_buffer_pad_out(phys, head);
                                ret = perf_aux_output_skip(handle, skip);
                                if (ret)
                                        return ret;
                                /* Advance to next phys buffer */
                                phys = next_phys;
                                space = next_space;
                                head = phys->offset + phys->displacement;
                                /*
                                 * After this, cur_buf and head won't match ds
                                 * anymore, so we must not be racing with
                                 * bts_update().
                                 */
                                buf->cur_buf = next_buf;
                                local_set(&buf->head, head);
                        }
                }
        }

        /* Don't go far beyond wakeup watermark */
        wakeup = BTS_SAFETY_MARGIN + BTS_RECORD_SIZE + handle->wakeup -
                 handle->head;
        if (space > wakeup) {
                space = wakeup;
                space -= space % BTS_RECORD_SIZE;
        }

        buf->end = head + space;

        /*
         * If we have no space, the lost notification would have been sent when
         * we hit absolute_maximum - see bts_update()
         */
        if (!space)
                return -ENOSPC;

        return 0;
}
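
/*
 * Note the wakeup clamp above: buf->end, and therefore the threshold PMI,
 * is placed just past the consumer's wakeup watermark, so the consumer is
 * woken up promptly instead of only once the remaining space fills up.
 */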

int intel_bts_interrupt(void)
{
        struct debug_store *ds = this_cpu_ptr(&cpu_hw_events)->ds;
        struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
        struct perf_event *event = bts->handle.event;
        struct bts_buffer *buf;
        s64 old_head;
        int err = -ENOSPC, handled = 0;

        /*
         * The only surefire way of knowing if this NMI is ours is by checking
         * the write ptr against the PMI threshold.
         */
        if (ds && (ds->bts_index >= ds->bts_interrupt_threshold))
                handled = 1;

        /*
         * this is wrapped in intel_bts_enable_local/intel_bts_disable_local,
         * so we can only be INACTIVE or STOPPED
         */
        if (READ_ONCE(bts->state) == BTS_STATE_STOPPED)
                return handled;

        buf = perf_get_aux(&bts->handle);
        if (!buf)
                return handled;

        /*
         * Skip snapshot counters: they don't use the interrupt, but
         * there's no other way of telling, because the pointer will
         * keep moving
         */
        if (buf->snapshot)
                return 0;

        old_head = local_read(&buf->head);
        bts_update(bts);

        /* no new data */
        if (old_head == local_read(&buf->head))
                return handled;

        perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0));

        buf = perf_aux_output_begin(&bts->handle, event);
        if (buf)
                err = bts_buffer_reset(buf, &bts->handle);

        if (err) {
                WRITE_ONCE(bts->state, BTS_STATE_STOPPED);

                if (buf) {
                        /*
                         * BTS_STATE_STOPPED should be visible before
                         * cleared handle::event
                         */
                        barrier();
                        perf_aux_output_end(&bts->handle, 0);
                }
        }

        return 1;
}

static void bts_event_del(struct perf_event *event, int mode)
{
        bts_event_stop(event, PERF_EF_UPDATE);
}

static int bts_event_add(struct perf_event *event, int mode)
{
        struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;

        event->hw.state = PERF_HES_STOPPED;

        if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
                return -EBUSY;

        if (bts->handle.event)
                return -EBUSY;

        if (mode & PERF_EF_START) {
                bts_event_start(event, 0);
                if (hwc->state & PERF_HES_STOPPED)
                        return -EINVAL;
        }

        return 0;
}

static void bts_event_destroy(struct perf_event *event)
{
        x86_release_hardware();
        x86_del_exclusive(x86_lbr_exclusive_bts);
}

static int bts_event_init(struct perf_event *event)
{
        int ret;

        if (event->attr.type != bts_pmu.type)
                return -ENOENT;

        /*
         * BTS leaks kernel addresses even when CPL0 tracing is
         * disabled, so disallow intel_bts driver for unprivileged
         * users on paranoid systems since it provides trace data
         * to the user in a zero-copy fashion.
         *
         * Note that the default paranoia setting permits unprivileged
         * users to profile the kernel.
         */
        if (event->attr.exclude_kernel && perf_paranoid_kernel() &&
            !capable(CAP_SYS_ADMIN))
                return -EACCES;

        if (x86_add_exclusive(x86_lbr_exclusive_bts))
                return -EBUSY;

        ret = x86_reserve_hardware();
        if (ret) {
                x86_del_exclusive(x86_lbr_exclusive_bts);
                return ret;
        }

        event->destroy = bts_event_destroy;

        return 0;
}

static void bts_event_read(struct perf_event *event)
{
}

static __init int bts_init(void)
{
        if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
                return -ENODEV;

        if (boot_cpu_has(X86_FEATURE_PTI)) {
                /*
                 * BTS hardware writes through a virtual memory map; we must
                 * either use the kernel physical map, or the user mapping of
                 * the AUX buffer.
                 *
                 * However, since this driver supports per-CPU and per-task inherit
                 * we cannot use the user mapping since it will not be available
                 * if we're not running the owning process.
                 *
                 * With PTI we can't use the kernel map either, because it's not
                 * there when we run userspace.
                 *
                 * For now, disable this driver when using PTI.
                 */
                return -ENODEV;
        }

        bts_pmu.capabilities    = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE |
                                  PERF_PMU_CAP_EXCLUSIVE;
        bts_pmu.task_ctx_nr     = perf_sw_context;
        bts_pmu.event_init      = bts_event_init;
        bts_pmu.add             = bts_event_add;
        bts_pmu.del             = bts_event_del;
        bts_pmu.start           = bts_event_start;
        bts_pmu.stop            = bts_event_stop;
        bts_pmu.read            = bts_event_read;
        bts_pmu.setup_aux       = bts_buffer_setup_aux;
        bts_pmu.free_aux        = bts_buffer_free_aux;

        return perf_pmu_register(&bts_pmu, "intel_bts", -1);
}
arch_initcall(bts_init);
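
/*
 * Usage note: once registered, this PMU is driven entirely through the
 * generic perf_event syscall interface, e.g. from userspace (assuming
 * BTS-capable hardware and a kernel running without PTI):
 *
 *      perf record -e intel_bts// -- <workload>
 */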