// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/stringhash.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "coresight-etm-perf.h"
#include "coresight-priv.h"

static struct pmu etm_pmu;
static bool etm_perf_up;
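
/*
 * Per-CPU state for a session: ctx_handle is the perf AUX output handle
 * for the event running on that CPU, and csdev_src is the CoreSight
 * source (ETM) feeding it, registered via etm_perf_symlink().
 */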
static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
static DEFINE_PER_CPU(struct coresight_device *, csdev_src);

/*
 * The PMU formats were originally for ETMv3.5/PTM's ETMCR 'config';
 * now take them as general formats and apply them to all ETMs.
 */
PMU_FORMAT_ATTR(cycacc,		"config:" __stringify(ETM_OPT_CYCACC));
/* contextid1 enables tracing CONTEXTIDR_EL1 for ETMv4 */
PMU_FORMAT_ATTR(contextid1,	"config:" __stringify(ETM_OPT_CTXTID));
/* contextid2 enables tracing CONTEXTIDR_EL2 for ETMv4 */
PMU_FORMAT_ATTR(contextid2,	"config:" __stringify(ETM_OPT_CTXTID2));
PMU_FORMAT_ATTR(timestamp,	"config:" __stringify(ETM_OPT_TS));
PMU_FORMAT_ATTR(retstack,	"config:" __stringify(ETM_OPT_RETSTK));
/* Sink ID - same for all ETMs */
PMU_FORMAT_ATTR(sinkid,		"config2:0-31");
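
/*
 * Illustrative use only (option availability depends on the platform):
 * the formats above map perf's event syntax onto perf_event_attr::config
 * bits, e.g.
 *
 *	perf record -e cs_etm/cycacc,timestamp/u -- ls
 *
 * sets the ETM_OPT_CYCACC and ETM_OPT_TS bits for the session.
 */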

/*
 * contextid always traces the "PID". The PID is in CONTEXTIDR_EL1
 * when the kernel is running at EL1; when the kernel is at EL2,
 * the PID is in CONTEXTIDR_EL2.
 */
static ssize_t format_attr_contextid_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	int pid_fmt = ETM_OPT_CTXTID;

#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X)
	pid_fmt = is_kernel_in_hyp_mode() ? ETM_OPT_CTXTID2 : ETM_OPT_CTXTID;
#endif
	return sprintf(page, "config:%d\n", pid_fmt);
}

struct device_attribute format_attr_contextid =
	__ATTR(contextid, 0444, format_attr_contextid_show, NULL);

static struct attribute *etm_config_formats_attr[] = {
	&format_attr_cycacc.attr,
	&format_attr_contextid.attr,
	&format_attr_contextid1.attr,
	&format_attr_contextid2.attr,
	&format_attr_timestamp.attr,
	&format_attr_retstack.attr,
	&format_attr_sinkid.attr,
	NULL,
};

static const struct attribute_group etm_pmu_format_group = {
	.name   = "format",
	.attrs  = etm_config_formats_attr,
};

static struct attribute *etm_config_sinks_attr[] = {
	NULL,
};

static const struct attribute_group etm_pmu_sinks_group = {
	.name   = "sinks",
	.attrs  = etm_config_sinks_attr,
};

static const struct attribute_group *etm_pmu_attr_groups[] = {
	&etm_pmu_format_group,
	&etm_pmu_sinks_group,
	NULL,
};

static inline struct list_head **
etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu)
{
	return per_cpu_ptr(data->path, cpu);
}

static inline struct list_head *
etm_event_cpu_path(struct etm_event_data *data, int cpu)
{
	return *etm_event_cpu_path_ptr(data, cpu);
}

static void etm_event_read(struct perf_event *event) {}

static int etm_addr_filters_alloc(struct perf_event *event)
{
	struct etm_filters *filters;
	int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);

	filters = kzalloc_node(sizeof(struct etm_filters), GFP_KERNEL, node);
	if (!filters)
		return -ENOMEM;

	if (event->parent)
		memcpy(filters, event->parent->hw.addr_filters,
		       sizeof(*filters));

	event->hw.addr_filters = filters;

	return 0;
}

static void etm_event_destroy(struct perf_event *event)
{
	kfree(event->hw.addr_filters);
	event->hw.addr_filters = NULL;
}

static int etm_event_init(struct perf_event *event)
{
	int ret = 0;

	if (event->attr.type != etm_pmu.type) {
		ret = -ENOENT;
		goto out;
	}

	ret = etm_addr_filters_alloc(event);
	if (ret)
		goto out;

	event->destroy = etm_event_destroy;
out:
	return ret;
}

static void free_sink_buffer(struct etm_event_data *event_data)
{
	int cpu;
	cpumask_t *mask = &event_data->mask;
	struct coresight_device *sink;

	if (!event_data->snk_config)
		return;

	if (WARN_ON(cpumask_empty(mask)))
		return;

	cpu = cpumask_first(mask);
	sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
	sink_ops(sink)->free_buffer(event_data->snk_config);
}

static void free_event_data(struct work_struct *work)
{
	int cpu;
	cpumask_t *mask;
	struct etm_event_data *event_data;

	event_data = container_of(work, struct etm_event_data, work);
	mask = &event_data->mask;

	/* Free the sink buffers, if there are any */
	free_sink_buffer(event_data);

	for_each_cpu(cpu, mask) {
		struct list_head **ppath;

		ppath = etm_event_cpu_path_ptr(event_data, cpu);
		if (!(IS_ERR_OR_NULL(*ppath)))
			coresight_release_path(*ppath);
		*ppath = NULL;
	}

	free_percpu(event_data->path);
	kfree(event_data);
}

static void *alloc_event_data(int cpu)
{
	cpumask_t *mask;
	struct etm_event_data *event_data;

	/* First get memory for the session's data */
	event_data = kzalloc(sizeof(struct etm_event_data), GFP_KERNEL);
	if (!event_data)
		return NULL;

	/* Trace on all present CPUs when no CPU was specified */
	mask = &event_data->mask;
	if (cpu != -1)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_copy(mask, cpu_present_mask);

	/*
	 * Each CPU has a single path between source and destination. As such
	 * allocate an array using CPU numbers as indexes. That way a path
	 * for any CPU can easily be accessed at any given time. We proceed
	 * the same way for sessions involving a single CPU. The cost of
	 * unused memory when dealing with single CPU trace scenarios is small
	 * compared to the cost of searching through an optimized array.
	 */
	event_data->path = alloc_percpu(struct list_head *);

	if (!event_data->path) {
		kfree(event_data);
		return NULL;
	}

	return event_data;
}
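
/*
 * Freeing is deferred to a workqueue rather than done in etm_free_aux()
 * itself: perf may release the AUX buffer from a context where releasing
 * the trace path (which can sleep) is not safe, so the heavy lifting
 * happens in free_event_data() in process context.
 */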
static void etm_free_aux(void *data)
{
	struct etm_event_data *event_data = data;

	schedule_work(&event_data->work);
}

static void *etm_setup_aux(struct perf_event *event, void **pages,
			   int nr_pages, bool overwrite)
{
	u32 id;
	int cpu = event->cpu;
	cpumask_t *mask;
	struct coresight_device *sink = NULL;
	struct etm_event_data *event_data = NULL;

	event_data = alloc_event_data(cpu);
	if (!event_data)
		return NULL;
	INIT_WORK(&event_data->work, free_event_data);

	/* First get the selected sink from user space. */
	if (event->attr.config2) {
		id = (u32)event->attr.config2;
		sink = coresight_get_sink_by_id(id);
	}

	mask = &event_data->mask;

	/*
	 * Setup the path for each CPU in a trace session. We try to build
	 * a trace path for each CPU in the mask. If we don't find an ETM
	 * for the CPU or fail to build a path, we clear the CPU from the
	 * mask and continue with the rest. If ever we try to trace on those
	 * CPUs, we can handle it and fail the session.
	 */
	for_each_cpu(cpu, mask) {
		struct list_head *path;
		struct coresight_device *csdev;

		csdev = per_cpu(csdev_src, cpu);
		/*
		 * If there is no ETM associated with this CPU clear it from
		 * the mask and continue with the rest. If ever we try to trace
		 * on this CPU, we handle it accordingly.
		 */
		if (!csdev) {
			cpumask_clear_cpu(cpu, mask);
			continue;
		}

		/*
		 * No sink provided - look for a default sink for one of the
		 * devices. At present we only support topologies where all
		 * CPUs use the same sink [N:1], so we only need to find one
		 * sink. The coresight_build_path later will remove any CPU
		 * that does not attach to the sink, or if we have not found
		 * a sink.
		 */
		if (!sink)
			sink = coresight_find_default_sink(csdev);

		/*
		 * Building a path doesn't enable it, it simply builds a
		 * list of devices from source to sink that can be
		 * referenced later when the path is actually needed.
		 */
		path = coresight_build_path(csdev, sink);
		if (IS_ERR(path)) {
			cpumask_clear_cpu(cpu, mask);
			continue;
		}

		*etm_event_cpu_path_ptr(event_data, cpu) = path;
	}

	/* no sink found for any CPU - cannot trace */
	if (!sink)
		goto err;

	/* If we don't have any CPUs ready for tracing, abort */
	cpu = cpumask_first(mask);
	if (cpu >= nr_cpu_ids)
		goto err;

	if (!sink_ops(sink)->alloc_buffer || !sink_ops(sink)->free_buffer)
		goto err;

	/* Allocate the sink buffer for this session */
	event_data->snk_config =
			sink_ops(sink)->alloc_buffer(sink, event, pages,
						     nr_pages, overwrite);
	if (!event_data->snk_config)
		goto err;

out:
	return event_data;

err:
	etm_free_aux(event_data);
	event_data = NULL;
	goto out;
}
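
/*
 * Illustrative use only (the sink name is platform specific): perf can
 * pin a session to a given sink with the @ notation, which it turns into
 * the hash carried in perf_event_attr::config2, e.g.
 *
 *	perf record -e cs_etm/@tmc_etr0/u -- ls
 *
 * With no sink specified, coresight_find_default_sink() picks one.
 */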

static void etm_event_start(struct perf_event *event, int flags)
{
	int cpu = smp_processor_id();
	struct etm_event_data *event_data;
	struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
	struct list_head *path;

	if (!csdev)
		goto fail;

	/*
	 * Deal with the ring buffer API and get a handle on the
	 * session's information.
	 */
	event_data = perf_aux_output_begin(handle, event);
	if (!event_data)
		goto fail;

	/*
	 * Check if this ETM is allowed to trace, as decided
	 * at etm_setup_aux(). This could be due to an unreachable
	 * sink from this ETM. We can't do much in this case if
	 * the sink was specified or hinted to the driver. For
	 * now, simply don't record anything on this ETM.
	 */
	if (!cpumask_test_cpu(cpu, &event_data->mask))
		goto fail_end_stop;

	path = etm_event_cpu_path(event_data, cpu);
	/* We need a sink, no need to continue without one */
	sink = coresight_get_sink(path);
	if (WARN_ON_ONCE(!sink))
		goto fail_end_stop;

	/* Nothing will happen without a path */
	if (coresight_enable_path(path, CS_MODE_PERF, handle))
		goto fail_end_stop;

	/* Tell the perf core the event is alive */
	event->hw.state = 0;

	/* Finally enable the tracer */
	if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
		goto fail_disable_path;

out:
	return;

fail_disable_path:
	coresight_disable_path(path);
fail_end_stop:
	perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
	perf_aux_output_end(handle, 0);
fail:
	event->hw.state = PERF_HES_STOPPED;
	goto out;
}

static void etm_event_stop(struct perf_event *event, int mode)
{
	int cpu = smp_processor_id();
	unsigned long size;
	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
	struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
	struct etm_event_data *event_data = perf_get_aux(handle);
	struct list_head *path;

	if (event->hw.state == PERF_HES_STOPPED)
		return;

	if (!csdev)
		return;

	path = etm_event_cpu_path(event_data, cpu);
	if (!path)
		return;

	sink = coresight_get_sink(path);
	if (!sink)
		return;

	/* stop tracer */
	source_ops(csdev)->disable(csdev, event);

	/* tell the core */
	event->hw.state = PERF_HES_STOPPED;

	if (mode & PERF_EF_UPDATE) {
		if (WARN_ON_ONCE(handle->event != event))
			return;

		/* update trace information */
		if (!sink_ops(sink)->update_buffer)
			return;

		size = sink_ops(sink)->update_buffer(sink, handle,
						     event_data->snk_config);
		perf_aux_output_end(handle, size);
	}

	/* Disabling the path makes its elements available to other sessions */
	coresight_disable_path(path);
}

static int etm_event_add(struct perf_event *event, int mode)
{
	int ret = 0;
	struct hw_perf_event *hwc = &event->hw;

	if (mode & PERF_EF_START) {
		etm_event_start(event, 0);
		if (hwc->state & PERF_HES_STOPPED)
			ret = -EINVAL;
	} else {
		hwc->state = PERF_HES_STOPPED;
	}

	return ret;
}

static void etm_event_del(struct perf_event *event, int mode)
{
	etm_event_stop(event, PERF_EF_UPDATE);
}

static int etm_addr_filters_validate(struct list_head *filters)
{
	bool range = false, address = false;
	int index = 0;
	struct perf_addr_filter *filter;

	list_for_each_entry(filter, filters, entry) {
		/*
		 * No need to go further if there's no more
		 * room for filters.
		 */
		if (++index > ETM_ADDR_CMP_MAX)
			return -EOPNOTSUPP;

		/* filter::size==0 means single address trigger */
		if (filter->size) {
			/*
			 * The existing code relies on START/STOP filters
			 * being address filters.
			 */
			if (filter->action == PERF_ADDR_FILTER_ACTION_START ||
			    filter->action == PERF_ADDR_FILTER_ACTION_STOP)
				return -EOPNOTSUPP;

			range = true;
		} else
			address = true;

		/*
		 * At this time we don't allow range and start/stop filtering
		 * to coexist; they have to be mutually exclusive.
		 */
		if (range && address)
			return -EOPNOTSUPP;
	}

	return 0;
}
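
/*
 * Illustrative use only (addresses and binary are made up): perf feeds
 * these filters from its --filter option, e.g.
 *
 *	perf record -e cs_etm//u --filter 'filter 0x72c/0x40@/bin/ls' -- ls
 *
 * where 'filter start/size@file' becomes an address range and the
 * 'start'/'stop' actions become single-address start/stop triggers.
 */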

static void etm_addr_filters_sync(struct perf_event *event)
{
	struct perf_addr_filters_head *head = perf_event_addr_filters(event);
	unsigned long start, stop;
	struct perf_addr_filter_range *fr = event->addr_filter_ranges;
	struct etm_filters *filters = event->hw.addr_filters;
	struct etm_filter *etm_filter;
	struct perf_addr_filter *filter;
	int i = 0;

	list_for_each_entry(filter, &head->list, entry) {
		start = fr[i].start;
		stop = start + fr[i].size;
		etm_filter = &filters->etm_filter[i];

		switch (filter->action) {
		case PERF_ADDR_FILTER_ACTION_FILTER:
			etm_filter->start_addr = start;
			etm_filter->stop_addr = stop;
			etm_filter->type = ETM_ADDR_TYPE_RANGE;
			break;
		case PERF_ADDR_FILTER_ACTION_START:
			etm_filter->start_addr = start;
			etm_filter->type = ETM_ADDR_TYPE_START;
			break;
		case PERF_ADDR_FILTER_ACTION_STOP:
			etm_filter->stop_addr = stop;
			etm_filter->type = ETM_ADDR_TYPE_STOP;
			break;
		}
		i++;
	}

	filters->nr_filters = i;
}
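
/*
 * Publish (or retract) a "cpuN" symlink from the PMU's sysfs directory
 * to the ETM device serving that CPU, and record the source in
 * csdev_src so the event callbacks above can find it.
 */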
int etm_perf_symlink(struct coresight_device *csdev, bool link)
{
	char entry[sizeof("cpu9999999")];
	int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev);
	struct device *pmu_dev = etm_pmu.dev;
	struct device *cs_dev = &csdev->dev;

	sprintf(entry, "cpu%d", cpu);

	if (!etm_perf_up)
		return -EPROBE_DEFER;

	if (link) {
		ret = sysfs_create_link(&pmu_dev->kobj, &cs_dev->kobj, entry);
		if (ret)
			return ret;
		per_cpu(csdev_src, cpu) = csdev;
	} else {
		sysfs_remove_link(&pmu_dev->kobj, entry);
		per_cpu(csdev_src, cpu) = NULL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(etm_perf_symlink);

static ssize_t etm_perf_sink_name_show(struct device *dev,
				       struct device_attribute *dattr,
				       char *buf)
{
	struct dev_ext_attribute *ea;

	ea = container_of(dattr, struct dev_ext_attribute, attr);
	return scnprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)(ea->var));
}
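
/*
 * Each sink is given a file in the PMU's "sinks" sysfs group, named
 * after the device and containing the hash of its name. perf reads
 * that hash to fill in the sinkid field (config2) defined above.
 */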
int etm_perf_add_symlink_sink(struct coresight_device *csdev)
{
	int ret;
	unsigned long hash;
	const char *name;
	struct device *pmu_dev = etm_pmu.dev;
	struct device *dev = &csdev->dev;
	struct dev_ext_attribute *ea;

	if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
	    csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
		return -EINVAL;

	if (csdev->ea != NULL)
		return -EINVAL;

	if (!etm_perf_up)
		return -EPROBE_DEFER;

	ea = devm_kzalloc(dev, sizeof(*ea), GFP_KERNEL);
	if (!ea)
		return -ENOMEM;

	name = dev_name(dev);
	/* See function coresight_get_sink_by_id() to know where this is used */
	hash = hashlen_hash(hashlen_string(NULL, name));

	sysfs_attr_init(&ea->attr.attr);
	ea->attr.attr.name = devm_kstrdup(dev, name, GFP_KERNEL);
	if (!ea->attr.attr.name)
		return -ENOMEM;

	ea->attr.attr.mode = 0444;
	ea->attr.show = etm_perf_sink_name_show;
	ea->var = (unsigned long *)hash;

	ret = sysfs_add_file_to_group(&pmu_dev->kobj,
				      &ea->attr.attr, "sinks");

	if (!ret)
		csdev->ea = ea;

	return ret;
}

void etm_perf_del_symlink_sink(struct coresight_device *csdev)
{
	struct device *pmu_dev = etm_pmu.dev;
	struct dev_ext_attribute *ea = csdev->ea;

	if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
	    csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
		return;

	if (!ea)
		return;

	sysfs_remove_file_from_group(&pmu_dev->kobj,
				     &ea->attr.attr, "sinks");
	csdev->ea = NULL;
}

int __init etm_perf_init(void)
{
	int ret;

	etm_pmu.capabilities		= (PERF_PMU_CAP_EXCLUSIVE |
					   PERF_PMU_CAP_ITRACE);

	etm_pmu.attr_groups		= etm_pmu_attr_groups;
	etm_pmu.task_ctx_nr		= perf_sw_context;
	etm_pmu.read			= etm_event_read;
	etm_pmu.event_init		= etm_event_init;
	etm_pmu.setup_aux		= etm_setup_aux;
	etm_pmu.free_aux		= etm_free_aux;
	etm_pmu.start			= etm_event_start;
	etm_pmu.stop			= etm_event_stop;
	etm_pmu.add			= etm_event_add;
	etm_pmu.del			= etm_event_del;
	etm_pmu.addr_filters_sync	= etm_addr_filters_sync;
	etm_pmu.addr_filters_validate	= etm_addr_filters_validate;
	etm_pmu.nr_addr_filters		= ETM_ADDR_CMP_MAX;

	ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
	if (ret == 0)
		etm_perf_up = true;

	return ret;
}

void __exit etm_perf_exit(void)
{
	perf_pmu_unregister(&etm_pmu);
}