/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/smp.h>
#define NUM_COUNTERS_NB		4
#define NUM_COUNTERS_L2		4
#define NUM_COUNTERS_L3		6
#define MAX_COUNTERS		6

#define RDPMC_BASE_NB		6
#define RDPMC_BASE_LLC		10
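
/*
 * The uncore counters are 48 bits wide; shifting raw values up by
 * COUNTER_SHIFT and back down in amd_uncore_read() makes counter deltas
 * wrap correctly at 48 bits.
 */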
#define COUNTER_SHIFT		16

#undef pr_fmt
#define pr_fmt(fmt)	"amd_uncore: " fmt
static int num_counters_llc;
static int num_counters_nb;

static HLIST_HEAD(uncore_unused_list);
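
/*
 * One amd_uncore instance is shared by every CPU that sits behind the same
 * northbridge / last level cache: the per-cpu pointers below all reference
 * the shared instance, and refcnt counts its users.
 */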
struct amd_uncore {
	int id;
	int refcnt;
	int cpu;
	int num_counters;
	int rdpmc_base;
	u32 msr_base;
	cpumask_t *active_mask;
	struct pmu *pmu;
	struct perf_event *events[MAX_COUNTERS];
	struct hlist_node node;
};

static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_llc;

static struct pmu amd_nb_pmu;
static struct pmu amd_llc_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_llc_active_mask;

static bool is_nb_event(struct perf_event *event)
{
	return event->pmu->type == amd_nb_pmu.type;
}

static bool is_llc_event(struct perf_event *event)
{
	return event->pmu->type == amd_llc_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
	if (is_nb_event(event) && amd_uncore_nb)
		return *per_cpu_ptr(amd_uncore_nb, event->cpu);
	else if (is_llc_event(event) && amd_uncore_llc)
		return *per_cpu_ptr(amd_uncore_llc, event->cpu);

	return NULL;
}

static void amd_uncore_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;
	s64 delta;

	/*
	 * since we do not enable counter overflow interrupts,
	 * we do not have to worry about prev_count changing on us
	 */

	prev = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new);
	local64_set(&hwc->prev_count, new);
	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}

static void amd_uncore_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

	hwc->state = 0;
	wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
	perf_event_update_userpage(event);
}

static void amd_uncore_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		amd_uncore_read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}
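
/*
 * Counters are shared by every CPU behind the same uncore, so a free slot in
 * the shared events[] array is claimed with cmpxchg().
 */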
static int amd_uncore_add(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	/* are we already assigned? */
	if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
		goto out;

	for (i = 0; i < uncore->num_counters; i++) {
		if (uncore->events[i] == event) {
			hwc->idx = i;
			goto out;
		}
	}

	/* if not, take the first available counter */
	hwc->idx = -1;
	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
			hwc->idx = i;
			break;
		}
	}

out:
	if (hwc->idx == -1)
		return -EBUSY;

	hwc->config_base = uncore->msr_base + (2 * hwc->idx);
	hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
	hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		amd_uncore_start(event, PERF_EF_RELOAD);

	return 0;
}

static void amd_uncore_del(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	amd_uncore_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], event, NULL) == event)
			break;
	}

	hwc->idx = -1;
}

static int amd_uncore_event_init(struct perf_event *event)
{
	struct amd_uncore *uncore;
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * NB and Last level cache counters (MSRs) are shared across all cores
	 * that share the same NB / Last level cache. Interrupts can be directed
	 * to a single target core; however, event counts generated by processes
	 * running on other cores cannot be masked out. So we do not support
	 * sampling and per-thread events.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	/* NB and Last level cache counters do not have usr/os/guest/host bits */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_host || event->attr.exclude_guest)
		return -EINVAL;

	/* and we do not enable counter overflow interrupts */
	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

	if (event->cpu < 0)
		return -EINVAL;

	uncore = event_to_amd_uncore(event);
	if (!uncore)
		return -ENODEV;

	/*
	 * since request can come in to any of the shared cores, we will remap
	 * to a single common cpu.
	 */
	event->cpu = uncore->cpu;

	return 0;
}

static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	cpumask_t *active_mask;
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu->type == amd_nb_pmu.type)
		active_mask = &amd_nb_active_mask;
	else if (pmu->type == amd_llc_pmu.type)
		active_mask = &amd_llc_active_mask;
	else
		return 0;

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);

static struct attribute *amd_uncore_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_uncore_attr_group = {
	.attrs = amd_uncore_attrs,
};

/*
 * Similar to PMU_FORMAT_ATTR but allowing for format_attr to be assigned based
 * on family
 */
#define AMD_FORMAT_ATTR(_dev, _name, _format)				\
static ssize_t								\
_dev##_show##_name(struct device *dev,					\
		   struct device_attribute *attr,			\
		   char *page)						\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct device_attribute format_attr_##_dev##_name = __ATTR_RO(_dev);

/* Used for each uncore counter type */
#define AMD_ATTRIBUTE(_name)						\
static struct attribute *amd_uncore_format_attr_##_name[] = {		\
	&format_attr_event_##_name.attr,				\
	&format_attr_umask.attr,					\
	NULL,								\
};									\
static struct attribute_group amd_uncore_format_group_##_name = {	\
	.name = "format",						\
	.attrs = amd_uncore_format_attr_##_name,			\
};									\
static const struct attribute_group *amd_uncore_attr_groups_##_name[] = { \
	&amd_uncore_attr_group,						\
	&amd_uncore_format_group_##_name,				\
	NULL,								\
};

AMD_FORMAT_ATTR(event, , "config:0-7,32-35");
AMD_FORMAT_ATTR(umask, , "config:8-15");
AMD_FORMAT_ATTR(event, _df, "config:0-7,32-35,59-60");
AMD_FORMAT_ATTR(event, _l3, "config:0-7");
AMD_ATTRIBUTE(df);
AMD_ATTRIBUTE(l3);

static struct pmu amd_nb_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

static struct pmu amd_llc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
};

static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
			cpu_to_node(cpu));
}
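
/*
 * CPU hotplug "prepare" callback: allocate a tentative amd_uncore for each
 * PMU on this CPU; amd_uncore_cpu_starting() may later replace it with an
 * already-online sibling's instance.
 */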
static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
	struct amd_uncore *uncore_nb = NULL, *uncore_llc;

	if (amd_uncore_nb) {
		uncore_nb = amd_uncore_alloc(cpu);
		if (!uncore_nb)
			goto fail;
		uncore_nb->cpu = cpu;
		uncore_nb->num_counters = num_counters_nb;
		uncore_nb->rdpmc_base = RDPMC_BASE_NB;
		uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
		uncore_nb->active_mask = &amd_nb_active_mask;
		uncore_nb->pmu = &amd_nb_pmu;
		uncore_nb->id = -1;
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
	}

	if (amd_uncore_llc) {
		uncore_llc = amd_uncore_alloc(cpu);
		if (!uncore_llc)
			goto fail;
		uncore_llc->cpu = cpu;
		uncore_llc->num_counters = num_counters_llc;
		uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
		uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
		uncore_llc->active_mask = &amd_llc_active_mask;
		uncore_llc->pmu = &amd_llc_pmu;
		uncore_llc->id = -1;
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
	}

	return 0;

fail:
	if (amd_uncore_nb)
		*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
	kfree(uncore_nb);
	return -ENOMEM;
}
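
/*
 * If another online CPU already carries an uncore with the same id, reuse
 * that instance and park this CPU's copy on uncore_unused_list so it can be
 * freed once the CPU is fully online.
 */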
static struct amd_uncore *
amd_uncore_find_online_sibling(struct amd_uncore *this,
			       struct amd_uncore * __percpu *uncores)
{
	unsigned int cpu;
	struct amd_uncore *that;

	for_each_online_cpu(cpu) {
		that = *per_cpu_ptr(uncores, cpu);

		if (!that)
			continue;

		if (this == that)
			continue;

		if (this->id == that->id) {
			hlist_add_head(&this->node, &uncore_unused_list);
			this = that;
			break;
		}
	}

	this->refcnt++;
	return this;
}
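
/*
 * Derive the uncore ids: the NB id is the Node ID from CPUID leaf 0x8000001e
 * (ECX[7:0]); the LLC id groups CPUs by the number of threads sharing the
 * last cache level reported by CPUID leaf 0x8000001d.
 */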
static int amd_uncore_cpu_starting(unsigned int cpu)
{
	unsigned int eax, ebx, ecx, edx;
	struct amd_uncore *uncore;

	if (amd_uncore_nb) {
		uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		uncore->id = ecx & 0xff;

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

	if (amd_uncore_llc) {
		unsigned int apicid = cpu_data(cpu).apicid;
		unsigned int nshared, subleaf, prev_eax = 0;

		uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
		/*
		 * Iterate over Cache Topology Definition leaves until no
		 * more cache descriptions are available.
		 */
		for (subleaf = 0; subleaf < 5; subleaf++) {
			cpuid_count(0x8000001d, subleaf, &eax, &ebx, &ecx, &edx);

			/* EAX[0:4] gives type of cache */
			if (!(eax & 0x1f))
				break;

			prev_eax = eax;
		}
		nshared = ((prev_eax >> 14) & 0xfff) + 1;

		uncore->id = apicid - (apicid % nshared);

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
	}

	return 0;
}

static void uncore_clean_online(void)
{
	struct amd_uncore *uncore;
	struct hlist_node *n;

	hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
		hlist_del(&uncore->node);
		kfree(uncore);
	}
}

static void uncore_online(unsigned int cpu,
			  struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	uncore_clean_online();

	if (cpu == uncore->cpu)
		cpumask_set_cpu(cpu, uncore->active_mask);
}

static int amd_uncore_cpu_online(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_online(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_online(cpu, amd_uncore_llc);

	return 0;
}
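
/*
 * When the CPU that owns an uncore goes offline, migrate its perf context to
 * another online CPU behind the same uncore and make that CPU the new owner.
 */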
static void uncore_down_prepare(unsigned int cpu,
				struct amd_uncore * __percpu *uncores)
{
	unsigned int i;
	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

	if (this->cpu != cpu)
		return;

	/* this cpu is going down, migrate to a shared sibling if possible */
	for_each_online_cpu(i) {
		struct amd_uncore *that = *per_cpu_ptr(uncores, i);

		if (cpu == i)
			continue;

		if (this == that) {
			perf_pmu_migrate_context(this->pmu, cpu, i);
			cpumask_clear_cpu(cpu, that->active_mask);
			cpumask_set_cpu(i, that->active_mask);
			that->cpu = i;
			break;
		}
	}
}

static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_down_prepare(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_down_prepare(cpu, amd_uncore_llc);

	return 0;
}

static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	if (cpu == uncore->cpu)
		cpumask_clear_cpu(cpu, uncore->active_mask);

	if (!--uncore->refcnt)
		kfree(uncore);
	*per_cpu_ptr(uncores, cpu) = NULL;
}

static int amd_uncore_cpu_dead(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_dead(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_dead(cpu, amd_uncore_llc);

	return 0;
}
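
/*
 * Both PMUs require TOPOEXT. The NB/DF and L2/L3 PMUs are then registered
 * independently, gated on X86_FEATURE_PERFCTR_NB and X86_FEATURE_PERFCTR_LLC
 * respectively, before the CPU hotplug callbacks are installed.
 */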
static int __init amd_uncore_init(void)
{
	int ret = -ENODEV;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
		return -ENODEV;

	if (boot_cpu_data.x86 == 0x17) {
		/*
		 * For F17h, the Northbridge counters are repurposed as Data
		 * Fabric counters. L3 counters are also supported. The PMUs
		 * are exported based on family as either L2 or L3 and NB or DF.
		 */
		num_counters_nb = NUM_COUNTERS_NB;
		num_counters_llc = NUM_COUNTERS_L3;
		amd_nb_pmu.name = "amd_df";
		amd_llc_pmu.name = "amd_l3";
		format_attr_event_df.show = &event_show_df;
		format_attr_event_l3.show = &event_show_l3;
	} else {
		num_counters_nb = NUM_COUNTERS_NB;
		num_counters_llc = NUM_COUNTERS_L2;
		amd_nb_pmu.name = "amd_nb";
		amd_llc_pmu.name = "amd_l2";
		format_attr_event_df = format_attr_event;
		format_attr_event_l3 = format_attr_event;
	}

	amd_nb_pmu.attr_groups = amd_uncore_attr_groups_df;
	amd_llc_pmu.attr_groups = amd_uncore_attr_groups_l3;

	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_nb) {
			ret = -ENOMEM;
			goto fail_nb;
		}
		ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
		if (ret)
			goto fail_nb;

		pr_info("AMD NB counters detected\n");
		ret = 0;
	}

	if (boot_cpu_has(X86_FEATURE_PERFCTR_LLC)) {
		amd_uncore_llc = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_llc) {
			ret = -ENOMEM;
			goto fail_llc;
		}
		ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1);
		if (ret)
			goto fail_llc;

		pr_info("AMD LLC counters detected\n");
		ret = 0;
	}

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 */
	if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
			      "perf/x86/amd/uncore:prepare",
			      amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
		goto fail_llc;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
			      "perf/x86/amd/uncore:starting",
			      amd_uncore_cpu_starting, NULL))
		goto fail_prep;
	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
			      "perf/x86/amd/uncore:online",
			      amd_uncore_cpu_online,
			      amd_uncore_cpu_down_prepare))
		goto fail_start;
	return 0;

fail_start:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
	cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_llc:
	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
		perf_pmu_unregister(&amd_nb_pmu);
	if (amd_uncore_llc)
		free_percpu(amd_uncore_llc);
fail_nb:
	if (amd_uncore_nb)
		free_percpu(amd_uncore_nb);

	return ret;
}
device_initcall(amd_uncore_init);