06723671ae4e91d53b7327b44fe96b588826d838
[sfrench/cifs-2.6.git] / arch / x86 / events / msr.c
1 #include <linux/perf_event.h>
2 #include <asm/intel-family.h>
3
/*
 * Index of each supported free-running counter.  The numeric value doubles
 * as the perf "event=0xNN" encoding exposed through sysfs (see the
 * PMU_EVENT_ATTR_STRING() definitions below), so the two must stay in sync.
 */
enum perf_msr_id {
	PERF_MSR_TSC			= 0,
	PERF_MSR_APERF			= 1,
	PERF_MSR_MPERF			= 2,
	PERF_MSR_PPERF			= 3,
	PERF_MSR_SMI			= 4,
	PERF_MSR_PTSC			= 5,
	PERF_MSR_IRPERF			= 6,

	PERF_MSR_EVENT_MAX,
};
15
16 static bool test_aperfmperf(int idx)
17 {
18         return boot_cpu_has(X86_FEATURE_APERFMPERF);
19 }
20
21 static bool test_ptsc(int idx)
22 {
23         return boot_cpu_has(X86_FEATURE_PTSC);
24 }
25
26 static bool test_irperf(int idx)
27 {
28         return boot_cpu_has(X86_FEATURE_IRPERF);
29 }
30
/*
 * MSR_SMI_COUNT and MSR_PPERF are not enumerated by CPUID, so support is
 * determined by an explicit whitelist of Intel family-6 models.  Returns
 * true only when @idx names an MSR this model is known to implement.
 */
static bool test_intel(int idx)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
	    boot_cpu_data.x86 != 6)
		return false;

	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_NEHALEM_G:
	case INTEL_FAM6_NEHALEM_EP:
	case INTEL_FAM6_NEHALEM_EX:
	/* fallthrough */
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_WESTMERE_EP:
	case INTEL_FAM6_WESTMERE_EX:
	/* fallthrough */
	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_SANDYBRIDGE_X:
	/* fallthrough */
	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE_X:
	/* fallthrough */
	case INTEL_FAM6_HASWELL_CORE:
	case INTEL_FAM6_HASWELL_X:
	case INTEL_FAM6_HASWELL_ULT:
	case INTEL_FAM6_HASWELL_GT3E:
	/* fallthrough */
	case INTEL_FAM6_BROADWELL_CORE:
	case INTEL_FAM6_BROADWELL_XEON_D:
	case INTEL_FAM6_BROADWELL_GT3E:
	case INTEL_FAM6_BROADWELL_X:
	/* fallthrough */
	case INTEL_FAM6_ATOM_SILVERMONT1:
	case INTEL_FAM6_ATOM_SILVERMONT2:
	case INTEL_FAM6_ATOM_AIRMONT:
	/* fallthrough */
	case INTEL_FAM6_ATOM_GOLDMONT:
	case INTEL_FAM6_ATOM_DENVERTON:
	/* fallthrough */
	case INTEL_FAM6_ATOM_GEMINI_LAKE:
	/* fallthrough */
	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
		/* These models implement the SMI count MSR only. */
		if (idx == PERF_MSR_SMI)
			return true;
		break;

	case INTEL_FAM6_SKYLAKE_MOBILE:
	case INTEL_FAM6_SKYLAKE_DESKTOP:
	case INTEL_FAM6_SKYLAKE_X:
	case INTEL_FAM6_KABYLAKE_MOBILE:
	case INTEL_FAM6_KABYLAKE_DESKTOP:
		/* Skylake/Kabylake additionally implement MSR_PPERF. */
		if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
			return true;
		break;
	}

	/* Unknown model, or a model that lacks the requested MSR. */
	return false;
}
90
/*
 * Per-event descriptor tying an MSR address to its sysfs attribute and an
 * optional availability probe.
 */
struct perf_msr {
	u64	msr;				/* MSR address; 0 means "use RDTSC" */
	struct	perf_pmu_events_attr *attr;	/* sysfs event attr; NULLed if probe fails */
	bool	(*test)(int idx);		/* availability check, NULL = always present */
};
96
/*
 * sysfs event strings; the event=0xNN value must match the corresponding
 * enum perf_msr_id index above.
 */
PMU_EVENT_ATTR_STRING(tsc,    evattr_tsc,    "event=0x00");
PMU_EVENT_ATTR_STRING(aperf,  evattr_aperf,  "event=0x01");
PMU_EVENT_ATTR_STRING(mperf,  evattr_mperf,  "event=0x02");
PMU_EVENT_ATTR_STRING(pperf,  evattr_pperf,  "event=0x03");
PMU_EVENT_ATTR_STRING(smi,    evattr_smi,    "event=0x04");
PMU_EVENT_ATTR_STRING(ptsc,   evattr_ptsc,   "event=0x05");
PMU_EVENT_ATTR_STRING(irperf, evattr_irperf, "event=0x06");
104
/*
 * Master table, indexed by enum perf_msr_id.  TSC uses msr == 0 (read via
 * RDTSC in msr_read_counter()) and a NULL test since availability is
 * checked up front in msr_init().
 */
static struct perf_msr msr[] = {
	[PERF_MSR_TSC]    = { 0,		&evattr_tsc,	NULL,		 },
	[PERF_MSR_APERF]  = { MSR_IA32_APERF,	&evattr_aperf,	test_aperfmperf, },
	[PERF_MSR_MPERF]  = { MSR_IA32_MPERF,	&evattr_mperf,	test_aperfmperf, },
	[PERF_MSR_PPERF]  = { MSR_PPERF,	&evattr_pperf,	test_intel,	 },
	[PERF_MSR_SMI]    = { MSR_SMI_COUNT,	&evattr_smi,	test_intel,	 },
	[PERF_MSR_PTSC]   = { MSR_F15H_PTSC,	&evattr_ptsc,	test_ptsc,	 },
	[PERF_MSR_IRPERF] = { MSR_F17H_IRPERF,	&evattr_irperf,	test_irperf,	 },
};
114
/*
 * Events attribute array, populated at boot by msr_init() with only the
 * MSRs that probed successfully.  Sized +1 for the NULL terminator.
 */
static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = {
	NULL,
};

static struct attribute_group events_attr_group = {
	.name = "events",
	.attrs = events_attrs,
};

/* Single "event" format field covering the whole config word. */
PMU_FORMAT_ATTR(event, "config:0-63");
static struct attribute *format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};
static struct attribute_group format_attr_group = {
	.name = "format",
	.attrs = format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&events_attr_group,
	&format_attr_group,
	NULL,
};
139
140 static int msr_event_init(struct perf_event *event)
141 {
142         u64 cfg = event->attr.config;
143
144         if (event->attr.type != event->pmu->type)
145                 return -ENOENT;
146
147         if (cfg >= PERF_MSR_EVENT_MAX)
148                 return -EINVAL;
149
150         /* unsupported modes and filters */
151         if (event->attr.exclude_user   ||
152             event->attr.exclude_kernel ||
153             event->attr.exclude_hv     ||
154             event->attr.exclude_idle   ||
155             event->attr.exclude_host   ||
156             event->attr.exclude_guest  ||
157             event->attr.sample_period) /* no sampling */
158                 return -EINVAL;
159
160         if (!msr[cfg].attr)
161                 return -EINVAL;
162
163         event->hw.idx = -1;
164         event->hw.event_base = msr[cfg].msr;
165         event->hw.config = cfg;
166
167         return 0;
168 }
169
170 static inline u64 msr_read_counter(struct perf_event *event)
171 {
172         u64 now;
173
174         if (event->hw.event_base)
175                 rdmsrl(event->hw.event_base, now);
176         else
177                 rdtscll(now);
178
179         return now;
180 }
/*
 * Fold the delta since the last snapshot into event->count.
 * Lock-free: an NMI may run this concurrently, hence the cmpxchg retry
 * loop on prev_count.
 */
static void msr_event_update(struct perf_event *event)
{
	u64 prev, now;
	s64 delta;

	/* Careful, an NMI might modify the previous event value. */
again:
	prev = local64_read(&event->hw.prev_count);
	now = msr_read_counter(event);

	/* Retry if someone (an NMI) updated prev_count underneath us. */
	if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;
	/*
	 * SMI count appears to be a 32-bit counter; sign-extend the low
	 * 32 bits of the delta so a counter wrap still yields the correct
	 * small positive increment.  (NOTE: width assumption — confirm
	 * against the SDM for MSR_SMI_COUNT.)
	 */
	if (unlikely(event->hw.event_base == MSR_SMI_COUNT))
		delta = sign_extend64(delta, 31);

	local64_add(delta, &event->count);
}
200
201 static void msr_event_start(struct perf_event *event, int flags)
202 {
203         u64 now;
204
205         now = msr_read_counter(event);
206         local64_set(&event->hw.prev_count, now);
207 }
208
/* Stop counting: fold the outstanding delta into event->count. */
static void msr_event_stop(struct perf_event *event, int flags)
{
	msr_event_update(event);
}
213
/* Remove the event; final update so no counted time is lost. */
static void msr_event_del(struct perf_event *event, int flags)
{
	msr_event_stop(event, PERF_EF_UPDATE);
}
218
/*
 * "Add" the event to the PMU.  The counters are free-running in hardware,
 * so there is nothing to program — just prime the baseline if asked to
 * start immediately.  Cannot fail.
 */
static int msr_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		msr_event_start(event, flags);

	return 0;
}
226
/*
 * The MSR PMU.  Free-running counters cannot interrupt, hence
 * PERF_PMU_CAP_NO_INTERRUPT (no sampling support).
 */
static struct pmu pmu_msr = {
	.task_ctx_nr	= perf_sw_context,
	.attr_groups	= attr_groups,
	.event_init	= msr_event_init,
	.add		= msr_event_add,
	.del		= msr_event_del,
	.start		= msr_event_start,
	.stop		= msr_event_stop,
	.read		= msr_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
};
238
239 static int __init msr_init(void)
240 {
241         int i, j = 0;
242
243         if (!boot_cpu_has(X86_FEATURE_TSC)) {
244                 pr_cont("no MSR PMU driver.\n");
245                 return 0;
246         }
247
248         /* Probe the MSRs. */
249         for (i = PERF_MSR_TSC + 1; i < PERF_MSR_EVENT_MAX; i++) {
250                 u64 val;
251
252                 /*
253                  * Virt sucks arse; you cannot tell if a R/O MSR is present :/
254                  */
255                 if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
256                         msr[i].attr = NULL;
257         }
258
259         /* List remaining MSRs in the sysfs attrs. */
260         for (i = 0; i < PERF_MSR_EVENT_MAX; i++) {
261                 if (msr[i].attr)
262                         events_attrs[j++] = &msr[i].attr->attr.attr;
263         }
264         events_attrs[j] = NULL;
265
266         perf_pmu_register(&pmu_msr, "msr", -1);
267
268         return 0;
269 }
270 device_initcall(msr_init);