KVM: x86/pmu: mask the result of rdpmc according to the width of the counters
arch/x86/kvm/vmx/pmu_intel.c
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

static struct kvm_event_hw_type_mapping intel_arch_events[] = {
        /* Index must match CPUID 0x0A.EBX bit vector */
        [0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
        [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
        [2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
        [3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
        [4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
        [5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
        [6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
        [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* mapping between fixed pmc index and intel_arch_events array */
static int fixed_pmc_events[] = {1, 0, 7};
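/*
 * Spelled out (indices refer to intel_arch_events[] above):
 * fixed counter 0 -> [1] PERF_COUNT_HW_INSTRUCTIONS   (instructions retired),
 * fixed counter 1 -> [0] PERF_COUNT_HW_CPU_CYCLES     (unhalted core cycles),
 * fixed counter 2 -> [7] PERF_COUNT_HW_REF_CPU_CYCLES (unhalted ref cycles).
 */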

static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
        int i;

        for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
                u8 new_ctrl = fixed_ctrl_field(data, i);
                u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
                struct kvm_pmc *pmc;

                pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

                if (old_ctrl == new_ctrl)
                        continue;

                reprogram_fixed_counter(pmc, new_ctrl, i);
        }

        pmu->fixed_ctr_ctrl = data;
}

/* Called when the global control register has been updated. */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
        int bit;
        u64 diff = pmu->global_ctrl ^ data;

        pmu->global_ctrl = data;

        for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
                reprogram_counter(pmu, bit);
}
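/*
 * Only counters whose enable bit actually toggled get reprogrammed: a
 * guest write changing GLOBAL_CTRL from 0x3 to 0x1 yields diff == 0x2,
 * so only PMC1 is reprogrammed while PMC0 keeps running undisturbed.
 */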

static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
                                      u8 event_select,
                                      u8 unit_mask)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
                if (intel_arch_events[i].eventsel == event_select
                    && intel_arch_events[i].unit_mask == unit_mask
                    && (pmu->available_event_types & (1 << i)))
                        break;

        if (i == ARRAY_SIZE(intel_arch_events))
                return PERF_COUNT_HW_MAX;

        return intel_arch_events[i].event_type;
}

static unsigned intel_find_fixed_event(int idx)
{
        if (idx >= ARRAY_SIZE(fixed_pmc_events))
                return PERF_COUNT_HW_MAX;

        return intel_arch_events[fixed_pmc_events[idx]].event_type;
}

/* check if a PMC is enabled by comparing it with global_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
        if (pmc_idx < INTEL_PMC_IDX_FIXED)
                return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
                                  MSR_P6_EVNTSEL0);
        else {
                u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

                return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
        }
}
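/*
 * The split above follows the flat PMC index space: GP counters occupy
 * pmc_idx 0..nr_arch_gp_counters-1 and fixed counters start at
 * INTEL_PMC_IDX_FIXED, mirroring the enable-bit layout of
 * IA32_PERF_GLOBAL_CTRL (fixed counter enables live in the high half).
 */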

/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        bool fixed = idx & (1u << 30);

        idx &= ~(3u << 30);

        return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
                (fixed && idx >= pmu->nr_arch_fixed_counters);
}
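/*
 * The idx checked above is the guest's RDPMC ECX value: bit 30 set
 * selects the fixed-function counter class, and the low bits index a
 * counter within that class.  Bit 31 (the "fast read" flag) is handled
 * in the common kvm_pmu_rdpmc() path, which is why both top bits are
 * stripped before the range check.
 */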

static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
                                            unsigned idx, u64 *mask)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        bool fixed = idx & (1u << 30);
        struct kvm_pmc *counters;

        idx &= ~(3u << 30);
        if (!fixed && idx >= pmu->nr_arch_gp_counters)
                return NULL;
        if (fixed && idx >= pmu->nr_arch_fixed_counters)
                return NULL;
        counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
        *mask &= pmu->counter_bitmask[fixed ? KVM_PMC_FIXED : KVM_PMC_GP];

        return &counters[idx];
}
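/*
 * The *mask narrowing above is what the commit subject refers to: the
 * common rdpmc path seeds the mask with all ones (32 bits of ones in
 * fast mode) and applies whatever this helper leaves in it to the raw
 * count, so the guest reads a value truncated to the emulated counter
 * width.  A sketch of that call site, not verbatim (see kvm_pmu_rdpmc()
 * in pmu.c):
 *
 *	u64 mask = fast_mode ? ~0u : ~0ull;
 *	pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx, &mask);
 *	if (!pmc)
 *		return 1;
 *	*data = pmc_read_counter(pmc) & mask;
 *
 * With a 48-bit GP counter, counter_bitmask[KVM_PMC_GP] is
 * 0x0000ffffffffffff, so only the low 48 bits reach the guest.
 */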

static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int ret;

        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
        case MSR_CORE_PERF_GLOBAL_STATUS:
        case MSR_CORE_PERF_GLOBAL_CTRL:
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                ret = pmu->version > 1;
                break;
        default:
                ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
                        get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
                        get_fixed_pmc(pmu, msr);
                break;
        }

        return ret;
}
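/*
 * The version check above matches the architectural PMU: the fixed
 * counter control and the global status/ctrl/ovf-ctrl MSRs were
 * introduced with PMU version 2, so a guest advertised version 1
 * takes #GP on them.
 */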

static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;

        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
                *data = pmu->fixed_ctr_ctrl;
                return 0;
        case MSR_CORE_PERF_GLOBAL_STATUS:
                *data = pmu->global_status;
                return 0;
        case MSR_CORE_PERF_GLOBAL_CTRL:
                *data = pmu->global_ctrl;
                return 0;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                *data = pmu->global_ovf_ctrl;
                return 0;
        default:
                if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0))) {
                        u64 val = pmc_read_counter(pmc);
                        *data = val & pmu->counter_bitmask[KVM_PMC_GP];
                        return 0;
                } else if ((pmc = get_fixed_pmc(pmu, msr))) {
                        u64 val = pmc_read_counter(pmc);
                        *data = val & pmu->counter_bitmask[KVM_PMC_FIXED];
                        return 0;
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
                        *data = pmc->eventsel;
                        return 0;
                }
        }

        return 1;
}
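/*
 * Counter reads through RDMSR are truncated with the same
 * counter_bitmask[] used on the rdpmc path, so both read paths agree
 * on the guest-visible counter width.
 */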

static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;
        u64 data = msr_info->data;

        switch (msr) {
        case MSR_CORE_PERF_FIXED_CTR_CTRL:
                if (pmu->fixed_ctr_ctrl == data)
                        return 0;
                if (!(data & 0xfffffffffffff444ull)) {
                        reprogram_fixed_counters(pmu, data);
                        return 0;
                }
                break;
        case MSR_CORE_PERF_GLOBAL_STATUS:
                if (msr_info->host_initiated) {
                        pmu->global_status = data;
                        return 0;
                }
                break; /* RO MSR */
        case MSR_CORE_PERF_GLOBAL_CTRL:
                if (pmu->global_ctrl == data)
                        return 0;
                if (!(data & pmu->global_ctrl_mask)) {
                        global_ctrl_changed(pmu, data);
                        return 0;
                }
                break;
        case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
                if (!(data & pmu->global_ovf_ctrl_mask)) {
                        if (!msr_info->host_initiated)
                                pmu->global_status &= ~data;
                        pmu->global_ovf_ctrl = data;
                        return 0;
                }
                break;
        default:
                if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
                    (pmc = get_fixed_pmc(pmu, msr))) {
                        if (!msr_info->host_initiated)
                                data = (s64)(s32)data;
                        pmc->counter += data - pmc_read_counter(pmc);
                        return 0;
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
                        if (data == pmc->eventsel)
                                return 0;
                        if (!(data & pmu->reserved_bits)) {
                                reprogram_gp_counter(pmc, data);
                                return 0;
                        }
                }
        }

        return 1;
}
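/*
 * Two details of the counter-write path above: guest WRMSRs to the
 * counter MSRs are sign-extended from 32 bits, matching hardware
 * behavior without full-width writes, while host-initiated writes
 * (e.g. live migration restoring state) keep all 64 bits.  The write
 * is then applied as a delta against the current perf-event count
 * rather than stored directly, since the backing perf event keeps
 * running.
 */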

static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_cpuid_entry2 *entry;
        union cpuid10_eax eax;
        union cpuid10_edx edx;

        pmu->nr_arch_gp_counters = 0;
        pmu->nr_arch_fixed_counters = 0;
        pmu->counter_bitmask[KVM_PMC_GP] = 0;
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
        pmu->version = 0;
        pmu->reserved_bits = 0xffffffff00200000ull;

        entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
        if (!entry)
                return;
        eax.full = entry->eax;
        edx.full = entry->edx;

        pmu->version = eax.split.version_id;
        if (!pmu->version)
                return;

        pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
                                        INTEL_PMC_MAX_GENERIC);
        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
        pmu->available_event_types = ~entry->ebx &
                                        ((1ull << eax.split.mask_length) - 1);

        if (pmu->version == 1) {
                pmu->nr_arch_fixed_counters = 0;
        } else {
                pmu->nr_arch_fixed_counters =
                        min_t(int, edx.split.num_counters_fixed,
                                INTEL_PMC_MAX_FIXED);
                pmu->counter_bitmask[KVM_PMC_FIXED] =
                        ((u64)1 << edx.split.bit_width_fixed) - 1;
        }

        pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
                (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
        pmu->global_ctrl_mask = ~pmu->global_ctrl;
        pmu->global_ovf_ctrl_mask = pmu->global_ctrl_mask
                        & ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
                            MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
        if (kvm_x86_ops->pt_supported())
                pmu->global_ovf_ctrl_mask &=
                                ~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;

        entry = kvm_find_cpuid_entry(vcpu, 7, 0);
        if (entry &&
            (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
            (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
                pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
}
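/*
 * Worked example of the sizing above: a guest CPUID 0xA advertising 4
 * GP counters of 48 bits and 3 fixed counters of 48 bits yields
 *
 *	counter_bitmask[KVM_PMC_GP] = (1ull << 48) - 1 = 0x0000ffffffffffff
 *	global_ctrl                 = 0xf | (0x7ull << 32) = 0x70000000f
 *	global_ctrl_mask            = ~0x70000000full
 *
 * so any guest write setting a GLOBAL_CTRL bit outside the advertised
 * counters is rejected by intel_pmu_set_msr().
 */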

static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
                pmu->gp_counters[i].type = KVM_PMC_GP;
                pmu->gp_counters[i].vcpu = vcpu;
                pmu->gp_counters[i].idx = i;
        }

        for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
                pmu->fixed_counters[i].type = KVM_PMC_FIXED;
                pmu->fixed_counters[i].vcpu = vcpu;
                pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
        }
}

static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int i;

        for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
                struct kvm_pmc *pmc = &pmu->gp_counters[i];

                pmc_stop_counter(pmc);
                pmc->counter = pmc->eventsel = 0;
        }

        for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
                pmc_stop_counter(&pmu->fixed_counters[i]);

        pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
                pmu->global_ovf_ctrl = 0;
}

struct kvm_pmu_ops intel_pmu_ops = {
        .find_arch_event = intel_find_arch_event,
        .find_fixed_event = intel_find_fixed_event,
        .pmc_is_enabled = intel_pmc_is_enabled,
        .pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
        .msr_idx_to_pmc = intel_msr_idx_to_pmc,
        .is_valid_msr_idx = intel_is_valid_msr_idx,
        .is_valid_msr = intel_is_valid_msr,
        .get_msr = intel_pmu_get_msr,
        .set_msr = intel_pmu_set_msr,
        .refresh = intel_pmu_refresh,
        .init = intel_pmu_init,
        .reset = intel_pmu_reset,
};