// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support for s390x - CPU-measurement Counter Facility
 *
 *  Copyright IBM Corp. 2012, 2017
 *  Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */
#define KMSG_COMPONENT  "cpum_cf"
#define pr_fmt(fmt)     KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <asm/ctl_reg.h>
#include <asm/irq.h>
#include <asm/cpu_mf.h>

enum cpumf_ctr_set {
        CPUMF_CTR_SET_BASIC   = 0,    /* Basic Counter Set */
        CPUMF_CTR_SET_USER    = 1,    /* Problem-State Counter Set */
        CPUMF_CTR_SET_CRYPTO  = 2,    /* Crypto-Activity Counter Set */
        CPUMF_CTR_SET_EXT     = 3,    /* Extended Counter Set */
        CPUMF_CTR_SET_MT_DIAG = 4,    /* MT-diagnostic Counter Set */

        /* Maximum number of counter sets */
        CPUMF_CTR_SET_MAX,
};

#define CPUMF_LCCTL_ENABLE_SHIFT    16
#define CPUMF_LCCTL_ACTCTL_SHIFT     0
static const u64 cpumf_state_ctl[CPUMF_CTR_SET_MAX] = {
        [CPUMF_CTR_SET_BASIC]   = 0x02,
        [CPUMF_CTR_SET_USER]    = 0x04,
        [CPUMF_CTR_SET_CRYPTO]  = 0x08,
        [CPUMF_CTR_SET_EXT]     = 0x01,
        [CPUMF_CTR_SET_MT_DIAG] = 0x20,
};

static void ctr_set_enable(u64 *state, int ctr_set)
{
        *state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT;
}
static void ctr_set_disable(u64 *state, int ctr_set)
{
        *state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT);
}
static void ctr_set_start(u64 *state, int ctr_set)
{
        *state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT;
}
static void ctr_set_stop(u64 *state, int ctr_set)
{
        *state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT);
}
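
/*
 * Example (a sketch derived from the helpers above): enabling and
 * starting the basic counter set (control bit 0x02) yields
 * state = (0x02 << CPUMF_LCCTL_ENABLE_SHIFT) | 0x02 = 0x00020002,
 * i.e. lcctl() takes the enable controls at bit positions 16 and up
 * and the activation (start/stop) controls at positions 0-15.
 */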

/* Local CPUMF event structure */
struct cpu_hw_events {
        struct cpumf_ctr_info   info;
        atomic_t                ctr_set[CPUMF_CTR_SET_MAX];
        u64                     state, tx_state;
        unsigned int            flags;
        unsigned int            txn_flags;
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
        .ctr_set = {
                [CPUMF_CTR_SET_BASIC]   = ATOMIC_INIT(0),
                [CPUMF_CTR_SET_USER]    = ATOMIC_INIT(0),
                [CPUMF_CTR_SET_CRYPTO]  = ATOMIC_INIT(0),
                [CPUMF_CTR_SET_EXT]     = ATOMIC_INIT(0),
                [CPUMF_CTR_SET_MT_DIAG] = ATOMIC_INIT(0),
        },
        .state = 0,
        .flags = 0,
        .txn_flags = 0,
};

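/*
 * Map a counter number to its counter set.  For example, counter 0
 * (CPU cycles) and counter 1 (instructions) belong to the basic set,
 * while counters 32 and 33 are their problem-state counterparts (see
 * the cpumf_generic_events_* tables below).
 */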
static enum cpumf_ctr_set get_counter_set(u64 event)
{
        int set = CPUMF_CTR_SET_MAX;

        if (event < 32)
                set = CPUMF_CTR_SET_BASIC;
        else if (event < 64)
                set = CPUMF_CTR_SET_USER;
        else if (event < 128)
                set = CPUMF_CTR_SET_CRYPTO;
        else if (event < 256)
                set = CPUMF_CTR_SET_EXT;
        else if (event >= 448 && event < 496)
                set = CPUMF_CTR_SET_MT_DIAG;

        return set;
}

static int validate_ctr_version(const struct hw_perf_event *hwc)
{
        struct cpu_hw_events *cpuhw;
        int err = 0;
        u16 mtdiag_ctl;

        cpuhw = &get_cpu_var(cpu_hw_events);

        /* check required version for counter sets */
        switch (hwc->config_base) {
        case CPUMF_CTR_SET_BASIC:
        case CPUMF_CTR_SET_USER:
                if (cpuhw->info.cfvn < 1)
                        err = -EOPNOTSUPP;
                break;
        case CPUMF_CTR_SET_CRYPTO:
        case CPUMF_CTR_SET_EXT:
                if (cpuhw->info.csvn < 1)
                        err = -EOPNOTSUPP;
                if ((cpuhw->info.csvn == 1 && hwc->config > 159) ||
                    (cpuhw->info.csvn == 2 && hwc->config > 175) ||
                    (cpuhw->info.csvn  > 2 && hwc->config > 255))
                        err = -EOPNOTSUPP;
                break;
        case CPUMF_CTR_SET_MT_DIAG:
                if (cpuhw->info.csvn <= 3)
                        err = -EOPNOTSUPP;
                /*
                 * MT-diagnostic counters are read-only.  The counter set
                 * is automatically enabled and activated on all CPUs with
                 * multithreading (SMT).  Deactivation of multithreading
                 * also disables the counter set.  State changes are ignored
                 * by lcctl().  Because Linux controls SMT enablement through
                 * a kernel parameter only, the counter set is either disabled
                 * or enabled and active.
                 *
                 * Thus, the counters can only be used if SMT is on and the
                 * counter set is enabled and active.
                 */
                mtdiag_ctl = cpumf_state_ctl[CPUMF_CTR_SET_MT_DIAG];
                if (!((cpuhw->info.auth_ctl & mtdiag_ctl) &&
                      (cpuhw->info.enable_ctl & mtdiag_ctl) &&
                      (cpuhw->info.act_ctl & mtdiag_ctl)))
                        err = -EOPNOTSUPP;
                break;
        }

        put_cpu_var(cpu_hw_events);
        return err;
}

static int validate_ctr_auth(const struct hw_perf_event *hwc)
{
        struct cpu_hw_events *cpuhw;
        u64 ctrs_state;
        int err = 0;

        cpuhw = &get_cpu_var(cpu_hw_events);

        /* Check authorization for CPU counter sets.
         * If the particular CPU counter set is not authorized,
         * return with -ENOENT in order to fall back to other
         * PMUs that might be able to satisfy the event request.
         */
        ctrs_state = cpumf_state_ctl[hwc->config_base];
        if (!(ctrs_state & cpuhw->info.auth_ctl))
                err = -ENOENT;

        put_cpu_var(cpu_hw_events);
        return err;
}

/*
 * Change the CPUMF state to active.
 * Enable and activate the CPU-counter sets according
 * to the per-cpu control state.
 */
static void cpumf_pmu_enable(struct pmu *pmu)
{
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
        int err;

        if (cpuhw->flags & PMU_F_ENABLED)
                return;

        err = lcctl(cpuhw->state);
        if (err) {
                pr_err("Enabling the performance measuring unit "
                       "failed with rc=%x\n", err);
                return;
        }

        cpuhw->flags |= PMU_F_ENABLED;
}

/*
 * Change the CPUMF state to inactive.
 * Clear the activation control for the CPU-counter sets while
 * keeping them enabled, according to the per-cpu control state.
 */
static void cpumf_pmu_disable(struct pmu *pmu)
{
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
        int err;
        u64 inactive;

        if (!(cpuhw->flags & PMU_F_ENABLED))
                return;

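        /* Keep the enable bits (positions 16 and up) and clear the
         * activation bits (positions 0-15): e.g. a state of 0x00020002
         * becomes 0x00020000, so the basic set stays enabled but stops
         * counting.
         */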
        inactive = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
        err = lcctl(inactive);
        if (err) {
                pr_err("Disabling the performance measuring unit "
                       "failed with rc=%x\n", err);
                return;
        }

        cpuhw->flags &= ~PMU_F_ENABLED;
}


/* Number of perf events counting hardware events */
static atomic_t num_events = ATOMIC_INIT(0);
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/* CPU-measurement alerts for the counter facility */
static void cpumf_measurement_alert(struct ext_code ext_code,
                                    unsigned int alert, unsigned long unused)
{
        struct cpu_hw_events *cpuhw;

        if (!(alert & CPU_MF_INT_CF_MASK))
                return;

        inc_irq_stat(IRQEXT_CMC);
        cpuhw = this_cpu_ptr(&cpu_hw_events);

        /* Measurement alerts are shared and might happen when the PMU
         * is not reserved.  Ignore these alerts in this case. */
        if (!(cpuhw->flags & PMU_F_RESERVED))
                return;

        /* counter authorization change alert */
        if (alert & CPU_MF_INT_CF_CACA)
                qctri(&cpuhw->info);

        /* loss of counter data alert */
        if (alert & CPU_MF_INT_CF_LCDA)
                pr_err("CPU[%i] Counter data was lost\n", smp_processor_id());

        /* loss of MT counter data alert */
        if (alert & CPU_MF_INT_CF_MTDA)
                pr_warn("CPU[%i] MT counter data was lost\n",
                        smp_processor_id());
}

#define PMC_INIT      0
#define PMC_RELEASE   1
static void setup_pmc_cpu(void *flags)
{
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

        switch (*((int *) flags)) {
        case PMC_INIT:
                memset(&cpuhw->info, 0, sizeof(cpuhw->info));
                qctri(&cpuhw->info);
                cpuhw->flags |= PMU_F_RESERVED;
                break;

        case PMC_RELEASE:
                cpuhw->flags &= ~PMU_F_RESERVED;
                break;
        }

        /* Disable CPU counter sets */
        lcctl(0);
}

/* Initialize the CPU-measurement facility */
static int reserve_pmc_hardware(void)
{
        int flags = PMC_INIT;

        on_each_cpu(setup_pmc_cpu, &flags, 1);
        irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);

        return 0;
}

/* Release the CPU-measurement facility */
static void release_pmc_hardware(void)
{
        int flags = PMC_RELEASE;

        on_each_cpu(setup_pmc_cpu, &flags, 1);
        irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
}

/* Release the PMU if event is the last perf event */
static void hw_perf_event_destroy(struct perf_event *event)
{
        if (!atomic_add_unless(&num_events, -1, 1)) {
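                /* atomic_add_unless() refused to decrement, i.e.
                 * num_events was 1 and this might be the last event;
                 * recheck and release the hardware under the mutex to
                 * avoid racing with a concurrent reserve.
                 */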
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_dec_return(&num_events) == 0)
                        release_pmc_hardware();
                mutex_unlock(&pmc_reserve_mutex);
        }
}

/* CPUMF <-> perf event mappings for kernel+userspace (basic set) */
static const int cpumf_generic_events_basic[] = {
        [PERF_COUNT_HW_CPU_CYCLES]          = 0,
        [PERF_COUNT_HW_INSTRUCTIONS]        = 1,
        [PERF_COUNT_HW_CACHE_REFERENCES]    = -1,
        [PERF_COUNT_HW_CACHE_MISSES]        = -1,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
        [PERF_COUNT_HW_BRANCH_MISSES]       = -1,
        [PERF_COUNT_HW_BUS_CYCLES]          = -1,
};
/* CPUMF <-> perf event mappings for userspace (problem-state set) */
static const int cpumf_generic_events_user[] = {
        [PERF_COUNT_HW_CPU_CYCLES]          = 32,
        [PERF_COUNT_HW_INSTRUCTIONS]        = 33,
        [PERF_COUNT_HW_CACHE_REFERENCES]    = -1,
        [PERF_COUNT_HW_CACHE_MISSES]        = -1,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
        [PERF_COUNT_HW_BRANCH_MISSES]       = -1,
        [PERF_COUNT_HW_BUS_CYCLES]          = -1,
};
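
/*
 * Usage sketch (an illustration, not part of this file): the PMU is
 * registered with type PERF_TYPE_RAW below, so the raw event number
 * is simply the counter number.  Assuming a counter facility that
 * authorizes the basic set, e.g.
 *
 *      perf stat -e r0 -e r1 -- true
 *
 * would count CPU cycles (counter 0) and instructions (counter 1).
 */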

static int __hw_perf_event_init(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        struct hw_perf_event *hwc = &event->hw;
        enum cpumf_ctr_set set;
        int err = 0;
        u64 ev;

        switch (attr->type) {
        case PERF_TYPE_RAW:
                /* Raw events are used to access counters directly,
                 * hence do not permit excludes */
                if (attr->exclude_kernel || attr->exclude_user ||
                    attr->exclude_hv)
                        return -EOPNOTSUPP;
                ev = attr->config;
                break;

        case PERF_TYPE_HARDWARE:
                ev = attr->config;
                /* Count user space (problem-state) only */
                if (!attr->exclude_user && attr->exclude_kernel) {
                        if (ev >= ARRAY_SIZE(cpumf_generic_events_user))
                                return -EOPNOTSUPP;
                        ev = cpumf_generic_events_user[ev];

                /* No support for kernel space counters only */
                } else if (!attr->exclude_kernel && attr->exclude_user) {
                        return -EOPNOTSUPP;

                /* Count user and kernel space */
                } else {
                        if (ev >= ARRAY_SIZE(cpumf_generic_events_basic))
                                return -EOPNOTSUPP;
                        ev = cpumf_generic_events_basic[ev];
                }
                break;

        default:
                return -ENOENT;
        }

        if (ev == -1)
                return -ENOENT;

        if (ev > PERF_CPUM_CF_MAX_CTR)
                return -EINVAL;

        /* Obtain the counter set to which the specified counter belongs */
        set = get_counter_set(ev);
        switch (set) {
        case CPUMF_CTR_SET_BASIC:
        case CPUMF_CTR_SET_USER:
        case CPUMF_CTR_SET_CRYPTO:
        case CPUMF_CTR_SET_EXT:
        case CPUMF_CTR_SET_MT_DIAG:
                /*
                 * Use the hardware perf event structure to store the
                 * counter number in the 'config' member and the counter
                 * set number in the 'config_base'.  The counter set number
                 * is then later used to enable/disable the counter(s).
                 */
                hwc->config = ev;
                hwc->config_base = set;
                break;
        case CPUMF_CTR_SET_MAX:
                /* The counter could not be associated to a counter set */
                return -EINVAL;
        }

        /* Initialize for using the CPU-measurement counter facility */
        if (!atomic_inc_not_zero(&num_events)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_read(&num_events) == 0 && reserve_pmc_hardware())
                        err = -EBUSY;
                else
                        atomic_inc(&num_events);
                mutex_unlock(&pmc_reserve_mutex);
                /* Bail out before the validation steps below would
                 * overwrite the -EBUSY error code. */
                if (err)
                        return err;
        }
        event->destroy = hw_perf_event_destroy;

        /* Finally, validate version and authorization of the counter set */
        err = validate_ctr_auth(hwc);
        if (!err)
                err = validate_ctr_version(hwc);

        return err;
}

static int cpumf_pmu_event_init(struct perf_event *event)
{
        int err;

        switch (event->attr.type) {
        case PERF_TYPE_HARDWARE:
        case PERF_TYPE_HW_CACHE:
        case PERF_TYPE_RAW:
                err = __hw_perf_event_init(event);
                break;
        default:
                return -ENOENT;
        }

        if (unlikely(err) && event->destroy)
                event->destroy(event);

        return err;
}

static int hw_perf_event_reset(struct perf_event *event)
{
        u64 prev, new;
        int err;

        do {
                prev = local64_read(&event->hw.prev_count);
                err = ecctr(event->hw.config, &new);
                if (err) {
                        if (err != 3)
                                break;
                        /* The counter is not (yet) available. This
                         * might happen if the counter set to which
                         * this counter belongs is in the disabled
                         * state.
                         */
                        new = 0;
                }
        } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);

        return err;
}

static void hw_perf_event_update(struct perf_event *event)
{
        u64 prev, new, delta;
        int err;

        do {
                prev = local64_read(&event->hw.prev_count);
                err = ecctr(event->hw.config, &new);
                if (err)
                        return;
        } while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);

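        /* Worked example (a sketch): counters wrap at 2^64.  If
         * prev = 0xfffffffffffffffe and new = 0x1, the overflow branch
         * computes (-1ULL - prev) + new + 1 = 1 + 1 + 1 = 3, the number
         * of increments across the wrap.
         */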
        delta = (prev <= new) ? new - prev
                              : (-1ULL - prev) + new + 1;        /* overflow */
        local64_add(delta, &event->count);
}

static void cpumf_pmu_read(struct perf_event *event)
{
        if (event->hw.state & PERF_HES_STOPPED)
                return;

        hw_perf_event_update(event);
}

static void cpumf_pmu_start(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;

        if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
                return;

        if (WARN_ON_ONCE(hwc->config == -1))
                return;

        if (flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

        hwc->state = 0;

        /* (Re-)enable and activate the counter set */
        ctr_set_enable(&cpuhw->state, hwc->config_base);
        ctr_set_start(&cpuhw->state, hwc->config_base);

        /* The counter set to which this counter belongs may already be
         * active.  Because all counters in a set are active, the
         * event->hw.prev_count needs to be synchronized.  At this point,
         * the counter set can be in the inactive or disabled state.
         */
        hw_perf_event_reset(event);

        /* increment refcount for this counter set */
        atomic_inc(&cpuhw->ctr_set[hwc->config_base]);
}
static void cpumf_pmu_stop(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;

        if (!(hwc->state & PERF_HES_STOPPED)) {
                /* Decrement reference count for this counter set and if this
                 * is the last used counter in the set, clear activation
                 * control and set the counter set state to inactive.
                 */
                if (!atomic_dec_return(&cpuhw->ctr_set[hwc->config_base]))
                        ctr_set_stop(&cpuhw->state, hwc->config_base);
                event->hw.state |= PERF_HES_STOPPED;
        }

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                hw_perf_event_update(event);
                event->hw.state |= PERF_HES_UPTODATE;
        }
}

static int cpumf_pmu_add(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

        /* Check authorization for the counter set to which this
         * counter belongs.
         * For group event transactions, the authorization check is
         * done in cpumf_pmu_commit_txn().
         */
        if (!(cpuhw->txn_flags & PERF_PMU_TXN_ADD))
                if (validate_ctr_auth(&event->hw))
                        return -ENOENT;

        ctr_set_enable(&cpuhw->state, event->hw.config_base);
        event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

        if (flags & PERF_EF_START)
                cpumf_pmu_start(event, PERF_EF_RELOAD);

        perf_event_update_userpage(event);

        return 0;
}

static void cpumf_pmu_del(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

        cpumf_pmu_stop(event, PERF_EF_UPDATE);

        /* Check if any counter in the counter set is still used.  If not used,
         * change the counter set to the disabled state.  This also clears the
         * content of all counters in the set.
         *
         * When a new perf event has been added but not yet started, this can
         * clear enable control and reset all counters in a set.  Therefore,
         * cpumf_pmu_start() always has to re-enable a counter set.
         */
        if (!atomic_read(&cpuhw->ctr_set[event->hw.config_base]))
                ctr_set_disable(&cpuhw->state, event->hw.config_base);

        perf_event_update_userpage(event);
}

/*
 * Start a group events scheduling transaction.
 * Set flags to perform a single test at commit time.
 *
 * We only support PERF_PMU_TXN_ADD transactions. Save the
 * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
 * transactions.  The perf core drives the sequence start_txn ->
 * add (for each event in the group) -> commit_txn, or cancel_txn
 * if scheduling fails.
 */
static void cpumf_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
{
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

        WARN_ON_ONCE(cpuhw->txn_flags);         /* txn already in flight */

        cpuhw->txn_flags = txn_flags;
        if (txn_flags & ~PERF_PMU_TXN_ADD)
                return;

        perf_pmu_disable(pmu);
        cpuhw->tx_state = cpuhw->state;
}

/*
 * Stop and cancel a group events scheduling transaction.
 * Assumes cpumf_pmu_del() is called for each successful
 * cpumf_pmu_add() during the transaction.
 */
static void cpumf_pmu_cancel_txn(struct pmu *pmu)
{
        unsigned int txn_flags;
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

        WARN_ON_ONCE(!cpuhw->txn_flags);        /* no txn in flight */

        txn_flags = cpuhw->txn_flags;
        cpuhw->txn_flags = 0;
        if (txn_flags & ~PERF_PMU_TXN_ADD)
                return;

        WARN_ON(cpuhw->tx_state != cpuhw->state);

        perf_pmu_enable(pmu);
}

/*
 * Commit the group events scheduling transaction.  On success, the
 * transaction is closed.  On error, the transaction is kept open
 * until cpumf_pmu_cancel_txn() is called.
 */
static int cpumf_pmu_commit_txn(struct pmu *pmu)
{
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
        u64 state;

        WARN_ON_ONCE(!cpuhw->txn_flags);        /* no txn in flight */

        if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) {
                cpuhw->txn_flags = 0;
                return 0;
        }

        /* check if the updated state can be scheduled */
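        /* Example (a sketch): a state of 0x00020002 reduces to the
         * enable bits 0x0002 after the mask and shift; every counter
         * set enabled by the transaction must also appear in the
         * authorization controls (auth_ctl).
         */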
        state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
        state >>= CPUMF_LCCTL_ENABLE_SHIFT;
        if ((state & cpuhw->info.auth_ctl) != state)
                return -ENOENT;

        cpuhw->txn_flags = 0;
        perf_pmu_enable(pmu);
        return 0;
}

/* Performance monitoring unit for s390x */
static struct pmu cpumf_pmu = {
        .task_ctx_nr  = perf_sw_context,
        .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
        .pmu_enable   = cpumf_pmu_enable,
        .pmu_disable  = cpumf_pmu_disable,
        .event_init   = cpumf_pmu_event_init,
        .add          = cpumf_pmu_add,
        .del          = cpumf_pmu_del,
        .start        = cpumf_pmu_start,
        .stop         = cpumf_pmu_stop,
        .read         = cpumf_pmu_read,
        .start_txn    = cpumf_pmu_start_txn,
        .commit_txn   = cpumf_pmu_commit_txn,
        .cancel_txn   = cpumf_pmu_cancel_txn,
};

static int cpumf_pmf_setup(unsigned int cpu, int flags)
{
        local_irq_disable();
        setup_pmc_cpu(&flags);
        local_irq_enable();
        return 0;
}

static int s390_pmu_online_cpu(unsigned int cpu)
{
        return cpumf_pmf_setup(cpu, PMC_INIT);
}

static int s390_pmu_offline_cpu(unsigned int cpu)
{
        return cpumf_pmf_setup(cpu, PMC_RELEASE);
}

static int __init cpumf_pmu_init(void)
{
        int rc;

        if (!cpum_cf_avail())
                return -ENODEV;

        /* clear bit 48 of cr0 to unauthorize problem-state to
         * extract measurement counters */
        ctl_clear_bit(0, 48);

        /* register handler for measurement-alert interruptions */
        rc = register_external_irq(EXT_IRQ_MEASURE_ALERT,
                                   cpumf_measurement_alert);
        if (rc) {
                pr_err("Registering for CPU-measurement alerts "
                       "failed with rc=%i\n", rc);
                return rc;
        }

        cpumf_pmu.attr_groups = cpumf_cf_event_group();
        rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
        if (rc) {
                pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
                unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
                                        cpumf_measurement_alert);
                return rc;
        }
        return cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE,
                                 "perf/s390/cf:online",
                                 s390_pmu_online_cpu, s390_pmu_offline_cpu);
}
early_initcall(cpumf_pmu_init);