// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance event support - Freescale Embedded Performance Monitor
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 * Copyright 2010 Freescale Semiconductor, Inc.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg_fsl_emb.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>

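/* Per-CPU state for the events currently scheduled on this CPU's PMU. */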
struct cpu_hw_events {
        int n_events;
        int disabled;
        u8  pmcs_enabled;
        struct perf_event *event[MAX_HWEVENTS];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

static struct fsl_emb_pmu *ppmu;

/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

static void perf_event_interrupt(struct pt_regs *regs);

/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
        unsigned long val;

        switch (idx) {
        case 0:
                val = mfpmr(PMRN_PMC0);
                break;
        case 1:
                val = mfpmr(PMRN_PMC1);
                break;
        case 2:
                val = mfpmr(PMRN_PMC2);
                break;
        case 3:
                val = mfpmr(PMRN_PMC3);
                break;
        case 4:
                val = mfpmr(PMRN_PMC4);
                break;
        case 5:
                val = mfpmr(PMRN_PMC5);
                break;
        default:
                printk(KERN_ERR "oops trying to read PMC%d\n", idx);
                val = 0;
        }
        return val;
}

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
        switch (idx) {
        case 0:
                mtpmr(PMRN_PMC0, val);
                break;
        case 1:
                mtpmr(PMRN_PMC1, val);
                break;
        case 2:
                mtpmr(PMRN_PMC2, val);
                break;
        case 3:
                mtpmr(PMRN_PMC3, val);
                break;
        case 4:
                mtpmr(PMRN_PMC4, val);
                break;
        case 5:
                mtpmr(PMRN_PMC5, val);
                break;
        default:
                printk(KERN_ERR "oops trying to write PMC%d\n", idx);
        }

        isync();
}

/*
 * Write one local control A register
 */
static void write_pmlca(int idx, unsigned long val)
{
        switch (idx) {
        case 0:
                mtpmr(PMRN_PMLCA0, val);
                break;
        case 1:
                mtpmr(PMRN_PMLCA1, val);
                break;
        case 2:
                mtpmr(PMRN_PMLCA2, val);
                break;
        case 3:
                mtpmr(PMRN_PMLCA3, val);
                break;
        case 4:
                mtpmr(PMRN_PMLCA4, val);
                break;
        case 5:
                mtpmr(PMRN_PMLCA5, val);
                break;
        default:
                printk(KERN_ERR "oops trying to write PMLCA%d\n", idx);
        }

        isync();
}

/*
 * Write one local control B register
 */
static void write_pmlcb(int idx, unsigned long val)
{
        switch (idx) {
        case 0:
                mtpmr(PMRN_PMLCB0, val);
                break;
        case 1:
                mtpmr(PMRN_PMLCB1, val);
                break;
        case 2:
                mtpmr(PMRN_PMLCB2, val);
                break;
        case 3:
                mtpmr(PMRN_PMLCB3, val);
                break;
        case 4:
                mtpmr(PMRN_PMLCB4, val);
                break;
        case 5:
                mtpmr(PMRN_PMLCB5, val);
                break;
        default:
                printk(KERN_ERR "oops trying to write PMLCB%d\n", idx);
        }

        isync();
}

static void fsl_emb_pmu_read(struct perf_event *event)
{
        s64 val, delta, prev;

        if (event->hw.state & PERF_HES_STOPPED)
                return;

        /*
         * Performance monitor interrupts come even when interrupts
         * are soft-disabled, as long as interrupts are hard-enabled.
         * Therefore we treat them like NMIs.
         */
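        /*
         * The interrupt handler can update prev_count underneath us,
         * so re-read the counter until prev_count is stable and the
         * cmpxchg succeeds.
         */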
        do {
                prev = local64_read(&event->hw.prev_count);
                barrier();
                val = read_pmc(event->hw.idx);
        } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

        /* The counters are only 32 bits wide */
        delta = (val - prev) & 0xfffffffful;
        local64_add(delta, &event->count);
        local64_sub(delta, &event->hw.period_left);
}

/*
 * Disable all events to prevent PMU interrupts and to allow
 * events to be added or removed.
 */
static void fsl_emb_pmu_disable(struct pmu *pmu)
{
        struct cpu_hw_events *cpuhw;
        unsigned long flags;

        local_irq_save(flags);
        cpuhw = this_cpu_ptr(&cpu_hw_events);

        if (!cpuhw->disabled) {
                cpuhw->disabled = 1;

                /*
                 * Check if we ever enabled the PMU on this cpu.
                 */
                if (!cpuhw->pmcs_enabled) {
                        ppc_enable_pmcs();
                        cpuhw->pmcs_enabled = 1;
                }

                if (atomic_read(&num_events)) {
                        /*
                         * Set the 'freeze all counters' bit, and disable
                         * interrupts.  The barrier is to make sure the
                         * mtpmr has been executed and the PMU has frozen
                         * the events before we return.
                         */

                        mtpmr(PMRN_PMGC0, PMGC0_FAC);
                        isync();
                }
        }
        local_irq_restore(flags);
}

/*
 * Re-enable all events if disable == 0.
 * If we were previously disabled and events were added, then
 * put the new config on the PMU.
 */
static void fsl_emb_pmu_enable(struct pmu *pmu)
{
        struct cpu_hw_events *cpuhw;
        unsigned long flags;

        local_irq_save(flags);
        cpuhw = this_cpu_ptr(&cpu_hw_events);
        if (!cpuhw->disabled)
                goto out;

        cpuhw->disabled = 0;
        ppc_set_pmu_inuse(cpuhw->n_events != 0);

        if (cpuhw->n_events > 0) {
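                /*
                 * PMGC0_PMIE enables performance monitor interrupts;
                 * PMGC0_FCECE freezes all counters when an enabled
                 * condition or event occurs.
                 */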
                mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
                isync();
        }

 out:
        local_irq_restore(flags);
}

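/*
 * Collect the hardware events in a group (leader plus active siblings)
 * into ctrs[]; returns their number, or -1 if it exceeds max_count.
 */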
static int collect_events(struct perf_event *group, int max_count,
                          struct perf_event *ctrs[])
{
        int n = 0;
        struct perf_event *event;

        if (!is_software_event(group)) {
                if (n >= max_count)
                        return -1;
                ctrs[n] = group;
                n++;
        }
        for_each_sibling_event(event, group) {
                if (!is_software_event(event) &&
                    event->state != PERF_EVENT_STATE_OFF) {
                        if (n >= max_count)
                                return -1;
                        ctrs[n] = event;
                        n++;
                }
        }
        return n;
}

/* context locked on entry */
static int fsl_emb_pmu_add(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuhw;
        int ret = -EAGAIN;
        int num_counters = ppmu->n_counter;
        u64 val;
        int i;

        perf_pmu_disable(event->pmu);
        cpuhw = &get_cpu_var(cpu_hw_events);

        if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
                num_counters = ppmu->n_restricted;

        /*
         * Allocate counters from top-down, so that restricted-capable
         * counters are kept free as long as possible.
         */
        for (i = num_counters - 1; i >= 0; i--) {
                if (cpuhw->event[i])
                        continue;

                break;
        }

        if (i < 0)
                goto out;

        event->hw.idx = i;
        cpuhw->event[i] = event;
        ++cpuhw->n_events;

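        /*
         * The PMC interrupts when bit 31 goes from 0 to 1, so start a
         * sampling counter at 0x80000000 - left: it then overflows
         * after "left" more events.
         */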
        val = 0;
        if (event->hw.sample_period) {
                s64 left = local64_read(&event->hw.period_left);
                if (left < 0x80000000L)
                        val = 0x80000000L - left;
        }
        local64_set(&event->hw.prev_count, val);

        if (unlikely(!(flags & PERF_EF_START))) {
                event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
                val = 0;
        } else {
                event->hw.state &= ~(PERF_HES_STOPPED | PERF_HES_UPTODATE);
        }

        write_pmc(i, val);
        perf_event_update_userpage(event);

        write_pmlcb(i, event->hw.config >> 32);
        write_pmlca(i, event->hw.config_base);

        ret = 0;
 out:
        put_cpu_var(cpu_hw_events);
        perf_pmu_enable(event->pmu);
        return ret;
}

/* context locked on entry */
static void fsl_emb_pmu_del(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuhw;
        int i = event->hw.idx;

        perf_pmu_disable(event->pmu);
        if (i < 0)
                goto out;

        fsl_emb_pmu_read(event);

        cpuhw = &get_cpu_var(cpu_hw_events);

        WARN_ON(event != cpuhw->event[event->hw.idx]);

        write_pmlca(i, 0);
        write_pmlcb(i, 0);
        write_pmc(i, 0);

        cpuhw->event[i] = NULL;
        event->hw.idx = -1;

        /*
         * TODO: if at least one restricted event exists, and we
         * just freed up a non-restricted-capable counter, and
         * there is a restricted-capable counter occupied by
         * a non-restricted event, migrate that event to the
         * vacated counter.
         */

        cpuhw->n_events--;

 out:
        perf_pmu_enable(event->pmu);
        put_cpu_var(cpu_hw_events);
}

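/*
 * Start a stopped counter: clear its STOPPED/UPTODATE state and
 * reprogram the PMC from the remaining sample period.
 */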
static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags)
{
        unsigned long flags;
        unsigned long val;
        s64 left;

        if (event->hw.idx < 0 || !event->hw.sample_period)
                return;

        if (!(event->hw.state & PERF_HES_STOPPED))
                return;

        if (ef_flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

        local_irq_save(flags);
        perf_pmu_disable(event->pmu);

        event->hw.state = 0;
        left = local64_read(&event->hw.period_left);
        val = 0;
        if (left < 0x80000000L)
                val = 0x80000000L - left;
        write_pmc(event->hw.idx, val);

        perf_event_update_userpage(event);
        perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
}

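/*
 * Stop a counter: fold its current value into event->count, mark it
 * stopped and up to date, and reset the PMC to 0.
 */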
static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags)
{
        unsigned long flags;

        if (event->hw.idx < 0 || !event->hw.sample_period)
                return;

        if (event->hw.state & PERF_HES_STOPPED)
                return;

        local_irq_save(flags);
        perf_pmu_disable(event->pmu);

        fsl_emb_pmu_read(event);
        event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
        write_pmc(event->hw.idx, 0);

        perf_event_update_userpage(event);
        perf_pmu_enable(event->pmu);
        local_irq_restore(flags);
}

/*
 * Release the PMU if this is the last perf_event.
 */
static void hw_perf_event_destroy(struct perf_event *event)
{
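        /* Fast path: just drop a reference unless we are the last user. */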
        if (!atomic_add_unless(&num_events, -1, 1)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_dec_return(&num_events) == 0)
                        release_pmc_hardware();
                mutex_unlock(&pmc_reserve_mutex);
        }
}

/*
 * Translate a generic cache event_id config to a raw event_id code.
 */
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
        unsigned long type, op, result;
        int ev;

        if (!ppmu->cache_events)
                return -EINVAL;

        /* unpack config */
        type = config & 0xff;
        op = (config >> 8) & 0xff;
        result = (config >> 16) & 0xff;

        if (type >= PERF_COUNT_HW_CACHE_MAX ||
            op >= PERF_COUNT_HW_CACHE_OP_MAX ||
            result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

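        /*
         * In the cache_events table, 0 means the combination is not
         * counted by this PMU and -1 means it is not meaningful.
         */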
        ev = (*ppmu->cache_events)[type][op][result];
        if (ev == 0)
                return -EOPNOTSUPP;
        if (ev == -1)
                return -EINVAL;
        *eventp = ev;
        return 0;
}

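/*
 * Map the generic perf attributes onto a hardware event code, check
 * group constraints, and reserve the PMC hardware on first use.
 */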
static int fsl_emb_pmu_event_init(struct perf_event *event)
{
        u64 ev;
        struct perf_event *events[MAX_HWEVENTS];
        int n;
        int err;
        int num_restricted;
        int i;

        if (ppmu->n_counter > MAX_HWEVENTS) {
                WARN(1, "No. of perf counters (%d) is higher than max array size(%d)\n",
                        ppmu->n_counter, MAX_HWEVENTS);
                ppmu->n_counter = MAX_HWEVENTS;
        }

        switch (event->attr.type) {
        case PERF_TYPE_HARDWARE:
                ev = event->attr.config;
                if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
                        return -EOPNOTSUPP;
                ev = ppmu->generic_events[ev];
                break;

        case PERF_TYPE_HW_CACHE:
                err = hw_perf_cache_event(event->attr.config, &ev);
                if (err)
                        return err;
                break;

        case PERF_TYPE_RAW:
                ev = event->attr.config;
                break;

        default:
                return -ENOENT;
        }

        event->hw.config = ppmu->xlate_event(ev);
        if (!(event->hw.config & FSL_EMB_EVENT_VALID))
                return -EINVAL;

        /*
         * If this is in a group, check if it can go on with all the
         * other hardware events in the group.  We assume the event
         * hasn't been linked into its leader's sibling list at this point.
         */
        n = 0;
        if (event->group_leader != event) {
                n = collect_events(event->group_leader,
                                   ppmu->n_counter - 1, events);
                if (n < 0)
                        return -EINVAL;
        }

        if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
                num_restricted = 0;
                for (i = 0; i < n; i++) {
                        if (events[i]->hw.config & FSL_EMB_EVENT_RESTRICTED)
                                num_restricted++;
                }

                if (num_restricted >= ppmu->n_restricted)
                        return -EINVAL;
        }

        event->hw.idx = -1;

        event->hw.config_base = PMLCA_CE | PMLCA_FCM1 |
                                (u32)((ev << 16) & PMLCA_EVENT_MASK);

        if (event->attr.exclude_user)
                event->hw.config_base |= PMLCA_FCU;
        if (event->attr.exclude_kernel)
                event->hw.config_base |= PMLCA_FCS;
        if (event->attr.exclude_idle)
                return -ENOTSUPP;

        event->hw.last_period = event->hw.sample_period;
        local64_set(&event->hw.period_left, event->hw.last_period);

        /*
         * See if we need to reserve the PMU.
         * If no events are currently in use, then we have to take a
         * mutex to ensure that we don't race with another task doing
         * reserve_pmc_hardware or release_pmc_hardware.
         */
        err = 0;
        if (!atomic_inc_not_zero(&num_events)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_read(&num_events) == 0 &&
                    reserve_pmc_hardware(perf_event_interrupt))
                        err = -EBUSY;
                else
                        atomic_inc(&num_events);
                mutex_unlock(&pmc_reserve_mutex);

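                /* Freeze all counters until fsl_emb_pmu_enable() runs. */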
                mtpmr(PMRN_PMGC0, PMGC0_FAC);
                isync();
        }
        event->destroy = hw_perf_event_destroy;

        return err;
}

static struct pmu fsl_emb_pmu = {
        .pmu_enable     = fsl_emb_pmu_enable,
        .pmu_disable    = fsl_emb_pmu_disable,
        .event_init     = fsl_emb_pmu_event_init,
        .add            = fsl_emb_pmu_add,
        .del            = fsl_emb_pmu_del,
        .start          = fsl_emb_pmu_start,
        .stop           = fsl_emb_pmu_stop,
        .read           = fsl_emb_pmu_read,
};

/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_event *event, unsigned long val,
                               struct pt_regs *regs)
{
        u64 period = event->hw.sample_period;
        s64 prev, delta, left;
        int record = 0;

        if (event->hw.state & PERF_HES_STOPPED) {
                write_pmc(event->hw.idx, 0);
                return;
        }

        /* we don't have to worry about interrupts here */
        prev = local64_read(&event->hw.prev_count);
        delta = (val - prev) & 0xfffffffful;
        local64_add(delta, &event->count);

        /*
         * See if the total period for this event has expired,
         * and update for the next period.
         */
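        /*
         * As in fsl_emb_pmu_add(), program 0x80000000 - left so the
         * counter next interrupts after "left" events; non-sampling
         * events simply restart from 0.
         */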
        val = 0;
        left = local64_read(&event->hw.period_left) - delta;
        if (period) {
                if (left <= 0) {
                        left += period;
                        if (left <= 0)
                                left = period;
                        record = 1;
                        event->hw.last_period = event->hw.sample_period;
                }
                if (left < 0x80000000LL)
                        val = 0x80000000LL - left;
        }

        write_pmc(event->hw.idx, val);
        local64_set(&event->hw.prev_count, val);
        local64_set(&event->hw.period_left, left);
        perf_event_update_userpage(event);

        /*
         * Finally record data if requested.
         */
        if (record) {
                struct perf_sample_data data;

                perf_sample_data_init(&data, 0, event->hw.last_period);

                if (perf_event_overflow(event, &data, regs))
                        fsl_emb_pmu_stop(event, 0);
        }
}

static void perf_event_interrupt(struct pt_regs *regs)
{
        int i;
        struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
        struct perf_event *event;
        unsigned long val;
        int found = 0;

        for (i = 0; i < ppmu->n_counter; ++i) {
                event = cpuhw->event[i];

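                /* Bit 31 set (negative as a 32-bit value) means overflow. */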
                val = read_pmc(i);
                if ((int)val < 0) {
                        if (event) {
                                /* event has overflowed */
                                found = 1;
                                record_and_restart(event, val, regs);
                        } else {
                                /*
                                 * Disabled counter is negative,
                                 * reset it just in case.
                                 */
                                write_pmc(i, 0);
                        }
                }
        }

        /* PMM will keep counters frozen until we return from the interrupt. */
        mtmsr(mfmsr() | MSR_PMM);
        mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
        isync();
}

void hw_perf_event_setup(int cpu)
{
        struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

        memset(cpuhw, 0, sizeof(*cpuhw));
}

int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
{
        if (ppmu)
                return -EBUSY;          /* something's already registered */

        ppmu = pmu;
        pr_info("%s performance monitor hardware support registered\n",
                pmu->name);

        perf_pmu_register(&fsl_emb_pmu, "cpu", PERF_TYPE_RAW);

        return 0;
}