Merge tag 'modules-for-v4.15' of git://git.kernel.org/pub/scm/linux/kernel/git/jeyu...
[sfrench/cifs-2.6.git] / arch / arm / kernel / perf_event_xscale.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * ARMv5 [xscale] Performance counter handling code.
4  *
5  * Copyright (C) 2010, ARM Ltd., Will Deacon <will.deacon@arm.com>
6  *
7  * Based on the previous xscale OProfile code.
8  *
9  * There are two variants of the xscale PMU that we support:
10  *      - xscale1pmu: 2 event counters and a cycle counter
11  *      - xscale2pmu: 4 event counters and a cycle counter
12  * The two variants share event definitions, but have different
13  * PMU structures.
14  */
15
16 #ifdef CONFIG_CPU_XSCALE
17
18 #include <asm/cputype.h>
19 #include <asm/irq_regs.h>
20
21 #include <linux/of.h>
22 #include <linux/perf/arm_pmu.h>
23 #include <linux/platform_device.h>
24
/*
 * Hardware event numbers programmed into the XScale event-select fields.
 * The last two entries are software conventions: CCNT marks a request for
 * the dedicated cycle counter and UNUSED parks a disabled counter on an
 * event number the hardware does not define.
 */
enum xscale_perf_types {
	XSCALE_PERFCTR_ICACHE_MISS		= 0x00,
	XSCALE_PERFCTR_ICACHE_NO_DELIVER	= 0x01,
	XSCALE_PERFCTR_DATA_STALL		= 0x02,
	XSCALE_PERFCTR_ITLB_MISS		= 0x03,
	XSCALE_PERFCTR_DTLB_MISS		= 0x04,
	XSCALE_PERFCTR_BRANCH			= 0x05,
	XSCALE_PERFCTR_BRANCH_MISS		= 0x06,
	XSCALE_PERFCTR_INSTRUCTION		= 0x07,
	XSCALE_PERFCTR_DCACHE_FULL_STALL	= 0x08,
	XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG	= 0x09,
	XSCALE_PERFCTR_DCACHE_ACCESS		= 0x0A,
	XSCALE_PERFCTR_DCACHE_MISS		= 0x0B,
	XSCALE_PERFCTR_DCACHE_WRITE_BACK	= 0x0C,
	XSCALE_PERFCTR_PC_CHANGED		= 0x0D,
	XSCALE_PERFCTR_BCU_REQUEST		= 0x10,
	XSCALE_PERFCTR_BCU_FULL			= 0x11,
	XSCALE_PERFCTR_BCU_DRAIN		= 0x12,
	XSCALE_PERFCTR_BCU_ECC_NO_ELOG		= 0x14,
	XSCALE_PERFCTR_BCU_1_BIT_ERR		= 0x15,
	XSCALE_PERFCTR_RMW			= 0x16,
	/* XSCALE_PERFCTR_CCNT is not hardware defined */
	XSCALE_PERFCTR_CCNT			= 0xFE,
	XSCALE_PERFCTR_UNUSED			= 0xFF,
};
50
/*
 * Counter indices used throughout this file: index 0 is always the cycle
 * counter; COUNTER2/COUNTER3 exist only on xscale2 (see file header).
 */
enum xscale_counters {
	XSCALE_CYCLE_COUNTER	= 0,
	XSCALE_COUNTER0,
	XSCALE_COUNTER1,
	XSCALE_COUNTER2,
	XSCALE_COUNTER3,
};
58
/*
 * Map generic perf hardware event IDs onto XScale event numbers.
 * Everything not listed here is marked unsupported by the
 * PERF_MAP_ALL_UNSUPPORTED initializer.
 */
static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= XSCALE_PERFCTR_CCNT,
	[PERF_COUNT_HW_INSTRUCTIONS]		= XSCALE_PERFCTR_INSTRUCTION,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= XSCALE_PERFCTR_BRANCH,
	[PERF_COUNT_HW_BRANCH_MISSES]		= XSCALE_PERFCTR_BRANCH_MISS,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= XSCALE_PERFCTR_ICACHE_NO_DELIVER,
};
67
/*
 * Map generic perf cache events onto XScale events.  The hardware has a
 * single D-cache access/miss pair, so reads and writes map to the same
 * event numbers; unlisted combinations are unsupported.
 */
static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					   [PERF_COUNT_HW_CACHE_OP_MAX]
					   [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,

	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
};
86
/*
 * PMNC (performance monitor control register) bits.
 *
 * Fix: XSCALE_PMU_RESET previously expanded to (CCNT_RESET | PMN_RESET),
 * which references identifiers that do not exist anywhere in the tree;
 * it only compiled because the macro was never expanded.  Use the
 * XSCALE_-prefixed names so the macro is actually usable.
 */
#define XSCALE_PMU_ENABLE	0x001	/* global PMU enable */
#define XSCALE_PMN_RESET	0x002	/* reset the event counters */
#define XSCALE_CCNT_RESET	0x004	/* reset the cycle counter */
#define XSCALE_PMU_RESET	(XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
#define XSCALE_PMU_CNT64	0x008	/* CCNT divide-by-64 mode; cleared in xscale2pmu_start() */
92
/*
 * xscale1 PMNC layout: the overflow flags, interrupt enables and the two
 * 8-bit event-select fields all live in the single PMNC register.
 */
#define XSCALE1_OVERFLOWED_MASK	0x700	/* any-counter-overflowed mask */
#define XSCALE1_CCOUNT_OVERFLOW	0x400
#define XSCALE1_COUNT0_OVERFLOW	0x100
#define XSCALE1_COUNT1_OVERFLOW	0x200
#define XSCALE1_CCOUNT_INT_EN	0x040
#define XSCALE1_COUNT0_INT_EN	0x010
#define XSCALE1_COUNT1_INT_EN	0x020
#define XSCALE1_COUNT0_EVT_SHFT	12
#define XSCALE1_COUNT0_EVT_MASK	(0xff << XSCALE1_COUNT0_EVT_SHFT)
#define XSCALE1_COUNT1_EVT_SHFT	20
#define XSCALE1_COUNT1_EVT_MASK	(0xff << XSCALE1_COUNT1_EVT_SHFT)
104
/* Read the xscale1 PMNC register (cp14, c0). */
static inline u32
xscale1pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
	return val;
}
112
/* Write the xscale1 PMNC register (cp14, c0), masking write-as-0 bits. */
static inline void
xscale1pmu_write_pmnc(u32 val)
{
	/* upper 4bits and 7, 11 are write-as-0 */
	val &= 0xffff77f;
	asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
}
120
121 static inline int
122 xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
123                                         enum xscale_counters counter)
124 {
125         int ret = 0;
126
127         switch (counter) {
128         case XSCALE_CYCLE_COUNTER:
129                 ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
130                 break;
131         case XSCALE_COUNTER0:
132                 ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
133                 break;
134         case XSCALE_COUNTER1:
135                 ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
136                 break;
137         default:
138                 WARN_ONCE(1, "invalid counter number (%d)\n", counter);
139         }
140
141         return ret;
142 }
143
/*
 * xscale1 PMU overflow interrupt handler: snapshot PMNC, disable the PMU
 * while processing, update/re-period every overflowed event, then
 * re-enable the PMU.  Returns IRQ_NONE if no overflow flag was set.
 */
static irqreturn_t
xscale1pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * NOTE: there's an A stepping erratum that states if an overflow
	 *       bit already exists and another occurs, the previous
	 *       Overflow bit gets cleared. There's no workaround.
	 *       Fixed in B stepping or later.
	 */
	pmnc = xscale1pmu_read_pmnc();

	/*
	 * Write the value back to clear the overflow flags. Overflow
	 * flags remain in pmnc for use below. We also disable the PMU
	 * while we process the interrupt.
	 */
	xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
		return IRQ_NONE;

	regs = get_irq_regs();

	/* Walk every counter slot and service the ones that overflowed. */
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!event)
			continue;

		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		/* No new period left: nothing to sample for this event. */
		if (!armpmu_event_set_period(event))
			continue;

		/* Throttled by perf core: stop this event's counter. */
		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	irq_work_run();

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}
204
/*
 * Program the counter assigned to @event (hwc->idx) into PMNC: set its
 * interrupt-enable bit and, for the general-purpose counters, its 8-bit
 * event-select field.  The read-modify-write of PMNC is done under
 * pmu_lock.
 */
static void xscale1pmu_enable_event(struct perf_event *event)
{
	unsigned long val, mask, evt, flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		/* Cycle counter has no event-select field to program. */
		mask = 0;
		evt = XSCALE1_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
			XSCALE1_COUNT0_INT_EN;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
			XSCALE1_COUNT1_INT_EN;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
240
/*
 * Reverse of xscale1pmu_enable_event(): clear the counter's interrupt
 * enable and park its event-select field on XSCALE_PERFCTR_UNUSED, under
 * pmu_lock.
 */
static void xscale1pmu_disable_event(struct perf_event *event)
{
	unsigned long val, mask, evt, flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		mask = XSCALE1_CCOUNT_INT_EN;
		evt = 0;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
274
275 static int
276 xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
277                                 struct perf_event *event)
278 {
279         struct hw_perf_event *hwc = &event->hw;
280         if (XSCALE_PERFCTR_CCNT == hwc->config_base) {
281                 if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
282                         return -EAGAIN;
283
284                 return XSCALE_CYCLE_COUNTER;
285         } else {
286                 if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask))
287                         return XSCALE_COUNTER1;
288
289                 if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask))
290                         return XSCALE_COUNTER0;
291
292                 return -EAGAIN;
293         }
294 }
295
/* Globally enable the xscale1 PMU (set XSCALE_PMU_ENABLE under pmu_lock). */
static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val |= XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
307
/* Globally disable the xscale1 PMU (clear XSCALE_PMU_ENABLE under pmu_lock). */
static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
319
/*
 * Read the current value of @event's counter from its cp14 register.
 * Returns 0 for a counter index this PMU variant does not have.
 */
static inline u32 xscale1pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
		break;
	}

	return val;
}
340
/*
 * Write @val into @event's counter via its cp14 register; silently
 * ignores a counter index this PMU variant does not have.
 */
static inline void xscale1pmu_write_counter(struct perf_event *event, u32 val)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
		break;
	}
}
358
/*
 * Map a perf event config onto an XScale event number using the tables
 * above; 0xFF is the raw-event mask (shared by both PMU variants).
 */
static int xscale_map_event(struct perf_event *event)
{
	return armpmu_map_event(event, &xscale_perf_map,
				&xscale_perf_cache_map, 0xFF);
}
364
/*
 * Populate the arm_pmu callbacks for the xscale1 variant: 3 counters
 * (cycle counter + 2 event counters), all 32 bits wide.
 */
static int xscale1pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name		= "armv5_xscale1";
	cpu_pmu->handle_irq	= xscale1pmu_handle_irq;
	cpu_pmu->enable		= xscale1pmu_enable_event;
	cpu_pmu->disable	= xscale1pmu_disable_event;
	cpu_pmu->read_counter	= xscale1pmu_read_counter;
	cpu_pmu->write_counter	= xscale1pmu_write_counter;
	cpu_pmu->get_event_idx	= xscale1pmu_get_event_idx;
	cpu_pmu->start		= xscale1pmu_start;
	cpu_pmu->stop		= xscale1pmu_stop;
	cpu_pmu->map_event	= xscale_map_event;
	cpu_pmu->num_events	= 3;
	cpu_pmu->max_period	= (1LLU << 32) - 1;

	return 0;
}
382
/*
 * xscale2 register bits.  Unlike xscale1, the overflow flags, interrupt
 * enables and event selects live in separate cp14 registers (see the
 * accessor helpers below), so the OVERFLOW and INT_EN values can share
 * the same bit positions.
 */
#define XSCALE2_OVERFLOWED_MASK	0x01f	/* any-counter-overflowed mask */
#define XSCALE2_CCOUNT_OVERFLOW	0x001
#define XSCALE2_COUNT0_OVERFLOW	0x002
#define XSCALE2_COUNT1_OVERFLOW	0x004
#define XSCALE2_COUNT2_OVERFLOW	0x008
#define XSCALE2_COUNT3_OVERFLOW	0x010
#define XSCALE2_CCOUNT_INT_EN	0x001
#define XSCALE2_COUNT0_INT_EN	0x002
#define XSCALE2_COUNT1_INT_EN	0x004
#define XSCALE2_COUNT2_INT_EN	0x008
#define XSCALE2_COUNT3_INT_EN	0x010
#define XSCALE2_COUNT0_EVT_SHFT	0
#define XSCALE2_COUNT0_EVT_MASK	(0xff << XSCALE2_COUNT0_EVT_SHFT)
#define XSCALE2_COUNT1_EVT_SHFT	8
#define XSCALE2_COUNT1_EVT_MASK	(0xff << XSCALE2_COUNT1_EVT_SHFT)
#define XSCALE2_COUNT2_EVT_SHFT	16
#define XSCALE2_COUNT2_EVT_MASK	(0xff << XSCALE2_COUNT2_EVT_SHFT)
#define XSCALE2_COUNT3_EVT_SHFT	24
#define XSCALE2_COUNT3_EVT_MASK	(0xff << XSCALE2_COUNT3_EVT_SHFT)
402
/* Read the xscale2 PMNC register (cp14 c0, c1), masking unpredictable bits. */
static inline u32
xscale2pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
	/* bits 1-2 and 4-23 are read-unpredictable */
	return val & 0xff000009;
}
411
/* Write the xscale2 PMNC register (cp14 c0, c1); only bits 0-3 are writable. */
static inline void
xscale2pmu_write_pmnc(u32 val)
{
	/* bits 4-23 are write-as-0, 24-31 are write ignored */
	val &= 0xf;
	asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
}
419
/* Read the xscale2 overflow flag status register (cp14 c5, c1). */
static inline u32
xscale2pmu_read_overflow_flags(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
	return val;
}
427
/* Write the xscale2 overflow flag status register (cp14 c5, c1). */
static inline void
xscale2pmu_write_overflow_flags(u32 val)
{
	asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
}
433
/* Read the xscale2 event select register (cp14 c8, c1). */
static inline u32
xscale2pmu_read_event_select(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
	return val;
}
441
/* Write the xscale2 event select register (cp14 c8, c1). */
static inline void
xscale2pmu_write_event_select(u32 val)
{
	asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val));
}
447
/* Read the xscale2 interrupt enable register (cp14 c4, c1). */
static inline u32
xscale2pmu_read_int_enable(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
	return val;
}
455
456 static void
457 xscale2pmu_write_int_enable(u32 val)
458 {
459         asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
460 }
461
462 static inline int
463 xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
464                                         enum xscale_counters counter)
465 {
466         int ret = 0;
467
468         switch (counter) {
469         case XSCALE_CYCLE_COUNTER:
470                 ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
471                 break;
472         case XSCALE_COUNTER0:
473                 ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
474                 break;
475         case XSCALE_COUNTER1:
476                 ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
477                 break;
478         case XSCALE_COUNTER2:
479                 ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
480                 break;
481         case XSCALE_COUNTER3:
482                 ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
483                 break;
484         default:
485                 WARN_ONCE(1, "invalid counter number (%d)\n", counter);
486         }
487
488         return ret;
489 }
490
/*
 * xscale2 PMU overflow interrupt handler.  Same structure as the
 * xscale1 handler, but the overflow flags live in a dedicated FLAG
 * register rather than in PMNC.
 */
static irqreturn_t
xscale2pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc, of_flags;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/* Disable the PMU. */
	pmnc = xscale2pmu_read_pmnc();
	xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	/* Check the overflow flag register. */
	of_flags = xscale2pmu_read_overflow_flags();
	if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
		return IRQ_NONE;

	/* Clear the overflow bits. */
	xscale2pmu_write_overflow_flags(of_flags);

	regs = get_irq_regs();

	/* Walk every counter slot and service the ones that overflowed. */
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!event)
			continue;

		if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		/* No new period left: nothing to sample for this event. */
		if (!armpmu_event_set_period(event))
			continue;

		/* Throttled by perf core: stop this event's counter. */
		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	irq_work_run();

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}
545
/*
 * Program @event's counter: set its interrupt-enable bit in INTEN and,
 * for the general-purpose counters, its 8-bit field in EVTSEL.  The two
 * register writes are done together under pmu_lock.
 */
static void xscale2pmu_enable_event(struct perf_event *event)
{
	unsigned long flags, ien, evtsel;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		/* Cycle counter has no event-select field to program. */
		ien |= XSCALE2_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		ien |= XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		ien |= XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
		break;
	case XSCALE_COUNTER2:
		ien |= XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
		break;
	case XSCALE_COUNTER3:
		ien |= XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
591
/*
 * Reverse of xscale2pmu_enable_event(): clear the counter's interrupt
 * enable, park its EVTSEL field on XSCALE_PERFCTR_UNUSED, and clear any
 * pending overflow flag for it, under pmu_lock.
 */
static void xscale2pmu_disable_event(struct perf_event *event)
{
	unsigned long flags, ien, evtsel, of_flags;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
	int idx = hwc->idx;

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien &= ~XSCALE2_CCOUNT_INT_EN;
		of_flags = XSCALE2_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ien &= ~XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
		of_flags = XSCALE2_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ien &= ~XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
		of_flags = XSCALE2_COUNT1_OVERFLOW;
		break;
	case XSCALE_COUNTER2:
		ien &= ~XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
		of_flags = XSCALE2_COUNT2_OVERFLOW;
		break;
	case XSCALE_COUNTER3:
		ien &= ~XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
		of_flags = XSCALE2_COUNT3_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	xscale2pmu_write_overflow_flags(of_flags);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
643
644 static int
645 xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
646                                 struct perf_event *event)
647 {
648         int idx = xscale1pmu_get_event_idx(cpuc, event);
649         if (idx >= 0)
650                 goto out;
651
652         if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
653                 idx = XSCALE_COUNTER3;
654         else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
655                 idx = XSCALE_COUNTER2;
656 out:
657         return idx;
658 }
659
/*
 * Globally enable the xscale2 PMU under pmu_lock, clearing the CNT64
 * mode bit so the cycle counter counts every cycle.
 */
static void xscale2pmu_start(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
	val |= XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
671
/* Globally disable the xscale2 PMU (clear XSCALE_PMU_ENABLE under pmu_lock). */
static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);

	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = xscale2pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
683
/*
 * Read the current value of @event's counter from its cp14 register.
 * Returns 0 for a counter index this PMU variant does not have.
 */
static inline u32 xscale2pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
		break;
	}

	return val;
}
710
/*
 * Write @val into @event's counter via its cp14 register; silently
 * ignores a counter index this PMU variant does not have.
 */
static inline void xscale2pmu_write_counter(struct perf_event *event, u32 val)
{
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
		break;
	}
}
734
/*
 * Populate the arm_pmu callbacks for the xscale2 variant: 5 counters
 * (cycle counter + 4 event counters), all 32 bits wide.
 */
static int xscale2pmu_init(struct arm_pmu *cpu_pmu)
{
	cpu_pmu->name		= "armv5_xscale2";
	cpu_pmu->handle_irq	= xscale2pmu_handle_irq;
	cpu_pmu->enable		= xscale2pmu_enable_event;
	cpu_pmu->disable	= xscale2pmu_disable_event;
	cpu_pmu->read_counter	= xscale2pmu_read_counter;
	cpu_pmu->write_counter	= xscale2pmu_write_counter;
	cpu_pmu->get_event_idx	= xscale2pmu_get_event_idx;
	cpu_pmu->start		= xscale2pmu_start;
	cpu_pmu->stop		= xscale2pmu_stop;
	cpu_pmu->map_event	= xscale_map_event;
	cpu_pmu->num_events	= 5;
	cpu_pmu->max_period	= (1LLU << 32) - 1;

	return 0;
}
752
/* CPUID-based probe table mapping XScale architecture revs to init functions. */
static const struct pmu_probe_info xscale_pmu_probe_table[] = {
	XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V1, xscale1pmu_init),
	XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V2, xscale2pmu_init),
	{ /* sentinel value */ }
};
758
/* Platform-device probe: defer to the arm_pmu core with our probe table. */
static int xscale_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, NULL, xscale_pmu_probe_table);
}
763
/* Platform driver matched by device name ("xscale-pmu"); no DT match table. */
static struct platform_driver xscale_pmu_driver = {
	.driver		= {
		.name	= "xscale-pmu",
	},
	.probe		= xscale_pmu_device_probe,
};
770
771 builtin_platform_driver(xscale_pmu_driver);
772 #endif  /* CONFIG_CPU_XSCALE */