/*
 * Copyright 2017 NXP
 * Copyright 2011,2016 Freescale Semiconductor, Inc.
 * Copyright 2011 Linaro Ltd.
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/perf_event.h>
#include <linux/slab.h>

#include "common.h"

#define MMDC_MAPSR              0x404
#define BP_MMDC_MAPSR_PSD       0
#define BP_MMDC_MAPSR_PSS       4

#define MMDC_MDMISC             0x18
#define BM_MMDC_MDMISC_DDR_TYPE 0x18
#define BP_MMDC_MDMISC_DDR_TYPE 0x3

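/*
 * Counter indices: event->attr.config selects one of the six fixed-purpose
 * profiling counters, each backed by its own status register (MADPSR0..5).
 */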
#define TOTAL_CYCLES            0x0
#define BUSY_CYCLES             0x1
#define READ_ACCESSES           0x2
#define WRITE_ACCESSES          0x3
#define READ_BYTES              0x4
#define WRITE_BYTES             0x5

/* MADPCR0 control bits: enable, reset, freeze, overflow, profile select */
#define DBG_DIS                 0x0
#define DBG_EN                  0x1
#define DBG_RST                 0x2
#define PRF_FRZ                 0x4
#define CYC_OVF                 0x8
#define PROFILE_SEL             0x10

#define MMDC_MADPCR0    0x410
#define MMDC_MADPCR1    0x414
#define MMDC_MADPSR0    0x418
#define MMDC_MADPSR1    0x41C
#define MMDC_MADPSR2    0x420
#define MMDC_MADPSR3    0x424
#define MMDC_MADPSR4    0x428
#define MMDC_MADPSR5    0x42C

#define MMDC_NUM_COUNTERS       6

#define MMDC_FLAG_PROFILE_SEL   0x1
#define MMDC_PRF_AXI_ID_CLEAR   0x0

#define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)

static int ddr_type;

struct fsl_mmdc_devtype_data {
        unsigned int flags;
};

static const struct fsl_mmdc_devtype_data imx6q_data = {
};

static const struct fsl_mmdc_devtype_data imx6qp_data = {
        .flags = MMDC_FLAG_PROFILE_SEL,
};

static const struct of_device_id imx_mmdc_dt_ids[] = {
        { .compatible = "fsl,imx6q-mmdc", .data = (void *)&imx6q_data},
        { .compatible = "fsl,imx6qp-mmdc", .data = (void *)&imx6qp_data},
        { /* sentinel */ }
};

#ifdef CONFIG_PERF_EVENTS

static enum cpuhp_state cpuhp_mmdc_state;
static DEFINE_IDA(mmdc_ida);

PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00")
PMU_EVENT_ATTR_STRING(busy-cycles, mmdc_pmu_busy_cycles, "event=0x01")
PMU_EVENT_ATTR_STRING(read-accesses, mmdc_pmu_read_accesses, "event=0x02")
PMU_EVENT_ATTR_STRING(write-accesses, mmdc_pmu_write_accesses, "event=0x03")
PMU_EVENT_ATTR_STRING(read-bytes, mmdc_pmu_read_bytes, "event=0x04")
PMU_EVENT_ATTR_STRING(read-bytes.unit, mmdc_pmu_read_bytes_unit, "MB");
PMU_EVENT_ATTR_STRING(read-bytes.scale, mmdc_pmu_read_bytes_scale, "0.000001");
PMU_EVENT_ATTR_STRING(write-bytes, mmdc_pmu_write_bytes, "event=0x05")
PMU_EVENT_ATTR_STRING(write-bytes.unit, mmdc_pmu_write_bytes_unit, "MB");
PMU_EVENT_ATTR_STRING(write-bytes.scale, mmdc_pmu_write_bytes_scale, "0.000001");
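/*
 * The .unit/.scale sysfs attributes above let the perf tool report the raw
 * byte counters scaled by 0.000001, i.e. in megabytes.
 */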

struct mmdc_pmu {
        struct pmu pmu;
        void __iomem *mmdc_base;
        cpumask_t cpu;
        struct hrtimer hrtimer;
        unsigned int active_events;
        struct device *dev;
        struct perf_event *mmdc_events[MMDC_NUM_COUNTERS];
        struct hlist_node node;
        struct fsl_mmdc_devtype_data *devtype_data;
};

/*
 * Polling period is set to one second; overflow of total-cycles (the fastest
 * increasing counter) takes ten seconds, so one second is safe.
 */
static unsigned int mmdc_pmu_poll_period_us = 1000000;

module_param_named(pmu_pmu_poll_period_us, mmdc_pmu_poll_period_us, uint,
                S_IRUGO | S_IWUSR);

static ktime_t mmdc_pmu_timer_period(void)
{
        return ns_to_ktime((u64)mmdc_pmu_poll_period_us * 1000);
}

static ssize_t mmdc_pmu_cpumask_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mmdc_pmu *pmu_mmdc = dev_get_drvdata(dev);

        return cpumap_print_to_pagebuf(true, buf, &pmu_mmdc->cpu);
}

static struct device_attribute mmdc_pmu_cpumask_attr =
        __ATTR(cpumask, S_IRUGO, mmdc_pmu_cpumask_show, NULL);

static struct attribute *mmdc_pmu_cpumask_attrs[] = {
        &mmdc_pmu_cpumask_attr.attr,
        NULL,
};

static struct attribute_group mmdc_pmu_cpumask_attr_group = {
        .attrs = mmdc_pmu_cpumask_attrs,
};

static struct attribute *mmdc_pmu_events_attrs[] = {
        &mmdc_pmu_total_cycles.attr.attr,
        &mmdc_pmu_busy_cycles.attr.attr,
        &mmdc_pmu_read_accesses.attr.attr,
        &mmdc_pmu_write_accesses.attr.attr,
        &mmdc_pmu_read_bytes.attr.attr,
        &mmdc_pmu_read_bytes_unit.attr.attr,
        &mmdc_pmu_read_bytes_scale.attr.attr,
        &mmdc_pmu_write_bytes.attr.attr,
        &mmdc_pmu_write_bytes_unit.attr.attr,
        &mmdc_pmu_write_bytes_scale.attr.attr,
        NULL,
};

static struct attribute_group mmdc_pmu_events_attr_group = {
        .name = "events",
        .attrs = mmdc_pmu_events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-63");
PMU_FORMAT_ATTR(axi_id, "config1:0-63");

static struct attribute *mmdc_pmu_format_attrs[] = {
        &format_attr_event.attr,
        &format_attr_axi_id.attr,
        NULL,
};

static struct attribute_group mmdc_pmu_format_attr_group = {
        .name = "format",
        .attrs = mmdc_pmu_format_attrs,
};

static const struct attribute_group *attr_groups[] = {
        &mmdc_pmu_events_attr_group,
        &mmdc_pmu_format_attr_group,
        &mmdc_pmu_cpumask_attr_group,
        NULL,
};

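/*
 * Map a counter index (event->attr.config) to its MADPSRn status register
 * and return the current raw 32-bit count.
 */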
static u32 mmdc_pmu_read_counter(struct mmdc_pmu *pmu_mmdc, int cfg)
{
        void __iomem *mmdc_base, *reg;

        mmdc_base = pmu_mmdc->mmdc_base;

        switch (cfg) {
        case TOTAL_CYCLES:
                reg = mmdc_base + MMDC_MADPSR0;
                break;
        case BUSY_CYCLES:
                reg = mmdc_base + MMDC_MADPSR1;
                break;
        case READ_ACCESSES:
                reg = mmdc_base + MMDC_MADPSR2;
                break;
        case WRITE_ACCESSES:
                reg = mmdc_base + MMDC_MADPSR3;
                break;
        case READ_BYTES:
                reg = mmdc_base + MMDC_MADPSR4;
                break;
        case WRITE_BYTES:
                reg = mmdc_base + MMDC_MADPSR5;
                break;
        default:
                return WARN_ONCE(1,
                        "invalid configuration %d for mmdc counter", cfg);
        }
        return readl(reg);
}

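/*
 * CPU hotplug callback: if the CPU that owns this PMU instance goes offline,
 * migrate the perf context to any other online CPU.
 */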
static int mmdc_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct mmdc_pmu *pmu_mmdc = hlist_entry_safe(node, struct mmdc_pmu, node);
        int target;

        if (!cpumask_test_and_clear_cpu(cpu, &pmu_mmdc->cpu))
                return 0;

        target = cpumask_any_but(cpu_online_mask, cpu);
        if (target >= nr_cpu_ids)
                return 0;

        perf_pmu_migrate_context(&pmu_mmdc->pmu, cpu, target);
        cpumask_set_cpu(target, &pmu_mmdc->cpu);

        return 0;
}

static bool mmdc_pmu_group_event_is_valid(struct perf_event *event,
                                          struct pmu *pmu,
                                          unsigned long *used_counters)
{
        int cfg = event->attr.config;

        if (is_software_event(event))
                return true;

        if (event->pmu != pmu)
                return false;

        return !test_and_set_bit(cfg, used_counters);
}

/*
 * Each event has a single fixed-purpose counter, so we can only have a
 * single active event for each at any point in time. Here we just check
 * for duplicates, and rely on mmdc_pmu_event_init to verify that the HW
 * event numbers are valid.
 */
static bool mmdc_pmu_group_is_valid(struct perf_event *event)
{
        struct pmu *pmu = event->pmu;
        struct perf_event *leader = event->group_leader;
        struct perf_event *sibling;
        unsigned long counter_mask = 0;

        set_bit(leader->attr.config, &counter_mask);

        if (event != leader) {
                if (!mmdc_pmu_group_event_is_valid(event, pmu, &counter_mask))
                        return false;
        }

        for_each_sibling_event(sibling, leader) {
                if (!mmdc_pmu_group_event_is_valid(sibling, pmu, &counter_mask))
                        return false;
        }

        return true;
}

static int mmdc_pmu_event_init(struct perf_event *event)
{
        struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
        int cfg = event->attr.config;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
                return -EOPNOTSUPP;

        if (event->cpu < 0) {
                dev_warn(pmu_mmdc->dev, "Can't provide per-task data!\n");
                return -EOPNOTSUPP;
        }

        if (event->attr.exclude_user            ||
                        event->attr.exclude_kernel      ||
                        event->attr.exclude_hv          ||
                        event->attr.exclude_idle        ||
                        event->attr.exclude_host        ||
                        event->attr.exclude_guest       ||
                        event->attr.sample_period)
                return -EINVAL;

        if (cfg < 0 || cfg >= MMDC_NUM_COUNTERS)
                return -EINVAL;

        if (!mmdc_pmu_group_is_valid(event))
                return -EINVAL;

        event->cpu = cpumask_first(&pmu_mmdc->cpu);
        return 0;
}

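/*
 * Fold the current hardware count into the 64-bit event count.  The hardware
 * counters are 32 bits wide, so the delta against prev_count is masked
 * accordingly; the cmpxchg loop keeps the update safe against a racing
 * update, e.g. from the polling hrtimer.
 */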
static void mmdc_pmu_event_update(struct perf_event *event)
{
        struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        u64 delta, prev_raw_count, new_raw_count;

        do {
                prev_raw_count = local64_read(&hwc->prev_count);
                new_raw_count = mmdc_pmu_read_counter(pmu_mmdc,
                                                      event->attr.config);
        } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                new_raw_count) != prev_raw_count);

        delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;

        local64_add(delta, &event->count);
}

static void mmdc_pmu_event_start(struct perf_event *event, int flags)
{
        struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        void __iomem *mmdc_base, *reg;
        u32 val;

        mmdc_base = pmu_mmdc->mmdc_base;
        reg = mmdc_base + MMDC_MADPCR0;

        /*
         * An hrtimer is required because the MMDC does not provide an
         * interrupt, so the counters have to be polled.
         */
        hrtimer_start(&pmu_mmdc->hrtimer, mmdc_pmu_timer_period(),
                        HRTIMER_MODE_REL_PINNED);

        local64_set(&hwc->prev_count, 0);

        writel(DBG_RST, reg);

        /*
         * Write the AXI id parameter to MADPCR1.
         */
        val = event->attr.config1;
        reg = mmdc_base + MMDC_MADPCR1;
        writel(val, reg);

        reg = mmdc_base + MMDC_MADPCR0;
        val = DBG_EN;
        if (pmu_mmdc->devtype_data->flags & MMDC_FLAG_PROFILE_SEL)
                val |= PROFILE_SEL;

        writel(val, reg);
}

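/*
 * Claim the fixed-purpose counter for this event and prime prev_count with
 * the current hardware value, so only counts accumulated from now on are
 * reported.
 */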
static int mmdc_pmu_event_add(struct perf_event *event, int flags)
{
        struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;

        int cfg = event->attr.config;

        if (flags & PERF_EF_START)
                mmdc_pmu_event_start(event, flags);

        if (pmu_mmdc->mmdc_events[cfg] != NULL)
                return -EAGAIN;

        pmu_mmdc->mmdc_events[cfg] = event;
        pmu_mmdc->active_events++;

        local64_set(&hwc->prev_count, mmdc_pmu_read_counter(pmu_mmdc, cfg));

        return 0;
}

static void mmdc_pmu_event_stop(struct perf_event *event, int flags)
{
        struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
        void __iomem *mmdc_base, *reg;

        mmdc_base = pmu_mmdc->mmdc_base;
        reg = mmdc_base + MMDC_MADPCR0;

        writel(PRF_FRZ, reg);

        reg = mmdc_base + MMDC_MADPCR1;
        writel(MMDC_PRF_AXI_ID_CLEAR, reg);

        mmdc_pmu_event_update(event);
}

static void mmdc_pmu_event_del(struct perf_event *event, int flags)
{
        struct mmdc_pmu *pmu_mmdc = to_mmdc_pmu(event->pmu);
        int cfg = event->attr.config;

        pmu_mmdc->mmdc_events[cfg] = NULL;
        pmu_mmdc->active_events--;

        if (pmu_mmdc->active_events == 0)
                hrtimer_cancel(&pmu_mmdc->hrtimer);

        mmdc_pmu_event_stop(event, PERF_EF_UPDATE);
}

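/*
 * Periodically update all active events from the polling hrtimer, so the
 * 32-bit hardware counters are folded into the 64-bit perf counts before
 * they can wrap.
 */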
static void mmdc_pmu_overflow_handler(struct mmdc_pmu *pmu_mmdc)
{
        int i;

        for (i = 0; i < MMDC_NUM_COUNTERS; i++) {
                struct perf_event *event = pmu_mmdc->mmdc_events[i];

                if (event)
                        mmdc_pmu_event_update(event);
        }
}

static enum hrtimer_restart mmdc_pmu_timer_handler(struct hrtimer *hrtimer)
{
        struct mmdc_pmu *pmu_mmdc = container_of(hrtimer, struct mmdc_pmu,
                        hrtimer);

        mmdc_pmu_overflow_handler(pmu_mmdc);
        hrtimer_forward_now(hrtimer, mmdc_pmu_timer_period());

        return HRTIMER_RESTART;
}

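/*
 * Fill in the pmu callbacks and allocate a per-instance id from mmdc_ida;
 * imx_mmdc_perf_init() uses the id to name the PMU ("mmdc", "mmdc1", ...).
 */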
static int mmdc_pmu_init(struct mmdc_pmu *pmu_mmdc,
                void __iomem *mmdc_base, struct device *dev)
{
        int mmdc_num;

        *pmu_mmdc = (struct mmdc_pmu) {
                .pmu = (struct pmu) {
                        .task_ctx_nr    = perf_invalid_context,
                        .attr_groups    = attr_groups,
                        .event_init     = mmdc_pmu_event_init,
                        .add            = mmdc_pmu_event_add,
                        .del            = mmdc_pmu_event_del,
                        .start          = mmdc_pmu_event_start,
                        .stop           = mmdc_pmu_event_stop,
                        .read           = mmdc_pmu_event_update,
                },
                .mmdc_base = mmdc_base,
                .dev = dev,
                .active_events = 0,
        };

        mmdc_num = ida_simple_get(&mmdc_ida, 0, 0, GFP_KERNEL);

        return mmdc_num;
}

static int imx_mmdc_remove(struct platform_device *pdev)
{
        struct mmdc_pmu *pmu_mmdc = platform_get_drvdata(pdev);

        cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
        perf_pmu_unregister(&pmu_mmdc->pmu);
        kfree(pmu_mmdc);
        return 0;
}

static int imx_mmdc_perf_init(struct platform_device *pdev, void __iomem *mmdc_base)
{
        struct mmdc_pmu *pmu_mmdc;
        char *name;
        int mmdc_num;
        int ret;
        const struct of_device_id *of_id =
                of_match_device(imx_mmdc_dt_ids, &pdev->dev);

        pmu_mmdc = kzalloc(sizeof(*pmu_mmdc), GFP_KERNEL);
        if (!pmu_mmdc) {
                pr_err("failed to allocate PMU device!\n");
                return -ENOMEM;
        }

        /* The first instance registers the hotplug state */
        if (!cpuhp_mmdc_state) {
                ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                              "perf/arm/mmdc:online", NULL,
                                              mmdc_pmu_offline_cpu);
                if (ret < 0) {
                        pr_err("cpuhp_setup_state_multi failed\n");
                        goto pmu_free;
                }
                cpuhp_mmdc_state = ret;
        }

        mmdc_num = mmdc_pmu_init(pmu_mmdc, mmdc_base, &pdev->dev);
        if (mmdc_num == 0)
                name = "mmdc";
        else
                name = devm_kasprintf(&pdev->dev,
                                GFP_KERNEL, "mmdc%d", mmdc_num);

        pmu_mmdc->devtype_data = (struct fsl_mmdc_devtype_data *)of_id->data;

        hrtimer_init(&pmu_mmdc->hrtimer, CLOCK_MONOTONIC,
                        HRTIMER_MODE_REL);
        pmu_mmdc->hrtimer.function = mmdc_pmu_timer_handler;

        cpumask_set_cpu(raw_smp_processor_id(), &pmu_mmdc->cpu);

        /* Register the pmu instance for cpu hotplug */
        cpuhp_state_add_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);

        ret = perf_pmu_register(&(pmu_mmdc->pmu), name, -1);
        if (ret)
                goto pmu_register_err;

        platform_set_drvdata(pdev, pmu_mmdc);
        return 0;

pmu_register_err:
        pr_warn("MMDC Perf PMU failed (%d), disabled\n", ret);
        cpuhp_state_remove_instance_nocalls(cpuhp_mmdc_state, &pmu_mmdc->node);
        hrtimer_cancel(&pmu_mmdc->hrtimer);
pmu_free:
        kfree(pmu_mmdc);
        return ret;
}

#else
#define imx_mmdc_remove NULL
#define imx_mmdc_perf_init(pdev, mmdc_base) 0
#endif

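/*
 * Probe: enable the (optional) ipg clock, map the MMDC registers, record the
 * DDR type from MDMISC, enable automatic power saving, and set up the perf
 * PMU when CONFIG_PERF_EVENTS is enabled.
 */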
static int imx_mmdc_probe(struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        void __iomem *mmdc_base, *reg;
        struct clk *mmdc_ipg_clk;
        u32 val;
        int err;

        /* the ipg clock is optional */
        mmdc_ipg_clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(mmdc_ipg_clk))
                mmdc_ipg_clk = NULL;

        err = clk_prepare_enable(mmdc_ipg_clk);
        if (err) {
                dev_err(&pdev->dev, "Unable to enable mmdc ipg clock.\n");
                return err;
        }

        mmdc_base = of_iomap(np, 0);
        WARN_ON(!mmdc_base);

        reg = mmdc_base + MMDC_MDMISC;
        /* Get ddr type */
        val = readl_relaxed(reg);
        ddr_type = (val & BM_MMDC_MDMISC_DDR_TYPE) >>
                 BP_MMDC_MDMISC_DDR_TYPE;

        reg = mmdc_base + MMDC_MAPSR;

        /* Enable automatic power saving */
        val = readl_relaxed(reg);
        val &= ~(1 << BP_MMDC_MAPSR_PSD);
        writel_relaxed(val, reg);

        return imx_mmdc_perf_init(pdev, mmdc_base);
}

int imx_mmdc_get_ddr_type(void)
{
        return ddr_type;
}

static struct platform_driver imx_mmdc_driver = {
        .driver         = {
                .name   = "imx-mmdc",
                .of_match_table = imx_mmdc_dt_ids,
        },
        .probe          = imx_mmdc_probe,
        .remove         = imx_mmdc_remove,
};

static int __init imx_mmdc_init(void)
{
        return platform_driver_register(&imx_mmdc_driver);
}
postcore_initcall(imx_mmdc_init);