drivers/gpu/drm/i915/i915_pmu.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017-2018 Intel Corporation
 */

#include <linux/irq.h>
#include <linux/pm_runtime.h>

#include "gt/intel_engine.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"

#include "i915_drv.h"
#include "i915_pmu.h"
#include "intel_pm.h"

/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)

#define ENGINE_SAMPLE_MASK \
        (BIT(I915_SAMPLE_BUSY) | \
         BIT(I915_SAMPLE_WAIT) | \
         BIT(I915_SAMPLE_SEMA))

#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)

static cpumask_t i915_pmu_cpumask;

static u8 engine_config_sample(u64 config)
{
        return config & I915_PMU_SAMPLE_MASK;
}

static u8 engine_event_sample(struct perf_event *event)
{
        return engine_config_sample(event->attr.config);
}

static u8 engine_event_class(struct perf_event *event)
{
        return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
}

static u8 engine_event_instance(struct perf_event *event)
{
        return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
}

static bool is_engine_config(u64 config)
{
        return config < __I915_PMU_OTHER(0);
}

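/*
 * Engine sampling events occupy the low ENGINE_SAMPLE_BITS of the enable
 * bitmask; the remaining (non-engine) events are packed immediately above
 * them.
 */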
static unsigned int config_enabled_bit(u64 config)
{
        if (is_engine_config(config))
                return engine_config_sample(config);
        else
                return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
}

static u64 config_enabled_mask(u64 config)
{
        return BIT_ULL(config_enabled_bit(config));
}

static bool is_engine_event(struct perf_event *event)
{
        return is_engine_config(event->attr.config);
}

static unsigned int event_enabled_bit(struct perf_event *event)
{
        return config_enabled_bit(event->attr.config);
}

static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active)
{
        struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
        u64 enable;

        /*
         * Only some counters need the sampling timer.
         *
         * We start with a bitmask of all currently enabled events.
         */
        enable = pmu->enable;

        /*
         * Mask out all the ones which do not need the timer, or in
         * other words keep all the ones that could need the timer.
         */
        enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
                  config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
                  ENGINE_SAMPLE_MASK;

        /*
         * When the GPU is idle per-engine counters do not need to be
         * running so clear those bits out.
         */
        if (!gpu_active)
                enable &= ~ENGINE_SAMPLE_MASK;
        /*
         * Also, if software busyness tracking is available we do not need
         * the timer for the I915_SAMPLE_BUSY counter.
         */
        else if (i915->caps.scheduler & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
                enable &= ~BIT(I915_SAMPLE_BUSY);

        /*
         * If some bits remain it means we need the sampling timer running.
         */
        return enable;
}

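/*
 * Sum the raw RC6 residency counters (RC6, plus RC6p/RC6pp where the
 * platform supports them), in nanoseconds.
 */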
static u64 __get_rc6(struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;
        u64 val;

        val = intel_rc6_residency_ns(&gt->rc6,
                                     IS_VALLEYVIEW(i915) ?
                                     VLV_GT_RENDER_RC6 :
                                     GEN6_GT_GFX_RC6);

        if (HAS_RC6p(i915))
                val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6p);

        if (HAS_RC6pp(i915))
                val += intel_rc6_residency_ns(&gt->rc6, GEN6_GT_GFX_RC6pp);

        return val;
}

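/*
 * With runtime PM the device may be suspended while RC6 residency is
 * queried, so the counter is estimated from the time spent asleep and
 * reconciled with the real value once the device is awake again.
 */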
#if IS_ENABLED(CONFIG_PM)

static inline s64 ktime_since(const ktime_t kt)
{
        return ktime_to_ns(ktime_sub(ktime_get(), kt));
}

static u64 __pmu_estimate_rc6(struct i915_pmu *pmu)
{
        u64 val;

        /*
         * We think we are runtime suspended.
         *
         * Report the delta from when the device was suspended to now,
         * on top of the last known real value, as the approximated RC6
         * counter value.
         */
        val = ktime_since(pmu->sleep_last);
        val += pmu->sample[__I915_SAMPLE_RC6].cur;

        pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;

        return val;
}

static u64 __pmu_update_rc6(struct i915_pmu *pmu, u64 val)
{
        /*
         * If we are coming back from being runtime suspended we must
         * be careful not to report a larger value than returned
         * previously.
         */
        if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
                pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
                pmu->sample[__I915_SAMPLE_RC6].cur = val;
        } else {
                val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
        }

        return val;
}

static u64 get_rc6(struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;
        struct i915_pmu *pmu = &i915->pmu;
        unsigned long flags;
        u64 val;

        val = 0;
        if (intel_gt_pm_get_if_awake(gt)) {
                val = __get_rc6(gt);
                intel_gt_pm_put(gt);
        }

        spin_lock_irqsave(&pmu->lock, flags);

        if (val)
                val = __pmu_update_rc6(pmu, val);
        else
                val = __pmu_estimate_rc6(pmu);

        spin_unlock_irqrestore(&pmu->lock, flags);

        return val;
}

static void park_rc6(struct drm_i915_private *i915)
{
        struct i915_pmu *pmu = &i915->pmu;

        if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
                __pmu_update_rc6(pmu, __get_rc6(&i915->gt));

        pmu->sleep_last = ktime_get();
}

static void unpark_rc6(struct drm_i915_private *i915)
{
        struct i915_pmu *pmu = &i915->pmu;

        /* Estimate how long we slept and accumulate that into rc6 counters */
        if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY))
                __pmu_estimate_rc6(pmu);
}

#else

static u64 get_rc6(struct intel_gt *gt)
{
        return __get_rc6(gt);
}

static void park_rc6(struct drm_i915_private *i915) {}
static void unpark_rc6(struct drm_i915_private *i915) {}

#endif

static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu)
{
        if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) {
                pmu->timer_enabled = true;
                pmu->timer_last = ktime_get();
                hrtimer_start_range_ns(&pmu->timer,
                                       ns_to_ktime(PERIOD), 0,
                                       HRTIMER_MODE_REL_PINNED);
        }
}

void i915_pmu_gt_parked(struct drm_i915_private *i915)
{
        struct i915_pmu *pmu = &i915->pmu;

        if (!pmu->base.event_init)
                return;

        spin_lock_irq(&pmu->lock);

        park_rc6(i915);

        /*
         * Signal sampling timer to stop if only engine events are enabled and
         * GPU went idle.
         */
        pmu->timer_enabled = pmu_needs_timer(pmu, false);

        spin_unlock_irq(&pmu->lock);
}

void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
        struct i915_pmu *pmu = &i915->pmu;

        if (!pmu->base.event_init)
                return;

        spin_lock_irq(&pmu->lock);

        /*
         * Re-enable sampling timer when GPU goes active.
         */
        __i915_pmu_maybe_start_timer(pmu);

        unpark_rc6(i915);

        spin_unlock_irq(&pmu->lock);
}

static void
add_sample(struct i915_pmu_sample *sample, u32 val)
{
        sample->cur += val;
}

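/*
 * Poll the ring registers of each awake engine and accumulate busy, wait
 * and semaphore time into the software sample counters.
 */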
static void
engines_sample(struct intel_gt *gt, unsigned int period_ns)
{
        struct drm_i915_private *i915 = gt->i915;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
                return;

        for_each_engine(engine, gt, id) {
                struct intel_engine_pmu *pmu = &engine->pmu;
                unsigned long flags;
                bool busy;
                u32 val;

                if (!intel_engine_pm_get_if_awake(engine))
                        continue;

                spin_lock_irqsave(&engine->uncore->lock, flags);

                val = ENGINE_READ_FW(engine, RING_CTL);
                if (val == 0) /* powerwell off => engine idle */
                        goto skip;

                if (val & RING_WAIT)
                        add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
                if (val & RING_WAIT_SEMAPHORE)
                        add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);

                /* No need to sample when busy stats are supported. */
                if (intel_engine_supports_stats(engine))
                        goto skip;

                /*
                 * While waiting on a semaphore or event, MI_MODE reports the
                 * ring as idle. However, previously using the seqno, and with
                 * execlists sampling, we account for the ring waiting as the
                 * engine being busy. Therefore, we record the sample as being
                 * busy if either waiting or !idle.
                 */
                busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
                if (!busy) {
                        val = ENGINE_READ_FW(engine, RING_MI_MODE);
                        busy = !(val & MODE_IDLE);
                }
                if (busy)
                        add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);

skip:
                spin_unlock_irqrestore(&engine->uncore->lock, flags);
                intel_engine_pm_put(engine);
        }
}

static void
add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul)
{
        sample->cur += mul_u32_u32(val, mul);
}

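/*
 * Sample the actual (CAGF) and requested GPU frequency, weighted by the
 * sampling period, for the frequency counters.
 */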
static void
frequency_sample(struct intel_gt *gt, unsigned int period_ns)
{
        struct drm_i915_private *i915 = gt->i915;
        struct intel_uncore *uncore = gt->uncore;
        struct i915_pmu *pmu = &i915->pmu;
        struct intel_rps *rps = &gt->rps;

        if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
                u32 val;

                val = rps->cur_freq;
                if (intel_gt_pm_get_if_awake(gt)) {
                        val = intel_uncore_read_notrace(uncore, GEN6_RPSTAT1);
                        val = intel_get_cagf(rps, val);
                        intel_gt_pm_put(gt);
                }

                add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
                                intel_gpu_freq(rps, val),
                                period_ns / 1000);
        }

        if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
                add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
                                intel_gpu_freq(rps, rps->cur_freq),
                                period_ns / 1000);
        }
}

static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
        struct drm_i915_private *i915 =
                container_of(hrtimer, struct drm_i915_private, pmu.timer);
        struct i915_pmu *pmu = &i915->pmu;
        struct intel_gt *gt = &i915->gt;
        unsigned int period_ns;
        ktime_t now;

        if (!READ_ONCE(pmu->timer_enabled))
                return HRTIMER_NORESTART;

        now = ktime_get();
        period_ns = ktime_to_ns(ktime_sub(now, pmu->timer_last));
        pmu->timer_last = now;

        /*
         * Strictly speaking the passed in period may not be 100% accurate for
         * all internal calculation, since some amount of time can be spent on
         * grabbing the forcewake. However the potential error from timer call-
         * back delay greatly dominates this so we keep it simple.
         */
        engines_sample(gt, period_ns);
        frequency_sample(gt, period_ns);

        hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD));

        return HRTIMER_RESTART;
}

static u64 count_interrupts(struct drm_i915_private *i915)
{
        /* open-coded kstat_irqs() */
        struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
        u64 sum = 0;
        int cpu;

        if (!desc || !desc->kstat_irqs)
                return 0;

        for_each_possible_cpu(cpu)
                sum += *per_cpu_ptr(desc->kstat_irqs, cpu);

        return sum;
}

static void engine_event_destroy(struct perf_event *event)
{
        struct drm_i915_private *i915 =
                container_of(event->pmu, typeof(*i915), pmu.base);
        struct intel_engine_cs *engine;

        engine = intel_engine_lookup_user(i915,
                                          engine_event_class(event),
                                          engine_event_instance(event));
        if (WARN_ON_ONCE(!engine))
                return;

        if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
            intel_engine_supports_stats(engine))
                intel_disable_engine_stats(engine);
}

static void i915_pmu_event_destroy(struct perf_event *event)
{
        WARN_ON(event->parent);

        if (is_engine_event(event))
                engine_event_destroy(event);
}

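/*
 * Check whether the requested sample type is supported on the given engine
 * (semaphore sampling requires gen6 or newer).
 */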
static int
engine_event_status(struct intel_engine_cs *engine,
                    enum drm_i915_pmu_engine_sample sample)
{
        switch (sample) {
        case I915_SAMPLE_BUSY:
        case I915_SAMPLE_WAIT:
                break;
        case I915_SAMPLE_SEMA:
                if (INTEL_GEN(engine->i915) < 6)
                        return -ENODEV;
                break;
        default:
                return -ENOENT;
        }

        return 0;
}

static int
config_status(struct drm_i915_private *i915, u64 config)
{
        switch (config) {
        case I915_PMU_ACTUAL_FREQUENCY:
                if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
                        /* Requires a mutex for sampling! */
                        return -ENODEV;
                /* Fall-through. */
        case I915_PMU_REQUESTED_FREQUENCY:
                if (INTEL_GEN(i915) < 6)
                        return -ENODEV;
                break;
        case I915_PMU_INTERRUPTS:
                break;
        case I915_PMU_RC6_RESIDENCY:
                if (!HAS_RC6(i915))
                        return -ENODEV;
                break;
        default:
                return -ENOENT;
        }

        return 0;
}

static int engine_event_init(struct perf_event *event)
{
        struct drm_i915_private *i915 =
                container_of(event->pmu, typeof(*i915), pmu.base);
        struct intel_engine_cs *engine;
        u8 sample;
        int ret;

        engine = intel_engine_lookup_user(i915, engine_event_class(event),
                                          engine_event_instance(event));
        if (!engine)
                return -ENODEV;

        sample = engine_event_sample(event);
        ret = engine_event_status(engine, sample);
        if (ret)
                return ret;

        if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine))
                ret = intel_enable_engine_stats(engine);

        return ret;
}

static int i915_pmu_event_init(struct perf_event *event)
{
        struct drm_i915_private *i915 =
                container_of(event->pmu, typeof(*i915), pmu.base);
        int ret;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /* unsupported modes and filters */
        if (event->attr.sample_period) /* no sampling */
                return -EINVAL;

        if (has_branch_stack(event))
                return -EOPNOTSUPP;

        if (event->cpu < 0)
                return -EINVAL;

        /* only allow running on one cpu at a time */
        if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
                return -EINVAL;

        if (is_engine_event(event))
                ret = engine_event_init(event);
        else
                ret = config_status(i915, event->attr.config);
        if (ret)
                return ret;

        if (!event->parent)
                event->destroy = i915_pmu_event_destroy;

        return 0;
}

static u64 __i915_pmu_event_read(struct perf_event *event)
{
        struct drm_i915_private *i915 =
                container_of(event->pmu, typeof(*i915), pmu.base);
        struct i915_pmu *pmu = &i915->pmu;
        u64 val = 0;

        if (is_engine_event(event)) {
                u8 sample = engine_event_sample(event);
                struct intel_engine_cs *engine;

                engine = intel_engine_lookup_user(i915,
                                                  engine_event_class(event),
                                                  engine_event_instance(event));

                if (WARN_ON_ONCE(!engine)) {
                        /* Do nothing */
                } else if (sample == I915_SAMPLE_BUSY &&
                           intel_engine_supports_stats(engine)) {
                        val = ktime_to_ns(intel_engine_get_busy_time(engine));
                } else {
                        val = engine->pmu.sample[sample].cur;
                }
        } else {
                switch (event->attr.config) {
                case I915_PMU_ACTUAL_FREQUENCY:
                        val =
                           div_u64(pmu->sample[__I915_SAMPLE_FREQ_ACT].cur,
                                   USEC_PER_SEC /* to MHz */);
                        break;
                case I915_PMU_REQUESTED_FREQUENCY:
                        val =
                           div_u64(pmu->sample[__I915_SAMPLE_FREQ_REQ].cur,
                                   USEC_PER_SEC /* to MHz */);
                        break;
                case I915_PMU_INTERRUPTS:
                        val = count_interrupts(i915);
                        break;
                case I915_PMU_RC6_RESIDENCY:
                        val = get_rc6(&i915->gt);
                        break;
                }
        }

        return val;
}

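/*
 * Fold the latest counter value into the perf event count as a delta,
 * retrying if another reader updated prev_count concurrently.
 */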
static void i915_pmu_event_read(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        u64 prev, new;

again:
        prev = local64_read(&hwc->prev_count);
        new = __i915_pmu_event_read(event);

        if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
                goto again;

        local64_add(new - prev, &event->count);
}

static void i915_pmu_enable(struct perf_event *event)
{
        struct drm_i915_private *i915 =
                container_of(event->pmu, typeof(*i915), pmu.base);
        unsigned int bit = event_enabled_bit(event);
        struct i915_pmu *pmu = &i915->pmu;
        unsigned long flags;

        spin_lock_irqsave(&pmu->lock, flags);

        /*
         * Update the bitmask of enabled events and increment
         * the event reference counter.
         */
        BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
        GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
        GEM_BUG_ON(pmu->enable_count[bit] == ~0);
        pmu->enable |= BIT_ULL(bit);
        pmu->enable_count[bit]++;

        /*
         * Start the sampling timer if needed and not already enabled.
         */
        __i915_pmu_maybe_start_timer(pmu);

        /*
         * For per-engine events the bitmask and reference counting
         * is stored per engine.
         */
        if (is_engine_event(event)) {
                u8 sample = engine_event_sample(event);
                struct intel_engine_cs *engine;

                engine = intel_engine_lookup_user(i915,
                                                  engine_event_class(event),
                                                  engine_event_instance(event));

                BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) !=
                             I915_ENGINE_SAMPLE_COUNT);
                BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) !=
                             I915_ENGINE_SAMPLE_COUNT);
                GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
                GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
                GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);

                engine->pmu.enable |= BIT(sample);
                engine->pmu.enable_count[sample]++;
        }

        spin_unlock_irqrestore(&pmu->lock, flags);

        /*
         * Store the current counter value so we can report the correct delta
         * for all listeners. Even when the event was already enabled and has
         * an existing non-zero value.
         */
        local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
}

static void i915_pmu_disable(struct perf_event *event)
{
        struct drm_i915_private *i915 =
                container_of(event->pmu, typeof(*i915), pmu.base);
        unsigned int bit = event_enabled_bit(event);
        struct i915_pmu *pmu = &i915->pmu;
        unsigned long flags;

        spin_lock_irqsave(&pmu->lock, flags);

        if (is_engine_event(event)) {
                u8 sample = engine_event_sample(event);
                struct intel_engine_cs *engine;

                engine = intel_engine_lookup_user(i915,
                                                  engine_event_class(event),
                                                  engine_event_instance(event));

                GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count));
                GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample));
                GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);

                /*
                 * Decrement the reference count and clear the enabled
                 * bitmask when the last listener on an event goes away.
                 */
                if (--engine->pmu.enable_count[sample] == 0)
                        engine->pmu.enable &= ~BIT(sample);
        }

        GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
        GEM_BUG_ON(pmu->enable_count[bit] == 0);
        /*
         * Decrement the reference count and clear the enabled
         * bitmask when the last listener on an event goes away.
         */
        if (--pmu->enable_count[bit] == 0) {
                pmu->enable &= ~BIT_ULL(bit);
                pmu->timer_enabled &= pmu_needs_timer(pmu, true);
        }

        spin_unlock_irqrestore(&pmu->lock, flags);
}

static void i915_pmu_event_start(struct perf_event *event, int flags)
{
        i915_pmu_enable(event);
        event->hw.state = 0;
}

static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
        if (flags & PERF_EF_UPDATE)
                i915_pmu_event_read(event);
        i915_pmu_disable(event);
        event->hw.state = PERF_HES_STOPPED;
}

static int i915_pmu_event_add(struct perf_event *event, int flags)
{
        if (flags & PERF_EF_START)
                i915_pmu_event_start(event, flags);

        return 0;
}

static void i915_pmu_event_del(struct perf_event *event, int flags)
{
        i915_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int i915_pmu_event_event_idx(struct perf_event *event)
{
        return 0;
}

struct i915_str_attribute {
        struct device_attribute attr;
        const char *str;
};

static ssize_t i915_pmu_format_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct i915_str_attribute *eattr;

        eattr = container_of(attr, struct i915_str_attribute, attr);
        return sprintf(buf, "%s\n", eattr->str);
}

#define I915_PMU_FORMAT_ATTR(_name, _config) \
        (&((struct i915_str_attribute[]) { \
                { .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
                  .str = _config, } \
        })[0].attr.attr)

static struct attribute *i915_pmu_format_attrs[] = {
        I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
        NULL,
};

static const struct attribute_group i915_pmu_format_attr_group = {
        .name = "format",
        .attrs = i915_pmu_format_attrs,
};

struct i915_ext_attribute {
        struct device_attribute attr;
        unsigned long val;
};

static ssize_t i915_pmu_event_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct i915_ext_attribute *eattr;

        eattr = container_of(attr, struct i915_ext_attribute, attr);
        return sprintf(buf, "config=0x%lx\n", eattr->val);
}

static struct attribute_group i915_pmu_events_attr_group = {
        .name = "events",
        /* Patch in attrs at runtime. */
};

static ssize_t
i915_pmu_get_attr_cpumask(struct device *dev,
                          struct device_attribute *attr,
                          char *buf)
{
        return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
}

static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL);

static struct attribute *i915_cpumask_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static const struct attribute_group i915_pmu_cpumask_attr_group = {
        .attrs = i915_cpumask_attrs,
};

static const struct attribute_group *i915_pmu_attr_groups[] = {
        &i915_pmu_format_attr_group,
        &i915_pmu_events_attr_group,
        &i915_pmu_cpumask_attr_group,
        NULL
};

#define __event(__config, __name, __unit) \
{ \
        .config = (__config), \
        .name = (__name), \
        .unit = (__unit), \
}

#define __engine_event(__sample, __name) \
{ \
        .sample = (__sample), \
        .name = (__name), \
}

static struct i915_ext_attribute *
add_i915_attr(struct i915_ext_attribute *attr, const char *name, u64 config)
{
        sysfs_attr_init(&attr->attr.attr);
        attr->attr.attr.name = name;
        attr->attr.attr.mode = 0444;
        attr->attr.show = i915_pmu_event_show;
        attr->val = config;

        return ++attr;
}

static struct perf_pmu_events_attr *
add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
             const char *str)
{
        sysfs_attr_init(&attr->attr.attr);
        attr->attr.attr.name = name;
        attr->attr.attr.mode = 0444;
        attr->attr.show = perf_event_sysfs_show;
        attr->event_str = str;

        return ++attr;
}

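/*
 * Build the "events" sysfs attribute group at runtime: one config attribute
 * per supported counter, plus an optional ".unit" attribute for each.
 */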
static struct attribute **
create_event_attributes(struct i915_pmu *pmu)
{
        struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
        static const struct {
                u64 config;
                const char *name;
                const char *unit;
        } events[] = {
                __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "MHz"),
                __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "MHz"),
                __event(I915_PMU_INTERRUPTS, "interrupts", NULL),
                __event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
        };
        static const struct {
                enum drm_i915_pmu_engine_sample sample;
                char *name;
        } engine_events[] = {
                __engine_event(I915_SAMPLE_BUSY, "busy"),
                __engine_event(I915_SAMPLE_SEMA, "sema"),
                __engine_event(I915_SAMPLE_WAIT, "wait"),
        };
        unsigned int count = 0;
        struct perf_pmu_events_attr *pmu_attr = NULL, *pmu_iter;
        struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
        struct attribute **attr = NULL, **attr_iter;
        struct intel_engine_cs *engine;
        unsigned int i;

        /* Count how many counters we will be exposing. */
        for (i = 0; i < ARRAY_SIZE(events); i++) {
                if (!config_status(i915, events[i].config))
                        count++;
        }

        for_each_uabi_engine(engine, i915) {
                for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
                        if (!engine_event_status(engine,
                                                 engine_events[i].sample))
                                count++;
                }
        }

        /* Allocate attribute objects and table. */
        i915_attr = kcalloc(count, sizeof(*i915_attr), GFP_KERNEL);
        if (!i915_attr)
                goto err_alloc;

        pmu_attr = kcalloc(count, sizeof(*pmu_attr), GFP_KERNEL);
        if (!pmu_attr)
                goto err_alloc;

        /* Max one pointer of each attribute type plus a termination entry. */
        attr = kcalloc(count * 2 + 1, sizeof(*attr), GFP_KERNEL);
        if (!attr)
                goto err_alloc;

        i915_iter = i915_attr;
        pmu_iter = pmu_attr;
        attr_iter = attr;

        /* Initialize supported non-engine counters. */
        for (i = 0; i < ARRAY_SIZE(events); i++) {
                char *str;

                if (config_status(i915, events[i].config))
                        continue;

                str = kstrdup(events[i].name, GFP_KERNEL);
                if (!str)
                        goto err;

                *attr_iter++ = &i915_iter->attr.attr;
                i915_iter = add_i915_attr(i915_iter, str, events[i].config);

                if (events[i].unit) {
                        str = kasprintf(GFP_KERNEL, "%s.unit", events[i].name);
                        if (!str)
                                goto err;

                        *attr_iter++ = &pmu_iter->attr.attr;
                        pmu_iter = add_pmu_attr(pmu_iter, str, events[i].unit);
                }
        }

        /* Initialize supported engine counters. */
        for_each_uabi_engine(engine, i915) {
                for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
                        char *str;

                        if (engine_event_status(engine,
                                                engine_events[i].sample))
                                continue;

                        str = kasprintf(GFP_KERNEL, "%s-%s",
                                        engine->name, engine_events[i].name);
                        if (!str)
                                goto err;

                        *attr_iter++ = &i915_iter->attr.attr;
                        i915_iter =
                                add_i915_attr(i915_iter, str,
                                              __I915_PMU_ENGINE(engine->uabi_class,
                                                                engine->uabi_instance,
                                                                engine_events[i].sample));

                        str = kasprintf(GFP_KERNEL, "%s-%s.unit",
                                        engine->name, engine_events[i].name);
                        if (!str)
                                goto err;

                        *attr_iter++ = &pmu_iter->attr.attr;
                        pmu_iter = add_pmu_attr(pmu_iter, str, "ns");
                }
        }

        pmu->i915_attr = i915_attr;
        pmu->pmu_attr = pmu_attr;

        return attr;

err:;
        for (attr_iter = attr; *attr_iter; attr_iter++)
                kfree((*attr_iter)->name);

err_alloc:
        kfree(attr);
        kfree(i915_attr);
        kfree(pmu_attr);

        return NULL;
}

static void free_event_attributes(struct i915_pmu *pmu)
{
        struct attribute **attr_iter = i915_pmu_events_attr_group.attrs;

        for (; *attr_iter; attr_iter++)
                kfree((*attr_iter)->name);

        kfree(i915_pmu_events_attr_group.attrs);
        kfree(pmu->i915_attr);
        kfree(pmu->pmu_attr);

        i915_pmu_events_attr_group.attrs = NULL;
        pmu->i915_attr = NULL;
        pmu->pmu_attr = NULL;
}

static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
        struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);

        GEM_BUG_ON(!pmu->base.event_init);

        /* Select the first online CPU as a designated reader. */
        if (!cpumask_weight(&i915_pmu_cpumask))
                cpumask_set_cpu(cpu, &i915_pmu_cpumask);

        return 0;
}

static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
        struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
        unsigned int target;

        GEM_BUG_ON(!pmu->base.event_init);

        if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
                target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
                /* Migrate events if there is a valid target */
                if (target < nr_cpu_ids) {
                        cpumask_set_cpu(target, &i915_pmu_cpumask);
                        perf_pmu_migrate_context(&pmu->base, cpu, target);
                }
        }

        return 0;
}

static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;

static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
{
        enum cpuhp_state slot;
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                      "perf/x86/intel/i915:online",
                                      i915_pmu_cpu_online,
                                      i915_pmu_cpu_offline);
        if (ret < 0)
                return ret;

        slot = ret;
        ret = cpuhp_state_add_instance(slot, &pmu->node);
        if (ret) {
                cpuhp_remove_multi_state(slot);
                return ret;
        }

        cpuhp_slot = slot;
        return 0;
}

static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
{
        WARN_ON(cpuhp_slot == CPUHP_INVALID);
        WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &pmu->node));
        cpuhp_remove_multi_state(cpuhp_slot);
}

static bool is_igp(struct drm_i915_private *i915)
{
        struct pci_dev *pdev = i915->drm.pdev;

        /* IGP is 0000:00:02.0 */
        return pci_domain_nr(pdev->bus) == 0 &&
               pdev->bus->number == 0 &&
               PCI_SLOT(pdev->devfn) == 2 &&
               PCI_FUNC(pdev->devfn) == 0;
}

void i915_pmu_register(struct drm_i915_private *i915)
{
        struct i915_pmu *pmu = &i915->pmu;
        int ret = -ENOMEM;

        if (INTEL_GEN(i915) <= 2) {
                dev_info(i915->drm.dev, "PMU not supported for this GPU.");
                return;
        }

        spin_lock_init(&pmu->lock);
        hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        pmu->timer.function = i915_sample;

        if (!is_igp(i915))
                pmu->name = kasprintf(GFP_KERNEL,
                                      "i915-%s",
                                      dev_name(i915->drm.dev));
        else
                pmu->name = "i915";
        if (!pmu->name)
                goto err;

        i915_pmu_events_attr_group.attrs = create_event_attributes(pmu);
        if (!i915_pmu_events_attr_group.attrs)
                goto err_name;

        pmu->base.attr_groups   = i915_pmu_attr_groups;
        pmu->base.task_ctx_nr   = perf_invalid_context;
        pmu->base.event_init    = i915_pmu_event_init;
        pmu->base.add           = i915_pmu_event_add;
        pmu->base.del           = i915_pmu_event_del;
        pmu->base.start         = i915_pmu_event_start;
        pmu->base.stop          = i915_pmu_event_stop;
        pmu->base.read          = i915_pmu_event_read;
        pmu->base.event_idx     = i915_pmu_event_event_idx;

        ret = perf_pmu_register(&pmu->base, pmu->name, -1);
        if (ret)
                goto err_attr;

        ret = i915_pmu_register_cpuhp_state(pmu);
        if (ret)
                goto err_unreg;

        return;

err_unreg:
        perf_pmu_unregister(&pmu->base);
err_attr:
        pmu->base.event_init = NULL;
        free_event_attributes(pmu);
err_name:
        if (!is_igp(i915))
                kfree(pmu->name);
err:
        dev_notice(i915->drm.dev, "Failed to register PMU!\n");
}

void i915_pmu_unregister(struct drm_i915_private *i915)
{
        struct i915_pmu *pmu = &i915->pmu;

        if (!pmu->base.event_init)
                return;

        WARN_ON(pmu->enable);

        hrtimer_cancel(&pmu->timer);

        i915_pmu_unregister_cpuhp_state(pmu);

        perf_pmu_unregister(&pmu->base);
        pmu->base.event_init = NULL;
        if (!is_igp(i915))
                kfree(pmu->name);
        free_event_attributes(pmu);
}