/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/perf_event.h>
#include <linux/pm_runtime.h>

#include "i915_drv.h"
#include "i915_pmu.h"
#include "intel_ringbuffer.h"

/* Frequency for the sampling timer for events which need it. */
#define FREQUENCY 200
#define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY)
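
/*
 * At 200Hz the sampling period works out to 5ms, comfortably above the
 * 10us floor enforced by the max_t() in PERIOD.
 */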

#define ENGINE_SAMPLE_MASK \
	(BIT(I915_SAMPLE_BUSY) | \
	 BIT(I915_SAMPLE_WAIT) | \
	 BIT(I915_SAMPLE_SEMA))

#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)

static cpumask_t i915_pmu_cpumask;
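
/*
 * Event configs are encoded in perf_event_attr.config. Engine events pack
 * the sample type, engine class and engine instance into the low bits
 * (decoded by the helpers below), while the remaining global events start
 * at __I915_PMU_OTHER(0). The sysfs "events" group created at register
 * time exposes these encodings, so from userspace counters can be
 * requested by name, e.g. (engine naming here is illustrative):
 *
 *   perf stat -e i915/<engine>-busy/ -e i915/actual-frequency/ -a sleep 1
 */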

static u8 engine_config_sample(u64 config)
{
	return config & I915_PMU_SAMPLE_MASK;
}

static u8 engine_event_sample(struct perf_event *event)
{
	return engine_config_sample(event->attr.config);
}

static u8 engine_event_class(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff;
}

static u8 engine_event_instance(struct perf_event *event)
{
	return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff;
}

static bool is_engine_config(u64 config)
{
	return config < __I915_PMU_OTHER(0);
}

static unsigned int config_enabled_bit(u64 config)
{
	if (is_engine_config(config))
		return engine_config_sample(config);
	else
		return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0));
}

static u64 config_enabled_mask(u64 config)
{
	return BIT_ULL(config_enabled_bit(config));
}

static bool is_engine_event(struct perf_event *event)
{
	return is_engine_config(event->attr.config);
}

static unsigned int event_enabled_bit(struct perf_event *event)
{
	return config_enabled_bit(event->attr.config);
}

static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
{
	u64 enable;

	/*
	 * Only some counters need the sampling timer.
	 *
	 * We start with a bitmask of all currently enabled events.
	 */
	enable = i915->pmu.enable;

	/*
	 * Mask out all the ones which do not need the timer, or in
	 * other words keep all the ones that could need the timer.
	 */
	enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) |
		  config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) |
		  ENGINE_SAMPLE_MASK;

	/*
	 * When the GPU is idle per-engine counters do not need to be
	 * running so clear those bits out.
	 */
	if (!gpu_active)
		enable &= ~ENGINE_SAMPLE_MASK;
	/*
	 * Also, if software busyness tracking is available we do not need
	 * the timer for the I915_SAMPLE_BUSY counter.
	 *
	 * Use RCS as proxy for all engines.
	 */
	else if (intel_engine_supports_stats(i915->engine[RCS]))
		enable &= ~BIT(I915_SAMPLE_BUSY);

	/*
	 * If some bits remain it means we need the sampling timer running.
	 */
	return enable;
}

void i915_pmu_gt_parked(struct drm_i915_private *i915)
{
	if (!i915->pmu.base.event_init)
		return;

	spin_lock_irq(&i915->pmu.lock);
	/*
	 * Signal sampling timer to stop if only engine events are enabled
	 * and the GPU went idle.
	 */
	i915->pmu.timer_enabled = pmu_needs_timer(i915, false);
	spin_unlock_irq(&i915->pmu.lock);
}

static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915)
{
	if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) {
		i915->pmu.timer_enabled = true;
		hrtimer_start_range_ns(&i915->pmu.timer,
				       ns_to_ktime(PERIOD), 0,
				       HRTIMER_MODE_REL_PINNED);
	}
}

void i915_pmu_gt_unparked(struct drm_i915_private *i915)
{
	if (!i915->pmu.base.event_init)
		return;

	spin_lock_irq(&i915->pmu.lock);
	/*
	 * Re-enable sampling timer when GPU goes active.
	 */
	__i915_pmu_maybe_start_timer(i915);
	spin_unlock_irq(&i915->pmu.lock);
}

static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
{
	if (!fw)
		intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);

	return true;
}

static void
update_sample(struct i915_pmu_sample *sample, u32 unit, u32 val)
{
	sample->cur += mul_u32_u32(val, unit);
}
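
/*
 * Sample the engines once per timer tick. Busyness is estimated by
 * comparing the seqno the engine has executed against the last submitted
 * seqno; WAIT and SEMA are read from RING_CTL, which needs forcewake, so
 * the register read is only done for engines which both look busy and
 * have one of those samplers enabled.
 */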

static void engines_sample(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	bool fw = false;

	if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
		return;

	if (!dev_priv->gt.awake)
		return;

	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return;

	for_each_engine(engine, dev_priv, id) {
		u32 current_seqno = intel_engine_get_seqno(engine);
		u32 last_seqno = intel_engine_last_submit(engine);
		u32 val;

		val = !i915_seqno_passed(current_seqno, last_seqno);

		update_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
			      PERIOD, val);

		if (val && (engine->pmu.enable &
		    (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
			fw = grab_forcewake(dev_priv, fw);

			val = I915_READ_FW(RING_CTL(engine->mmio_base));
		} else {
			val = 0;
		}

		update_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
			      PERIOD, !!(val & RING_WAIT));

		update_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
			      PERIOD, !!(val & RING_WAIT_SEMAPHORE));
	}

	if (fw)
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

	intel_runtime_pm_put(dev_priv);
}
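
/*
 * Sample GPU frequencies once per timer tick. The actual frequency is
 * read back from the hardware (GEN6_RPSTAT1) when the GT is awake,
 * falling back to the last software request otherwise; the requested
 * frequency comes straight from the RPS software state.
 */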

static void frequency_sample(struct drm_i915_private *dev_priv)
{
	if (dev_priv->pmu.enable &
	    config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
		u32 val;

		val = dev_priv->gt_pm.rps.cur_freq;
		if (dev_priv->gt.awake &&
		    intel_runtime_pm_get_if_in_use(dev_priv)) {
			val = intel_get_cagf(dev_priv,
					     I915_READ_NOTRACE(GEN6_RPSTAT1));
			intel_runtime_pm_put(dev_priv);
		}

		update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
			      1, intel_gpu_freq(dev_priv, val));
	}

	if (dev_priv->pmu.enable &
	    config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
		update_sample(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], 1,
			      intel_gpu_freq(dev_priv,
					     dev_priv->gt_pm.rps.cur_freq));
	}
}

static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
	struct drm_i915_private *i915 =
		container_of(hrtimer, struct drm_i915_private, pmu.timer);

	if (!READ_ONCE(i915->pmu.timer_enabled))
		return HRTIMER_NORESTART;

	engines_sample(i915);
	frequency_sample(i915);

	hrtimer_forward_now(hrtimer, ns_to_ktime(PERIOD));
	return HRTIMER_RESTART;
}

static u64 count_interrupts(struct drm_i915_private *i915)
{
	/* open-coded kstat_irqs() */
	struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
	u64 sum = 0;
	int cpu;

	if (!desc || !desc->kstat_irqs)
		return 0;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);

	return sum;
}

static void i915_pmu_event_destroy(struct perf_event *event)
{
	WARN_ON(event->parent);
}

static int
engine_event_status(struct intel_engine_cs *engine,
		    enum drm_i915_pmu_engine_sample sample)
{
	switch (sample) {
	case I915_SAMPLE_BUSY:
	case I915_SAMPLE_WAIT:
		break;
	case I915_SAMPLE_SEMA:
		if (INTEL_GEN(engine->i915) < 6)
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static int
config_status(struct drm_i915_private *i915, u64 config)
{
	switch (config) {
	case I915_PMU_ACTUAL_FREQUENCY:
		if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
			/* Requires a mutex for sampling! */
			return -ENODEV;
		/* Fall-through. */
	case I915_PMU_REQUESTED_FREQUENCY:
		if (INTEL_GEN(i915) < 6)
			return -ENODEV;
		break;
	case I915_PMU_INTERRUPTS:
		break;
	case I915_PMU_RC6_RESIDENCY:
		if (!HAS_RC6(i915))
			return -ENODEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static int engine_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	struct intel_engine_cs *engine;

	engine = intel_engine_lookup_user(i915, engine_event_class(event),
					  engine_event_instance(event));
	if (!engine)
		return -ENODEV;

	return engine_event_status(engine, engine_event_sample(event));
}
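
/*
 * The i915 PMU is an uncore PMU: counters are not tied to a task context,
 * sampling mode is not supported, and events can only be read from the
 * single designated reader CPU (tracked in i915_pmu_cpumask).
 */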

static int i915_pmu_event_init(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	int ret;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (event->cpu < 0)
		return -EINVAL;

	/* only allow running on one cpu at a time */
	if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask))
		return -EINVAL;

	if (is_engine_event(event))
		ret = engine_event_init(event);
	else
		ret = config_status(i915, event->attr.config);
	if (ret)
		return ret;

	if (!event->parent)
		event->destroy = i915_pmu_event_destroy;

	return 0;
}

static u64 __i915_pmu_event_read(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	u64 val = 0;

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));

		if (WARN_ON_ONCE(!engine)) {
			/* Do nothing */
		} else if (sample == I915_SAMPLE_BUSY &&
			   engine->pmu.busy_stats) {
			val = ktime_to_ns(intel_engine_get_busy_time(engine));
		} else {
			val = engine->pmu.sample[sample].cur;
		}
	} else {
		switch (event->attr.config) {
		case I915_PMU_ACTUAL_FREQUENCY:
			val =
			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur,
				   FREQUENCY);
			break;
		case I915_PMU_REQUESTED_FREQUENCY:
			val =
			   div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur,
				   FREQUENCY);
			break;
		case I915_PMU_INTERRUPTS:
			val = count_interrupts(i915);
			break;
		case I915_PMU_RC6_RESIDENCY:
			intel_runtime_pm_get(i915);
			val = intel_rc6_residency_ns(i915,
						     IS_VALLEYVIEW(i915) ?
						     VLV_GT_RENDER_RC6 :
						     GEN6_GT_GFX_RC6);
			if (HAS_RC6p(i915))
				val += intel_rc6_residency_ns(i915,
							      GEN6_GT_GFX_RC6p);
			if (HAS_RC6pp(i915))
				val += intel_rc6_residency_ns(i915,
							      GEN6_GT_GFX_RC6pp);
			intel_runtime_pm_put(i915);
			break;
		}
	}

	return val;
}
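
/*
 * perf read path: publish the delta between the freshly sampled value and
 * the previously reported one. The cmpxchg retry loop makes the update
 * safe against concurrent readers without taking a lock.
 */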

static void i915_pmu_event_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;

again:
	prev = local64_read(&hwc->prev_count);
	new = __i915_pmu_event_read(event);

	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
		goto again;

	local64_add(new - prev, &event->count);
}

static bool engine_needs_busy_stats(struct intel_engine_cs *engine)
{
	return intel_engine_supports_stats(engine) &&
	       (engine->pmu.enable & BIT(I915_SAMPLE_BUSY));
}

static void i915_pmu_enable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	unsigned long flags;

	spin_lock_irqsave(&i915->pmu.lock, flags);

	/*
	 * Update the bitmask of enabled events and increment
	 * the event reference counter.
	 */
	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
	GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0);
	i915->pmu.enable |= BIT_ULL(bit);
	i915->pmu.enable_count[bit]++;

	/*
	 * Start the sampling timer if needed and not already enabled.
	 */
	__i915_pmu_maybe_start_timer(i915);

	/*
	 * For per-engine events the bitmask and reference counting
	 * are stored per engine.
	 */
	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));
		GEM_BUG_ON(!engine);
		engine->pmu.enable |= BIT(sample);

		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
		if (engine->pmu.enable_count[sample]++ == 0) {
			/*
			 * Enable engine busy stats tracking if needed or
			 * alternatively cancel the scheduled disable.
			 *
			 * If the delayed disable was pending, cancel it and
			 * in this case do not enable since it already is.
			 */
			if (engine_needs_busy_stats(engine) &&
			    !engine->pmu.busy_stats) {
				engine->pmu.busy_stats = true;
				if (!cancel_delayed_work(&engine->pmu.disable_busy_stats))
					intel_enable_engine_stats(engine);
			}
		}
	}

	/*
	 * Store the current counter value so we can report the correct delta
	 * for all listeners. Even when the event was already enabled and has
	 * an existing non-zero value.
	 */
	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));

	spin_unlock_irqrestore(&i915->pmu.lock, flags);
}

static void __disable_busy_stats(struct work_struct *work)
{
	struct intel_engine_cs *engine =
	       container_of(work, typeof(*engine), pmu.disable_busy_stats.work);

	intel_disable_engine_stats(engine);
}

static void i915_pmu_disable(struct perf_event *event)
{
	struct drm_i915_private *i915 =
		container_of(event->pmu, typeof(*i915), pmu.base);
	unsigned int bit = event_enabled_bit(event);
	unsigned long flags;

	spin_lock_irqsave(&i915->pmu.lock, flags);

	if (is_engine_event(event)) {
		u8 sample = engine_event_sample(event);
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(i915,
						  engine_event_class(event),
						  engine_event_instance(event));
		GEM_BUG_ON(!engine);
		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
		GEM_BUG_ON(engine->pmu.enable_count[sample] == 0);
		/*
		 * Decrement the reference count and clear the enabled
		 * bitmask when the last listener on an event goes away.
		 */
		if (--engine->pmu.enable_count[sample] == 0) {
			engine->pmu.enable &= ~BIT(sample);
			if (!engine_needs_busy_stats(engine) &&
			    engine->pmu.busy_stats) {
				engine->pmu.busy_stats = false;
				/*
				 * We request a delayed disable to gracefully
				 * handle the rapid on/off cycles on events,
				 * which can happen when tools like perf stat
				 * start.
				 *
				 * In addition, this also helps with busy stats
				 * accuracy with background CPU offline/online
				 * migration events.
				 */
				queue_delayed_work(system_wq,
						   &engine->pmu.disable_busy_stats,
						   round_jiffies_up_relative(HZ));
			}
		}
	}

	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
	GEM_BUG_ON(i915->pmu.enable_count[bit] == 0);
	/*
	 * Decrement the reference count and clear the enabled
	 * bitmask when the last listener on an event goes away.
	 */
	if (--i915->pmu.enable_count[bit] == 0) {
		i915->pmu.enable &= ~BIT_ULL(bit);
		i915->pmu.timer_enabled &= pmu_needs_timer(i915, true);
	}

	spin_unlock_irqrestore(&i915->pmu.lock, flags);
}
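
/*
 * Standard perf_pmu start/stop/add/del callbacks, layered on top of the
 * reference-counted enable/disable above. PERF_EF_UPDATE requests a final
 * counter read before the event is stopped.
 */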

static void i915_pmu_event_start(struct perf_event *event, int flags)
{
	i915_pmu_enable(event);
	event->hw.state = 0;
}

static void i915_pmu_event_stop(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_UPDATE)
		i915_pmu_event_read(event);
	i915_pmu_disable(event);
	event->hw.state = PERF_HES_STOPPED;
}

static int i915_pmu_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		i915_pmu_event_start(event, flags);

	return 0;
}

static void i915_pmu_event_del(struct perf_event *event, int flags)
{
	i915_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int i915_pmu_event_event_idx(struct perf_event *event)
{
	/* Not applicable for an uncore PMU. */
	return 0;
}

struct i915_str_attribute {
	struct device_attribute attr;
	const char *str;
};

static ssize_t i915_pmu_format_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct i915_str_attribute *eattr;

	eattr = container_of(attr, struct i915_str_attribute, attr);
	return sprintf(buf, "%s\n", eattr->str);
}

#define I915_PMU_FORMAT_ATTR(_name, _config) \
	(&((struct i915_str_attribute[]) { \
		{ .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \
		  .str = _config, } \
	})[0].attr.attr)

static struct attribute *i915_pmu_format_attrs[] = {
	I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"),
	NULL,
};

static const struct attribute_group i915_pmu_format_attr_group = {
	.name = "format",
	.attrs = i915_pmu_format_attrs,
};

struct i915_ext_attribute {
	struct device_attribute attr;
	unsigned long val;
};

static ssize_t i915_pmu_event_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct i915_ext_attribute *eattr;

	eattr = container_of(attr, struct i915_ext_attribute, attr);
	return sprintf(buf, "config=0x%lx\n", eattr->val);
}

static struct attribute_group i915_pmu_events_attr_group = {
	.name = "events",
	/* Patch in attrs at runtime. */
};

static ssize_t
i915_pmu_get_attr_cpumask(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask);
}

static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL);

static struct attribute *i915_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static const struct attribute_group i915_pmu_cpumask_attr_group = {
	.attrs = i915_cpumask_attrs,
};

static const struct attribute_group *i915_pmu_attr_groups[] = {
	&i915_pmu_format_attr_group,
	&i915_pmu_events_attr_group,
	&i915_pmu_cpumask_attr_group,
	NULL
};
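
/*
 * Together these groups populate /sys/bus/event_source/devices/i915/ with
 * a "format" description of the config field, an "events" directory of
 * named counters and their units, and the "cpumask" advertising the
 * designated reader CPU.
 */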

#define __event(__config, __name, __unit) \
{ \
	.config = (__config), \
	.name = (__name), \
	.unit = (__unit), \
}

#define __engine_event(__sample, __name) \
{ \
	.sample = (__sample), \
	.name = (__name), \
}

static struct i915_ext_attribute *
add_i915_attr(struct i915_ext_attribute *attr, const char *name, u64 config)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = i915_pmu_event_show;
	attr->val = config;

	return ++attr;
}

static struct perf_pmu_events_attr *
add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
	     const char *str)
{
	sysfs_attr_init(&attr->attr.attr);
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = perf_event_sysfs_show;
	attr->event_str = str;

	return ++attr;
}

static struct attribute **
create_event_attributes(struct drm_i915_private *i915)
{
	static const struct {
		u64 config;
		const char *name;
		const char *unit;
	} events[] = {
		__event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "MHz"),
		__event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "MHz"),
		__event(I915_PMU_INTERRUPTS, "interrupts", NULL),
		__event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"),
	};
	static const struct {
		enum drm_i915_pmu_engine_sample sample;
		const char *name;
	} engine_events[] = {
		__engine_event(I915_SAMPLE_BUSY, "busy"),
		__engine_event(I915_SAMPLE_SEMA, "sema"),
		__engine_event(I915_SAMPLE_WAIT, "wait"),
	};
	unsigned int count = 0;
	struct perf_pmu_events_attr *pmu_attr = NULL, *pmu_iter;
	struct i915_ext_attribute *i915_attr = NULL, *i915_iter;
	struct attribute **attr = NULL, **attr_iter;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int i;

	/* Count how many counters we will be exposing. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		if (!config_status(i915, events[i].config))
			count++;
	}

	for_each_engine(engine, i915, id) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			if (!engine_event_status(engine,
						 engine_events[i].sample))
				count++;
		}
	}

	/* Allocate attribute objects and table. */
	i915_attr = kcalloc(count, sizeof(*i915_attr), GFP_KERNEL);
	if (!i915_attr)
		goto err_alloc;

	pmu_attr = kcalloc(count, sizeof(*pmu_attr), GFP_KERNEL);
	if (!pmu_attr)
		goto err_alloc;

	/* Max one pointer of each attribute type plus a termination entry. */
	attr = kcalloc(count * 2 + 1, sizeof(*attr), GFP_KERNEL);
	if (!attr)
		goto err_alloc;

	i915_iter = i915_attr;
	pmu_iter = pmu_attr;
	attr_iter = attr;

	/* Initialize supported non-engine counters. */
	for (i = 0; i < ARRAY_SIZE(events); i++) {
		char *str;

		if (config_status(i915, events[i].config))
			continue;

		str = kstrdup(events[i].name, GFP_KERNEL);
		if (!str)
			goto err;

		*attr_iter++ = &i915_iter->attr.attr;
		i915_iter = add_i915_attr(i915_iter, str, events[i].config);

		if (events[i].unit) {
			str = kasprintf(GFP_KERNEL, "%s.unit", events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, events[i].unit);
		}
	}

	/* Initialize supported engine counters. */
	for_each_engine(engine, i915, id) {
		for (i = 0; i < ARRAY_SIZE(engine_events); i++) {
			char *str;

			if (engine_event_status(engine,
						engine_events[i].sample))
				continue;

			str = kasprintf(GFP_KERNEL, "%s-%s",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &i915_iter->attr.attr;
			i915_iter =
				add_i915_attr(i915_iter, str,
					      __I915_PMU_ENGINE(engine->class,
								engine->instance,
								engine_events[i].sample));

			str = kasprintf(GFP_KERNEL, "%s-%s.unit",
					engine->name, engine_events[i].name);
			if (!str)
				goto err;

			*attr_iter++ = &pmu_iter->attr.attr;
			pmu_iter = add_pmu_attr(pmu_iter, str, "ns");
		}
	}

	i915->pmu.i915_attr = i915_attr;
	i915->pmu.pmu_attr = pmu_attr;

	return attr;

err:
	for (attr_iter = attr; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

err_alloc:
	kfree(attr);
	kfree(i915_attr);
	kfree(pmu_attr);

	return NULL;
}

static void free_event_attributes(struct drm_i915_private *i915)
{
	struct attribute **attr_iter = i915_pmu_events_attr_group.attrs;

	for (; *attr_iter; attr_iter++)
		kfree((*attr_iter)->name);

	kfree(i915_pmu_events_attr_group.attrs);
	kfree(i915->pmu.i915_attr);
	kfree(i915->pmu.pmu_attr);

	i915_pmu_events_attr_group.attrs = NULL;
	i915->pmu.i915_attr = NULL;
	i915->pmu.pmu_attr = NULL;
}
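
/*
 * CPU hotplug handling: the first CPU to come online is chosen as the
 * designated event reader; if it later goes offline, a sibling takes over
 * and any open perf contexts migrate to it, so counting continues
 * uninterrupted.
 */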

static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);

	GEM_BUG_ON(!pmu->base.event_init);

	/* Select the first online CPU as a designated reader. */
	if (!cpumask_weight(&i915_pmu_cpumask))
		cpumask_set_cpu(cpu, &i915_pmu_cpumask);

	return 0;
}

static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
	unsigned int target;

	GEM_BUG_ON(!pmu->base.event_init);

	if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
		target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
		/* Migrate events if there is a valid target */
		if (target < nr_cpu_ids) {
			cpumask_set_cpu(target, &i915_pmu_cpumask);
			perf_pmu_migrate_context(&pmu->base, cpu, target);
		}
	}

	return 0;
}

static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;

static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915)
{
	enum cpuhp_state slot;
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/x86/intel/i915:online",
				      i915_pmu_cpu_online,
				      i915_pmu_cpu_offline);
	if (ret < 0)
		return ret;

	slot = ret;
	ret = cpuhp_state_add_instance(slot, &i915->pmu.node);
	if (ret) {
		cpuhp_remove_multi_state(slot);
		return ret;
	}

	cpuhp_slot = slot;
	return 0;
}

static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
{
	WARN_ON(cpuhp_slot == CPUHP_INVALID);
	WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &i915->pmu.node));
	cpuhp_remove_multi_state(cpuhp_slot);
}
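
/*
 * i915_pmu_register - set up the i915 perf PMU
 *
 * Builds the sysfs event attributes, registers the perf PMU named "i915"
 * and hooks up CPU hotplug handling. On any failure the partial setup is
 * torn down again and the driver continues without a PMU.
 */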

void i915_pmu_register(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	if (INTEL_GEN(i915) <= 2) {
		DRM_INFO("PMU not supported for this GPU.\n");
		return;
	}

	i915_pmu_events_attr_group.attrs = create_event_attributes(i915);
	if (!i915_pmu_events_attr_group.attrs) {
		ret = -ENOMEM;
		goto err;
	}

	i915->pmu.base.attr_groups = i915_pmu_attr_groups;
	i915->pmu.base.task_ctx_nr = perf_invalid_context;
	i915->pmu.base.event_init = i915_pmu_event_init;
	i915->pmu.base.add = i915_pmu_event_add;
	i915->pmu.base.del = i915_pmu_event_del;
	i915->pmu.base.start = i915_pmu_event_start;
	i915->pmu.base.stop = i915_pmu_event_stop;
	i915->pmu.base.read = i915_pmu_event_read;
	i915->pmu.base.event_idx = i915_pmu_event_event_idx;

	spin_lock_init(&i915->pmu.lock);
	hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	i915->pmu.timer.function = i915_sample;

	for_each_engine(engine, i915, id)
		INIT_DELAYED_WORK(&engine->pmu.disable_busy_stats,
				  __disable_busy_stats);

	ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
	if (ret)
		goto err;

	ret = i915_pmu_register_cpuhp_state(i915);
	if (ret)
		goto err_unreg;

	return;

err_unreg:
	perf_pmu_unregister(&i915->pmu.base);
err:
	i915->pmu.base.event_init = NULL;
	free_event_attributes(i915);
	DRM_NOTE("Failed to register PMU! (err=%d)\n", ret);
}

void i915_pmu_unregister(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (!i915->pmu.base.event_init)
		return;

	WARN_ON(i915->pmu.enable);

	hrtimer_cancel(&i915->pmu.timer);

	for_each_engine(engine, i915, id) {
		GEM_BUG_ON(engine->pmu.busy_stats);
		flush_delayed_work(&engine->pmu.disable_busy_stats);
	}

	i915_pmu_unregister_cpuhp_state(i915);

	perf_pmu_unregister(&i915->pmu.base);
	i915->pmu.base.event_init = NULL;
	free_event_attributes(i915);
}