1 // SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */
6 #include <drm/drm_cache.h>
7 #include <linux/string_helpers.h>
11 #include "intel_guc_slpc.h"
12 #include "intel_guc_print.h"
13 #include "intel_mchbar_regs.h"
14 #include "gt/intel_gt.h"
15 #include "gt/intel_gt_regs.h"
16 #include "gt/intel_rps.h"
18 static inline struct intel_guc *slpc_to_guc(struct intel_guc_slpc *slpc)
20 return container_of(slpc, struct intel_guc, slpc);
/* Resolve the GT that owns this SLPC instance (via its GuC). */
static inline struct intel_gt *slpc_to_gt(struct intel_guc_slpc *slpc)
{
	return guc_to_gt(slpc_to_guc(slpc));
}
28 static inline struct drm_i915_private *slpc_to_i915(struct intel_guc_slpc *slpc)
30 return slpc_to_gt(slpc)->i915;
33 static bool __detect_slpc_supported(struct intel_guc *guc)
35 /* GuC SLPC is unavailable for pre-Gen12 */
36 return guc->submission_supported &&
37 GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
40 static bool __guc_slpc_selected(struct intel_guc *guc)
42 if (!intel_guc_slpc_is_supported(guc))
45 return guc->submission_selected;
48 void intel_guc_slpc_init_early(struct intel_guc_slpc *slpc)
50 struct intel_guc *guc = slpc_to_guc(slpc);
52 slpc->supported = __detect_slpc_supported(guc);
53 slpc->selected = __guc_slpc_selected(guc);
56 static void slpc_mem_set_param(struct slpc_shared_data *data,
59 GEM_BUG_ON(id >= SLPC_MAX_OVERRIDE_PARAMETERS);
61 * When the flag bit is set, corresponding value will be read
62 * and applied by SLPC.
64 data->override_params.bits[id >> 5] |= (1 << (id % 32));
65 data->override_params.values[id] = value;
68 static void slpc_mem_set_enabled(struct slpc_shared_data *data,
69 u8 enable_id, u8 disable_id)
72 * Enabling a param involves setting the enable_id
73 * to 1 and disable_id to 0.
75 slpc_mem_set_param(data, enable_id, 1);
76 slpc_mem_set_param(data, disable_id, 0);
79 static void slpc_mem_set_disabled(struct slpc_shared_data *data,
80 u8 enable_id, u8 disable_id)
83 * Disabling a param involves setting the enable_id
84 * to 0 and disable_id to 1.
86 slpc_mem_set_param(data, disable_id, 1);
87 slpc_mem_set_param(data, enable_id, 0);
90 static u32 slpc_get_state(struct intel_guc_slpc *slpc)
92 struct slpc_shared_data *data;
94 GEM_BUG_ON(!slpc->vma);
96 drm_clflush_virt_range(slpc->vaddr, sizeof(u32));
99 return data->header.global_state;
102 static int guc_action_slpc_set_param_nb(struct intel_guc *guc, u8 id, u32 value)
105 GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
106 SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
112 ret = intel_guc_send_nb(guc, request, ARRAY_SIZE(request), 0);
114 return ret > 0 ? -EPROTO : ret;
117 static int slpc_set_param_nb(struct intel_guc_slpc *slpc, u8 id, u32 value)
119 struct intel_guc *guc = slpc_to_guc(slpc);
121 GEM_BUG_ON(id >= SLPC_MAX_PARAM);
123 return guc_action_slpc_set_param_nb(guc, id, value);
126 static int guc_action_slpc_set_param(struct intel_guc *guc, u8 id, u32 value)
129 GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
130 SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
136 ret = intel_guc_send(guc, request, ARRAY_SIZE(request));
138 return ret > 0 ? -EPROTO : ret;
141 static int guc_action_slpc_unset_param(struct intel_guc *guc, u8 id)
144 GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
145 SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 1),
149 return intel_guc_send(guc, request, ARRAY_SIZE(request));
152 static bool slpc_is_running(struct intel_guc_slpc *slpc)
154 return slpc_get_state(slpc) == SLPC_GLOBAL_STATE_RUNNING;
157 static int guc_action_slpc_query(struct intel_guc *guc, u32 offset)
160 GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
161 SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
167 ret = intel_guc_send(guc, request, ARRAY_SIZE(request));
169 return ret > 0 ? -EPROTO : ret;
172 static int slpc_query_task_state(struct intel_guc_slpc *slpc)
174 struct intel_guc *guc = slpc_to_guc(slpc);
175 u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
178 ret = guc_action_slpc_query(guc, offset);
180 guc_probe_error(guc, "Failed to query task state: %pe\n", ERR_PTR(ret));
182 drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);
187 static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
189 struct intel_guc *guc = slpc_to_guc(slpc);
192 GEM_BUG_ON(id >= SLPC_MAX_PARAM);
194 ret = guc_action_slpc_set_param(guc, id, value);
196 guc_probe_error(guc, "Failed to set param %d to %u: %pe\n",
197 id, value, ERR_PTR(ret));
202 static int slpc_unset_param(struct intel_guc_slpc *slpc, u8 id)
204 struct intel_guc *guc = slpc_to_guc(slpc);
206 GEM_BUG_ON(id >= SLPC_MAX_PARAM);
208 return guc_action_slpc_unset_param(guc, id);
211 static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
213 struct intel_guc *guc = slpc_to_guc(slpc);
214 struct drm_i915_private *i915 = slpc_to_i915(slpc);
215 intel_wakeref_t wakeref;
218 lockdep_assert_held(&slpc->lock);
220 if (!intel_guc_is_ready(guc))
224 * This function is a little different as compared to
225 * intel_guc_slpc_set_min_freq(). Softlimit will not be updated
226 * here since this is used to temporarily change min freq,
227 * for example, during a waitboost. Caller is responsible for
231 with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
232 /* Non-blocking request will avoid stalls */
233 ret = slpc_set_param_nb(slpc,
234 SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
237 guc_notice(guc, "Failed to send set_param for min freq(%d): %pe\n",
244 static void slpc_boost_work(struct work_struct *work)
246 struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work);
250 * Raise min freq to boost. It's possible that
251 * this is greater than current max. But it will
252 * certainly be limited by RP0. An error setting
253 * the min param is not fatal.
255 mutex_lock(&slpc->lock);
256 if (atomic_read(&slpc->num_waiters)) {
257 err = slpc_force_min_freq(slpc, slpc->boost_freq);
261 mutex_unlock(&slpc->lock);
264 int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
266 struct intel_guc *guc = slpc_to_guc(slpc);
267 u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
270 GEM_BUG_ON(slpc->vma);
272 err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
274 guc_probe_error(guc, "Failed to allocate SLPC struct: %pe\n", ERR_PTR(err));
278 slpc->max_freq_softlimit = 0;
279 slpc->min_freq_softlimit = 0;
280 slpc->ignore_eff_freq = false;
281 slpc->min_is_rpmax = false;
283 slpc->boost_freq = 0;
284 atomic_set(&slpc->num_waiters, 0);
285 slpc->num_boosts = 0;
286 slpc->media_ratio_mode = SLPC_MEDIA_RATIO_MODE_DYNAMIC_CONTROL;
288 mutex_init(&slpc->lock);
289 INIT_WORK(&slpc->boost_work, slpc_boost_work);
294 static const char *slpc_global_state_to_string(enum slpc_global_state state)
297 case SLPC_GLOBAL_STATE_NOT_RUNNING:
298 return "not running";
299 case SLPC_GLOBAL_STATE_INITIALIZING:
300 return "initializing";
301 case SLPC_GLOBAL_STATE_RESETTING:
303 case SLPC_GLOBAL_STATE_RUNNING:
305 case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
306 return "shutting down";
307 case SLPC_GLOBAL_STATE_ERROR:
/* Read the current SLPC state and convert it to a string. */
static const char *slpc_get_state_string(struct intel_guc_slpc *slpc)
{
	return slpc_global_state_to_string(slpc_get_state(slpc));
}
319 static int guc_action_slpc_reset(struct intel_guc *guc, u32 offset)
322 GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
323 SLPC_EVENT(SLPC_EVENT_RESET, 2),
329 ret = intel_guc_send(guc, request, ARRAY_SIZE(request));
331 return ret > 0 ? -EPROTO : ret;
334 static int slpc_reset(struct intel_guc_slpc *slpc)
336 struct intel_guc *guc = slpc_to_guc(slpc);
337 u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
340 ret = guc_action_slpc_reset(guc, offset);
342 if (unlikely(ret < 0)) {
343 guc_probe_error(guc, "SLPC reset action failed: %pe\n", ERR_PTR(ret));
348 if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
349 guc_probe_error(guc, "SLPC not enabled! State = %s\n",
350 slpc_get_state_string(slpc));
358 static u32 slpc_decode_min_freq(struct intel_guc_slpc *slpc)
360 struct slpc_shared_data *data = slpc->vaddr;
362 GEM_BUG_ON(!slpc->vma);
364 return DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
365 data->task_state_data.freq) *
366 GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
369 static u32 slpc_decode_max_freq(struct intel_guc_slpc *slpc)
371 struct slpc_shared_data *data = slpc->vaddr;
373 GEM_BUG_ON(!slpc->vma);
375 return DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
376 data->task_state_data.freq) *
377 GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
380 static void slpc_shared_data_reset(struct slpc_shared_data *data)
382 memset(data, 0, sizeof(struct slpc_shared_data));
384 data->header.size = sizeof(struct slpc_shared_data);
386 /* Enable only GTPERF task, disable others */
387 slpc_mem_set_enabled(data, SLPC_PARAM_TASK_ENABLE_GTPERF,
388 SLPC_PARAM_TASK_DISABLE_GTPERF);
390 slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_BALANCER,
391 SLPC_PARAM_TASK_DISABLE_BALANCER);
393 slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_DCC,
394 SLPC_PARAM_TASK_DISABLE_DCC);
398 * intel_guc_slpc_set_max_freq() - Set max frequency limit for SLPC.
399 * @slpc: pointer to intel_guc_slpc.
400 * @val: frequency (MHz)
402 * This function will invoke GuC SLPC action to update the max frequency
405 * Return: 0 on success, non-zero error code on failure.
407 int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val)
409 struct drm_i915_private *i915 = slpc_to_i915(slpc);
410 intel_wakeref_t wakeref;
413 if (val < slpc->min_freq ||
414 val > slpc->rp0_freq ||
415 val < slpc->min_freq_softlimit)
418 with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
419 ret = slpc_set_param(slpc,
420 SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
423 /* Return standardized err code for sysfs calls */
429 slpc->max_freq_softlimit = val;
435 * intel_guc_slpc_get_max_freq() - Get max frequency limit for SLPC.
436 * @slpc: pointer to intel_guc_slpc.
437 * @val: pointer to val which will hold max frequency (MHz)
439 * This function will invoke GuC SLPC action to read the max frequency
442 * Return: 0 on success, non-zero error code on failure.
444 int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val)
446 struct drm_i915_private *i915 = slpc_to_i915(slpc);
447 intel_wakeref_t wakeref;
450 with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
451 /* Force GuC to update task data */
452 ret = slpc_query_task_state(slpc);
455 *val = slpc_decode_max_freq(slpc);
461 int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val)
463 struct drm_i915_private *i915 = slpc_to_i915(slpc);
464 intel_wakeref_t wakeref;
467 mutex_lock(&slpc->lock);
468 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
470 ret = slpc_set_param(slpc,
471 SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
474 guc_probe_error(slpc_to_guc(slpc), "Failed to set efficient freq(%d): %pe\n",
477 slpc->ignore_eff_freq = val;
479 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
480 mutex_unlock(&slpc->lock);
485 * intel_guc_slpc_set_min_freq() - Set min frequency limit for SLPC.
486 * @slpc: pointer to intel_guc_slpc.
487 * @val: frequency (MHz)
489 * This function will invoke GuC SLPC action to update the min unslice
492 * Return: 0 on success, non-zero error code on failure.
494 int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
496 struct drm_i915_private *i915 = slpc_to_i915(slpc);
497 intel_wakeref_t wakeref;
500 if (val < slpc->min_freq ||
501 val > slpc->rp0_freq ||
502 val > slpc->max_freq_softlimit)
505 /* Need a lock now since waitboost can be modifying min as well */
506 mutex_lock(&slpc->lock);
507 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
509 ret = slpc_set_param(slpc,
510 SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
514 slpc->min_freq_softlimit = val;
516 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
517 mutex_unlock(&slpc->lock);
519 /* Return standardized err code for sysfs calls */
527 * intel_guc_slpc_get_min_freq() - Get min frequency limit for SLPC.
528 * @slpc: pointer to intel_guc_slpc.
529 * @val: pointer to val which will hold min frequency (MHz)
531 * This function will invoke GuC SLPC action to read the min frequency
534 * Return: 0 on success, non-zero error code on failure.
536 int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val)
538 struct drm_i915_private *i915 = slpc_to_i915(slpc);
539 intel_wakeref_t wakeref;
542 with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
543 /* Force GuC to update task data */
544 ret = slpc_query_task_state(slpc);
547 *val = slpc_decode_min_freq(slpc);
553 int intel_guc_slpc_set_media_ratio_mode(struct intel_guc_slpc *slpc, u32 val)
555 struct drm_i915_private *i915 = slpc_to_i915(slpc);
556 intel_wakeref_t wakeref;
559 if (!HAS_MEDIA_RATIO_MODE(i915))
562 with_intel_runtime_pm(&i915->runtime_pm, wakeref)
563 ret = slpc_set_param(slpc,
564 SLPC_PARAM_MEDIA_FF_RATIO_MODE,
569 void intel_guc_pm_intrmsk_enable(struct intel_gt *gt)
571 u32 pm_intrmsk_mbz = 0;
574 * Allow GuC to receive ARAT timer expiry event.
575 * This interrupt register is setup by RPS code
576 * when host based Turbo is enabled.
578 pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
580 intel_uncore_rmw(gt->uncore,
581 GEN6_PMINTRMSK, pm_intrmsk_mbz, 0);
584 static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
589 * Softlimits are initially equivalent to platform limits
590 * unless they have deviated from defaults, in which case,
591 * we retain the values and set min/max accordingly.
593 if (!slpc->max_freq_softlimit) {
594 slpc->max_freq_softlimit = slpc->rp0_freq;
595 slpc_to_gt(slpc)->defaults.max_freq = slpc->max_freq_softlimit;
596 } else if (slpc->max_freq_softlimit != slpc->rp0_freq) {
597 ret = intel_guc_slpc_set_max_freq(slpc,
598 slpc->max_freq_softlimit);
604 if (!slpc->min_freq_softlimit) {
605 ret = intel_guc_slpc_get_min_freq(slpc, &slpc->min_freq_softlimit);
608 slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit;
609 } else if (slpc->min_freq_softlimit != slpc->min_freq) {
610 return intel_guc_slpc_set_min_freq(slpc,
611 slpc->min_freq_softlimit);
617 static bool is_slpc_min_freq_rpmax(struct intel_guc_slpc *slpc)
622 ret = intel_guc_slpc_get_min_freq(slpc, &slpc_min_freq);
624 guc_err(slpc_to_guc(slpc), "Failed to get min freq: %pe\n", ERR_PTR(ret));
628 if (slpc_min_freq == SLPC_MAX_FREQ_MHZ)
634 static void update_server_min_softlimit(struct intel_guc_slpc *slpc)
636 /* For server parts, SLPC min will be at RPMax.
637 * Use min softlimit to clamp it to RP0 instead.
639 if (!slpc->min_freq_softlimit &&
640 is_slpc_min_freq_rpmax(slpc)) {
641 slpc->min_is_rpmax = true;
642 slpc->min_freq_softlimit = slpc->rp0_freq;
643 (slpc_to_gt(slpc))->defaults.min_freq = slpc->min_freq_softlimit;
647 static int slpc_use_fused_rp0(struct intel_guc_slpc *slpc)
649 /* Force SLPC to used platform rp0 */
650 return slpc_set_param(slpc,
651 SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
655 static void slpc_get_rp_values(struct intel_guc_slpc *slpc)
657 struct intel_rps *rps = &slpc_to_gt(slpc)->rps;
658 struct intel_rps_freq_caps caps;
660 gen6_rps_get_freq_caps(rps, &caps);
661 slpc->rp0_freq = intel_gpu_freq(rps, caps.rp0_freq);
662 slpc->rp1_freq = intel_gpu_freq(rps, caps.rp1_freq);
663 slpc->min_freq = intel_gpu_freq(rps, caps.min_freq);
665 if (!slpc->boost_freq)
666 slpc->boost_freq = slpc->rp0_freq;
670 * intel_guc_slpc_override_gucrc_mode() - override GUCRC mode
671 * @slpc: pointer to intel_guc_slpc.
672 * @mode: new value of the mode.
674 * This function will override the GUCRC mode.
676 * Return: 0 on success, non-zero error code on failure.
678 int intel_guc_slpc_override_gucrc_mode(struct intel_guc_slpc *slpc, u32 mode)
681 struct drm_i915_private *i915 = slpc_to_i915(slpc);
682 intel_wakeref_t wakeref;
684 if (mode >= SLPC_GUCRC_MODE_MAX)
687 with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
688 ret = slpc_set_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE, mode);
690 guc_err(slpc_to_guc(slpc), "Override RC mode %d failed: %pe\n",
697 int intel_guc_slpc_unset_gucrc_mode(struct intel_guc_slpc *slpc)
699 struct drm_i915_private *i915 = slpc_to_i915(slpc);
700 intel_wakeref_t wakeref;
703 with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
704 ret = slpc_unset_param(slpc, SLPC_PARAM_PWRGATE_RC_MODE);
706 guc_err(slpc_to_guc(slpc), "Unsetting RC mode failed: %pe\n", ERR_PTR(ret));
713 * intel_guc_slpc_enable() - Start SLPC
714 * @slpc: pointer to intel_guc_slpc.
716 * SLPC is enabled by setting up the shared data structure and
717 * sending reset event to GuC SLPC. Initial data is setup in
718 * intel_guc_slpc_init. Here we send the reset event. We do
719 * not currently need a slpc_disable since this is taken care
720 * of automatically when a reset/suspend occurs and the GuC
723 * Return: 0 on success, non-zero error code on failure.
725 int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
727 struct intel_guc *guc = slpc_to_guc(slpc);
730 GEM_BUG_ON(!slpc->vma);
732 slpc_shared_data_reset(slpc->vaddr);
734 ret = slpc_reset(slpc);
735 if (unlikely(ret < 0)) {
736 guc_probe_error(guc, "SLPC Reset event returned: %pe\n", ERR_PTR(ret));
740 ret = slpc_query_task_state(slpc);
741 if (unlikely(ret < 0))
744 intel_guc_pm_intrmsk_enable(slpc_to_gt(slpc));
746 slpc_get_rp_values(slpc);
748 /* Handle the case where min=max=RPmax */
749 update_server_min_softlimit(slpc);
751 /* Set SLPC max limit to RP0 */
752 ret = slpc_use_fused_rp0(slpc);
754 guc_probe_error(guc, "Failed to set SLPC max to RP0: %pe\n", ERR_PTR(ret));
758 /* Revert SLPC min/max to softlimits if necessary */
759 ret = slpc_set_softlimits(slpc);
761 guc_probe_error(guc, "Failed to set SLPC softlimits: %pe\n", ERR_PTR(ret));
765 /* Set cached media freq ratio mode */
766 intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);
768 /* Set cached value of ignore efficient freq */
769 intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq);
774 int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val)
778 if (val < slpc->min_freq || val > slpc->rp0_freq)
781 mutex_lock(&slpc->lock);
783 if (slpc->boost_freq != val) {
784 /* Apply only if there are active waiters */
785 if (atomic_read(&slpc->num_waiters)) {
786 ret = slpc_force_min_freq(slpc, val);
793 slpc->boost_freq = val;
797 mutex_unlock(&slpc->lock);
801 void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc)
804 * Return min back to the softlimit.
805 * This is called during request retire,
806 * so we don't need to fail that if the
809 mutex_lock(&slpc->lock);
810 if (atomic_dec_and_test(&slpc->num_waiters))
811 slpc_force_min_freq(slpc, slpc->min_freq_softlimit);
812 mutex_unlock(&slpc->lock);
815 int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p)
817 struct drm_i915_private *i915 = slpc_to_i915(slpc);
818 struct slpc_shared_data *data = slpc->vaddr;
819 struct slpc_task_state_data *slpc_tasks;
820 intel_wakeref_t wakeref;
823 GEM_BUG_ON(!slpc->vma);
825 with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
826 ret = slpc_query_task_state(slpc);
829 slpc_tasks = &data->task_state_data;
831 drm_printf(p, "\tSLPC state: %s\n", slpc_get_state_string(slpc));
832 drm_printf(p, "\tGTPERF task active: %s\n",
833 str_yes_no(slpc_tasks->status & SLPC_GTPERF_TASK_ENABLED));
834 drm_printf(p, "\tMax freq: %u MHz\n",
835 slpc_decode_max_freq(slpc));
836 drm_printf(p, "\tMin freq: %u MHz\n",
837 slpc_decode_min_freq(slpc));
838 drm_printf(p, "\twaitboosts: %u\n",
840 drm_printf(p, "\tBoosts outstanding: %u\n",
841 atomic_read(&slpc->num_waiters));
848 void intel_guc_slpc_fini(struct intel_guc_slpc *slpc)
853 i915_vma_unpin_and_release(&slpc->vma, I915_VMA_RELEASE_MAP);