 * Copyright 2017 Advanced Micro Devices, Inc.
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
#include <asm/processor.h>
static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Rights Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Rights Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
	{AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
	{AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
	{AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_REPEATER_FGCG, "Repeater Fine Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_PERF_CLK, "Perfmon Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
static const struct hwmon_temp_label {
	enum PP_HWMON_TEMP channel;
	{PP_TEMP_EDGE, "edge"},
	{PP_TEMP_JUNCTION, "junction"},
const char * const amdgpu_pp_profile_name[] = {
 * DOC: power_dpm_state
 * The power_dpm_state file is a legacy interface and is only provided for
 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
 * certain power related parameters. The file power_dpm_state is used for this.
 * It accepts the following arguments:
 * On older GPUs, the vbios provided a special power state for battery
 * operation. Selecting battery switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 * On older GPUs, the vbios provided a special power state for balanced
 * operation. Selecting balanced switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 * On older GPUs, the vbios provided a special power state for performance
 * operation. Selecting performance switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
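 *
 * For example, assuming the GPU is exposed as card0 (the card index is an
 * assumption and varies per system), the balanced state could be selected
 * and read back with:
 *
 * .. code-block:: bash
 *
 *	echo balanced > /sys/class/drm/card0/device/power_dpm_state
 *	cat /sys/class/drm/card0/device/power_dpm_state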
static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type pm;
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	ret = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	amdgpu_dpm_get_current_power_state(adev, &pm);
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
	return sysfs_emit(buf, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type state;
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	ret = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	amdgpu_dpm_set_power_state(adev, state);
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
 * DOC: power_dpm_force_performance_level
 * The amdgpu driver provides a sysfs API for adjusting certain power
 * related parameters. The file power_dpm_force_performance_level is
 * used for this. It accepts the following arguments:
 * When auto is selected, the driver will attempt to dynamically select
 * the optimal power profile for the current conditions.
 * When low is selected, the clocks are forced to the lowest power state.
 * When high is selected, the clocks are forced to the highest power state.
 * When manual is selected, the user can manually adjust which power states
 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
 * and pp_dpm_pcie files and adjust the power state transition heuristics
 * via the pp_power_profile_mode sysfs file.
 * When the profiling modes are selected, clock and power gating are
 * disabled and the clocks are set for different profiling cases. This
 * mode is recommended for profiling specific workloads where you do
 * not want clock or power gating or clock fluctuations to interfere
 * with your results. profile_standard sets the clocks to a fixed clock
 * level which varies from ASIC to ASIC. profile_min_sclk forces the sclk
 * to the lowest level. profile_min_mclk forces the mclk to the lowest level.
 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
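 *
 * For example, assuming the GPU is exposed as card0 (an assumption; the
 * index varies per system), manual mode could be entered and later left
 * again with:
 *
 * .. code-block:: bash
 *
 *	echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *	echo auto > /sys/class/drm/card0/device/power_dpm_force_performance_level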
static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_dpm_forced_level level = 0xff;
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	ret = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	level = amdgpu_dpm_get_performance_level(adev);
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
	return sysfs_emit(buf, "%s\n",
			(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
			(level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
			(level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
			(level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_dpm_forced_level level;
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_MANUAL;
	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	} else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
	ret = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	mutex_lock(&adev->pm.stable_pstate_ctx_lock);
	if (amdgpu_dpm_force_performance_level(adev, level)) {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
	/* override whatever a user ctx may have set */
	adev->pm.stable_pstate_ctx = NULL;
	mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
static ssize_t amdgpu_get_pp_num_states(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct pp_states_info data;
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	ret = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	if (amdgpu_dpm_get_pp_num_states(adev, &data))
		memset(&data, 0, sizeof(data));
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
	buf_len = sysfs_emit(buf, "states: %d\n", data.nums);
	for (i = 0; i < data.nums; i++)
		buf_len += sysfs_emit_at(buf, buf_len, "%d %s\n", i,
				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");
static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct pp_states_info data = {0};
	enum amd_pm_state_type pm = 0;
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	ret = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	amdgpu_dpm_get_current_power_state(adev, &pm);
	ret = amdgpu_dpm_get_pp_num_states(adev, &data);
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
	for (i = 0; i < data.nums; i++) {
		if (pm == data.states[i])
	return sysfs_emit(buf, "%d\n", i);
static ssize_t amdgpu_get_pp_force_state(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	if (adev->pm.pp_force_state_enabled)
		return amdgpu_get_pp_cur_state(dev, attr, buf);
	return sysfs_emit(buf, "\n");
static ssize_t amdgpu_set_pp_force_state(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum amd_pm_state_type state = 0;
	struct pp_states_info data;
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	adev->pm.pp_force_state_enabled = false;
	if (strlen(buf) == 1)
	ret = kstrtoul(buf, 0, &idx);
	if (ret || idx >= ARRAY_SIZE(data.states))
	idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
	ret = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	ret = amdgpu_dpm_get_pp_num_states(adev, &data);
	state = data.states[idx];
	/* only set user selected power states */
	if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
	    state != POWER_STATE_TYPE_DEFAULT) {
		ret = amdgpu_dpm_dispatch_task(adev,
				AMD_PP_TASK_ENABLE_USER_STATE, &state);
		adev->pm.pp_force_state_enabled = true;
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
 * The amdgpu driver provides a sysfs API for uploading new powerplay
 * tables. The file pp_table is used for this. Reading the file
 * will dump the current powerplay table. Writing to the file
 * will attempt to upload a new powerplay table and re-initialize
 * powerplay using that new table.
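 *
 * For example, assuming the card is exposed as card0 (path is an
 * assumption), the current table could be saved to a file and a modified
 * copy uploaded again with:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_table > pp_table.bin
 *	# edit pp_table.bin with an external tool, then:
 *	cat pp_table.bin > /sys/class/drm/card0/device/pp_table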
static ssize_t amdgpu_get_pp_table(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	ret = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	size = amdgpu_dpm_get_pp_table(adev, &table);
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;
	memcpy(buf, table, size);
static ssize_t amdgpu_set_pp_table(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	ret = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	ret = amdgpu_dpm_set_pp_table(adev, buf, count);
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
 * DOC: pp_od_clk_voltage
 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
 * in each power level within a power state. The pp_od_clk_voltage is used for
 * Note that the actual memory controller clock rate is exposed, not
 * the effective memory clock of the DRAMs. To translate it, use the
 * Clock conversion (MHz):
 * HBM: effective_memory_clock = memory_controller_clock * 1
 * G5: effective_memory_clock = memory_controller_clock * 1
 * G6: effective_memory_clock = memory_controller_clock * 2
 * DRAM data rate (MT/s):
 * HBM: effective_memory_clock * 2 = data_rate
 * G5: effective_memory_clock * 4 = data_rate
 * G6: effective_memory_clock * 8 = data_rate
 * data_rate * vram_bit_width / 8 = memory_bandwidth
 * memory_controller_clock = 1750 MHz
 * effective_memory_clock = 1750 MHz * 1 = 1750 MHz
 * data rate = 1750 * 4 = 7000 MT/s
 * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
 * memory_controller_clock = 875 MHz
 * effective_memory_clock = 875 MHz * 2 = 1750 MHz
 * data rate = 1750 * 8 = 14000 MT/s
 * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
 * < For Vega10 and previous ASICs >
 * Reading the file will display:
 * - a list of engine clock levels and voltages labeled OD_SCLK
 * - a list of memory clock levels and voltages labeled OD_MCLK
 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
 * To manually adjust these settings, first select manual using
 * power_dpm_force_performance_level. Enter a new value for each
 * level by writing a string that contains "s/m level clock voltage" to
 * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
 * 810 mV. When you have edited all of the states as needed, write
 * "c" (commit) to the file to commit your changes. If you want to reset to the
 * default power levels, write "r" (reset) to the file to reset them.
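 *
 * For example, a hypothetical Vega10-style session (the values are
 * illustrative only and must stay within the advertised OD_RANGE; run
 * from the device's sysfs directory) could look like:
 *
 * .. code-block:: bash
 *
 *	echo manual > power_dpm_force_performance_level
 *	echo "s 1 500 820" > pp_od_clk_voltage
 *	echo "m 0 350 810" > pp_od_clk_voltage
 *	echo "c" > pp_od_clk_voltage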
 * < For Vega20 and newer ASICs >
 * Reading the file will display:
 * - minimum and maximum engine clock labeled OD_SCLK
 * - minimum (not available for Vega20 and Navi1x) and maximum memory
 * clock labeled OD_MCLK
 * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
 * They can be used to calibrate the sclk voltage curve. This is
 * available for Vega20 and NV1X.
 * - voltage offset for the six anchor points of the v/f curve labeled
 * OD_VDDC_CURVE. They can be used to calibrate the v/f curve. This
 * is only available for some SMU13 ASICs.
 * - voltage offset (in mV) applied on target voltage calculation.
 * This is available for Sienna Cichlid, Navy Flounder and Dimgrey
 * Cavefish. For these ASICs, the target voltage calculation can be
 * illustrated by "voltage = voltage calculated from v/f curve +
 * overdrive vddgfx offset"
 * - a list of valid ranges for sclk, mclk, and voltage curve points
 * Reading the file will display:
 * - minimum and maximum engine clock labeled OD_SCLK
 * - a list of valid ranges for sclk labeled OD_RANGE
 * Reading the file will display:
 * - minimum and maximum engine clock labeled OD_SCLK
 * - minimum and maximum core clocks labeled OD_CCLK
 * - a list of valid ranges for sclk and cclk labeled OD_RANGE
 * To manually adjust these settings:
 * - First select manual using power_dpm_force_performance_level
 * - For clock frequency setting, enter a new value by writing a
 * string that contains "s/m index clock" to the file. The index
 * should be 0 to set the minimum clock and 1 to set the maximum
 * clock. E.g., "s 0 500" will update the minimum sclk to be 500 MHz.
 * "m 1 800" will update the maximum mclk to be 800 MHz. For core
 * clocks on VanGogh, the string contains "p core index clock".
 * E.g., "p 2 0 800" would set the minimum core clock on core
 * For sclk voltage curve,
 * - For NV1X, enter the new values by writing a string that
 * contains "vc point clock voltage" to the file. The points
 * are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will update
 * point1 with clock set as 300 MHz and voltage as 600 mV. "vc 2
 * 1000 1000" will update point3 with clock set as 1000 MHz and
 * - For SMU13 ASICs, enter the new values by writing a string that
 * contains "vc anchor_point_index voltage_offset" to the file.
 * There are six anchor points in total defined on the v/f curve with
 * - "vc 0 10" will update the voltage offset for point1 as 10 mV.
 * - "vc 5 -10" will update the voltage offset for point6 as -10 mV.
 * To update the voltage offset applied for gfxclk/voltage calculation,
 * enter the new value by writing a string that contains "vo offset".
 * This is supported by Sienna Cichlid, Navy Flounder and Dimgrey Cavefish,
 * and the offset can be a positive or negative value.
 * - When you have edited all of the states as needed, write "c" (commit)
 * to the file to commit your changes
 * - If you want to reset to the default power levels, write "r" (reset)
 * to the file to reset them
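 *
 * For example, on an ASIC that exposes min/max sclk controls and the
 * vddgfx offset (which command set applies depends on the ASIC; the values
 * are illustrative only and must fall within the reported OD_RANGE; run
 * from the device's sysfs directory):
 *
 * .. code-block:: bash
 *
 *	echo manual > power_dpm_force_performance_level
 *	echo "s 0 500" > pp_od_clk_voltage
 *	echo "s 1 2000" > pp_od_clk_voltage
 *	echo "vo -25" > pp_od_clk_voltage
 *	echo "c" > pp_od_clk_voltage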
static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t parameter_size = 0;
	const char delimiter[3] = {' ', '\n', '\0'};
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
		type = PP_OD_EDIT_SCLK_VDDC_TABLE;
	else if (*buf == 'p')
		type = PP_OD_EDIT_CCLK_VDDC_TABLE;
	else if (*buf == 'm')
		type = PP_OD_EDIT_MCLK_VDDC_TABLE;
		type = PP_OD_RESTORE_DEFAULT_TABLE;
	else if (*buf == 'c')
		type = PP_OD_COMMIT_DPM_TABLE;
	else if (!strncmp(buf, "vc", 2))
		type = PP_OD_EDIT_VDDC_CURVE;
	else if (!strncmp(buf, "vo", 2))
		type = PP_OD_EDIT_VDDGFX_OFFSET;
	memcpy(buf_cpy, buf, count+1);
	if ((type == PP_OD_EDIT_VDDC_CURVE) ||
	    (type == PP_OD_EDIT_VDDGFX_OFFSET))
		while (isspace(*++tmp_str));
	while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
		if (strlen(sub_str) == 0)
		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
		while (isspace(*tmp_str))
	ret = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
		if (amdgpu_dpm_odn_edit_dpm_table(adev, type,
				parameter, parameter_size))
	if (type == PP_OD_COMMIT_DPM_TABLE) {
		if (amdgpu_dpm_dispatch_task(adev,
				AMD_PP_TASK_READJUST_POWER_STATE,
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	enum pp_clock_type od_clocks[6] = {
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	ret = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	for (clk_index = 0 ; clk_index < 6 ; clk_index++) {
		ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
	if (ret == -ENOENT) {
		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
		size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
		size = sysfs_emit(buf, "\n");
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
 * The amdgpu driver provides a sysfs API for adjusting which powerplay
 * features are enabled. The file pp_features is used for this, and
 * it is only available for Vega10 and later dGPUs.
 * Reading back the file will show you the following:
 * - Current ppfeature masks
 * - List of all supported powerplay features with their naming,
 * bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
 * To manually enable or disable a specific feature, just set or clear
 * the corresponding bit from the original ppfeature masks and write the
 * new ppfeature masks.
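 *
 * For example, the mask value below is purely illustrative; the valid bits
 * and their meaning come from reading the file first (card0 is an
 * assumption):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_features
 *	echo 0x3fffffff > /sys/class/drm/card0/device/pp_features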
static ssize_t amdgpu_set_pp_features(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t featuremask;
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	ret = kstrtou64(buf, 0, &featuremask);
	ret = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
static ssize_t amdgpu_get_pp_features(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	ret = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	size = amdgpu_dpm_get_ppfeature_status(adev, buf);
		size = sysfs_emit(buf, "\n");
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
 * The amdgpu driver provides a sysfs API for adjusting which power levels
 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
 * The pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
 * Vega10 and later ASICs.
 * The pp_dpm_fclk interface is only available for Vega20 and later ASICs.
 * Reading back the files will show you the available power levels within
 * the power state and the clock information for those levels.
 * To manually adjust these states, first select manual using
 * power_dpm_force_performance_level.
 * Secondly, enter a new value for each level by writing a string that
 * contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie"
 * .. code-block:: bash
 *	echo "4 5 6" > pp_dpm_sclk
 * will enable sclk levels 4, 5, and 6.
 * NOTE: changing the dcefclk max dpm level is not supported at present
static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
		enum pp_clock_type type,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	ret = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
		size = amdgpu_dpm_print_clock_levels(adev, type, buf);
		size = sysfs_emit(buf, "\n");
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
 * Worst case: 32 bits individually specified, in octal at 12 characters
 * per line (+1 for \n).
#define AMDGPU_MASK_BUF_MAX (32 * 13)
static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
	unsigned long level;
	char *sub_str = NULL;
	char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
	const char delimiter[3] = {' ', '\n', '\0'};
	bytes = min(count, sizeof(buf_cpy) - 1);
	memcpy(buf_cpy, buf, bytes);
	buf_cpy[bytes] = '\0';
	while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
		if (strlen(sub_str)) {
			ret = kstrtoul(sub_str, 0, &level);
			if (ret || level > 31)
			*mask |= 1 << level;
static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
		enum pp_clock_type type,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	ret = amdgpu_read_mask(buf, count, &mask);
	ret = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	ret = amdgpu_dpm_force_clock_level(adev, type, mask);
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
	return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
	return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count);
static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
	return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);
static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
	return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);
static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
		struct device_attribute *attr,
	return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);
static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
		struct device_attribute *attr,
	return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);
static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
		struct device_attribute *attr,
	return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);
static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
		struct device_attribute *attr,
	return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);
static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
		struct device_attribute *attr,
	return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);
static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
		struct device_attribute *attr,
	return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
static ssize_t amdgpu_get_pp_dpm_vclk1(struct device *dev,
		struct device_attribute *attr,
	return amdgpu_get_pp_dpm_clock(dev, PP_VCLK1, buf);
static ssize_t amdgpu_set_pp_dpm_vclk1(struct device *dev,
		struct device_attribute *attr,
	return amdgpu_set_pp_dpm_clock(dev, PP_VCLK1, buf, count);
static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
		struct device_attribute *attr,
	return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);
static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
		struct device_attribute *attr,
	return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
static ssize_t amdgpu_get_pp_dpm_dclk1(struct device *dev,
		struct device_attribute *attr,
	return amdgpu_get_pp_dpm_clock(dev, PP_DCLK1, buf);
static ssize_t amdgpu_set_pp_dpm_dclk1(struct device *dev,
		struct device_attribute *attr,
	return amdgpu_set_pp_dpm_clock(dev, PP_DCLK1, buf, count);
static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
		struct device_attribute *attr,
	return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf);
static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
		struct device_attribute *attr,
	return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);
static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
	return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);
static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
	return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);
static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	ret = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	value = amdgpu_dpm_get_sclk_od(adev);
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
	return sysfs_emit(buf, "%d\n", value);
static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	ret = kstrtol(buf, 0, &value);
	ret = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	ret = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	value = amdgpu_dpm_get_mclk_od(adev);
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
	return sysfs_emit(buf, "%d\n", value);
static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	ret = kstrtol(buf, 0, &value);
	ret = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
 * DOC: pp_power_profile_mode
 * The amdgpu driver provides a sysfs API for adjusting the heuristics
 * related to switching between power levels in a power state. The file
 * pp_power_profile_mode is used for this.
 * Reading this file outputs a list of all of the predefined power profiles
 * and the relevant heuristics settings for that profile.
 * To select a profile or create a custom profile, first select manual using
 * power_dpm_force_performance_level. Writing the number of a predefined
 * profile to pp_power_profile_mode will enable those heuristics. To
 * create a custom set of heuristics, write a string of numbers to the file
 * starting with the number of the custom profile along with a setting
 * for each heuristic parameter. Due to differences across ASIC families
 * the heuristic parameters vary from family to family.
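 *
 * For example, assuming profile index 1 corresponds to a predefined
 * profile on the ASIC in question (indices differ per ASIC, so read the
 * file first; run from the device's sysfs directory):
 *
 * .. code-block:: bash
 *
 *	echo manual > power_dpm_force_performance_level
 *	echo 1 > pp_power_profile_mode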
static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	ret = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	size = amdgpu_dpm_get_power_profile_mode(adev, buf);
		size = sysfs_emit(buf, "\n");
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t parameter_size = 0;
	char *sub_str, buf_cpy[128];
	long int profile_mode = 0;
	const char delimiter[3] = {' ', '\n', '\0'};
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	ret = kstrtol(tmp, 0, &profile_mode);
	if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		if (count < 2 || count > 127)
		while (isspace(*++buf))
		memcpy(buf_cpy, buf, count-i);
		while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
			if (strlen(sub_str) == 0)
			ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
			while (isspace(*tmp_str))
	parameter[parameter_size] = profile_mode;
	ret = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
 * DOC: gpu_busy_percent
 * The amdgpu driver provides a sysfs API for reading how busy the GPU
 * is as a percentage. The file gpu_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
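 *
 * For example (card0 is an assumption; the file simply returns an integer
 * percentage):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/gpu_busy_percent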
static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int r, value, size = sizeof(value);
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	r = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	/* read the IP busy sensor */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
			(void *)&value, &size);
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
	return sysfs_emit(buf, "%d\n", value);
 * DOC: mem_busy_percent
 * The amdgpu driver provides a sysfs API for reading how busy the VRAM
 * is as a percentage. The file mem_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	int r, value, size = sizeof(value);
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	r = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	/* read the IP busy sensor */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
			(void *)&value, &size);
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
	return sysfs_emit(buf, "%d\n", value);
 * The amdgpu driver provides a sysfs API for estimating how much data
 * has been received and sent by the GPU in the last second through PCIe.
 * The file pcie_bw is used for this.
 * The Perf counters count the number of received and sent messages and return
 * those values, as well as the maximum payload size of a PCIe packet (mps).
 * Note that it is not possible to easily and quickly obtain the size of each
 * packet transmitted, so we output the max payload size (mps) to allow for
 * quick estimation of the PCIe bandwidth usage.
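 *
 * A rough upper bound on the bandwidth used in the last second can be
 * estimated from the two counters and the mps, e.g. (the awk expression
 * below is only a sketch; card0 is an assumption):
 *
 * .. code-block:: bash
 *
 *	# bytes/s <= (received + sent) * mps
 *	awk '{print ($1 + $2) * $3 " bytes/s (upper bound)"}' /sys/class/drm/card0/device/pcie_bw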
static ssize_t amdgpu_get_pcie_bw(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t count0 = 0, count1 = 0;
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	if (adev->flags & AMD_IS_APU)
	if (!adev->asic_funcs->get_pcie_usage)
	ret = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
	return sysfs_emit(buf, "%llu %llu %i\n",
			count0, count1, pcie_get_mps(adev->pdev));
 * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU.
 * The file unique_id is used for this.
 * This will provide a unique ID that will persist from machine to machine.
 * NOTE: This will only work for GFX9 and newer. This file will be absent
 * on unsupported ASICs (GFX8 and older).
static ssize_t amdgpu_get_unique_id(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	if (adev->unique_id)
		return sysfs_emit(buf, "%016llx\n", adev->unique_id);
 * DOC: thermal_throttling_logging
 * Thermal throttling pulls down the clock frequency and thus the performance.
 * It's a useful mechanism to protect the chip from overheating. Since it
 * impacts performance, the user controls whether it is enabled and if so,
 * the log frequency.
 * Reading back the file shows you the status (enabled or disabled) and
 * the interval (in seconds) between each thermal logging.
 * Writing an integer to the file sets a new logging interval, in seconds.
 * The value should be between 1 and 3600. If the value is less than 1,
 * thermal logging is disabled. Values greater than 3600 are ignored.
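 *
 * For example (run from the device's sysfs directory; card path varies
 * per system):
 *
 * .. code-block:: bash
 *
 *	echo 60 > thermal_throttling_logging   # log at most once per 60 seconds
 *	echo 0 > thermal_throttling_logging    # disable thermal throttling logging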
static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
			adev_to_drm(adev)->unique,
			atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
			adev->throttling_logging_rs.interval / HZ + 1);
static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	long throttling_logging_interval;
	unsigned long flags;
	ret = kstrtol(buf, 0, &throttling_logging_interval);
	if (throttling_logging_interval > 3600)
	if (throttling_logging_interval > 0) {
		raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
		 * Reset the ratelimit timer internals.
		 * This can effectively restart the timer.
		adev->throttling_logging_rs.interval =
			(throttling_logging_interval - 1) * HZ;
		adev->throttling_logging_rs.begin = 0;
		adev->throttling_logging_rs.printed = 0;
		adev->throttling_logging_rs.missed = 0;
		raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
		atomic_set(&adev->throttling_logging_enabled, 1);
		atomic_set(&adev->throttling_logging_enabled, 0);
 * DOC: apu_thermal_cap
 * The amdgpu driver provides a sysfs API for retrieving/updating the thermal
 * limit temperature in millidegrees Celsius.
 * Reading back the file shows you the current core limit value.
 * Writing an integer to the file sets a new thermal limit. The value
 * should be between 0 and 100. If the value is less than 0 or greater
 * than 100, then the write request will be ignored.
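 *
 * For example (the written value must be between 0 and 100; card0 is an
 * assumption):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/apu_thermal_cap
 *	echo 95 > /sys/class/drm/card0/device/apu_thermal_cap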
static ssize_t amdgpu_get_apu_thermal_cap(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ret = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	ret = amdgpu_dpm_get_apu_thermal_limit(adev, &limit);
		size = sysfs_emit(buf, "%u\n", limit);
		size = sysfs_emit(buf, "failed to get thermal limit\n");
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
static ssize_t amdgpu_set_apu_thermal_cap(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	ret = kstrtou32(buf, 10, &value);
		dev_err(dev, "Invalid argument !\n");
	ret = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	ret = amdgpu_dpm_set_apu_thermal_limit(adev, value);
		dev_err(dev, "failed to update thermal limit\n");
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
 * The amdgpu driver provides a sysfs API for retrieving current GPU
 * metrics data. The file gpu_metrics is used for this. Reading the
 * file will dump all the current GPU metrics data.
 * These data include temperature, frequency, engine utilization,
 * power consumption, throttler status, fan speed and CPU core statistics
 * (available for APUs only). That is, it will give a snapshot of all sensors
static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
		struct device_attribute *attr,
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	ret = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;
	memcpy(buf, gpu_metrics, size);
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
static int amdgpu_device_read_powershift(struct amdgpu_device *adev,
		uint32_t *ss_power, bool dgpu_share)
	struct drm_device *ddev = adev_to_drm(adev);
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	r = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
				(void *)ss_power, &size);
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
				(void *)ss_power, &size);
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
static int amdgpu_show_powershift_percent(struct device *dev,
		char *buf, bool dgpu_share)
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	r = amdgpu_device_read_powershift(adev, &ss_power, dgpu_share);
	if (r == -EOPNOTSUPP) {
		/* sensor not available on dGPU, try to read from APU */
		mutex_lock(&mgpu_info.mutex);
		for (i = 0; i < mgpu_info.num_gpu; i++) {
			if (mgpu_info.gpu_ins[i].adev->flags & AMD_IS_APU) {
				adev = mgpu_info.gpu_ins[i].adev;
		mutex_unlock(&mgpu_info.mutex);
		r = amdgpu_device_read_powershift(adev, &ss_power, dgpu_share);
	r = sysfs_emit(buf, "%u%%\n", ss_power);
 * DOC: smartshift_apu_power
 * The amdgpu driver provides a sysfs API for reporting the APU power
 * shift in percentage if the platform supports smartshift. A value of 0 means
 * that there is no powershift, and values between 1 and 100 mean that power
 * is shifted to the APU; the percentage of boost is with respect to the APU
 * power limit on the platform.
static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr,
	return amdgpu_show_powershift_percent(dev, buf, false);
 * DOC: smartshift_dgpu_power
 * The amdgpu driver provides a sysfs API for reporting the dGPU power
 * shift in percentage if the platform supports smartshift. A value of 0 means
 * that there is no powershift, and values between 1 and 100 mean that power
 * is shifted to the dGPU; the percentage of boost is with respect to the dGPU
 * power limit on the platform.
static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr,
	return amdgpu_show_powershift_percent(dev, buf, true);
 * DOC: smartshift_bias
 * The amdgpu driver provides a sysfs API for reporting the
 * smartshift (SS2.0) bias level. The value ranges from -100 to 100
 * and the default is 0. -100 sets maximum preference to the APU
 * and 100 sets maximum preference to the dGPU.
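 *
 * For example, a negative value biases power toward the APU (card0 is an
 * assumption):
 *
 * .. code-block:: bash
 *
 *	echo -50 > /sys/class/drm/card0/device/smartshift_bias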
static ssize_t amdgpu_get_smartshift_bias(struct device *dev,
		struct device_attribute *attr,
	r = sysfs_emit(buf, "%d\n", amdgpu_smartshift_bias);
static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t count)
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	if (amdgpu_in_reset(adev))
	if (adev->in_suspend && !adev->in_runpm)
	r = pm_runtime_get_sync(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	r = kstrtoint(buf, 10, &bias);
	if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
		bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
	else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
		bias = AMDGPU_SMARTSHIFT_MIN_BIAS;
	amdgpu_smartshift_bias = bias;
	/* TODO: update bias level with SMU message */
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
		uint32_t mask, enum amdgpu_device_attr_states *states)
	if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
		*states = ATTR_STATE_UNSUPPORTED;
static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
		uint32_t mask, enum amdgpu_device_attr_states *states)
	uint32_t ss_power, size;
	if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
		*states = ATTR_STATE_UNSUPPORTED;
	else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
			(void *)&ss_power, &size))
		*states = ATTR_STATE_UNSUPPORTED;
	else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
			(void *)&ss_power, &size))
		*states = ATTR_STATE_UNSUPPORTED;
static struct amdgpu_device_attr amdgpu_device_attrs[] = {
	AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(apu_thermal_cap, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power, ATTR_FLAG_BASIC,
			      .attr_update = ss_power_attr_update),
	AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power, ATTR_FLAG_BASIC,
			      .attr_update = ss_power_attr_update),
	AMDGPU_DEVICE_ATTR_RW(smartshift_bias, ATTR_FLAG_BASIC,
			      .attr_update = ss_bias_attr_update),
static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
		uint32_t mask, enum amdgpu_device_attr_states *states)
	struct device_attribute *dev_attr = &attr->dev_attr;
	uint32_t mp1_ver = adev->ip_versions[MP1_HWIP][0];
	uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
	const char *attr_name = dev_attr->attr.name;
	if (!(attr->flags & mask)) {
		*states = ATTR_STATE_UNSUPPORTED;
#define DEVICE_ATTR_IS(_name) (!strcmp(attr_name, #_name))
	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
		if (gc_ver < IP_VERSION(9, 0, 0))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
		if (gc_ver < IP_VERSION(9, 0, 0) ||
		    gc_ver == IP_VERSION(9, 4, 1) ||
		    gc_ver == IP_VERSION(9, 4, 2))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
		if (mp1_ver < IP_VERSION(10, 0, 0))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
		*states = ATTR_STATE_UNSUPPORTED;
		if (amdgpu_dpm_is_overdrive_supported(adev))
			*states = ATTR_STATE_SUPPORTED;
	} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
		if (adev->flags & AMD_IS_APU || gc_ver == IP_VERSION(9, 0, 1))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pcie_bw)) {
		/* PCIe Perf counters won't work on APU nodes */
		if (adev->flags & AMD_IS_APU)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(unique_id)) {
		case IP_VERSION(9, 0, 1):
		case IP_VERSION(9, 4, 0):
		case IP_VERSION(9, 4, 1):
		case IP_VERSION(9, 4, 2):
		case IP_VERSION(10, 3, 0):
		case IP_VERSION(11, 0, 0):
		case IP_VERSION(11, 0, 1):
		case IP_VERSION(11, 0, 2):
			*states = ATTR_STATE_SUPPORTED;
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_features)) {
		if (adev->flags & AMD_IS_APU || gc_ver < IP_VERSION(9, 0, 0))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(gpu_metrics)) {
		if (gc_ver < IP_VERSION(9, 1, 0))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
		      gc_ver == IP_VERSION(10, 3, 0) ||
		      gc_ver == IP_VERSION(10, 1, 2) ||
		      gc_ver == IP_VERSION(11, 0, 0) ||
		      gc_ver == IP_VERSION(11, 0, 2) ||
		      gc_ver == IP_VERSION(11, 0, 3)))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
		if (!((gc_ver == IP_VERSION(10, 3, 1) ||
		       gc_ver == IP_VERSION(10, 3, 0) ||
		       gc_ver == IP_VERSION(11, 0, 2) ||
		       gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
		      gc_ver == IP_VERSION(10, 3, 0) ||
		      gc_ver == IP_VERSION(10, 1, 2) ||
		      gc_ver == IP_VERSION(11, 0, 0) ||
		      gc_ver == IP_VERSION(11, 0, 2) ||
		      gc_ver == IP_VERSION(11, 0, 3)))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
		if (!((gc_ver == IP_VERSION(10, 3, 1) ||
		       gc_ver == IP_VERSION(10, 3, 0) ||
		       gc_ver == IP_VERSION(11, 0, 2) ||
		       gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
		if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
			*states = ATTR_STATE_UNSUPPORTED;
		else if (gc_ver == IP_VERSION(10, 3, 0) && amdgpu_sriov_vf(adev))
			*states = ATTR_STATE_UNSUPPORTED;
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		/* the MI series cards do not support standalone mclk/socclk/fclk level setting */
		if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
		    DEVICE_ATTR_IS(pp_dpm_socclk) ||
		    DEVICE_ATTR_IS(pp_dpm_fclk)) {
			dev_attr->attr.mode &= ~S_IWUGO;
			dev_attr->store = NULL;
	case IP_VERSION(10, 3, 0):
		if (DEVICE_ATTR_IS(power_dpm_force_performance_level) &&
		    amdgpu_sriov_vf(adev)) {
			dev_attr->attr.mode &= ~0222;
			dev_attr->store = NULL;
	if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
		/* SMU MP1 does not support dcefclk level setting */
		if (gc_ver >= IP_VERSION(10, 0, 0)) {
			dev_attr->attr.mode &= ~S_IWUGO;
			dev_attr->store = NULL;
	/* setting should not be allowed from VF if not in one VF mode */
	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
		dev_attr->attr.mode &= ~S_IWUGO;
		dev_attr->store = NULL;
#undef DEVICE_ATTR_IS
static int amdgpu_device_attr_create(struct amdgpu_device *adev,
		struct amdgpu_device_attr *attr,
		uint32_t mask, struct list_head *attr_list)
	struct device_attribute *dev_attr = &attr->dev_attr;
	const char *name = dev_attr->attr.name;
	enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
	struct amdgpu_device_attr_entry *attr_entry;
	int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
		uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
	attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
	ret = attr_update(adev, attr, mask, &attr_states);
		dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
	if (attr_states == ATTR_STATE_UNSUPPORTED)
	ret = device_create_file(adev->dev, dev_attr);
		dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
	attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
	attr_entry->attr = attr;
	INIT_LIST_HEAD(&attr_entry->entry);
	list_add_tail(&attr_entry->entry, attr_list);
static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
	struct device_attribute *dev_attr = &attr->dev_attr;
	device_remove_file(adev->dev, dev_attr);
static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
		struct list_head *attr_list);
static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
		struct amdgpu_device_attr *attrs,
		struct list_head *attr_list)
	for (i = 0; i < counts; i++) {
		ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
	amdgpu_device_attr_remove_groups(adev, attr_list);
static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
		struct list_head *attr_list)
	struct amdgpu_device_attr_entry *entry, *entry_tmp;
	if (list_empty(attr_list))
	list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
		amdgpu_device_attr_remove(adev, entry->attr);
		list_del(&entry->entry);
2298 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
2299 struct device_attribute *attr,
2302 struct amdgpu_device *adev = dev_get_drvdata(dev);
2303 int channel = to_sensor_dev_attr(attr)->index;
2304 int r, temp = 0, size = sizeof(temp);
2306 if (amdgpu_in_reset(adev))
2308 if (adev->in_suspend && !adev->in_runpm)
2311 if (channel >= PP_TEMP_MAX)
2314 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2316 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2321 case PP_TEMP_JUNCTION:
2322 /* get current junction temperature */
2323 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
2324 (void *)&temp, &size);
2327 /* get current edge temperature */
2328 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
2329 (void *)&temp, &size);
2332 /* get current memory temperature */
2333 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
2334 (void *)&temp, &size);
2341 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2342 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2347 return sysfs_emit(buf, "%d\n", temp);
2350 static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
2351 struct device_attribute *attr,
2354 struct amdgpu_device *adev = dev_get_drvdata(dev);
2355 int hyst = to_sensor_dev_attr(attr)->index;
2359 temp = adev->pm.dpm.thermal.min_temp;
2361 temp = adev->pm.dpm.thermal.max_temp;
2363 return sysfs_emit(buf, "%d\n", temp);
2366 static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
2367 struct device_attribute *attr,
2370 struct amdgpu_device *adev = dev_get_drvdata(dev);
2371 int hyst = to_sensor_dev_attr(attr)->index;
2375 temp = adev->pm.dpm.thermal.min_hotspot_temp;
2377 temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
2379 return sysfs_emit(buf, "%d\n", temp);
2382 static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
2383 struct device_attribute *attr,
2386 struct amdgpu_device *adev = dev_get_drvdata(dev);
2387 int hyst = to_sensor_dev_attr(attr)->index;
2391 temp = adev->pm.dpm.thermal.min_mem_temp;
2393 temp = adev->pm.dpm.thermal.max_mem_crit_temp;
2395 return sysfs_emit(buf, "%d\n", temp);
2398 static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
2399 struct device_attribute *attr,
2402 int channel = to_sensor_dev_attr(attr)->index;
2404 if (channel >= PP_TEMP_MAX)
2407 return sysfs_emit(buf, "%s\n", temp_label[channel].label);
2410 static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
2411 struct device_attribute *attr,
2414 struct amdgpu_device *adev = dev_get_drvdata(dev);
2415 int channel = to_sensor_dev_attr(attr)->index;
2418 if (channel >= PP_TEMP_MAX)
2422 case PP_TEMP_JUNCTION:
2423 temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
2426 temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
2429 temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
2433 return sysfs_emit(buf, "%d\n", temp);
2436 static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
2437 struct device_attribute *attr,
2440 struct amdgpu_device *adev = dev_get_drvdata(dev);
2444 if (amdgpu_in_reset(adev))
2446 if (adev->in_suspend && !adev->in_runpm)
2449 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2451 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2455 ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2457 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2458 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2463 return sysfs_emit(buf, "%u\n", pwm_mode);
2466 static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
2467 struct device_attribute *attr,
2471 struct amdgpu_device *adev = dev_get_drvdata(dev);
2475 if (amdgpu_in_reset(adev))
2477 if (adev->in_suspend && !adev->in_runpm)
2480 err = kstrtoint(buf, 10, &value);
2484 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2486 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2490 ret = amdgpu_dpm_set_fan_control_mode(adev, value);
2492 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2493 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2501 static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
2502 struct device_attribute *attr,
2505 return sysfs_emit(buf, "%i\n", 0);
2508 static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
2509 struct device_attribute *attr,
2512 return sysfs_emit(buf, "%i\n", 255);
2515 static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
2516 struct device_attribute *attr,
2517 const char *buf, size_t count)
2519 struct amdgpu_device *adev = dev_get_drvdata(dev);
2524 if (amdgpu_in_reset(adev))
2526 if (adev->in_suspend && !adev->in_runpm)
2529 err = kstrtou32(buf, 10, &value);
2533 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2535 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2539 err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2543 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2544 pr_info("manual fan speed control should be enabled first\n");
2549 err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
2552 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2553 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2561 static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
2562 struct device_attribute *attr,
2565 struct amdgpu_device *adev = dev_get_drvdata(dev);
2569 if (amdgpu_in_reset(adev))
2571 if (adev->in_suspend && !adev->in_runpm)
2574 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2576 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2580 err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
2582 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2583 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2588 return sysfs_emit(buf, "%i\n", speed);
2591 static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
2592 struct device_attribute *attr,
2595 struct amdgpu_device *adev = dev_get_drvdata(dev);
2599 if (amdgpu_in_reset(adev))
2601 if (adev->in_suspend && !adev->in_runpm)
2604 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2606 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2610 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
2612 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2613 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2618 return sysfs_emit(buf, "%i\n", speed);
2621 static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
2622 struct device_attribute *attr,
2625 struct amdgpu_device *adev = dev_get_drvdata(dev);
2627 u32 size = sizeof(min_rpm);
2630 if (amdgpu_in_reset(adev))
2632 if (adev->in_suspend && !adev->in_runpm)
2635 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2637 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2641 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
2642 (void *)&min_rpm, &size);
2644 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2645 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2650 return sysfs_emit(buf, "%d\n", min_rpm);
2653 static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
2654 struct device_attribute *attr,
2657 struct amdgpu_device *adev = dev_get_drvdata(dev);
2659 u32 size = sizeof(max_rpm);
2662 if (amdgpu_in_reset(adev))
2664 if (adev->in_suspend && !adev->in_runpm)
2667 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2669 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2673 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
2674 (void *)&max_rpm, &size);
2676 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2677 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2682 return sysfs_emit(buf, "%d\n", max_rpm);
2685 static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
2686 struct device_attribute *attr,
2689 struct amdgpu_device *adev = dev_get_drvdata(dev);
2693 if (amdgpu_in_reset(adev))
2695 if (adev->in_suspend && !adev->in_runpm)
2698 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2700 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2704 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
2706 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2707 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2712 return sysfs_emit(buf, "%i\n", rpm);
2715 static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
2716 struct device_attribute *attr,
2717 const char *buf, size_t count)
2719 struct amdgpu_device *adev = dev_get_drvdata(dev);
2724 if (amdgpu_in_reset(adev))
2726 if (adev->in_suspend && !adev->in_runpm)
2729 err = kstrtou32(buf, 10, &value);
2733 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2735 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2739 err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2743 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2748 err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
2751 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2752 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2760 static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
2761 struct device_attribute *attr,
2764 struct amdgpu_device *adev = dev_get_drvdata(dev);
2768 if (amdgpu_in_reset(adev))
2770 if (adev->in_suspend && !adev->in_runpm)
2773 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2775 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2779 ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
2781 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2782 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2787 return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
2790 static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
2791 struct device_attribute *attr,
2795 struct amdgpu_device *adev = dev_get_drvdata(dev);
2800 if (amdgpu_in_reset(adev))
2802 if (adev->in_suspend && !adev->in_runpm)
2805 err = kstrtoint(buf, 10, &value);
2810 pwm_mode = AMD_FAN_CTRL_AUTO;
2811 else if (value == 1)
2812 pwm_mode = AMD_FAN_CTRL_MANUAL;
2816 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2818 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2822 err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
2824 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2825 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2833 static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
2834 struct device_attribute *attr,
2837 struct amdgpu_device *adev = dev_get_drvdata(dev);
2839 int r, size = sizeof(vddgfx);
2841 if (amdgpu_in_reset(adev))
2843 if (adev->in_suspend && !adev->in_runpm)
2846 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2848 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2852 /* get the voltage */
2853 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
2854 (void *)&vddgfx, &size);
2856 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2857 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2862 return sysfs_emit(buf, "%d\n", vddgfx);
2865 static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
2866 struct device_attribute *attr,
2869 return sysfs_emit(buf, "vddgfx\n");
2872 static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
2873 struct device_attribute *attr,
2876 struct amdgpu_device *adev = dev_get_drvdata(dev);
2878 int r, size = sizeof(vddnb);
2880 if (amdgpu_in_reset(adev))
2882 if (adev->in_suspend && !adev->in_runpm)
2885 /* only APUs have vddnb */
2886 if (!(adev->flags & AMD_IS_APU))
2889 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2891 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2895 /* get the voltage */
2896 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
2897 (void *)&vddnb, &size);
2899 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2900 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2905 return sysfs_emit(buf, "%d\n", vddnb);
2908 static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
2909 struct device_attribute *attr,
2912 return sysfs_emit(buf, "vddnb\n");
2915 static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
2916 struct device_attribute *attr,
2919 struct amdgpu_device *adev = dev_get_drvdata(dev);
2921 int r, size = sizeof(u32);
2924 if (amdgpu_in_reset(adev))
2926 if (adev->in_suspend && !adev->in_runpm)
2929 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2931 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2935 /* get the average power */
2936 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
2937 (void *)&query, &size);
2939 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2940 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2945 /* convert to microwatts: the upper 24 bits hold whole watts, the low byte is scaled as milliwatts */
2946 uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
2948 return sysfs_emit(buf, "%u\n", uw);
2951 static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
2952 struct device_attribute *attr,
2955 return sysfs_emit(buf, "%i\n", 0);
2959 static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
2960 struct device_attribute *attr,
2962 enum pp_power_limit_level pp_limit_level)
2964 struct amdgpu_device *adev = dev_get_drvdata(dev);
2965 enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
2970 if (amdgpu_in_reset(adev))
2972 if (adev->in_suspend && !adev->in_runpm)
2975 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2977 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2981 r = amdgpu_dpm_get_power_limit(adev, &limit,
2982 pp_limit_level, power_type);
2985 size = sysfs_emit(buf, "%u\n", limit * 1000000);
2987 size = sysfs_emit(buf, "\n");
2989 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2990 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2996 static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
2997 struct device_attribute *attr,
3000 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_MAX);
3004 static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
3005 struct device_attribute *attr,
3008 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_CURRENT);
3012 static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
3013 struct device_attribute *attr,
3016 return amdgpu_hwmon_show_power_cap_generic(dev, attr, buf, PP_PWR_LIMIT_DEFAULT);
3020 static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
3021 struct device_attribute *attr,
3024 struct amdgpu_device *adev = dev_get_drvdata(dev);
3025 uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
3027 if (gc_ver == IP_VERSION(10, 3, 1))
3028 return sysfs_emit(buf, "%s\n",
3029 to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
3030 "fastPPT" : "slowPPT");
3032 return sysfs_emit(buf, "PPT\n");
3035 static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
3036 struct device_attribute *attr,
3040 struct amdgpu_device *adev = dev_get_drvdata(dev);
3041 int limit_type = to_sensor_dev_attr(attr)->index;
3045 if (amdgpu_in_reset(adev))
3047 if (adev->in_suspend && !adev->in_runpm)
3050 if (amdgpu_sriov_vf(adev))
3053 err = kstrtou32(buf, 10, &value);
3057 value = value / 1000000; /* convert to watts */
3058 value |= limit_type << 24; /* encode the power limit type in the top byte */
3060 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
3062 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3066 err = amdgpu_dpm_set_power_limit(adev, value);
3068 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3069 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3077 static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
3078 struct device_attribute *attr,
3081 struct amdgpu_device *adev = dev_get_drvdata(dev);
3083 int r, size = sizeof(sclk);
3085 if (amdgpu_in_reset(adev))
3087 if (adev->in_suspend && !adev->in_runpm)
3090 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
3092 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3097 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
3098 (void *)&sclk, &size);
3100 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3101 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3106 return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
3109 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
3110 struct device_attribute *attr,
3113 return sysfs_emit(buf, "sclk\n");
3116 static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
3117 struct device_attribute *attr,
3120 struct amdgpu_device *adev = dev_get_drvdata(dev);
3122 int r, size = sizeof(mclk);
3124 if (amdgpu_in_reset(adev))
3126 if (adev->in_suspend && !adev->in_runpm)
3129 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
3131 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3136 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
3137 (void *)&mclk, &size);
3139 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3140 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3145 return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
3148 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
3149 struct device_attribute *attr,
3152 return sysfs_emit(buf, "mclk\n");
3158 * The amdgpu driver exposes the following sensor interfaces:
3160 * - GPU temperature (via the on-die sensor)
3164 * - Northbridge voltage (APUs only)
3170 * - GPU gfx/compute engine clock
3172 * - GPU memory clock (dGPU only)
3174 * hwmon interfaces for GPU temperature:
3176 * - temp[1-3]_input: the on-die GPU temperature in millidegrees Celsius
3177 * - temp2_input and temp3_input are supported on SOC15 dGPUs only
3179 * - temp[1-3]_label: temperature channel label
3180 * - temp2_label and temp3_label are supported on SOC15 dGPUs only
3182 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
3183 * - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
3185 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
3186 * - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
3188 * - temp[1-3]_emergency: temperature emergency max value (ASIC shutdown) in millidegrees Celsius
3189 * - these are supported on SOC15 dGPUs only
3191 * hwmon interfaces for GPU voltage:
3193 * - in0_input: the voltage on the GPU in millivolts
3195 * - in1_input: the voltage on the Northbridge in millivolts
3197 * hwmon interfaces for GPU power:
3199 * - power1_average: average power used by the SoC in microWatts. On APUs this includes the CPU.
3201 * - power1_cap_min: minimum cap supported in microWatts
3203 * - power1_cap_max: maximum cap supported in microWatts
3205 * - power1_cap: selected power cap in microWatts
3207 * hwmon interfaces for GPU fan:
3209 * - pwm1: pulse width modulation fan level (0-255)
3211 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
3213 * - pwm1_min: pulse width modulation fan control minimum level (0)
3215 * - pwm1_max: pulse width modulation fan control maximum level (255)
3217 * - fan1_min: minimum fan speed in revolutions/min (RPM)
3219 * - fan1_max: maximum fan speed in revolutions/min (RPM)
3221 * - fan1_input: fan speed in RPM
3223 * - fan[1-\*]_target: desired fan speed in revolutions/min (RPM)
3225 * - fan[1-\*]_enable: enable or disable the sensors. 1: Enable 0: Disable
3227 * NOTE: DO NOT set the fan speed via both the "pwm1" and "fan[1-\*]_target" interfaces at the same time;
3228 * the more recent write will override the earlier one.
3230 * hwmon interfaces for GPU clocks:
3232 * - freq1_input: the gfx/compute clock in hertz
3234 * - freq2_input: the memory clock in hertz
3236 * You can use hwmon tools like sensors to view this information on your system.
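 *
 * As a minimal illustration (the hwmon index below is an example only; match
 * the hwmonN whose "name" file reads "amdgpu" on your system), a small
 * userspace reader for the edge temperature could look like::
 *
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *           long mdeg;
 *           FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
 *
 *           if (!f)
 *                   return 1;
 *           if (fscanf(f, "%ld", &mdeg) == 1)
 *                   printf("edge temperature: %ld.%03ld C\n",
 *                          mdeg / 1000, mdeg % 1000);
 *           fclose(f);
 *           return 0;
 *   }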
3240 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
3241 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
3242 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
3243 static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
3244 static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
3245 static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
3246 static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
3247 static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
3248 static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
3249 static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
3250 static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
3251 static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
3252 static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
3253 static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
3254 static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
3255 static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
3256 static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
3257 static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
3258 static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
3259 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
3260 static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
3261 static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
3262 static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
3263 static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
3264 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
3265 static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
3266 static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
3267 static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
3268 static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
3269 static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
3270 static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
3271 static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
3272 static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
3273 static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
3274 static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
3275 static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
3276 static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
3277 static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
3278 static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1);
3279 static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
3280 static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
3281 static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
3282 static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
3283 static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
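/*
 * All hwmon attributes the driver can expose; hwmon_attributes_visible()
 * filters them per ASIC, fan presence, DPM state, and virtualization mode.
 */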
3285 static struct attribute *hwmon_attributes[] = {
3286 &sensor_dev_attr_temp1_input.dev_attr.attr,
3287 &sensor_dev_attr_temp1_crit.dev_attr.attr,
3288 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
3289 &sensor_dev_attr_temp2_input.dev_attr.attr,
3290 &sensor_dev_attr_temp2_crit.dev_attr.attr,
3291 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
3292 &sensor_dev_attr_temp3_input.dev_attr.attr,
3293 &sensor_dev_attr_temp3_crit.dev_attr.attr,
3294 &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
3295 &sensor_dev_attr_temp1_emergency.dev_attr.attr,
3296 &sensor_dev_attr_temp2_emergency.dev_attr.attr,
3297 &sensor_dev_attr_temp3_emergency.dev_attr.attr,
3298 &sensor_dev_attr_temp1_label.dev_attr.attr,
3299 &sensor_dev_attr_temp2_label.dev_attr.attr,
3300 &sensor_dev_attr_temp3_label.dev_attr.attr,
3301 &sensor_dev_attr_pwm1.dev_attr.attr,
3302 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
3303 &sensor_dev_attr_pwm1_min.dev_attr.attr,
3304 &sensor_dev_attr_pwm1_max.dev_attr.attr,
3305 &sensor_dev_attr_fan1_input.dev_attr.attr,
3306 &sensor_dev_attr_fan1_min.dev_attr.attr,
3307 &sensor_dev_attr_fan1_max.dev_attr.attr,
3308 &sensor_dev_attr_fan1_target.dev_attr.attr,
3309 &sensor_dev_attr_fan1_enable.dev_attr.attr,
3310 &sensor_dev_attr_in0_input.dev_attr.attr,
3311 &sensor_dev_attr_in0_label.dev_attr.attr,
3312 &sensor_dev_attr_in1_input.dev_attr.attr,
3313 &sensor_dev_attr_in1_label.dev_attr.attr,
3314 &sensor_dev_attr_power1_average.dev_attr.attr,
3315 &sensor_dev_attr_power1_cap_max.dev_attr.attr,
3316 &sensor_dev_attr_power1_cap_min.dev_attr.attr,
3317 &sensor_dev_attr_power1_cap.dev_attr.attr,
3318 &sensor_dev_attr_power1_cap_default.dev_attr.attr,
3319 &sensor_dev_attr_power1_label.dev_attr.attr,
3320 &sensor_dev_attr_power2_average.dev_attr.attr,
3321 &sensor_dev_attr_power2_cap_max.dev_attr.attr,
3322 &sensor_dev_attr_power2_cap_min.dev_attr.attr,
3323 &sensor_dev_attr_power2_cap.dev_attr.attr,
3324 &sensor_dev_attr_power2_cap_default.dev_attr.attr,
3325 &sensor_dev_attr_power2_label.dev_attr.attr,
3326 &sensor_dev_attr_freq1_input.dev_attr.attr,
3327 &sensor_dev_attr_freq1_label.dev_attr.attr,
3328 &sensor_dev_attr_freq2_input.dev_attr.attr,
3329 &sensor_dev_attr_freq2_label.dev_attr.attr,
3333 static umode_t hwmon_attributes_visible(struct kobject *kobj,
3334 struct attribute *attr, int index)
3336 struct device *dev = kobj_to_dev(kobj);
3337 struct amdgpu_device *adev = dev_get_drvdata(dev);
3338 umode_t effective_mode = attr->mode;
3339 uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
3341 /* under multi-VF mode, none of the hwmon attributes are supported */
3342 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
3345 /* under pp one VF mode, managing hwmon attributes is not supported */
3346 if (amdgpu_sriov_is_pp_one_vf(adev))
3347 effective_mode &= ~S_IWUSR;
3349 /* Skip fan attributes if fan is not present */
3350 if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3351 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3352 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3353 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3354 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3355 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3356 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3357 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3358 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3361 /* Skip fan attributes on APU */
3362 if ((adev->flags & AMD_IS_APU) &&
3363 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3364 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3365 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3366 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3367 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3368 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3369 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3370 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3371 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3374 /* Skip crit temp on APU */
3375 if ((((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ)) ||
3376 (gc_ver == IP_VERSION(9, 4, 3))) &&
3377 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3378 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
3381 /* Skip limit attributes if DPM is not enabled */
3382 if (!adev->pm.dpm_enabled &&
3383 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3384 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
3385 attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3386 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3387 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3388 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3389 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3390 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3391 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3392 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3393 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3396 /* mask fan attributes if we have no bindings for this asic to expose */
3397 if (((amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
3398 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
3399 ((amdgpu_dpm_get_fan_control_mode(adev, NULL) == -EOPNOTSUPP) &&
3400 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
3401 effective_mode &= ~S_IRUGO;
3403 if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
3404 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
3405 ((amdgpu_dpm_set_fan_control_mode(adev, U32_MAX) == -EOPNOTSUPP) &&
3406 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
3407 effective_mode &= ~S_IWUSR;
3409 /* not implemented yet for APUs other than GC 10.3.1 (vangogh) and 9.4.3 */
3410 if (((adev->family == AMDGPU_FAMILY_SI) ||
3411 ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)) &&
3412 (gc_ver != IP_VERSION(9, 4, 3)))) &&
3413 (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
3414 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
3415 attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
3416 attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
3419 /* not implemented yet for APUs with GC IP versions below 9.3.0 (Renoir) */
3420 if (((adev->family == AMDGPU_FAMILY_SI) ||
3421 ((adev->flags & AMD_IS_APU) && (gc_ver < IP_VERSION(9, 3, 0)))) &&
3422 (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
3425 /* hide max/min values if we can't both query and manage the fan */
3426 if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
3427 (amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
3428 (amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
3429 (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP)) &&
3430 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3431 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
3434 if ((amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
3435 (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP) &&
3436 (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3437 attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
3440 if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
3441 adev->family == AMDGPU_FAMILY_KV || /* not implemented yet */
3442 (gc_ver == IP_VERSION(9, 4, 3))) &&
3443 (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
3444 attr == &sensor_dev_attr_in0_label.dev_attr.attr))
3447 /* only APUs other than GC 9.4.3 have vddnb */
3448 if ((!(adev->flags & AMD_IS_APU) || (gc_ver == IP_VERSION(9, 4, 3))) &&
3449 (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
3450 attr == &sensor_dev_attr_in1_label.dev_attr.attr))
3453 /* no mclk on APUs other than GC 9.4.3 */
3454 if (((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(9, 4, 3))) &&
3455 (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
3456 attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
3459 if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0)) &&
3460 (gc_ver != IP_VERSION(9, 4, 3)) &&
3461 (attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
3462 attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
3463 attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
3464 attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
3467 /* hotspot temperature for GC 9.4.3 */
3468 if ((gc_ver == IP_VERSION(9, 4, 3)) &&
3469 (attr == &sensor_dev_attr_temp1_input.dev_attr.attr ||
3470 attr == &sensor_dev_attr_temp1_label.dev_attr.attr))
3473 /* only SOC15 dGPUs support hotspot and mem temperatures */
3474 if (((adev->flags & AMD_IS_APU) || gc_ver < IP_VERSION(9, 0, 0) ||
3475 (gc_ver == IP_VERSION(9, 4, 3))) &&
3476 (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
3477 attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
3478 attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
3479 attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
3480 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3481 attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3482 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr))
3485 /* only Vangogh has fast PPT limit and power labels */
3486 if (!(gc_ver == IP_VERSION(10, 3, 1)) &&
3487 (attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
3488 attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
3489 attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
3490 attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
3491 attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
3492 attr == &sensor_dev_attr_power2_label.dev_attr.attr))
3495 return effective_mode;
3498 static const struct attribute_group hwmon_attrgroup = {
3499 .attrs = hwmon_attributes,
3500 .is_visible = hwmon_attributes_visible,
3503 static const struct attribute_group *hwmon_groups[] = {
3508 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
3513 if (adev->pm.sysfs_initialized)
3516 INIT_LIST_HEAD(&adev->pm.pm_attr_list);
3518 if (adev->pm.dpm_enabled == 0)
3521 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
3524 if (IS_ERR(adev->pm.int_hwmon_dev)) {
3525 ret = PTR_ERR(adev->pm.int_hwmon_dev);
3527 "Unable to register hwmon device: %d\n", ret);
3531 switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
3532 case SRIOV_VF_MODE_ONE_VF:
3533 mask = ATTR_FLAG_ONEVF;
3535 case SRIOV_VF_MODE_MULTI_VF:
3538 case SRIOV_VF_MODE_BARE_METAL:
3540 mask = ATTR_FLAG_MASK_ALL;
3544 ret = amdgpu_device_attr_create_groups(adev,
3545 amdgpu_device_attrs,
3546 ARRAY_SIZE(amdgpu_device_attrs),
3548 &adev->pm.pm_attr_list);
3552 adev->pm.sysfs_initialized = true;
3557 void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
3559 if (adev->pm.int_hwmon_dev)
3560 hwmon_device_unregister(adev->pm.int_hwmon_dev);
3562 amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
3568 #if defined(CONFIG_DEBUG_FS)
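/*
 * Print the current per-core CPU clocks (only when cclk DPM is supported) as
 * part of the amdgpu_pm_info debugfs output.
 */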
3570 static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
3571 struct amdgpu_device *adev)
{
3575 uint32_t num_cpu_cores = amdgpu_dpm_get_num_cpu_cores(adev);
3577 if (amdgpu_dpm_is_cclk_dpm_supported(adev)) {
3578 p_val = kcalloc(num_cpu_cores, sizeof(uint16_t),
3581 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
3582 (void *)p_val, &size)) {
3583 for (i = 0; i < num_cpu_cores; i++)
3584 seq_printf(m, "\t%u MHz (CPU%d)\n",
3592 static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
3594 uint32_t mp1_ver = adev->ip_versions[MP1_HWIP][0];
3595 uint32_t gc_ver = adev->ip_versions[GC_HWIP][0];
3597 uint64_t value64 = 0;
3602 size = sizeof(value);
3603 seq_printf(m, "GFX Clocks and Power:\n");
3605 amdgpu_debugfs_prints_cpu_info(m, adev);
3607 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
3608 seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
3609 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
3610 seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
3611 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
3612 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
3613 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
3614 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
3615 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
3616 seq_printf(m, "\t%u mV (VDDGFX)\n", value);
3617 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
3618 seq_printf(m, "\t%u mV (VDDNB)\n", value);
3619 size = sizeof(uint32_t);
3620 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
3621 seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
3622 size = sizeof(value);
3623 seq_printf(m, "\n");
3626 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
3627 seq_printf(m, "GPU Temperature: %u C\n", value/1000);
3630 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
3631 seq_printf(m, "GPU Load: %u %%\n", value);
3633 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
3634 seq_printf(m, "MEM Load: %u %%\n", value);
3636 seq_printf(m, "\n");
3638 /* SMC feature mask */
3639 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
3640 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
3642 /* ASICs newer than CHIP_VEGA20 support these sensors */
3643 if (gc_ver != IP_VERSION(9, 4, 0) && mp1_ver > IP_VERSION(9, 0, 0)) {
3645 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
3647 seq_printf(m, "VCN: Disabled\n");
3649 seq_printf(m, "VCN: Enabled\n");
3650 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3651 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3652 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3653 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3656 seq_printf(m, "\n");
3659 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
3661 seq_printf(m, "UVD: Disabled\n");
3663 seq_printf(m, "UVD: Enabled\n");
3664 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3665 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3666 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3667 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3670 seq_printf(m, "\n");
3673 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
3675 seq_printf(m, "VCE: Disabled\n");
3677 seq_printf(m, "VCE: Enabled\n");
3678 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
3679 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
3687 static void amdgpu_parse_cg_state(struct seq_file *m, u64 flags)
3691 for (i = 0; clocks[i].flag; i++)
3692 seq_printf(m, "\t%s: %s\n", clocks[i].name,
3693 (flags & clocks[i].flag) ? "On" : "Off");
3696 static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
3698 struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
3699 struct drm_device *dev = adev_to_drm(adev);
3703 if (amdgpu_in_reset(adev))
3705 if (adev->in_suspend && !adev->in_runpm)
3708 r = pm_runtime_get_sync(dev->dev);
3710 pm_runtime_put_autosuspend(dev->dev);
3714 if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) {
3715 r = amdgpu_debugfs_pm_info_pp(m, adev);
3720 amdgpu_device_ip_get_clockgating_state(adev, &flags);
3722 seq_printf(m, "Clock Gating Flags Mask: 0x%llx\n", flags);
3723 amdgpu_parse_cg_state(m, flags);
3724 seq_printf(m, "\n");
3727 pm_runtime_mark_last_busy(dev->dev);
3728 pm_runtime_put_autosuspend(dev->dev);
3733 DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info);
3736 * amdgpu_pm_prv_buffer_read - Read memory region allocated to PMFW
3738 * Reads debug memory region allocated to PMFW
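 *
 * The buffer is exposed read-only as "amdgpu_pm_prv_buffer" under the DRM
 * minor's debugfs root by amdgpu_debugfs_pm_init().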
3740 static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
3741 size_t size, loff_t *pos)
3743 struct amdgpu_device *adev = file_inode(f)->i_private;
3744 size_t smu_prv_buf_size;
3748 if (amdgpu_in_reset(adev))
3750 if (adev->in_suspend && !adev->in_runpm)
3753 ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
3757 if (!smu_prv_buf || !smu_prv_buf_size)
3760 return simple_read_from_buffer(buf, size, pos, smu_prv_buf,
3764 static const struct file_operations amdgpu_debugfs_pm_prv_buffer_fops = {
3765 .owner = THIS_MODULE,
3766 .open = simple_open,
3767 .read = amdgpu_pm_prv_buffer_read,
3768 .llseek = default_llseek,
3773 void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
3775 #if defined(CONFIG_DEBUG_FS)
3776 struct drm_minor *minor = adev_to_drm(adev)->primary;
3777 struct dentry *root = minor->debugfs_root;
3779 if (!adev->pm.dpm_enabled)
3782 debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
3783 &amdgpu_debugfs_pm_info_fops);
3785 if (adev->pm.smu_prv_buffer_size > 0)
3786 debugfs_create_file_size("amdgpu_pm_prv_buffer", 0444, root,
3788 &amdgpu_debugfs_pm_prv_buffer_fops,
3789 adev->pm.smu_prv_buffer_size);
3791 amdgpu_dpm_stb_debug_fs_init(adev);