2 * Permission is hereby granted, free of charge, to any person obtaining a
3 * copy of this software and associated documentation files (the "Software"),
4 * to deal in the Software without restriction, including without limitation
5 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
6 * and/or sell copies of the Software, and to permit persons to whom the
7 * Software is furnished to do so, subject to the following conditions:
9 * The above copyright notice and this permission notice shall be included in
10 * all copies or substantial portions of the Software.
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
15 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
16 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
17 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
18 * OTHER DEALINGS IN THE SOFTWARE.
20 * Authors: Rafał Miłecki <zajec5@gmail.com>
21 * Alex Deucher <alexdeucher@gmail.com>
25 #include "amdgpu_drv.h"
26 #include "amdgpu_pm.h"
27 #include "amdgpu_dpm.h"
29 #include <linux/power_supply.h>
30 #include <linux/hwmon.h>
31 #include <linux/hwmon-sysfs.h>
33 #include "amd_powerplay.h"
35 static int amdgpu_debugfs_pm_init(struct amdgpu_device
*adev
);
37 void amdgpu_pm_acpi_event_handler(struct amdgpu_device
*adev
)
43 if (adev
->pm
.dpm_enabled
) {
44 mutex_lock(&adev
->pm
.mutex
);
45 if (power_supply_is_system_supplied() > 0)
46 adev
->pm
.dpm
.ac_power
= true;
48 adev
->pm
.dpm
.ac_power
= false;
49 if (adev
->pm
.funcs
->enable_bapm
)
50 amdgpu_dpm_enable_bapm(adev
, adev
->pm
.dpm
.ac_power
);
51 mutex_unlock(&adev
->pm
.mutex
);
55 static ssize_t
amdgpu_get_dpm_state(struct device
*dev
,
56 struct device_attribute
*attr
,
59 struct drm_device
*ddev
= dev_get_drvdata(dev
);
60 struct amdgpu_device
*adev
= ddev
->dev_private
;
61 enum amd_pm_state_type pm
;
63 if (adev
->pp_enabled
) {
64 pm
= amdgpu_dpm_get_current_power_state(adev
);
66 pm
= adev
->pm
.dpm
.user_state
;
68 return snprintf(buf
, PAGE_SIZE
, "%s\n",
69 (pm
== POWER_STATE_TYPE_BATTERY
) ? "battery" :
70 (pm
== POWER_STATE_TYPE_BALANCED
) ? "balanced" : "performance");
73 static ssize_t
amdgpu_set_dpm_state(struct device
*dev
,
74 struct device_attribute
*attr
,
78 struct drm_device
*ddev
= dev_get_drvdata(dev
);
79 struct amdgpu_device
*adev
= ddev
->dev_private
;
80 enum amd_pm_state_type state
;
82 if (strncmp("battery", buf
, strlen("battery")) == 0)
83 state
= POWER_STATE_TYPE_BATTERY
;
84 else if (strncmp("balanced", buf
, strlen("balanced")) == 0)
85 state
= POWER_STATE_TYPE_BALANCED
;
86 else if (strncmp("performance", buf
, strlen("performance")) == 0)
87 state
= POWER_STATE_TYPE_PERFORMANCE
;
93 if (adev
->pp_enabled
) {
94 amdgpu_dpm_dispatch_task(adev
, AMD_PP_EVENT_ENABLE_USER_STATE
, &state
, NULL
);
96 mutex_lock(&adev
->pm
.mutex
);
97 adev
->pm
.dpm
.user_state
= state
;
98 mutex_unlock(&adev
->pm
.mutex
);
100 /* Can't set dpm state when the card is off */
101 if (!(adev
->flags
& AMD_IS_PX
) ||
102 (ddev
->switch_power_state
== DRM_SWITCH_POWER_ON
))
103 amdgpu_pm_compute_clocks(adev
);
109 static ssize_t
amdgpu_get_dpm_forced_performance_level(struct device
*dev
,
110 struct device_attribute
*attr
,
113 struct drm_device
*ddev
= dev_get_drvdata(dev
);
114 struct amdgpu_device
*adev
= ddev
->dev_private
;
116 if ((adev
->flags
& AMD_IS_PX
) &&
117 (ddev
->switch_power_state
!= DRM_SWITCH_POWER_ON
))
118 return snprintf(buf
, PAGE_SIZE
, "off\n");
120 if (adev
->pp_enabled
) {
121 enum amd_dpm_forced_level level
;
123 level
= amdgpu_dpm_get_performance_level(adev
);
124 return snprintf(buf
, PAGE_SIZE
, "%s\n",
125 (level
== AMD_DPM_FORCED_LEVEL_AUTO
) ? "auto" :
126 (level
== AMD_DPM_FORCED_LEVEL_LOW
) ? "low" :
127 (level
== AMD_DPM_FORCED_LEVEL_HIGH
) ? "high" :
128 (level
== AMD_DPM_FORCED_LEVEL_MANUAL
) ? "manual" : "unknown");
130 enum amdgpu_dpm_forced_level level
;
132 level
= adev
->pm
.dpm
.forced_level
;
133 return snprintf(buf
, PAGE_SIZE
, "%s\n",
134 (level
== AMDGPU_DPM_FORCED_LEVEL_AUTO
) ? "auto" :
135 (level
== AMDGPU_DPM_FORCED_LEVEL_LOW
) ? "low" : "high");
139 static ssize_t
amdgpu_set_dpm_forced_performance_level(struct device
*dev
,
140 struct device_attribute
*attr
,
144 struct drm_device
*ddev
= dev_get_drvdata(dev
);
145 struct amdgpu_device
*adev
= ddev
->dev_private
;
146 enum amdgpu_dpm_forced_level level
;
149 /* Can't force performance level when the card is off */
150 if ((adev
->flags
& AMD_IS_PX
) &&
151 (ddev
->switch_power_state
!= DRM_SWITCH_POWER_ON
))
154 if (strncmp("low", buf
, strlen("low")) == 0) {
155 level
= AMDGPU_DPM_FORCED_LEVEL_LOW
;
156 } else if (strncmp("high", buf
, strlen("high")) == 0) {
157 level
= AMDGPU_DPM_FORCED_LEVEL_HIGH
;
158 } else if (strncmp("auto", buf
, strlen("auto")) == 0) {
159 level
= AMDGPU_DPM_FORCED_LEVEL_AUTO
;
160 } else if (strncmp("manual", buf
, strlen("manual")) == 0) {
161 level
= AMDGPU_DPM_FORCED_LEVEL_MANUAL
;
167 if (adev
->pp_enabled
)
168 amdgpu_dpm_force_performance_level(adev
, level
);
170 mutex_lock(&adev
->pm
.mutex
);
171 if (adev
->pm
.dpm
.thermal_active
) {
173 mutex_unlock(&adev
->pm
.mutex
);
176 ret
= amdgpu_dpm_force_performance_level(adev
, level
);
180 adev
->pm
.dpm
.forced_level
= level
;
181 mutex_unlock(&adev
->pm
.mutex
);
187 static ssize_t
amdgpu_get_pp_num_states(struct device
*dev
,
188 struct device_attribute
*attr
,
191 struct drm_device
*ddev
= dev_get_drvdata(dev
);
192 struct amdgpu_device
*adev
= ddev
->dev_private
;
193 struct pp_states_info data
;
196 if (adev
->pp_enabled
)
197 amdgpu_dpm_get_pp_num_states(adev
, &data
);
199 buf_len
= snprintf(buf
, PAGE_SIZE
, "states: %d\n", data
.nums
);
200 for (i
= 0; i
< data
.nums
; i
++)
201 buf_len
+= snprintf(buf
+ buf_len
, PAGE_SIZE
, "%d %s\n", i
,
202 (data
.states
[i
] == POWER_STATE_TYPE_INTERNAL_BOOT
) ? "boot" :
203 (data
.states
[i
] == POWER_STATE_TYPE_BATTERY
) ? "battery" :
204 (data
.states
[i
] == POWER_STATE_TYPE_BALANCED
) ? "balanced" :
205 (data
.states
[i
] == POWER_STATE_TYPE_PERFORMANCE
) ? "performance" : "default");
210 static ssize_t
amdgpu_get_pp_cur_state(struct device
*dev
,
211 struct device_attribute
*attr
,
214 struct drm_device
*ddev
= dev_get_drvdata(dev
);
215 struct amdgpu_device
*adev
= ddev
->dev_private
;
216 struct pp_states_info data
;
217 enum amd_pm_state_type pm
= 0;
220 if (adev
->pp_enabled
) {
222 pm
= amdgpu_dpm_get_current_power_state(adev
);
223 amdgpu_dpm_get_pp_num_states(adev
, &data
);
225 for (i
= 0; i
< data
.nums
; i
++) {
226 if (pm
== data
.states
[i
])
234 return snprintf(buf
, PAGE_SIZE
, "%d\n", i
);
237 static ssize_t
amdgpu_get_pp_force_state(struct device
*dev
,
238 struct device_attribute
*attr
,
241 struct drm_device
*ddev
= dev_get_drvdata(dev
);
242 struct amdgpu_device
*adev
= ddev
->dev_private
;
243 struct pp_states_info data
;
244 enum amd_pm_state_type pm
= 0;
247 if (adev
->pp_force_state_enabled
&& adev
->pp_enabled
) {
248 pm
= amdgpu_dpm_get_current_power_state(adev
);
249 amdgpu_dpm_get_pp_num_states(adev
, &data
);
251 for (i
= 0; i
< data
.nums
; i
++) {
252 if (pm
== data
.states
[i
])
259 return snprintf(buf
, PAGE_SIZE
, "%d\n", i
);
262 return snprintf(buf
, PAGE_SIZE
, "\n");
265 static ssize_t
amdgpu_set_pp_force_state(struct device
*dev
,
266 struct device_attribute
*attr
,
270 struct drm_device
*ddev
= dev_get_drvdata(dev
);
271 struct amdgpu_device
*adev
= ddev
->dev_private
;
272 enum amd_pm_state_type state
= 0;
276 if (strlen(buf
) == 1)
277 adev
->pp_force_state_enabled
= false;
278 else if (adev
->pp_enabled
) {
279 struct pp_states_info data
;
281 ret
= kstrtoul(buf
, 0, &idx
);
282 if (ret
|| idx
>= ARRAY_SIZE(data
.states
)) {
287 amdgpu_dpm_get_pp_num_states(adev
, &data
);
288 state
= data
.states
[idx
];
289 /* only set user selected power states */
290 if (state
!= POWER_STATE_TYPE_INTERNAL_BOOT
&&
291 state
!= POWER_STATE_TYPE_DEFAULT
) {
292 amdgpu_dpm_dispatch_task(adev
,
293 AMD_PP_EVENT_ENABLE_USER_STATE
, &state
, NULL
);
294 adev
->pp_force_state_enabled
= true;
301 static ssize_t
amdgpu_get_pp_table(struct device
*dev
,
302 struct device_attribute
*attr
,
305 struct drm_device
*ddev
= dev_get_drvdata(dev
);
306 struct amdgpu_device
*adev
= ddev
->dev_private
;
310 if (adev
->pp_enabled
)
311 size
= amdgpu_dpm_get_pp_table(adev
, &table
);
315 if (size
>= PAGE_SIZE
)
316 size
= PAGE_SIZE
- 1;
318 for (i
= 0; i
< size
; i
++) {
319 sprintf(buf
+ i
, "%02x", table
[i
]);
321 sprintf(buf
+ i
, "\n");
326 static ssize_t
amdgpu_set_pp_table(struct device
*dev
,
327 struct device_attribute
*attr
,
331 struct drm_device
*ddev
= dev_get_drvdata(dev
);
332 struct amdgpu_device
*adev
= ddev
->dev_private
;
334 if (adev
->pp_enabled
)
335 amdgpu_dpm_set_pp_table(adev
, buf
, count
);
340 static ssize_t
amdgpu_get_pp_dpm_sclk(struct device
*dev
,
341 struct device_attribute
*attr
,
344 struct drm_device
*ddev
= dev_get_drvdata(dev
);
345 struct amdgpu_device
*adev
= ddev
->dev_private
;
348 if (adev
->pp_enabled
)
349 size
= amdgpu_dpm_print_clock_levels(adev
, PP_SCLK
, buf
);
350 else if (adev
->pm
.funcs
->print_clock_levels
)
351 size
= adev
->pm
.funcs
->print_clock_levels(adev
, PP_SCLK
, buf
);
356 static ssize_t
amdgpu_set_pp_dpm_sclk(struct device
*dev
,
357 struct device_attribute
*attr
,
361 struct drm_device
*ddev
= dev_get_drvdata(dev
);
362 struct amdgpu_device
*adev
= ddev
->dev_private
;
365 uint32_t i
, mask
= 0;
368 for (i
= 0; i
< strlen(buf
); i
++) {
369 if (*(buf
+ i
) == '\n')
371 sub_str
[0] = *(buf
+ i
);
373 ret
= kstrtol(sub_str
, 0, &level
);
382 if (adev
->pp_enabled
)
383 amdgpu_dpm_force_clock_level(adev
, PP_SCLK
, mask
);
384 else if (adev
->pm
.funcs
->force_clock_level
)
385 adev
->pm
.funcs
->force_clock_level(adev
, PP_SCLK
, mask
);
390 static ssize_t
amdgpu_get_pp_dpm_mclk(struct device
*dev
,
391 struct device_attribute
*attr
,
394 struct drm_device
*ddev
= dev_get_drvdata(dev
);
395 struct amdgpu_device
*adev
= ddev
->dev_private
;
398 if (adev
->pp_enabled
)
399 size
= amdgpu_dpm_print_clock_levels(adev
, PP_MCLK
, buf
);
400 else if (adev
->pm
.funcs
->print_clock_levels
)
401 size
= adev
->pm
.funcs
->print_clock_levels(adev
, PP_MCLK
, buf
);
406 static ssize_t
amdgpu_set_pp_dpm_mclk(struct device
*dev
,
407 struct device_attribute
*attr
,
411 struct drm_device
*ddev
= dev_get_drvdata(dev
);
412 struct amdgpu_device
*adev
= ddev
->dev_private
;
415 uint32_t i
, mask
= 0;
418 for (i
= 0; i
< strlen(buf
); i
++) {
419 if (*(buf
+ i
) == '\n')
421 sub_str
[0] = *(buf
+ i
);
423 ret
= kstrtol(sub_str
, 0, &level
);
432 if (adev
->pp_enabled
)
433 amdgpu_dpm_force_clock_level(adev
, PP_MCLK
, mask
);
434 else if (adev
->pm
.funcs
->force_clock_level
)
435 adev
->pm
.funcs
->force_clock_level(adev
, PP_MCLK
, mask
);
440 static ssize_t
amdgpu_get_pp_dpm_pcie(struct device
*dev
,
441 struct device_attribute
*attr
,
444 struct drm_device
*ddev
= dev_get_drvdata(dev
);
445 struct amdgpu_device
*adev
= ddev
->dev_private
;
448 if (adev
->pp_enabled
)
449 size
= amdgpu_dpm_print_clock_levels(adev
, PP_PCIE
, buf
);
450 else if (adev
->pm
.funcs
->print_clock_levels
)
451 size
= adev
->pm
.funcs
->print_clock_levels(adev
, PP_PCIE
, buf
);
456 static ssize_t
amdgpu_set_pp_dpm_pcie(struct device
*dev
,
457 struct device_attribute
*attr
,
461 struct drm_device
*ddev
= dev_get_drvdata(dev
);
462 struct amdgpu_device
*adev
= ddev
->dev_private
;
465 uint32_t i
, mask
= 0;
468 for (i
= 0; i
< strlen(buf
); i
++) {
469 if (*(buf
+ i
) == '\n')
471 sub_str
[0] = *(buf
+ i
);
473 ret
= kstrtol(sub_str
, 0, &level
);
482 if (adev
->pp_enabled
)
483 amdgpu_dpm_force_clock_level(adev
, PP_PCIE
, mask
);
484 else if (adev
->pm
.funcs
->force_clock_level
)
485 adev
->pm
.funcs
->force_clock_level(adev
, PP_PCIE
, mask
);
490 static ssize_t
amdgpu_get_pp_sclk_od(struct device
*dev
,
491 struct device_attribute
*attr
,
494 struct drm_device
*ddev
= dev_get_drvdata(dev
);
495 struct amdgpu_device
*adev
= ddev
->dev_private
;
498 if (adev
->pp_enabled
)
499 value
= amdgpu_dpm_get_sclk_od(adev
);
500 else if (adev
->pm
.funcs
->get_sclk_od
)
501 value
= adev
->pm
.funcs
->get_sclk_od(adev
);
503 return snprintf(buf
, PAGE_SIZE
, "%d\n", value
);
506 static ssize_t
amdgpu_set_pp_sclk_od(struct device
*dev
,
507 struct device_attribute
*attr
,
511 struct drm_device
*ddev
= dev_get_drvdata(dev
);
512 struct amdgpu_device
*adev
= ddev
->dev_private
;
516 ret
= kstrtol(buf
, 0, &value
);
523 if (adev
->pp_enabled
) {
524 amdgpu_dpm_set_sclk_od(adev
, (uint32_t)value
);
525 amdgpu_dpm_dispatch_task(adev
, AMD_PP_EVENT_READJUST_POWER_STATE
, NULL
, NULL
);
526 } else if (adev
->pm
.funcs
->set_sclk_od
) {
527 adev
->pm
.funcs
->set_sclk_od(adev
, (uint32_t)value
);
528 adev
->pm
.dpm
.current_ps
= adev
->pm
.dpm
.boot_ps
;
529 amdgpu_pm_compute_clocks(adev
);
536 static ssize_t
amdgpu_get_pp_mclk_od(struct device
*dev
,
537 struct device_attribute
*attr
,
540 struct drm_device
*ddev
= dev_get_drvdata(dev
);
541 struct amdgpu_device
*adev
= ddev
->dev_private
;
544 if (adev
->pp_enabled
)
545 value
= amdgpu_dpm_get_mclk_od(adev
);
546 else if (adev
->pm
.funcs
->get_mclk_od
)
547 value
= adev
->pm
.funcs
->get_mclk_od(adev
);
549 return snprintf(buf
, PAGE_SIZE
, "%d\n", value
);
552 static ssize_t
amdgpu_set_pp_mclk_od(struct device
*dev
,
553 struct device_attribute
*attr
,
557 struct drm_device
*ddev
= dev_get_drvdata(dev
);
558 struct amdgpu_device
*adev
= ddev
->dev_private
;
562 ret
= kstrtol(buf
, 0, &value
);
569 if (adev
->pp_enabled
) {
570 amdgpu_dpm_set_mclk_od(adev
, (uint32_t)value
);
571 amdgpu_dpm_dispatch_task(adev
, AMD_PP_EVENT_READJUST_POWER_STATE
, NULL
, NULL
);
572 } else if (adev
->pm
.funcs
->set_mclk_od
) {
573 adev
->pm
.funcs
->set_mclk_od(adev
, (uint32_t)value
);
574 adev
->pm
.dpm
.current_ps
= adev
->pm
.dpm
.boot_ps
;
575 amdgpu_pm_compute_clocks(adev
);
582 static DEVICE_ATTR(power_dpm_state
, S_IRUGO
| S_IWUSR
, amdgpu_get_dpm_state
, amdgpu_set_dpm_state
);
583 static DEVICE_ATTR(power_dpm_force_performance_level
, S_IRUGO
| S_IWUSR
,
584 amdgpu_get_dpm_forced_performance_level
,
585 amdgpu_set_dpm_forced_performance_level
);
586 static DEVICE_ATTR(pp_num_states
, S_IRUGO
, amdgpu_get_pp_num_states
, NULL
);
587 static DEVICE_ATTR(pp_cur_state
, S_IRUGO
, amdgpu_get_pp_cur_state
, NULL
);
588 static DEVICE_ATTR(pp_force_state
, S_IRUGO
| S_IWUSR
,
589 amdgpu_get_pp_force_state
,
590 amdgpu_set_pp_force_state
);
591 static DEVICE_ATTR(pp_table
, S_IRUGO
| S_IWUSR
,
593 amdgpu_set_pp_table
);
594 static DEVICE_ATTR(pp_dpm_sclk
, S_IRUGO
| S_IWUSR
,
595 amdgpu_get_pp_dpm_sclk
,
596 amdgpu_set_pp_dpm_sclk
);
597 static DEVICE_ATTR(pp_dpm_mclk
, S_IRUGO
| S_IWUSR
,
598 amdgpu_get_pp_dpm_mclk
,
599 amdgpu_set_pp_dpm_mclk
);
600 static DEVICE_ATTR(pp_dpm_pcie
, S_IRUGO
| S_IWUSR
,
601 amdgpu_get_pp_dpm_pcie
,
602 amdgpu_set_pp_dpm_pcie
);
603 static DEVICE_ATTR(pp_sclk_od
, S_IRUGO
| S_IWUSR
,
604 amdgpu_get_pp_sclk_od
,
605 amdgpu_set_pp_sclk_od
);
606 static DEVICE_ATTR(pp_mclk_od
, S_IRUGO
| S_IWUSR
,
607 amdgpu_get_pp_mclk_od
,
608 amdgpu_set_pp_mclk_od
);
610 static ssize_t
amdgpu_hwmon_show_temp(struct device
*dev
,
611 struct device_attribute
*attr
,
614 struct amdgpu_device
*adev
= dev_get_drvdata(dev
);
615 struct drm_device
*ddev
= adev
->ddev
;
618 /* Can't get temperature when the card is off */
619 if ((adev
->flags
& AMD_IS_PX
) &&
620 (ddev
->switch_power_state
!= DRM_SWITCH_POWER_ON
))
623 if (!adev
->pp_enabled
&& !adev
->pm
.funcs
->get_temperature
)
626 temp
= amdgpu_dpm_get_temperature(adev
);
628 return snprintf(buf
, PAGE_SIZE
, "%d\n", temp
);
631 static ssize_t
amdgpu_hwmon_show_temp_thresh(struct device
*dev
,
632 struct device_attribute
*attr
,
635 struct amdgpu_device
*adev
= dev_get_drvdata(dev
);
636 int hyst
= to_sensor_dev_attr(attr
)->index
;
640 temp
= adev
->pm
.dpm
.thermal
.min_temp
;
642 temp
= adev
->pm
.dpm
.thermal
.max_temp
;
644 return snprintf(buf
, PAGE_SIZE
, "%d\n", temp
);
647 static ssize_t
amdgpu_hwmon_get_pwm1_enable(struct device
*dev
,
648 struct device_attribute
*attr
,
651 struct amdgpu_device
*adev
= dev_get_drvdata(dev
);
654 if (!adev
->pp_enabled
&& !adev
->pm
.funcs
->get_fan_control_mode
)
657 pwm_mode
= amdgpu_dpm_get_fan_control_mode(adev
);
659 /* never 0 (full-speed), fuse or smc-controlled always */
660 return sprintf(buf
, "%i\n", pwm_mode
== FDO_PWM_MODE_STATIC
? 1 : 2);
663 static ssize_t
amdgpu_hwmon_set_pwm1_enable(struct device
*dev
,
664 struct device_attribute
*attr
,
668 struct amdgpu_device
*adev
= dev_get_drvdata(dev
);
672 if (!adev
->pp_enabled
&& !adev
->pm
.funcs
->set_fan_control_mode
)
675 err
= kstrtoint(buf
, 10, &value
);
680 case 1: /* manual, percent-based */
681 amdgpu_dpm_set_fan_control_mode(adev
, FDO_PWM_MODE_STATIC
);
683 default: /* disable */
684 amdgpu_dpm_set_fan_control_mode(adev
, 0);
691 static ssize_t
amdgpu_hwmon_get_pwm1_min(struct device
*dev
,
692 struct device_attribute
*attr
,
695 return sprintf(buf
, "%i\n", 0);
698 static ssize_t
amdgpu_hwmon_get_pwm1_max(struct device
*dev
,
699 struct device_attribute
*attr
,
702 return sprintf(buf
, "%i\n", 255);
705 static ssize_t
amdgpu_hwmon_set_pwm1(struct device
*dev
,
706 struct device_attribute
*attr
,
707 const char *buf
, size_t count
)
709 struct amdgpu_device
*adev
= dev_get_drvdata(dev
);
713 err
= kstrtou32(buf
, 10, &value
);
717 value
= (value
* 100) / 255;
719 err
= amdgpu_dpm_set_fan_speed_percent(adev
, value
);
726 static ssize_t
amdgpu_hwmon_get_pwm1(struct device
*dev
,
727 struct device_attribute
*attr
,
730 struct amdgpu_device
*adev
= dev_get_drvdata(dev
);
734 err
= amdgpu_dpm_get_fan_speed_percent(adev
, &speed
);
738 speed
= (speed
* 255) / 100;
740 return sprintf(buf
, "%i\n", speed
);
743 static SENSOR_DEVICE_ATTR(temp1_input
, S_IRUGO
, amdgpu_hwmon_show_temp
, NULL
, 0);
744 static SENSOR_DEVICE_ATTR(temp1_crit
, S_IRUGO
, amdgpu_hwmon_show_temp_thresh
, NULL
, 0);
745 static SENSOR_DEVICE_ATTR(temp1_crit_hyst
, S_IRUGO
, amdgpu_hwmon_show_temp_thresh
, NULL
, 1);
746 static SENSOR_DEVICE_ATTR(pwm1
, S_IRUGO
| S_IWUSR
, amdgpu_hwmon_get_pwm1
, amdgpu_hwmon_set_pwm1
, 0);
747 static SENSOR_DEVICE_ATTR(pwm1_enable
, S_IRUGO
| S_IWUSR
, amdgpu_hwmon_get_pwm1_enable
, amdgpu_hwmon_set_pwm1_enable
, 0);
748 static SENSOR_DEVICE_ATTR(pwm1_min
, S_IRUGO
, amdgpu_hwmon_get_pwm1_min
, NULL
, 0);
749 static SENSOR_DEVICE_ATTR(pwm1_max
, S_IRUGO
, amdgpu_hwmon_get_pwm1_max
, NULL
, 0);
751 static struct attribute
*hwmon_attributes
[] = {
752 &sensor_dev_attr_temp1_input
.dev_attr
.attr
,
753 &sensor_dev_attr_temp1_crit
.dev_attr
.attr
,
754 &sensor_dev_attr_temp1_crit_hyst
.dev_attr
.attr
,
755 &sensor_dev_attr_pwm1
.dev_attr
.attr
,
756 &sensor_dev_attr_pwm1_enable
.dev_attr
.attr
,
757 &sensor_dev_attr_pwm1_min
.dev_attr
.attr
,
758 &sensor_dev_attr_pwm1_max
.dev_attr
.attr
,
762 static umode_t
hwmon_attributes_visible(struct kobject
*kobj
,
763 struct attribute
*attr
, int index
)
765 struct device
*dev
= kobj_to_dev(kobj
);
766 struct amdgpu_device
*adev
= dev_get_drvdata(dev
);
767 umode_t effective_mode
= attr
->mode
;
769 /* Skip limit attributes if DPM is not enabled */
770 if (!adev
->pm
.dpm_enabled
&&
771 (attr
== &sensor_dev_attr_temp1_crit
.dev_attr
.attr
||
772 attr
== &sensor_dev_attr_temp1_crit_hyst
.dev_attr
.attr
||
773 attr
== &sensor_dev_attr_pwm1
.dev_attr
.attr
||
774 attr
== &sensor_dev_attr_pwm1_enable
.dev_attr
.attr
||
775 attr
== &sensor_dev_attr_pwm1_max
.dev_attr
.attr
||
776 attr
== &sensor_dev_attr_pwm1_min
.dev_attr
.attr
))
779 if (adev
->pp_enabled
)
780 return effective_mode
;
782 /* Skip fan attributes if fan is not present */
783 if (adev
->pm
.no_fan
&&
784 (attr
== &sensor_dev_attr_pwm1
.dev_attr
.attr
||
785 attr
== &sensor_dev_attr_pwm1_enable
.dev_attr
.attr
||
786 attr
== &sensor_dev_attr_pwm1_max
.dev_attr
.attr
||
787 attr
== &sensor_dev_attr_pwm1_min
.dev_attr
.attr
))
790 /* mask fan attributes if we have no bindings for this asic to expose */
791 if ((!adev
->pm
.funcs
->get_fan_speed_percent
&&
792 attr
== &sensor_dev_attr_pwm1
.dev_attr
.attr
) || /* can't query fan */
793 (!adev
->pm
.funcs
->get_fan_control_mode
&&
794 attr
== &sensor_dev_attr_pwm1_enable
.dev_attr
.attr
)) /* can't query state */
795 effective_mode
&= ~S_IRUGO
;
797 if ((!adev
->pm
.funcs
->set_fan_speed_percent
&&
798 attr
== &sensor_dev_attr_pwm1
.dev_attr
.attr
) || /* can't manage fan */
799 (!adev
->pm
.funcs
->set_fan_control_mode
&&
800 attr
== &sensor_dev_attr_pwm1_enable
.dev_attr
.attr
)) /* can't manage state */
801 effective_mode
&= ~S_IWUSR
;
803 /* hide max/min values if we can't both query and manage the fan */
804 if ((!adev
->pm
.funcs
->set_fan_speed_percent
&&
805 !adev
->pm
.funcs
->get_fan_speed_percent
) &&
806 (attr
== &sensor_dev_attr_pwm1_max
.dev_attr
.attr
||
807 attr
== &sensor_dev_attr_pwm1_min
.dev_attr
.attr
))
810 return effective_mode
;
813 static const struct attribute_group hwmon_attrgroup
= {
814 .attrs
= hwmon_attributes
,
815 .is_visible
= hwmon_attributes_visible
,
818 static const struct attribute_group
*hwmon_groups
[] = {
823 void amdgpu_dpm_thermal_work_handler(struct work_struct
*work
)
825 struct amdgpu_device
*adev
=
826 container_of(work
, struct amdgpu_device
,
827 pm
.dpm
.thermal
.work
);
828 /* switch to the thermal state */
829 enum amd_pm_state_type dpm_state
= POWER_STATE_TYPE_INTERNAL_THERMAL
;
831 if (!adev
->pm
.dpm_enabled
)
834 if (adev
->pm
.funcs
->get_temperature
) {
835 int temp
= amdgpu_dpm_get_temperature(adev
);
837 if (temp
< adev
->pm
.dpm
.thermal
.min_temp
)
838 /* switch back the user state */
839 dpm_state
= adev
->pm
.dpm
.user_state
;
841 if (adev
->pm
.dpm
.thermal
.high_to_low
)
842 /* switch back the user state */
843 dpm_state
= adev
->pm
.dpm
.user_state
;
845 mutex_lock(&adev
->pm
.mutex
);
846 if (dpm_state
== POWER_STATE_TYPE_INTERNAL_THERMAL
)
847 adev
->pm
.dpm
.thermal_active
= true;
849 adev
->pm
.dpm
.thermal_active
= false;
850 adev
->pm
.dpm
.state
= dpm_state
;
851 mutex_unlock(&adev
->pm
.mutex
);
853 amdgpu_pm_compute_clocks(adev
);
856 static struct amdgpu_ps
*amdgpu_dpm_pick_power_state(struct amdgpu_device
*adev
,
857 enum amd_pm_state_type dpm_state
)
860 struct amdgpu_ps
*ps
;
862 bool single_display
= (adev
->pm
.dpm
.new_active_crtc_count
< 2) ?
865 /* check if the vblank period is too short to adjust the mclk */
866 if (single_display
&& adev
->pm
.funcs
->vblank_too_short
) {
867 if (amdgpu_dpm_vblank_too_short(adev
))
868 single_display
= false;
871 /* certain older asics have a separare 3D performance state,
872 * so try that first if the user selected performance
874 if (dpm_state
== POWER_STATE_TYPE_PERFORMANCE
)
875 dpm_state
= POWER_STATE_TYPE_INTERNAL_3DPERF
;
876 /* balanced states don't exist at the moment */
877 if (dpm_state
== POWER_STATE_TYPE_BALANCED
)
878 dpm_state
= POWER_STATE_TYPE_PERFORMANCE
;
881 /* Pick the best power state based on current conditions */
882 for (i
= 0; i
< adev
->pm
.dpm
.num_ps
; i
++) {
883 ps
= &adev
->pm
.dpm
.ps
[i
];
884 ui_class
= ps
->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK
;
887 case POWER_STATE_TYPE_BATTERY
:
888 if (ui_class
== ATOM_PPLIB_CLASSIFICATION_UI_BATTERY
) {
889 if (ps
->caps
& ATOM_PPLIB_SINGLE_DISPLAY_ONLY
) {
896 case POWER_STATE_TYPE_BALANCED
:
897 if (ui_class
== ATOM_PPLIB_CLASSIFICATION_UI_BALANCED
) {
898 if (ps
->caps
& ATOM_PPLIB_SINGLE_DISPLAY_ONLY
) {
905 case POWER_STATE_TYPE_PERFORMANCE
:
906 if (ui_class
== ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE
) {
907 if (ps
->caps
& ATOM_PPLIB_SINGLE_DISPLAY_ONLY
) {
914 /* internal states */
915 case POWER_STATE_TYPE_INTERNAL_UVD
:
916 if (adev
->pm
.dpm
.uvd_ps
)
917 return adev
->pm
.dpm
.uvd_ps
;
920 case POWER_STATE_TYPE_INTERNAL_UVD_SD
:
921 if (ps
->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE
)
924 case POWER_STATE_TYPE_INTERNAL_UVD_HD
:
925 if (ps
->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE
)
928 case POWER_STATE_TYPE_INTERNAL_UVD_HD2
:
929 if (ps
->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE
)
932 case POWER_STATE_TYPE_INTERNAL_UVD_MVC
:
933 if (ps
->class2
& ATOM_PPLIB_CLASSIFICATION2_MVC
)
936 case POWER_STATE_TYPE_INTERNAL_BOOT
:
937 return adev
->pm
.dpm
.boot_ps
;
938 case POWER_STATE_TYPE_INTERNAL_THERMAL
:
939 if (ps
->class & ATOM_PPLIB_CLASSIFICATION_THERMAL
)
942 case POWER_STATE_TYPE_INTERNAL_ACPI
:
943 if (ps
->class & ATOM_PPLIB_CLASSIFICATION_ACPI
)
946 case POWER_STATE_TYPE_INTERNAL_ULV
:
947 if (ps
->class2
& ATOM_PPLIB_CLASSIFICATION2_ULV
)
950 case POWER_STATE_TYPE_INTERNAL_3DPERF
:
951 if (ps
->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE
)
958 /* use a fallback state if we didn't match */
960 case POWER_STATE_TYPE_INTERNAL_UVD_SD
:
961 dpm_state
= POWER_STATE_TYPE_INTERNAL_UVD_HD
;
963 case POWER_STATE_TYPE_INTERNAL_UVD_HD
:
964 case POWER_STATE_TYPE_INTERNAL_UVD_HD2
:
965 case POWER_STATE_TYPE_INTERNAL_UVD_MVC
:
966 if (adev
->pm
.dpm
.uvd_ps
) {
967 return adev
->pm
.dpm
.uvd_ps
;
969 dpm_state
= POWER_STATE_TYPE_PERFORMANCE
;
972 case POWER_STATE_TYPE_INTERNAL_THERMAL
:
973 dpm_state
= POWER_STATE_TYPE_INTERNAL_ACPI
;
975 case POWER_STATE_TYPE_INTERNAL_ACPI
:
976 dpm_state
= POWER_STATE_TYPE_BATTERY
;
978 case POWER_STATE_TYPE_BATTERY
:
979 case POWER_STATE_TYPE_BALANCED
:
980 case POWER_STATE_TYPE_INTERNAL_3DPERF
:
981 dpm_state
= POWER_STATE_TYPE_PERFORMANCE
;
990 static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device
*adev
)
993 struct amdgpu_ps
*ps
;
994 enum amd_pm_state_type dpm_state
;
997 /* if dpm init failed */
998 if (!adev
->pm
.dpm_enabled
)
1001 if (adev
->pm
.dpm
.user_state
!= adev
->pm
.dpm
.state
) {
1002 /* add other state override checks here */
1003 if ((!adev
->pm
.dpm
.thermal_active
) &&
1004 (!adev
->pm
.dpm
.uvd_active
))
1005 adev
->pm
.dpm
.state
= adev
->pm
.dpm
.user_state
;
1007 dpm_state
= adev
->pm
.dpm
.state
;
1009 ps
= amdgpu_dpm_pick_power_state(adev
, dpm_state
);
1011 adev
->pm
.dpm
.requested_ps
= ps
;
1015 /* no need to reprogram if nothing changed unless we are on BTC+ */
1016 if (adev
->pm
.dpm
.current_ps
== adev
->pm
.dpm
.requested_ps
) {
1017 /* vce just modifies an existing state so force a change */
1018 if (ps
->vce_active
!= adev
->pm
.dpm
.vce_active
)
1020 if (adev
->flags
& AMD_IS_APU
) {
1021 /* for APUs if the num crtcs changed but state is the same,
1022 * all we need to do is update the display configuration.
1024 if (adev
->pm
.dpm
.new_active_crtcs
!= adev
->pm
.dpm
.current_active_crtcs
) {
1025 /* update display watermarks based on new power state */
1026 amdgpu_display_bandwidth_update(adev
);
1027 /* update displays */
1028 amdgpu_dpm_display_configuration_changed(adev
);
1029 adev
->pm
.dpm
.current_active_crtcs
= adev
->pm
.dpm
.new_active_crtcs
;
1030 adev
->pm
.dpm
.current_active_crtc_count
= adev
->pm
.dpm
.new_active_crtc_count
;
1034 /* for BTC+ if the num crtcs hasn't changed and state is the same,
1035 * nothing to do, if the num crtcs is > 1 and state is the same,
1036 * update display configuration.
1038 if (adev
->pm
.dpm
.new_active_crtcs
==
1039 adev
->pm
.dpm
.current_active_crtcs
) {
1041 } else if ((adev
->pm
.dpm
.current_active_crtc_count
> 1) &&
1042 (adev
->pm
.dpm
.new_active_crtc_count
> 1)) {
1043 /* update display watermarks based on new power state */
1044 amdgpu_display_bandwidth_update(adev
);
1045 /* update displays */
1046 amdgpu_dpm_display_configuration_changed(adev
);
1047 adev
->pm
.dpm
.current_active_crtcs
= adev
->pm
.dpm
.new_active_crtcs
;
1048 adev
->pm
.dpm
.current_active_crtc_count
= adev
->pm
.dpm
.new_active_crtc_count
;
1055 if (amdgpu_dpm
== 1) {
1056 printk("switching from power state:\n");
1057 amdgpu_dpm_print_power_state(adev
, adev
->pm
.dpm
.current_ps
);
1058 printk("switching to power state:\n");
1059 amdgpu_dpm_print_power_state(adev
, adev
->pm
.dpm
.requested_ps
);
1062 /* update whether vce is active */
1063 ps
->vce_active
= adev
->pm
.dpm
.vce_active
;
1065 ret
= amdgpu_dpm_pre_set_power_state(adev
);
1069 /* update display watermarks based on new power state */
1070 amdgpu_display_bandwidth_update(adev
);
1072 /* wait for the rings to drain */
1073 for (i
= 0; i
< AMDGPU_MAX_RINGS
; i
++) {
1074 struct amdgpu_ring
*ring
= adev
->rings
[i
];
1075 if (ring
&& ring
->ready
)
1076 amdgpu_fence_wait_empty(ring
);
1079 /* program the new power state */
1080 amdgpu_dpm_set_power_state(adev
);
1082 /* update current power state */
1083 adev
->pm
.dpm
.current_ps
= adev
->pm
.dpm
.requested_ps
;
1085 amdgpu_dpm_post_set_power_state(adev
);
1087 /* update displays */
1088 amdgpu_dpm_display_configuration_changed(adev
);
1090 adev
->pm
.dpm
.current_active_crtcs
= adev
->pm
.dpm
.new_active_crtcs
;
1091 adev
->pm
.dpm
.current_active_crtc_count
= adev
->pm
.dpm
.new_active_crtc_count
;
1093 if (adev
->pm
.funcs
->force_performance_level
) {
1094 if (adev
->pm
.dpm
.thermal_active
) {
1095 enum amdgpu_dpm_forced_level level
= adev
->pm
.dpm
.forced_level
;
1096 /* force low perf level for thermal */
1097 amdgpu_dpm_force_performance_level(adev
, AMDGPU_DPM_FORCED_LEVEL_LOW
);
1098 /* save the user's level */
1099 adev
->pm
.dpm
.forced_level
= level
;
1101 /* otherwise, user selected level */
1102 amdgpu_dpm_force_performance_level(adev
, adev
->pm
.dpm
.forced_level
);
1107 void amdgpu_dpm_enable_uvd(struct amdgpu_device
*adev
, bool enable
)
1109 if (adev
->pp_enabled
)
1110 amdgpu_dpm_powergate_uvd(adev
, !enable
);
1112 if (adev
->pm
.funcs
->powergate_uvd
) {
1113 mutex_lock(&adev
->pm
.mutex
);
1114 /* enable/disable UVD */
1115 amdgpu_dpm_powergate_uvd(adev
, !enable
);
1116 mutex_unlock(&adev
->pm
.mutex
);
1119 mutex_lock(&adev
->pm
.mutex
);
1120 adev
->pm
.dpm
.uvd_active
= true;
1121 adev
->pm
.dpm
.state
= POWER_STATE_TYPE_INTERNAL_UVD
;
1122 mutex_unlock(&adev
->pm
.mutex
);
1124 mutex_lock(&adev
->pm
.mutex
);
1125 adev
->pm
.dpm
.uvd_active
= false;
1126 mutex_unlock(&adev
->pm
.mutex
);
1128 amdgpu_pm_compute_clocks(adev
);
1134 void amdgpu_dpm_enable_vce(struct amdgpu_device
*adev
, bool enable
)
1136 if (adev
->pp_enabled
)
1137 amdgpu_dpm_powergate_vce(adev
, !enable
);
1139 if (adev
->pm
.funcs
->powergate_vce
) {
1140 mutex_lock(&adev
->pm
.mutex
);
1141 amdgpu_dpm_powergate_vce(adev
, !enable
);
1142 mutex_unlock(&adev
->pm
.mutex
);
1145 mutex_lock(&adev
->pm
.mutex
);
1146 adev
->pm
.dpm
.vce_active
= true;
1147 /* XXX select vce level based on ring/task */
1148 adev
->pm
.dpm
.vce_level
= AMDGPU_VCE_LEVEL_AC_ALL
;
1149 mutex_unlock(&adev
->pm
.mutex
);
1151 mutex_lock(&adev
->pm
.mutex
);
1152 adev
->pm
.dpm
.vce_active
= false;
1153 mutex_unlock(&adev
->pm
.mutex
);
1155 amdgpu_pm_compute_clocks(adev
);
1160 void amdgpu_pm_print_power_states(struct amdgpu_device
*adev
)
1164 if (adev
->pp_enabled
)
1168 for (i
= 0; i
< adev
->pm
.dpm
.num_ps
; i
++)
1169 amdgpu_dpm_print_power_state(adev
, &adev
->pm
.dpm
.ps
[i
]);
1173 int amdgpu_pm_sysfs_init(struct amdgpu_device
*adev
)
1177 if (adev
->pm
.sysfs_initialized
)
1180 if (!adev
->pp_enabled
) {
1181 if (adev
->pm
.funcs
->get_temperature
== NULL
)
1185 adev
->pm
.int_hwmon_dev
= hwmon_device_register_with_groups(adev
->dev
,
1188 if (IS_ERR(adev
->pm
.int_hwmon_dev
)) {
1189 ret
= PTR_ERR(adev
->pm
.int_hwmon_dev
);
1191 "Unable to register hwmon device: %d\n", ret
);
1195 ret
= device_create_file(adev
->dev
, &dev_attr_power_dpm_state
);
1197 DRM_ERROR("failed to create device file for dpm state\n");
1200 ret
= device_create_file(adev
->dev
, &dev_attr_power_dpm_force_performance_level
);
1202 DRM_ERROR("failed to create device file for dpm state\n");
1206 if (adev
->pp_enabled
) {
1207 ret
= device_create_file(adev
->dev
, &dev_attr_pp_num_states
);
1209 DRM_ERROR("failed to create device file pp_num_states\n");
1212 ret
= device_create_file(adev
->dev
, &dev_attr_pp_cur_state
);
1214 DRM_ERROR("failed to create device file pp_cur_state\n");
1217 ret
= device_create_file(adev
->dev
, &dev_attr_pp_force_state
);
1219 DRM_ERROR("failed to create device file pp_force_state\n");
1222 ret
= device_create_file(adev
->dev
, &dev_attr_pp_table
);
1224 DRM_ERROR("failed to create device file pp_table\n");
1229 ret
= device_create_file(adev
->dev
, &dev_attr_pp_dpm_sclk
);
1231 DRM_ERROR("failed to create device file pp_dpm_sclk\n");
1234 ret
= device_create_file(adev
->dev
, &dev_attr_pp_dpm_mclk
);
1236 DRM_ERROR("failed to create device file pp_dpm_mclk\n");
1239 ret
= device_create_file(adev
->dev
, &dev_attr_pp_dpm_pcie
);
1241 DRM_ERROR("failed to create device file pp_dpm_pcie\n");
1244 ret
= device_create_file(adev
->dev
, &dev_attr_pp_sclk_od
);
1246 DRM_ERROR("failed to create device file pp_sclk_od\n");
1249 ret
= device_create_file(adev
->dev
, &dev_attr_pp_mclk_od
);
1251 DRM_ERROR("failed to create device file pp_mclk_od\n");
1255 ret
= amdgpu_debugfs_pm_init(adev
);
1257 DRM_ERROR("Failed to register debugfs file for dpm!\n");
1261 adev
->pm
.sysfs_initialized
= true;
/**
 * amdgpu_pm_sysfs_fini - remove the PM sysfs/hwmon interfaces
 * @adev: amdgpu device handle
 *
 * Tears down everything amdgpu_pm_sysfs_init() created.  Safe to call
 * even after a partial init: device_remove_file() on an attribute that
 * was never created is harmless, and the hwmon device is only
 * unregistered when it was actually registered.
 */
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);
	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
	/* powerplay-only attributes */
	if (adev->pp_enabled) {
		device_remove_file(adev->dev, &dev_attr_pp_num_states);
		device_remove_file(adev->dev, &dev_attr_pp_cur_state);
		device_remove_file(adev->dev, &dev_attr_pp_force_state);
		device_remove_file(adev->dev, &dev_attr_pp_table);
	}
	device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
	device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
	device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
	device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
	device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
}
/**
 * amdgpu_pm_compute_clocks - re-evaluate the power state for current load
 * @adev: amdgpu device handle
 *
 * Called when conditions that influence power-state selection change
 * (display configuration, engine activity, AC/battery status).
 *
 * powerplay path: update display bandwidth, drain every ready ring's
 * fences, then hand a DISPLAY_CONFIG_CHANGE event to the powerplay task
 * dispatcher.
 *
 * legacy-dpm path: under the pm mutex, rebuild the active-CRTC mask and
 * count from the mode configuration, refresh ac_power from the power
 * supply, and let the dpm core pick a new state.
 */
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	int i = 0;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pp_enabled) {
		amdgpu_display_bandwidth_update(adev);
		/* wait for outstanding work on every ready ring so the new
		 * state is chosen against an idle GPU */
		for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
			struct amdgpu_ring *ring = adev->rings[i];
			if (ring && ring->ready)
				amdgpu_fence_wait_empty(ring);
		}

		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.new_active_crtcs = 0;
		adev->pm.dpm.new_active_crtc_count = 0;
		if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
			list_for_each_entry(crtc,
					    &ddev->mode_config.crtc_list, head) {
				amdgpu_crtc = to_amdgpu_crtc(crtc);
				if (crtc->enabled) {
					adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
					adev->pm.dpm.new_active_crtc_count++;
				}
			}
		}
		/* update battery/ac status */
		if (power_supply_is_system_supplied() > 0)
			adev->pm.dpm.ac_power = true;
		else
			adev->pm.dpm.ac_power = false;

		amdgpu_dpm_change_power_state_locked(adev);

		mutex_unlock(&adev->pm.mutex);
	}
}
1334 #if defined(CONFIG_DEBUG_FS)
/**
 * amdgpu_debugfs_pm_info - debugfs show callback for amdgpu_pm_info
 * @m: seq_file to print into
 * @data: unused drm_info_list payload
 *
 * Prints the current performance level, or a short explanation when dpm
 * is disabled, the PX dGPU is powered down, or the legacy-dpm backend
 * provides no debugfs print callback.  Always returns 0 (seq_file show
 * convention).
 */
static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_device *ddev = adev->ddev;

	if (!adev->pm.dpm_enabled) {
		seq_printf(m, "dpm not enabled\n");
		return 0;
	}
	if ((adev->flags & AMD_IS_PX) &&
	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
		/* PX (PowerXpress) laptop with the dGPU switched off:
		 * don't touch the hardware */
		seq_printf(m, "PX asic powered off\n");
	} else if (adev->pp_enabled) {
		amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
	} else {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.funcs->debugfs_print_current_performance_level)
			amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}
/* debugfs entries registered by amdgpu_debugfs_pm_init() */
static const struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
1369 static int amdgpu_debugfs_pm_init(struct amdgpu_device
*adev
)
1371 #if defined(CONFIG_DEBUG_FS)
1372 return amdgpu_debugfs_add_files(adev
, amdgpu_pm_info_list
, ARRAY_SIZE(amdgpu_pm_info_list
));