/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "cikd.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_dpm.h"
#include "kv_dpm.h"
#include "gfx_v7_0.h"
#include <linux/seq_file.h>

#include "smu/smu_7_0_0_d.h"
#include "smu/smu_7_0_0_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#define KV_MAX_DEEPSLEEP_DIVIDER_ID	5
#define KV_MINIMUM_ENGINE_CLOCK		800
#define SMC_RAM_END			0x40000

static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev);
static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
static int kv_enable_nb_dpm(struct amdgpu_device *adev,
			    bool enable);
static void kv_init_graphics_levels(struct amdgpu_device *adev);
static int kv_calculate_ds_divider(struct amdgpu_device *adev);
static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev);
static int kv_calculate_dpm_settings(struct amdgpu_device *adev);
static void kv_enable_new_levels(struct amdgpu_device *adev);
static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
					   struct amdgpu_ps *new_rps);
static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level);
static int kv_set_enabled_levels(struct amdgpu_device *adev);
static int kv_force_dpm_highest(struct amdgpu_device *adev);
static int kv_force_dpm_lowest(struct amdgpu_device *adev);
static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *new_rps,
					struct amdgpu_ps *old_rps);
static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
					    int min_temp, int max_temp);
static int kv_init_fps_limits(struct amdgpu_device *adev);

static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);

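/* The SMU addresses voltages with 7-bit VIDs, while the BIOS power tables
 * store compact 2-bit indices.  The two helpers below translate between the
 * encodings: they prefer the vddc-on-sclk dependency table when it is
 * populated and fall back to the sumo VID mapping table otherwise.
 */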
static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_2bit)
{
	struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		if (vid_2bit < vddc_sclk_table->count)
			return vddc_sclk_table->entries[vid_2bit].v;
		else
			return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
				return vid_mapping_table->entries[i].vid_7bit;
		}
		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
	}
}

static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_7bit)
{
	struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		for (i = 0; i < vddc_sclk_table->count; i++) {
			if (vddc_sclk_table->entries[i].v == vid_7bit)
				return i;
		}
		return vddc_sclk_table->count - 1;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
				return vid_mapping_table->entries[i].vid_2bit;
		}

		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
	}
}

static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable)
{
/* This bit selects who handles display phy powergating.
 * Clear the bit to let atom handle it.
 * Set it to let the driver handle it.
 * For now we just let atom handle it.
 */
#if 0
	u32 v = RREG32(mmDOUT_SCRATCH3);

	if (enable)
		v |= 0x4;
	else
		v &= 0xFFFFFFFB;

	WREG32(mmDOUT_SCRATCH3, v);
#endif
}

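/* Build the driver-side sclk/voltage and VID mapping tables from the
 * ATOM_AVAILABLE_SCLK_LIST reported by the VBIOS, skipping empty slots and
 * compacting the entries so the resulting tables are densely packed.
 */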
static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev,
						      struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
						      ATOM_AVAILABLE_SCLK_LIST *table)
{
	u32 i;
	u32 n = 0;
	u32 prev_sclk = 0;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
		if (table[i].ulSupportedSCLK > prev_sclk) {
			sclk_voltage_mapping_table->entries[n].sclk_frequency =
				table[i].ulSupportedSCLK;
			sclk_voltage_mapping_table->entries[n].vid_2bit =
				table[i].usVoltageIndex;
			prev_sclk = table[i].ulSupportedSCLK;
			n++;
		}
	}

	sclk_voltage_mapping_table->num_max_dpm_entries = n;
}

static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
					     struct sumo_vid_mapping_table *vid_mapping_table,
					     ATOM_AVAILABLE_SCLK_LIST *table)
{
	u32 i, j;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
		if (table[i].ulSupportedSCLK != 0) {
			vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
				table[i].usVoltageID;
			vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
				table[i].usVoltageIndex;
		}
	}

	for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
		if (vid_mapping_table->entries[i].vid_7bit == 0) {
			for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) {
				if (vid_mapping_table->entries[j].vid_7bit != 0) {
					vid_mapping_table->entries[i] =
						vid_mapping_table->entries[j];
					vid_mapping_table->entries[j].vid_7bit = 0;
					break;
				}
			}

			if (j == SUMO_MAX_NUMBER_VOLTAGES)
				break;
		}
	}

	vid_mapping_table->num_entries = i;
}

static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =

static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =

static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =

static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =

static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =

static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =

static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
{
	{ 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
{
	{ 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
{
	{ 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
{
	{ 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
{
	{ 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
{
	{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_pt_config_reg didt_config_kv[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps)
{
	struct kv_ps *ps = rps->ps_priv;

	return ps;
}

static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = adev->pm.dpm.priv;

	return pi;
}

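/* Program one local CAC (capacitance) block.  Each table entry names a
 * block id and a number of signals; the walker packs block, signal and
 * threshold fields into the block's control register, one write per
 * signal, until it reaches the 0xffffffff terminator entry.
 */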
static void kv_program_local_cac_table(struct amdgpu_device *adev,
				       const struct kv_lcac_config_values *local_cac_table,
				       const struct kv_lcac_config_reg *local_cac_reg)
{
	u32 i, count, data;
	const struct kv_lcac_config_values *values = local_cac_table;

	while (values->block_id != 0xffffffff) {
		count = values->signal_id;
		for (i = 0; i < count; i++) {
			data = ((values->block_id << local_cac_reg->block_shift) &
				local_cac_reg->block_mask);
			data |= ((i << local_cac_reg->signal_shift) &
				 local_cac_reg->signal_mask);
			data |= ((values->t << local_cac_reg->t_shift) &
				 local_cac_reg->t_mask);
			data |= ((1 << local_cac_reg->enable_shift) &
				 local_cac_reg->enable_mask);
			WREG32_SMC(local_cac_reg->cntl, data);
		}
		values++;
	}
}

static int kv_program_pt_config_registers(struct amdgpu_device *adev,
					  const struct kv_pt_config_reg *cac_config_regs)
{
	const struct kv_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == KV_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case KV_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;
			cache = 0;

			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case KV_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset, data);
				break;
			}
		}
		config_regs++;
	}

	return 0;
}

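/* Toggle the DIDT (di/dt, current-transient throttling) enable bit for each
 * of the SQ, DB, TD and TCP blocks whose ramping capability flags are set.
 */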
static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(ixDIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(ixDIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
	}
}

static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	if (pi->caps_sq_ramping ||
	    pi->caps_db_ramping ||
	    pi->caps_td_ramping ||
	    pi->caps_tcp_ramping) {
		adev->gfx.rlc.funcs->enter_safe_mode(adev);

		if (enable) {
			ret = kv_program_pt_config_registers(adev, didt_config_kv);
			if (ret) {
				adev->gfx.rlc.funcs->exit_safe_mode(adev);
				return ret;
			}
		}

		kv_do_enable_didt(adev, enable);

		adev->gfx.rlc.funcs->exit_safe_mode(adev);
	}

	return 0;
}

static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->caps_cac) {
		WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0);
		kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg);

		WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);

		WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);

		WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);

		WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);

		WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0);
		kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
	}
}

static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac);
			if (ret)
				pi->cac_enabled = false;
			else
				pi->cac_enabled = true;
		} else if (pi->cac_enabled) {
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

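/* The SMU firmware header stores the SRAM offsets of the DPM table and the
 * soft register area; cache them so that later amdgpu_kv_copy_bytes_to_smc()
 * calls can patch individual table fields in place.
 */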
static int kv_process_firmware_header(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 tmp;
	int ret;

	ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, DpmTable),
					    &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->dpm_table_start = tmp;

	ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, SoftRegisters),
					    &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->soft_regs_start = tmp;

	return ret;
}

static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->graphics_voltage_change_enable = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
					  &pi->graphics_voltage_change_enable,
					  sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_interval(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->graphics_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
					  &pi->graphics_interval,
					  sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_boot_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
					  &pi->graphics_boot_level,
					  sizeof(u8), pi->sram_end);

	return ret;
}

static void kv_program_vc(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100);
}

static void kv_clear_vc(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
}

static int kv_set_divider_value(struct amdgpu_device *adev,
				u32 index, u32 sclk)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct atom_clock_dividers dividers;
	int ret;

	ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
						 sclk, false, &dividers);
	if (ret)
		return ret;

	pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
	pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);

	return 0;
}

static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev,
					    u16 voltage)
{
	return 6200 - (voltage * 25);
}

static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev,
					    u32 vid_2bit)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 vid_8bit = kv_convert_vid2_to_vid7(adev,
					       &pi->sys_info.vid_mapping_table,
					       vid_2bit);

	return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit);
}

static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
	pi->graphics_level[index].MinVddNb =
		cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid));

	return 0;
}

static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].AT = cpu_to_be16((u16)at);

	return 0;
}

static void kv_dpm_power_level_enable(struct amdgpu_device *adev,
				      u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
}

static void kv_start_dpm(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);

	tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
	WREG32_SMC(ixGENERAL_PWRMGT, tmp);

	amdgpu_kv_smc_dpm_enable(adev, true);
}

static void kv_stop_dpm(struct amdgpu_device *adev)
{
	amdgpu_kv_smc_dpm_enable(adev, false);
}

static void kv_start_am(struct amdgpu_device *adev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
			      SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
	sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;

	WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static void kv_reset_am(struct amdgpu_device *adev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
			     SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);

	WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze)
{
	return amdgpu_kv_notify_message_to_smu(adev, freeze ?
					       PPSMC_MSG_SCLKDPM_FreezeLevel :
					       PPSMC_MSG_SCLKDPM_UnfreezeLevel);
}

static int kv_force_lowest_valid(struct amdgpu_device *adev)
{
	return kv_force_dpm_lowest(adev);
}

static int kv_unforce_levels(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel);
	else
		return kv_set_enabled_levels(adev);
}

static int kv_update_sclk_t(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 low_sclk_interrupt_t = 0;
	int ret = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
						  (u8 *)&low_sclk_interrupt_t,
						  sizeof(u32), pi->sram_end);
	}
	return ret;
}

static int kv_program_bootup_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(adev, i, true);
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		if (table->num_max_dpm_entries == 0)
			return -EINVAL;

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(adev, i, true);
	}
	return 0;
}

static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->graphics_therm_throttle_enable = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
					  &pi->graphics_therm_throttle_enable,
					  sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_upload_dpm_settings(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
					  (u8 *)&pi->graphics_level,
					  sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
					  pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
					  &pi->graphics_dpm_level_count,
					  sizeof(u8), pi->sram_end);

	return ret;
}

static u32 kv_get_clock_difference(u32 a, u32 b)
{
	return (a >= b) ? a - b : b - a;
}

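/* Pick a DFS bypass divider id for clocks that sit within 200 units of one
 * of the well-known bypass frequencies; clocks that match none of them run
 * without bypass (id 0).
 */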
static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->caps_enable_dfs_bypass) {
		if (kv_get_clock_difference(clk, 40000) < 200)
			return 3;
		else if (kv_get_clock_difference(clk, 30000) < 200)
			return 2;
		else if (kv_get_clock_difference(clk, 20000) < 200)
			return 7;
		else if (kv_get_clock_difference(clk, 15000) < 200)
			return 6;
		else if (kv_get_clock_difference(clk, 10000) < 200)
			return 8;
		else
			return 0;
	} else {
		return 0;
	}
}

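/* The SMU7 fusion DPM tables are big-endian, hence the cpu_to_be16/32
 * conversions while filling the UVD/VCE/SAMU/ACP level arrays below; each
 * populated table is then copied into SMU SRAM along with its level count
 * and sampling interval.
 */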
static int kv_populate_uvd_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_uvd_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->uvd_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    (pi->high_voltage_t < table->entries[i].v))
			break;

		pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
		pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
		pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);

		pi->uvd_level[i].VClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].vclk);
		pi->uvd_level[i].DClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].dclk);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].vclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].dclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;

		pi->uvd_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
					  (u8 *)&pi->uvd_level_count,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->uvd_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, UVDInterval),
					  &pi->uvd_interval,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, UvdLevel),
					  (u8 *)&pi->uvd_level,
					  sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
					  pi->sram_end);

	return ret;
}

static int kv_populate_vce_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;
	u32 i;
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;

	if (table == NULL || table->count == 0)
		return 0;

	pi->vce_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
		pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->vce_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].evclk);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].evclk, false, &dividers);
		if (ret)
			return ret;
		pi->vce_level[i].Divider = (u8)dividers.post_div;

		pi->vce_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
					  (u8 *)&pi->vce_level_count,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->vce_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, VCEInterval),
					  (u8 *)&pi->vce_interval,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, VceLevel),
					  (u8 *)&pi->vce_level,
					  sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
					  pi->sram_end);

	return ret;
}

static int kv_populate_samu_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->samu_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->samu_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].clk);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->samu_level[i].Divider = (u8)dividers.post_div;

		pi->samu_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
					  (u8 *)&pi->samu_level_count,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->samu_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
					  (u8 *)&pi->samu_interval,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, SamuLevel),
					  (u8 *)&pi->samu_level,
					  sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
					  pi->sram_end);

	return ret;
}

static int kv_populate_acp_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->acp_level_count = 0;
	for (i = 0; i < table->count; i++) {
		pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->acp_level[i].Divider = (u8)dividers.post_div;

		pi->acp_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
					  (u8 *)&pi->acp_level_count,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->acp_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, ACPInterval),
					  (u8 *)&pi->acp_interval,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, AcpLevel),
					  (u8 *)&pi->acp_level,
					  sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
					  pi->sram_end);

	return ret;
}

static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	}
}

static int kv_enable_ulv(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
}

static void kv_reset_acp_boot_level(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->acp_boot_level = 0xff;
}

static void kv_update_current_ps(struct amdgpu_device *adev,
				 struct amdgpu_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
}

static void kv_update_requested_ps(struct amdgpu_device *adev,
				   struct amdgpu_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
}

static void kv_dpm_enable_bapm(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	if (pi->bapm_enable) {
		ret = amdgpu_kv_smc_bapm_enable(adev, enable);
		if (ret)
			DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
	}
}

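/* Bring up DPM: parse the firmware header, program bootup and graphics
 * levels, upload the per-block clock tables, then enable throttling,
 * voltage scaling, ULV, DIDT and CAC in order.  Thermal interrupts are
 * hooked up last, once an internal thermal sensor is confirmed.
 */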
static int kv_dpm_enable(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	ret = kv_process_firmware_header(adev);
	if (ret) {
		DRM_ERROR("kv_process_firmware_header failed\n");
		return ret;
	}
	kv_init_fps_limits(adev);
	kv_init_graphics_levels(adev);
	ret = kv_program_bootup_state(adev);
	if (ret) {
		DRM_ERROR("kv_program_bootup_state failed\n");
		return ret;
	}
	kv_calculate_dfs_bypass_settings(adev);
	ret = kv_upload_dpm_settings(adev);
	if (ret) {
		DRM_ERROR("kv_upload_dpm_settings failed\n");
		return ret;
	}
	ret = kv_populate_uvd_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_uvd_table failed\n");
		return ret;
	}
	ret = kv_populate_vce_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_vce_table failed\n");
		return ret;
	}
	ret = kv_populate_samu_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_samu_table failed\n");
		return ret;
	}
	ret = kv_populate_acp_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_acp_table failed\n");
		return ret;
	}
	kv_program_vc(adev);
#if 0
	kv_initialize_hardware_cac_manager(adev);
#endif
	kv_start_am(adev);
	if (pi->enable_auto_thermal_throttling) {
		ret = kv_enable_auto_thermal_throttling(adev);
		if (ret) {
			DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
			return ret;
		}
	}
	ret = kv_enable_dpm_voltage_scaling(adev);
	if (ret) {
		DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
		return ret;
	}
	ret = kv_set_dpm_interval(adev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_interval failed\n");
		return ret;
	}
	ret = kv_set_dpm_boot_state(adev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_boot_state failed\n");
		return ret;
	}
	ret = kv_enable_ulv(adev, true);
	if (ret) {
		DRM_ERROR("kv_enable_ulv failed\n");
		return ret;
	}
	kv_start_dpm(adev);
	ret = kv_enable_didt(adev, true);
	if (ret) {
		DRM_ERROR("kv_enable_didt failed\n");
		return ret;
	}
	ret = kv_enable_smc_cac(adev, true);
	if (ret) {
		DRM_ERROR("kv_enable_smc_cac failed\n");
		return ret;
	}

	kv_reset_acp_boot_level(adev);

	ret = amdgpu_kv_smc_bapm_enable(adev, false);
	if (ret) {
		DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
		return ret;
	}

	kv_update_current_ps(adev, adev->pm.dpm.boot_ps);

	if (adev->irq.installed &&
	    amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
		ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
		if (ret) {
			DRM_ERROR("kv_set_thermal_temperature_range failed\n");
			return ret;
		}
		amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
			       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
		amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
			       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
	}

	return ret;
}

static void kv_dpm_disable(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);

	amdgpu_kv_smc_bapm_enable(adev, false);

	if (adev->asic_type == CHIP_MULLINS)
		kv_enable_nb_dpm(adev, false);

	/* powerup blocks */
	kv_dpm_powergate_acp(adev, false);
	kv_dpm_powergate_samu(adev, false);
	kv_dpm_powergate_vce(adev, false);
	kv_dpm_powergate_uvd(adev, false);

	kv_enable_smc_cac(adev, false);
	kv_enable_didt(adev, false);
	kv_clear_vc(adev);
	kv_stop_dpm(adev);
	kv_enable_ulv(adev, false);
	kv_reset_am(adev);

	kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
}

static int kv_write_smc_soft_register(struct amdgpu_device *adev,
				      u16 reg_offset, u32 value)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset,
					   (u8 *)&value, sizeof(u16), pi->sram_end);
}

static int kv_read_smc_soft_register(struct amdgpu_device *adev,
				     u16 reg_offset, u32 *value)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset,
					     value, pi->sram_end);
}

static void kv_init_sclk_t(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->low_sclk_interrupt_t = 0;
}

static int kv_init_fps_limits(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		pi->fps_high_t = cpu_to_be16(tmp);
		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, FpsHighT),
						  (u8 *)&pi->fps_high_t,
						  sizeof(u16), pi->sram_end);

		tmp = 30;
		pi->fps_low_t = cpu_to_be16(tmp);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, FpsLowT),
						  (u8 *)&pi->fps_low_t,
						  sizeof(u16), pi->sram_end);
	}
	return ret;
}

static void kv_init_powergate_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->uvd_power_gated = false;
	pi->vce_power_gated = false;
	pi->samu_power_gated = false;
	pi->acp_power_gated = false;
}

static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
}

static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
}

static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
}

static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
}

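/* When a block is ungated, choose its boot level (the highest table entry
 * in stable-p-state mode, the lowest otherwise), write it into the SMU DPM
 * table, and constrain the SMU to the matching enabled-level mask before
 * turning the per-block DPM on.
 */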
static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_uvd_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	int ret;
	u32 mask;

	if (!gate) {
		if (table->count)
			pi->uvd_boot_level = table->count - 1;
		else
			pi->uvd_boot_level = 0;

		if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
			mask = 1 << pi->uvd_boot_level;
		} else {
			mask = 0x1f;
		}

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
						  (uint8_t *)&pi->uvd_boot_level,
						  sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		amdgpu_kv_send_msg_to_smc_with_parameter(adev,
							 PPSMC_MSG_UVDDPM_SetEnabledMask,
							 mask);
	}

	return kv_enable_uvd_dpm(adev, !gate);
}

static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk)
{
	u8 i;
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].evclk >= evclk)
			break;
	}

	return i;
}

static int kv_update_vce_dpm(struct amdgpu_device *adev,
			     struct amdgpu_ps *amdgpu_new_state,
			     struct amdgpu_ps *amdgpu_current_state)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	int ret;

	if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
		kv_dpm_powergate_vce(adev, false);
		/* turn the clocks on when encoding */
		ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						   AMD_CG_STATE_UNGATE);
		if (ret)
			return ret;
		if (pi->caps_stable_p_state)
			pi->vce_boot_level = table->count - 1;
		else
			pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
						  (u8 *)&pi->vce_boot_level,
						  sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_VCEDPM_SetEnabledMask,
								 (1 << pi->vce_boot_level));

		kv_enable_vce_dpm(adev, true);
	} else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
		kv_enable_vce_dpm(adev, false);
		/* turn the clocks off when not encoding */
		ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						   AMD_CG_STATE_GATE);
		if (ret)
			return ret;

		kv_dpm_powergate_vce(adev, true);
	}

	return 0;
}

static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->samu_boot_level = table->count - 1;
		else
			pi->samu_boot_level = 0;

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
						  (u8 *)&pi->samu_boot_level,
						  sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_SAMUDPM_SetEnabledMask,
								 (1 << pi->samu_boot_level));
	}

	return kv_enable_samu_dpm(adev, !gate);
}

static u8 kv_get_acp_boot_level(struct amdgpu_device *adev)
{
	u8 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].clk >= 0) /* XXX */
			break;
	}

	if (i >= table->count)
		i = table->count - 1;

	return i;
}

static void kv_update_acp_boot_level(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u8 acp_boot_level;

	if (!pi->caps_stable_p_state) {
		acp_boot_level = kv_get_acp_boot_level(adev);
		if (acp_boot_level != pi->acp_boot_level) {
			pi->acp_boot_level = acp_boot_level;
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_ACPDPM_SetEnabledMask,
								 (1 << pi->acp_boot_level));
		}
	}
}

static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->acp_boot_level = table->count - 1;
		else
			pi->acp_boot_level = kv_get_acp_boot_level(adev);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
						  (u8 *)&pi->acp_boot_level,
						  sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_ACPDPM_SetEnabledMask,
								 (1 << pi->acp_boot_level));
	}

	return kv_enable_acp_dpm(adev, !gate);
}

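/* Powergate ordering: on gating, clockgating is disabled first so the block
 * can be shut down cleanly, then the block is powergated and the SMU is told
 * to cut power; ungating runs the same steps in reverse.
 */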
static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	if (gate) {
		if (pi->caps_uvd_pg) {
			/* disable clockgating so we can properly shut down the block */
			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							   AMD_CG_STATE_UNGATE);
			/* shutdown the UVD block */
			ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							   AMD_PG_STATE_GATE);
			/* XXX: check for errors */
		}
		kv_update_uvd_dpm(adev, gate);
		if (pi->caps_uvd_pg)
			/* power off the UVD block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF);
	} else {
		if (pi->caps_uvd_pg) {
			/* power on the UVD block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
			/* re-init the UVD block */
			ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							   AMD_PG_STATE_UNGATE);
			/* enable clockgating. hw will dynamically gate/ungate clocks on the fly */
			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							   AMD_CG_STATE_GATE);
			/* XXX: check for errors */
		}
		kv_update_uvd_dpm(adev, gate);
	}
}

static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	if (pi->vce_power_gated == gate)
		return;

	pi->vce_power_gated = gate;

	if (gate) {
		if (pi->caps_vce_pg) {
			/* shutdown the VCE block */
			ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							   AMD_PG_STATE_GATE);
			/* XXX: check for errors */
			/* power off the VCE block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
		}
	} else {
		if (pi->caps_vce_pg) {
			/* power on the VCE block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
			/* re-init the VCE block */
			ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							   AMD_PG_STATE_UNGATE);
			/* XXX: check for errors */
		}
	}
}

static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->samu_power_gated == gate)
		return;

	pi->samu_power_gated = gate;

	if (gate) {
		kv_update_samu_dpm(adev, true);
		if (pi->caps_samu_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF);
	} else {
		if (pi->caps_samu_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON);
		kv_update_samu_dpm(adev, false);
	}
}

static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->acp_power_gated == gate)
		return;

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return;

	pi->acp_power_gated = gate;

	if (gate) {
		kv_update_acp_dpm(adev, true);
		if (pi->caps_acp_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF);
	} else {
		if (pi->caps_acp_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON);
		kv_update_acp_dpm(adev, false);
	}
}

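/* Clamp the DPM level window to the new power state: lowest_valid is the
 * first level fast enough for the state's minimum sclk, highest_valid the
 * last level not exceeding its maximum.  If the window inverts, the closer
 * bound wins.
 */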
static void kv_set_valid_clock_range(struct amdgpu_device *adev,
				     struct amdgpu_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
			    (i == (pi->graphics_dpm_level_count - 1))) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
			    (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
			if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
			    i == (int)(pi->graphics_dpm_level_count - 1)) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency <=
			    new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk -
			     table->entries[pi->highest_valid].sclk_frequency) >
			    (table->entries[pi->lowest_valid].sclk_frequency -
			     new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	}
}

static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev,
					 struct amdgpu_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;
	u8 clk_bypass_cntl;

	if (pi->caps_enable_dfs_bypass) {
		clk_bypass_cntl = new_ps->need_dfs_bypass ?
			pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  (pi->dpm_table_start +
						   offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
						   (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
						   offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
						  &clk_bypass_cntl,
						  sizeof(u8), pi->sram_end);
	}

	return ret;
}

static int kv_enable_nb_dpm(struct amdgpu_device *adev,
			    bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;

	if (enable) {
		if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
			ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Enable);
			if (ret == 0)
				pi->nb_dpm_enabled = true;
		}
	} else {
		if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
			ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Disable);
			if (ret == 0)
				pi->nb_dpm_enabled = false;
		}
	}

	return ret;
}

static int kv_dpm_force_performance_level(struct amdgpu_device *adev,
					  enum amdgpu_dpm_forced_level level)
{
	int ret;

	if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
		ret = kv_force_dpm_highest(adev);
		if (ret)
			return ret;
	} else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
		ret = kv_force_dpm_lowest(adev);
		if (ret)
			return ret;
	} else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
		ret = kv_unforce_levels(adev);
		if (ret)
			return ret;
	}

	adev->pm.dpm.forced_level = level;

	return 0;
}

static int kv_dpm_pre_set_power_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
	struct amdgpu_ps *new_ps = &requested_ps;

	kv_update_requested_ps(adev, new_ps);

	kv_apply_state_adjust_rules(adev,
				    &pi->requested_rps,
				    &pi->current_rps);

	return 0;
}

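/* Apply the requested power state.  Kabini/Mullins reprogram the levels
 * around a forced-lowest window, while Kaveri freezes SCLK DPM while the
 * tables are rewritten; both paths then update VCE DPM and the low-sclk
 * interrupt threshold.
 */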
static int kv_dpm_set_power_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_ps *new_ps = &pi->requested_rps;
	struct amdgpu_ps *old_ps = &pi->current_rps;
	int ret;

	if (pi->bapm_enable) {
		ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.dpm.ac_power);
		if (ret) {
			DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
			return ret;
		}
	}

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(adev, new_ps);
			kv_update_dfs_bypass_settings(adev, new_ps);
			ret = kv_calculate_ds_divider(adev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(adev);
			kv_calculate_dpm_settings(adev);
			kv_force_lowest_valid(adev);
			kv_enable_new_levels(adev);
			kv_upload_dpm_settings(adev);
			kv_program_nbps_index_settings(adev, new_ps);
			kv_unforce_levels(adev);
			kv_set_enabled_levels(adev);
			kv_force_lowest_valid(adev);
			kv_unforce_levels(adev);

			ret = kv_update_vce_dpm(adev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_sclk_t(adev);
			if (adev->asic_type == CHIP_MULLINS)
				kv_enable_nb_dpm(adev, true);
		}
	} else {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(adev, new_ps);
			kv_update_dfs_bypass_settings(adev, new_ps);
			ret = kv_calculate_ds_divider(adev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(adev);
			kv_calculate_dpm_settings(adev);
			kv_freeze_sclk_dpm(adev, true);
			kv_upload_dpm_settings(adev);
			kv_program_nbps_index_settings(adev, new_ps);
			kv_freeze_sclk_dpm(adev, false);
			kv_set_enabled_levels(adev);
			ret = kv_update_vce_dpm(adev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_acp_boot_level(adev);
			kv_update_sclk_t(adev);
			kv_enable_nb_dpm(adev, true);
		}
	}

	return 0;
}

static void kv_dpm_post_set_power_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_ps *new_ps = &pi->requested_rps;

	kv_update_current_ps(adev, new_ps);
}

static void kv_dpm_setup_asic(struct amdgpu_device *adev)
{
	sumo_take_smu_control(adev, true);
	kv_init_powergate_state(adev);
	kv_init_sclk_t(adev);
}

#if 0
static void kv_dpm_reset_asic(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
		kv_force_lowest_valid(adev);
		kv_init_graphics_levels(adev);
		kv_program_bootup_state(adev);
		kv_upload_dpm_settings(adev);
		kv_force_lowest_valid(adev);
		kv_unforce_levels(adev);
	} else {
		kv_init_graphics_levels(adev);
		kv_program_bootup_state(adev);
		kv_freeze_sclk_dpm(adev, true);
		kv_upload_dpm_settings(adev);
		kv_freeze_sclk_dpm(adev, false);
		kv_set_enabled_level(adev, pi->graphics_boot_level);
	}
}
#endif

static void kv_construct_max_power_limits_table(struct amdgpu_device *adev,
						struct amdgpu_clock_and_voltage_limits *table)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
		int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
		table->sclk =
			pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
		table->vddc =
			kv_convert_2bit_index_to_voltage(adev,
							 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
	}

	table->mclk = pi->sys_info.nbp_memory_clock[0];
}

static void kv_patch_voltage_values(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table *samu_table =
		&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table *acp_table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	if (uvd_table->count) {
		for (i = 0; i < uvd_table->count; i++)
			uvd_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(adev,
								 uvd_table->entries[i].v);
	}

	if (vce_table->count) {
		for (i = 0; i < vce_table->count; i++)
			vce_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(adev,
								 vce_table->entries[i].v);
	}

	if (samu_table->count) {
		for (i = 0; i < samu_table->count; i++)
			samu_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(adev,
								 samu_table->entries[i].v);
	}

	if (acp_table->count) {
		for (i = 0; i < acp_table->count; i++)
			acp_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(adev,
								 acp_table->entries[i].v);
	}
}

static void kv_construct_boot_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
	pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
	pi->boot_pl.ds_divider_index = 0;
	pi->boot_pl.ss_divider_index = 0;
	pi->boot_pl.allow_gnb_slow = 1;
	pi->boot_pl.force_nbp_state = 0;
	pi->boot_pl.display_wm = 0;
	pi->boot_pl.vce_wm = 0;
}

static int kv_force_dpm_highest(struct amdgpu_device *adev)
{
	int ret;
	u32 enable_mask, i;

	ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
	if (ret)
		return ret;

	for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
		if (enable_mask & (1 << i))
			break;
	}

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(adev, i);
}

static int kv_force_dpm_lowest(struct amdgpu_device *adev)
{
	int ret;
	u32 enable_mask, i;

	ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
	if (ret)
		return ret;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (enable_mask & (1 << i))
			break;
	}

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(adev, i);
}

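/*
 * Map an engine clock to a deep-sleep divider ID: the divider chosen is
 * the largest power-of-two shift that keeps the divided clock at or above
 * max(min_sclk_in_sr, KV_MINIMUM_ENGINE_CLOCK).
 */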
static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
					     u32 sclk, u32 min_sclk_in_sr)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	u32 temp;
	u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK);

	if (sclk < min)
		return 0;

	if (!pi->caps_sclk_ds)
		return 0;

	for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
		temp = sclk >> i;
		if (temp >= min)
			break;
	}

	return (u8)i;
}

static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	int i;

	if (table && table->count) {
		for (i = table->count - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	}

	*limit = 0;
	return 0;
}

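/*
 * Adjust a requested power state before it is programmed: raise per-level
 * engine clocks to the computed floor, clamp levels whose voltage exceeds
 * the high-voltage threshold, pin every level to the stable-p-state clock
 * when that cap is set, and pick NB p-state ranges based on display count,
 * memory clock and battery state.
 */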
static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *new_rps,
					struct amdgpu_ps *old_rps)
{
	struct kv_ps *ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 min_sclk = 10000; /* ??? */
	u32 sclk, mclk = 0;
	int i, limit;
	bool force_high;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 stable_p_state_sclk = 0;
	struct amdgpu_clock_and_voltage_limits *max_limits =
		&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	if (new_rps->vce_active) {
		new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
		new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
	} else {
		new_rps->evclk = 0;
		new_rps->ecclk = 0;
	}

	mclk = max_limits->mclk;
	sclk = min_sclk;

	if (pi->caps_stable_p_state) {
		stable_p_state_sclk = (max_limits->sclk * 75) / 100;

		for (i = table->count - 1; i >= 0; i--) {
			if (stable_p_state_sclk >= table->entries[i].clk) {
				stable_p_state_sclk = table->entries[i].clk;
				break;
			}
		}

		if (i > 0)
			stable_p_state_sclk = table->entries[0].clk;

		sclk = stable_p_state_sclk;
	}

	if (new_rps->vce_active) {
		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
	}

	ps->need_dfs_bypass = true;

	for (i = 0; i < ps->num_levels; i++) {
		if (ps->levels[i].sclk < sclk)
			ps->levels[i].sclk = sclk;
	}

	if (table && table->count) {
		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(adev, &limit);
				ps->levels[i].sclk = table->entries[limit].clk;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < ps->num_levels; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
				kv_get_high_voltage_limit(adev, &limit);
				ps->levels[i].sclk = table->entries[limit].sclk_frequency;
			}
		}
	}

	if (pi->caps_stable_p_state) {
		for (i = 0; i < ps->num_levels; i++) {
			ps->levels[i].sclk = stable_p_state_sclk;
		}
	}

	pi->video_start = new_rps->dclk || new_rps->vclk ||
		new_rps->evclk || new_rps->ecclk;

	if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
	    ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
		ps->dpm0_pg_nb_ps_lo = 0x1;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x1;
		ps->dpmx_nb_ps_hi = 0x0;
	} else {
		ps->dpm0_pg_nb_ps_lo = 0x3;
		ps->dpm0_pg_nb_ps_hi = 0x0;
		ps->dpmx_nb_ps_lo = 0x3;
		ps->dpmx_nb_ps_hi = 0x0;

		if (pi->sys_info.nb_dpm_enable) {
			force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
				pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) ||
				pi->disable_nb_ps3_in_battery;
			ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpm0_pg_nb_ps_hi = 0x2;
			ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
			ps->dpmx_nb_ps_hi = 0x2;
		}
	}
}

static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev,
						    u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
}

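/*
 * Fill in the DeepSleepDivId for every valid graphics level from its
 * SclkFrequency (stored big-endian in the SMU table).
 */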
static int kv_calculate_ds_divider(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 sclk_in_sr = 10000; /* ??? */
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
		pi->graphics_level[i].DeepSleepDivId =
			kv_get_sleep_divider_id_from_clock(adev,
							   be32_to_cpu(pi->graphics_level[i].SclkFrequency),
							   sclk_in_sr);
	}

	return 0;
}

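/*
 * Decide, per graphics level, whether the GNB may run slow and whether
 * NB p-state 1 must be forced; a high memory clock, three or more active
 * crtcs, or active video playback force the fast NB configuration.
 */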
static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	bool force_high;
	struct amdgpu_clock_and_voltage_limits *max_limits =
		&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	u32 mclk = max_limits->mclk;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (!pi->sys_info.nb_dpm_enable)
			return 0;

		force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
			      (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);

		if (force_high) {
			for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
				pi->graphics_level[i].GnbSlow = 0;
		} else {
			if (pi->battery_state)
				pi->graphics_level[0].ForceNbPs1 = 1;

			pi->graphics_level[1].GnbSlow = 0;
			pi->graphics_level[2].GnbSlow = 0;
			pi->graphics_level[3].GnbSlow = 0;
			pi->graphics_level[4].GnbSlow = 0;
		}
	} else {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
			pi->graphics_level[pi->lowest_valid].UpH = 0x28;
			pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
			if (pi->lowest_valid != pi->highest_valid)
				pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
		}
	}

	return 0;
}

static int kv_calculate_dpm_settings(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;

	return 0;
}

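/*
 * Build the SMU graphics level table from the BIOS vddc/sclk dependency
 * table when one exists, otherwise from the sumo sclk/voltage mapping
 * table; entries above the high-voltage threshold are skipped.  All
 * levels start out disabled and are enabled later by kv_enable_new_levels.
 */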
static void kv_init_graphics_levels(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		u32 vid_2bit;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->count; i++) {
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(adev, table->entries[i].v)))
				break;

			kv_set_divider_value(adev, i, table->entries[i].clk);
			vid_2bit = kv_convert_vid7_to_vid2(adev,
							   &pi->sys_info.vid_mapping_table,
							   table->entries[i].v);
			kv_set_vid(adev, i, vid_2bit);
			kv_set_at(adev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(adev, i, true);
			pi->graphics_dpm_level_count++;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->num_max_dpm_entries; i++) {
			if (pi->high_voltage_t &&
			    pi->high_voltage_t <
			    kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit))
				break;

			kv_set_divider_value(adev, i, table->entries[i].sclk_frequency);
			kv_set_vid(adev, i, table->entries[i].vid_2bit);
			kv_set_at(adev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(adev, i, true);
			pi->graphics_dpm_level_count++;
		}
	}

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
		kv_dpm_power_level_enable(adev, i, false);
}

static void kv_enable_new_levels(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (i >= pi->lowest_valid && i <= pi->highest_valid)
			kv_dpm_power_level_enable(adev, i, true);
	}
}

static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level)
{
	u32 new_mask = (1 << level);

	return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
							PPSMC_MSG_SCLKDPM_SetEnabledMask,
							new_mask);
}

static int kv_set_enabled_levels(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i, new_mask = 0;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
		new_mask |= (1 << i);

	return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
							PPSMC_MSG_SCLKDPM_SetEnabledMask,
							new_mask);
}

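/*
 * Push the per-state NB p-state ranges selected in
 * kv_apply_state_adjust_rules into the NB_DPM_CONFIG_1 register.
 * Kabini and Mullins have no NB DPM and return early.
 */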
static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
					   struct amdgpu_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 nbdpmconfig1;

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return;

	if (pi->sys_info.nb_dpm_enable) {
		nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1);
		nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK |
				  NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK |
				  NB_DPM_CONFIG_1__DpmXNbPsLo_MASK |
				  NB_DPM_CONFIG_1__DpmXNbPsHi_MASK);
		nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) |
			(new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) |
			(new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) |
			(new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT);
		WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1);
	}
}

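/*
 * Program the thermal interrupt thresholds.  The DIG_THERM_INTH/INTL
 * fields hold (temperature in degrees C + 49), so e.g. a 120000
 * millidegree limit is written as 49 + 120 = 169.
 */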
static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
	tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK |
		 CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK);
	tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) |
		((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT);
	WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp);

	adev->pm.dpm.thermal.min_temp = low_temp;
	adev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
};

static int kv_parse_sys_info_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	union igp_info *igp_info;
	u8 frev, crev;
	u16 data_offset;
	int i;

	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)(mode_info->atom_context->bios +
					      data_offset);

		if (crev != 8) {
			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
			return -EINVAL;
		}
		pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
		pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
		pi->sys_info.bootup_nb_voltage_index =
			le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
		if (igp_info->info_8.ucHtcTmpLmt == 0)
			pi->sys_info.htc_tmp_lmt = 203;
		else
			pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
		if (igp_info->info_8.ucHtcHystLmt == 0)
			pi->sys_info.htc_hyst_lmt = 5;
		else
			pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
		if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
			DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
		}

		if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
			pi->sys_info.nb_dpm_enable = true;
		else
			pi->sys_info.nb_dpm_enable = false;

		for (i = 0; i < KV_NUM_NBPSTATES; i++) {
			pi->sys_info.nbp_memory_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
			pi->sys_info.nbp_n_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
		}
		if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
		    SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
			pi->caps_enable_dfs_bypass = true;

		sumo_construct_sclk_voltage_mapping_table(adev,
							  &pi->sys_info.sclk_voltage_mapping_table,
							  igp_info->info_8.sAvail_SCLK);

		sumo_construct_vid_mapping_table(adev,
						 &pi->sys_info.vid_mapping_table,
						 igp_info->info_8.sAvail_SCLK);

		kv_construct_max_power_limits_table(adev,
						    &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	}
	return 0;
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};

union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};

static void kv_patch_boot_state(struct amdgpu_device *adev,
				struct kv_ps *ps)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	ps->num_levels = 1;
	ps->levels[0] = pi->boot_pl;
}

static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev,
					  struct amdgpu_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	struct kv_ps *ps = kv_get_ps(rps);

	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		adev->pm.dpm.boot_ps = rps;
		kv_patch_boot_state(adev, ps);
	}
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		adev->pm.dpm.uvd_ps = rps;
}

static void kv_parse_pplib_clock_info(struct amdgpu_device *adev,
				      struct amdgpu_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct kv_ps *ps = kv_get_ps(rps);
	struct kv_pl *pl = &ps->levels[index];
	u32 sclk;

	sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
	sclk |= clock_info->sumo.ucEngineClockHigh << 16;
	pl->sclk = sclk;
	pl->vddc_index = clock_info->sumo.vddcIndex;

	ps->num_levels = index + 1;

	if (pi->caps_sclk_ds) {
		pl->ds_divider_index = 5;
		pl->ss_divider_index = 5;
	}
}

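/*
 * Walk the variable-length PPLib state array: each state entry is a
 * 2-byte header followed by one clock-info index per DPM level, so the
 * offset advances by 2 + ucNumDPMLevels per state.  The VCE states are
 * resolved against the same clock-info array afterwards.
 */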
static int kv_parse_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct kv_ps *ps;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	amdgpu_add_thermal_controller(adev);

	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
				  state_array->ucNumEntries, GFP_KERNEL);
	if (!adev->pm.dpm.ps)
		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	for (i = 0; i < state_array->ucNumEntries; i++) {
		u8 *idx;
		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
		if (ps == NULL) {
			kfree(adev->pm.dpm.ps);
			return -ENOMEM;
		}
		adev->pm.dpm.ps[i].ps_priv = ps;
		k = 0;
		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = idx[j];
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				((u8 *)&clock_info_array->clockInfo[0] +
				 (clock_array_index * clock_info_array->ucEntrySize));
			kv_parse_pplib_clock_info(adev,
						  &adev->pm.dpm.ps[i], k,
						  clock_info);
			k++;
		}
		kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
	}
	adev->pm.dpm.num_ps = state_array->ucNumEntries;

	/* fill in the vce power states */
	for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
		u32 sclk;
		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
		clock_info = (union pplib_clock_info *)
			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
		sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
		sclk |= clock_info->sumo.ucEngineClockHigh << 16;
		adev->pm.dpm.vce_states[i].sclk = sclk;
		adev->pm.dpm.vce_states[i].mclk = 0;
	}

	return 0;
}

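/*
 * One-time software setup: allocate the private kv_power_info, seed the
 * capability flags (BAPM honours the amdgpu_bapm module parameter, the
 * UVD/VCE/SAMU/ACP powergating caps follow adev->pg_flags), then parse
 * the BIOS system-info and power tables and construct the boot state.
 */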
static int kv_dpm_init(struct amdgpu_device *adev)
{
	struct kv_power_info *pi;
	int ret, i;

	pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	adev->pm.dpm.priv = pi;

	ret = amdgpu_get_platform_caps(adev);
	if (ret)
		return ret;

	ret = amdgpu_parse_extended_power_table(adev);
	if (ret)
		return ret;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
		pi->at[i] = TRINITY_AT_DFLT;

	pi->sram_end = SMC_RAM_END;

	pi->enable_nb_dpm = true;

	pi->caps_power_containment = true;
	pi->caps_cac = true;
	pi->enable_didt = false;
	if (pi->enable_didt) {
		pi->caps_sq_ramping = true;
		pi->caps_db_ramping = true;
		pi->caps_td_ramping = true;
		pi->caps_tcp_ramping = true;
	}

	pi->caps_sclk_ds = true;
	pi->enable_auto_thermal_throttling = true;
	pi->disable_nb_ps3_in_battery = false;
	if (amdgpu_bapm == 0)
		pi->bapm_enable = false;
	else
		pi->bapm_enable = true;
	pi->voltage_drop_t = 0;
	pi->caps_sclk_throttle_low_notification = false;
	pi->caps_fps = false; /* true? */
	pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
	pi->caps_uvd_dpm = true;
	pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
	pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false;
	pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
	pi->caps_stable_p_state = false;

	ret = kv_parse_sys_info_table(adev);
	if (ret)
		return ret;

	kv_patch_voltage_values(adev);
	kv_construct_boot_state(adev);

	ret = kv_parse_power_table(adev);
	if (ret)
		return ret;

	pi->enable_dpm = true;

	return 0;
}

static void
kv_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
					       struct seq_file *m)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 current_index =
		(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
		TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
		TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
	u32 sclk, tmp;
	u16 vddc;

	if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
		seq_printf(m, "invalid dpm profile %d\n", current_index);
	} else {
		sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
		tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
		       SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
			SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
		vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp);
		seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
		seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
		seq_printf(m, "power level %d sclk: %u vddc: %u\n",
			   current_index, sclk, vddc);
	}
}

static void
kv_dpm_print_power_state(struct amdgpu_device *adev,
			 struct amdgpu_ps *rps)
{
	int i;
	struct kv_ps *ps = kv_get_ps(rps);

	amdgpu_dpm_print_class_info(rps->class, rps->class2);
	amdgpu_dpm_print_cap_info(rps->caps);
	printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->num_levels; i++) {
		struct kv_pl *pl = &ps->levels[i];
		printk("\t\tpower level %d sclk: %u vddc: %u\n",
		       i, pl->sclk,
		       kv_convert_8bit_index_to_voltage(adev, pl->vddc_index));
	}
	amdgpu_dpm_print_ps_status(adev, rps);
}

static void kv_dpm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		kfree(adev->pm.dpm.ps[i].ps_priv);
	}
	kfree(adev->pm.dpm.ps);
	kfree(adev->pm.dpm.priv);
	amdgpu_free_extended_power_table(adev);
}

static void kv_dpm_display_configuration_changed(struct amdgpu_device *adev)
{

}

static u32 kv_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->levels[0].sclk;
	else
		return requested_state->levels[requested_state->num_levels - 1].sclk;
}

static u32 kv_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	return pi->sys_info.bootup_uma_clk;
}

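/*
 * kv_dpm_get_temp() below converts the raw SMC thermal status register
 * as (raw / 8) - 49 degrees C; e.g. a raw value of 800 reads back as
 * (800 / 8) - 49 = 51 C, reported as 51000 millidegrees.
 */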
/* get temperature in millidegrees */
static int kv_dpm_get_temp(struct amdgpu_device *adev)
{
	u32 temp;
	int actual_temp = 0;

	temp = RREG32_SMC(0xC0300E0C);

	if (temp)
		actual_temp = (temp / 8) - 49;
	else
		actual_temp = 0;

	actual_temp = actual_temp * 1000;

	return actual_temp;
}

static int kv_dpm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	kv_dpm_set_dpm_funcs(adev);
	kv_dpm_set_irq_funcs(adev);

	return 0;
}

static int kv_dpm_late_init(void *handle)
{
	/* powerdown unused blocks for now */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (!amdgpu_dpm)
		return 0;

	/* init the sysfs and debugfs files late */
	ret = amdgpu_pm_sysfs_init(adev);
	if (ret)
		return ret;

	kv_dpm_powergate_acp(adev, true);
	kv_dpm_powergate_samu(adev, true);
	kv_dpm_powergate_vce(adev, true);
	kv_dpm_powergate_uvd(adev, true);

	return 0;
}

static int kv_dpm_sw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
	if (ret)
		return ret;

	/* default to balanced state */
	adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
	adev->pm.default_sclk = adev->clock.default_sclk;
	adev->pm.default_mclk = adev->clock.default_mclk;
	adev->pm.current_sclk = adev->clock.default_sclk;
	adev->pm.current_mclk = adev->clock.default_mclk;
	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (amdgpu_dpm == 0)
		return 0;

	INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
	mutex_lock(&adev->pm.mutex);
	ret = kv_dpm_init(adev);
	if (ret)
		goto dpm_failed;
	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
	if (amdgpu_dpm == 1)
		amdgpu_pm_print_power_states(adev);
	mutex_unlock(&adev->pm.mutex);
	DRM_INFO("amdgpu: dpm initialized\n");

	return 0;

dpm_failed:
	kv_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);
	DRM_ERROR("amdgpu: dpm initialization failed\n");
	return ret;
}

static int kv_dpm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mutex_lock(&adev->pm.mutex);
	amdgpu_pm_sysfs_fini(adev);
	kv_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

static int kv_dpm_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mutex_lock(&adev->pm.mutex);
	kv_dpm_setup_asic(adev);
	ret = kv_dpm_enable(adev);
	if (ret)
		adev->pm.dpm_enabled = false;
	else
		adev->pm.dpm_enabled = true;
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

static int kv_dpm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		kv_dpm_disable(adev);
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static int kv_dpm_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		/* disable dpm */
		kv_dpm_disable(adev);
		/* reset the power state */
		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static int kv_dpm_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		/* asic init will reset to the boot state */
		mutex_lock(&adev->pm.mutex);
		kv_dpm_setup_asic(adev);
		ret = kv_dpm_enable(adev);
		if (ret)
			adev->pm.dpm_enabled = false;
		else
			adev->pm.dpm_enabled = true;
		mutex_unlock(&adev->pm.mutex);
		if (adev->pm.dpm_enabled)
			amdgpu_pm_compute_clocks(adev);
	}

	return 0;
}

static bool kv_dpm_is_idle(void *handle)
{
	return true;
}

static int kv_dpm_wait_for_idle(void *handle)
{
	return 0;
}

static int kv_dpm_soft_reset(void *handle)
{
	return 0;
}

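/*
 * Thermal interrupt enables: the driver clears
 * THERM_INTH_MASK/THERM_INTL_MASK in CG_THERMAL_INT_CTRL for
 * AMDGPU_IRQ_STATE_DISABLE and sets them for AMDGPU_IRQ_STATE_ENABLE.
 */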
static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *src,
				      unsigned type,
				      enum amdgpu_interrupt_state state)
{
	u32 cg_thermal_int;

	switch (type) {
	case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	default:
		break;
	}
	return 0;
}

static int kv_dpm_process_interrupt(struct amdgpu_device *adev,
				    struct amdgpu_irq_src *source,
				    struct amdgpu_iv_entry *entry)
{
	bool queue_thermal = false;

	if (entry == NULL)
		return -EINVAL;

	switch (entry->src_id) {
	case 230: /* thermal low to high */
		DRM_DEBUG("IH: thermal low to high\n");
		adev->pm.dpm.thermal.high_to_low = false;
		queue_thermal = true;
		break;
	case 231: /* thermal high to low */
		DRM_DEBUG("IH: thermal high to low\n");
		adev->pm.dpm.thermal.high_to_low = true;
		queue_thermal = true;
		break;
	default:
		break;
	}

	if (queue_thermal)
		schedule_work(&adev->pm.dpm.thermal.work);

	return 0;
}

static int kv_dpm_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	return 0;
}

static int kv_dpm_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs kv_dpm_ip_funcs = {
	.early_init = kv_dpm_early_init,
	.late_init = kv_dpm_late_init,
	.sw_init = kv_dpm_sw_init,
	.sw_fini = kv_dpm_sw_fini,
	.hw_init = kv_dpm_hw_init,
	.hw_fini = kv_dpm_hw_fini,
	.suspend = kv_dpm_suspend,
	.resume = kv_dpm_resume,
	.is_idle = kv_dpm_is_idle,
	.wait_for_idle = kv_dpm_wait_for_idle,
	.soft_reset = kv_dpm_soft_reset,
	.set_clockgating_state = kv_dpm_set_clockgating_state,
	.set_powergating_state = kv_dpm_set_powergating_state,
};

static const struct amdgpu_dpm_funcs kv_dpm_funcs = {
	.get_temperature = &kv_dpm_get_temp,
	.pre_set_power_state = &kv_dpm_pre_set_power_state,
	.set_power_state = &kv_dpm_set_power_state,
	.post_set_power_state = &kv_dpm_post_set_power_state,
	.display_configuration_changed = &kv_dpm_display_configuration_changed,
	.get_sclk = &kv_dpm_get_sclk,
	.get_mclk = &kv_dpm_get_mclk,
	.print_power_state = &kv_dpm_print_power_state,
	.debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
	.force_performance_level = &kv_dpm_force_performance_level,
	.powergate_uvd = &kv_dpm_powergate_uvd,
	.enable_bapm = &kv_dpm_enable_bapm,
};

static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev)
{
	if (adev->pm.funcs == NULL)
		adev->pm.funcs = &kv_dpm_funcs;
}

static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
	.set = kv_dpm_set_interrupt_state,
	.process = kv_dpm_process_interrupt,
};

static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
	adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;
}