2 * Copyright 2013 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
29 #include "radeon_asic.h"
30 #include <linux/seq_file.h>
32 #define KV_MAX_DEEPSLEEP_DIVIDER_ID 5
33 #define KV_MINIMUM_ENGINE_CLOCK 800
34 #define SMC_RAM_END 0x40000
36 static void kv_init_graphics_levels(struct radeon_device
*rdev
);
37 static int kv_calculate_ds_divider(struct radeon_device
*rdev
);
38 static int kv_calculate_nbps_level_settings(struct radeon_device
*rdev
);
39 static int kv_calculate_dpm_settings(struct radeon_device
*rdev
);
40 static void kv_enable_new_levels(struct radeon_device
*rdev
);
41 static void kv_program_nbps_index_settings(struct radeon_device
*rdev
,
42 struct radeon_ps
*new_rps
);
43 static int kv_set_enabled_level(struct radeon_device
*rdev
, u32 level
);
44 static int kv_set_enabled_levels(struct radeon_device
*rdev
);
45 static int kv_force_dpm_highest(struct radeon_device
*rdev
);
46 static int kv_force_dpm_lowest(struct radeon_device
*rdev
);
47 static void kv_apply_state_adjust_rules(struct radeon_device
*rdev
,
48 struct radeon_ps
*new_rps
,
49 struct radeon_ps
*old_rps
);
50 static int kv_set_thermal_temperature_range(struct radeon_device
*rdev
,
51 int min_temp
, int max_temp
);
52 static int kv_init_fps_limits(struct radeon_device
*rdev
);
54 void kv_dpm_powergate_uvd(struct radeon_device
*rdev
, bool gate
);
55 static void kv_dpm_powergate_vce(struct radeon_device
*rdev
, bool gate
);
56 static void kv_dpm_powergate_samu(struct radeon_device
*rdev
, bool gate
);
57 static void kv_dpm_powergate_acp(struct radeon_device
*rdev
, bool gate
);
59 extern void cik_enter_rlc_safe_mode(struct radeon_device
*rdev
);
60 extern void cik_exit_rlc_safe_mode(struct radeon_device
*rdev
);
61 extern void cik_update_cg(struct radeon_device
*rdev
,
62 u32 block
, bool enable
);
64 static const struct kv_lcac_config_values sx_local_cac_cfg_kv
[] =
77 static const struct kv_lcac_config_values mc0_local_cac_cfg_kv
[] =
83 static const struct kv_lcac_config_values mc1_local_cac_cfg_kv
[] =
89 static const struct kv_lcac_config_values mc2_local_cac_cfg_kv
[] =
95 static const struct kv_lcac_config_values mc3_local_cac_cfg_kv
[] =
101 static const struct kv_lcac_config_values cpl_local_cac_cfg_kv
[] =
133 static const struct kv_lcac_config_reg sx0_cac_config_reg
[] =
135 { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
138 static const struct kv_lcac_config_reg mc0_cac_config_reg
[] =
140 { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
143 static const struct kv_lcac_config_reg mc1_cac_config_reg
[] =
145 { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
148 static const struct kv_lcac_config_reg mc2_cac_config_reg
[] =
150 { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
153 static const struct kv_lcac_config_reg mc3_cac_config_reg
[] =
155 { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
158 static const struct kv_lcac_config_reg cpl_cac_config_reg
[] =
160 { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
163 static const struct kv_pt_config_reg didt_config_kv
[] =
165 { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
166 { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
167 { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
168 { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
169 { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
170 { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
171 { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
172 { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
173 { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
174 { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
175 { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
176 { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
177 { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND
},
178 { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND
},
179 { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND
},
180 { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND
},
181 { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND
},
182 { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
183 { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
184 { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
185 { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
186 { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
187 { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
188 { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
189 { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
190 { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
191 { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
192 { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
193 { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
194 { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
195 { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND
},
196 { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND
},
197 { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND
},
198 { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND
},
199 { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND
},
200 { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
201 { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
202 { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
203 { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
204 { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
205 { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
206 { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
207 { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
208 { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
209 { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
210 { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
211 { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
212 { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
213 { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND
},
214 { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND
},
215 { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND
},
216 { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND
},
217 { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND
},
218 { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
219 { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
220 { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
221 { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
222 { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
223 { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
224 { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
225 { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
226 { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
227 { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
228 { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
229 { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
230 { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
231 { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND
},
232 { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND
},
233 { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND
},
234 { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND
},
235 { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND
},
236 { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
240 static struct kv_ps
*kv_get_ps(struct radeon_ps
*rps
)
242 struct kv_ps
*ps
= rps
->ps_priv
;
247 static struct kv_power_info
*kv_get_pi(struct radeon_device
*rdev
)
249 struct kv_power_info
*pi
= rdev
->pm
.dpm
.priv
;
255 static void kv_program_local_cac_table(struct radeon_device
*rdev
,
256 const struct kv_lcac_config_values
*local_cac_table
,
257 const struct kv_lcac_config_reg
*local_cac_reg
)
260 const struct kv_lcac_config_values
*values
= local_cac_table
;
262 while (values
->block_id
!= 0xffffffff) {
263 count
= values
->signal_id
;
264 for (i
= 0; i
< count
; i
++) {
265 data
= ((values
->block_id
<< local_cac_reg
->block_shift
) &
266 local_cac_reg
->block_mask
);
267 data
|= ((i
<< local_cac_reg
->signal_shift
) &
268 local_cac_reg
->signal_mask
);
269 data
|= ((values
->t
<< local_cac_reg
->t_shift
) &
270 local_cac_reg
->t_mask
);
271 data
|= ((1 << local_cac_reg
->enable_shift
) &
272 local_cac_reg
->enable_mask
);
273 WREG32_SMC(local_cac_reg
->cntl
, data
);
280 static int kv_program_pt_config_registers(struct radeon_device
*rdev
,
281 const struct kv_pt_config_reg
*cac_config_regs
)
283 const struct kv_pt_config_reg
*config_regs
= cac_config_regs
;
287 if (config_regs
== NULL
)
290 while (config_regs
->offset
!= 0xFFFFFFFF) {
291 if (config_regs
->type
== KV_CONFIGREG_CACHE
) {
292 cache
|= ((config_regs
->value
<< config_regs
->shift
) & config_regs
->mask
);
294 switch (config_regs
->type
) {
295 case KV_CONFIGREG_SMC_IND
:
296 data
= RREG32_SMC(config_regs
->offset
);
298 case KV_CONFIGREG_DIDT_IND
:
299 data
= RREG32_DIDT(config_regs
->offset
);
302 data
= RREG32(config_regs
->offset
<< 2);
306 data
&= ~config_regs
->mask
;
307 data
|= ((config_regs
->value
<< config_regs
->shift
) & config_regs
->mask
);
311 switch (config_regs
->type
) {
312 case KV_CONFIGREG_SMC_IND
:
313 WREG32_SMC(config_regs
->offset
, data
);
315 case KV_CONFIGREG_DIDT_IND
:
316 WREG32_DIDT(config_regs
->offset
, data
);
319 WREG32(config_regs
->offset
<< 2, data
);
329 static void kv_do_enable_didt(struct radeon_device
*rdev
, bool enable
)
331 struct kv_power_info
*pi
= kv_get_pi(rdev
);
334 if (pi
->caps_sq_ramping
) {
335 data
= RREG32_DIDT(DIDT_SQ_CTRL0
);
337 data
|= DIDT_CTRL_EN
;
339 data
&= ~DIDT_CTRL_EN
;
340 WREG32_DIDT(DIDT_SQ_CTRL0
, data
);
343 if (pi
->caps_db_ramping
) {
344 data
= RREG32_DIDT(DIDT_DB_CTRL0
);
346 data
|= DIDT_CTRL_EN
;
348 data
&= ~DIDT_CTRL_EN
;
349 WREG32_DIDT(DIDT_DB_CTRL0
, data
);
352 if (pi
->caps_td_ramping
) {
353 data
= RREG32_DIDT(DIDT_TD_CTRL0
);
355 data
|= DIDT_CTRL_EN
;
357 data
&= ~DIDT_CTRL_EN
;
358 WREG32_DIDT(DIDT_TD_CTRL0
, data
);
361 if (pi
->caps_tcp_ramping
) {
362 data
= RREG32_DIDT(DIDT_TCP_CTRL0
);
364 data
|= DIDT_CTRL_EN
;
366 data
&= ~DIDT_CTRL_EN
;
367 WREG32_DIDT(DIDT_TCP_CTRL0
, data
);
371 static int kv_enable_didt(struct radeon_device
*rdev
, bool enable
)
373 struct kv_power_info
*pi
= kv_get_pi(rdev
);
376 if (pi
->caps_sq_ramping
||
377 pi
->caps_db_ramping
||
378 pi
->caps_td_ramping
||
379 pi
->caps_tcp_ramping
) {
380 cik_enter_rlc_safe_mode(rdev
);
383 ret
= kv_program_pt_config_registers(rdev
, didt_config_kv
);
385 cik_exit_rlc_safe_mode(rdev
);
390 kv_do_enable_didt(rdev
, enable
);
392 cik_exit_rlc_safe_mode(rdev
);
399 static void kv_initialize_hardware_cac_manager(struct radeon_device
*rdev
)
401 struct kv_power_info
*pi
= kv_get_pi(rdev
);
404 WREG32_SMC(LCAC_SX0_OVR_SEL
, 0);
405 WREG32_SMC(LCAC_SX0_OVR_VAL
, 0);
406 kv_program_local_cac_table(rdev
, sx_local_cac_cfg_kv
, sx0_cac_config_reg
);
408 WREG32_SMC(LCAC_MC0_OVR_SEL
, 0);
409 WREG32_SMC(LCAC_MC0_OVR_VAL
, 0);
410 kv_program_local_cac_table(rdev
, mc0_local_cac_cfg_kv
, mc0_cac_config_reg
);
412 WREG32_SMC(LCAC_MC1_OVR_SEL
, 0);
413 WREG32_SMC(LCAC_MC1_OVR_VAL
, 0);
414 kv_program_local_cac_table(rdev
, mc1_local_cac_cfg_kv
, mc1_cac_config_reg
);
416 WREG32_SMC(LCAC_MC2_OVR_SEL
, 0);
417 WREG32_SMC(LCAC_MC2_OVR_VAL
, 0);
418 kv_program_local_cac_table(rdev
, mc2_local_cac_cfg_kv
, mc2_cac_config_reg
);
420 WREG32_SMC(LCAC_MC3_OVR_SEL
, 0);
421 WREG32_SMC(LCAC_MC3_OVR_VAL
, 0);
422 kv_program_local_cac_table(rdev
, mc3_local_cac_cfg_kv
, mc3_cac_config_reg
);
424 WREG32_SMC(LCAC_CPL_OVR_SEL
, 0);
425 WREG32_SMC(LCAC_CPL_OVR_VAL
, 0);
426 kv_program_local_cac_table(rdev
, cpl_local_cac_cfg_kv
, cpl_cac_config_reg
);
431 static int kv_enable_smc_cac(struct radeon_device
*rdev
, bool enable
)
433 struct kv_power_info
*pi
= kv_get_pi(rdev
);
438 ret
= kv_notify_message_to_smu(rdev
, PPSMC_MSG_EnableCac
);
440 pi
->cac_enabled
= false;
442 pi
->cac_enabled
= true;
443 } else if (pi
->cac_enabled
) {
444 kv_notify_message_to_smu(rdev
, PPSMC_MSG_DisableCac
);
445 pi
->cac_enabled
= false;
452 static int kv_process_firmware_header(struct radeon_device
*rdev
)
454 struct kv_power_info
*pi
= kv_get_pi(rdev
);
458 ret
= kv_read_smc_sram_dword(rdev
, SMU7_FIRMWARE_HEADER_LOCATION
+
459 offsetof(SMU7_Firmware_Header
, DpmTable
),
463 pi
->dpm_table_start
= tmp
;
465 ret
= kv_read_smc_sram_dword(rdev
, SMU7_FIRMWARE_HEADER_LOCATION
+
466 offsetof(SMU7_Firmware_Header
, SoftRegisters
),
470 pi
->soft_regs_start
= tmp
;
475 static int kv_enable_dpm_voltage_scaling(struct radeon_device
*rdev
)
477 struct kv_power_info
*pi
= kv_get_pi(rdev
);
480 pi
->graphics_voltage_change_enable
= 1;
482 ret
= kv_copy_bytes_to_smc(rdev
,
483 pi
->dpm_table_start
+
484 offsetof(SMU7_Fusion_DpmTable
, GraphicsVoltageChangeEnable
),
485 &pi
->graphics_voltage_change_enable
,
486 sizeof(u8
), pi
->sram_end
);
491 static int kv_set_dpm_interval(struct radeon_device
*rdev
)
493 struct kv_power_info
*pi
= kv_get_pi(rdev
);
496 pi
->graphics_interval
= 1;
498 ret
= kv_copy_bytes_to_smc(rdev
,
499 pi
->dpm_table_start
+
500 offsetof(SMU7_Fusion_DpmTable
, GraphicsInterval
),
501 &pi
->graphics_interval
,
502 sizeof(u8
), pi
->sram_end
);
507 static int kv_set_dpm_boot_state(struct radeon_device
*rdev
)
509 struct kv_power_info
*pi
= kv_get_pi(rdev
);
512 ret
= kv_copy_bytes_to_smc(rdev
,
513 pi
->dpm_table_start
+
514 offsetof(SMU7_Fusion_DpmTable
, GraphicsBootLevel
),
515 &pi
->graphics_boot_level
,
516 sizeof(u8
), pi
->sram_end
);
521 static void kv_program_vc(struct radeon_device
*rdev
)
523 WREG32_SMC(CG_FTV_0
, 0x3FFFC100);
526 static void kv_clear_vc(struct radeon_device
*rdev
)
528 WREG32_SMC(CG_FTV_0
, 0);
531 static int kv_set_divider_value(struct radeon_device
*rdev
,
534 struct kv_power_info
*pi
= kv_get_pi(rdev
);
535 struct atom_clock_dividers dividers
;
538 ret
= radeon_atom_get_clock_dividers(rdev
, COMPUTE_ENGINE_PLL_PARAM
,
539 sclk
, false, ÷rs
);
543 pi
->graphics_level
[index
].SclkDid
= (u8
)dividers
.post_div
;
544 pi
->graphics_level
[index
].SclkFrequency
= cpu_to_be32(sclk
);
549 static u32
kv_convert_vid2_to_vid7(struct radeon_device
*rdev
,
550 struct sumo_vid_mapping_table
*vid_mapping_table
,
553 struct radeon_clock_voltage_dependency_table
*vddc_sclk_table
=
554 &rdev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
557 if (vddc_sclk_table
&& vddc_sclk_table
->count
) {
558 if (vid_2bit
< vddc_sclk_table
->count
)
559 return vddc_sclk_table
->entries
[vid_2bit
].v
;
561 return vddc_sclk_table
->entries
[vddc_sclk_table
->count
- 1].v
;
563 for (i
= 0; i
< vid_mapping_table
->num_entries
; i
++) {
564 if (vid_mapping_table
->entries
[i
].vid_2bit
== vid_2bit
)
565 return vid_mapping_table
->entries
[i
].vid_7bit
;
567 return vid_mapping_table
->entries
[vid_mapping_table
->num_entries
- 1].vid_7bit
;
571 static u32
kv_convert_vid7_to_vid2(struct radeon_device
*rdev
,
572 struct sumo_vid_mapping_table
*vid_mapping_table
,
575 struct radeon_clock_voltage_dependency_table
*vddc_sclk_table
=
576 &rdev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
579 if (vddc_sclk_table
&& vddc_sclk_table
->count
) {
580 for (i
= 0; i
< vddc_sclk_table
->count
; i
++) {
581 if (vddc_sclk_table
->entries
[i
].v
== vid_7bit
)
584 return vddc_sclk_table
->count
- 1;
586 for (i
= 0; i
< vid_mapping_table
->num_entries
; i
++) {
587 if (vid_mapping_table
->entries
[i
].vid_7bit
== vid_7bit
)
588 return vid_mapping_table
->entries
[i
].vid_2bit
;
591 return vid_mapping_table
->entries
[vid_mapping_table
->num_entries
- 1].vid_2bit
;
595 static u16
kv_convert_8bit_index_to_voltage(struct radeon_device
*rdev
,
598 return 6200 - (voltage
* 25);
601 static u16
kv_convert_2bit_index_to_voltage(struct radeon_device
*rdev
,
604 struct kv_power_info
*pi
= kv_get_pi(rdev
);
605 u32 vid_8bit
= kv_convert_vid2_to_vid7(rdev
,
606 &pi
->sys_info
.vid_mapping_table
,
609 return kv_convert_8bit_index_to_voltage(rdev
, (u16
)vid_8bit
);
613 static int kv_set_vid(struct radeon_device
*rdev
, u32 index
, u32 vid
)
615 struct kv_power_info
*pi
= kv_get_pi(rdev
);
617 pi
->graphics_level
[index
].VoltageDownH
= (u8
)pi
->voltage_drop_t
;
618 pi
->graphics_level
[index
].MinVddNb
=
619 cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev
, vid
));
624 static int kv_set_at(struct radeon_device
*rdev
, u32 index
, u32 at
)
626 struct kv_power_info
*pi
= kv_get_pi(rdev
);
628 pi
->graphics_level
[index
].AT
= cpu_to_be16((u16
)at
);
/*
 * Mark a single graphics DPM level as enabled (1) or disabled (0) in the
 * in-memory SMU graphics level table; the table is uploaded to the SMC
 * separately.
 */
static void kv_dpm_power_level_enable(struct radeon_device *rdev,
				      u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
}
/*
 * Start dynamic power management: set GLOBAL_PWRMGT_EN in GENERAL_PWRMGT
 * (via the SMC register aperture), then ask the SMU firmware to enable DPM.
 */
static void kv_start_dpm(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	kv_smc_dpm_enable(rdev, true);
}
/* Ask the SMU firmware to disable dynamic power management. */
static void kv_stop_dpm(struct radeon_device *rdev)
{
	kv_smc_dpm_enable(rdev, false);
}
/*
 * Start activity monitoring: clear the SCLK/busy counter reset bits and set
 * DYNAMIC_PM_EN in SCLK_PWRMGT_CNTL so the counters run.
 */
static void kv_start_am(struct radeon_device *rdev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
	sclk_pwrmgt_cntl |= DYNAMIC_PM_EN;

	WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}
/*
 * Reset the activity monitor: assert the SCLK and busy counter reset bits
 * in SCLK_PWRMGT_CNTL.
 */
static void kv_reset_am(struct radeon_device *rdev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl |= (RESET_SCLK_CNT | RESET_BUSY_CNT);

	WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}
/*
 * Freeze or unfreeze SCLK DPM level transitions via an SMU message.
 * Returns the SMU notification result.
 */
static int kv_freeze_sclk_dpm(struct radeon_device *rdev, bool freeze)
{
	return kv_notify_message_to_smu(rdev, freeze ?
					PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
}
/* Force the lowest valid DPM level; thin wrapper around kv_force_dpm_lowest(). */
static int kv_force_lowest_valid(struct radeon_device *rdev)
{
	return kv_force_dpm_lowest(rdev);
}
/*
 * Stop forcing a DPM level.  Kabini/Mullins have a dedicated SMU message
 * for this; other families instead re-program the enabled-levels mask.
 */
static int kv_unforce_levels(struct radeon_device *rdev)
{
	if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
		return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
	return kv_set_enabled_levels(rdev);
}
694 static int kv_update_sclk_t(struct radeon_device
*rdev
)
696 struct kv_power_info
*pi
= kv_get_pi(rdev
);
697 u32 low_sclk_interrupt_t
= 0;
700 if (pi
->caps_sclk_throttle_low_notification
) {
701 low_sclk_interrupt_t
= cpu_to_be32(pi
->low_sclk_interrupt_t
);
703 ret
= kv_copy_bytes_to_smc(rdev
,
704 pi
->dpm_table_start
+
705 offsetof(SMU7_Fusion_DpmTable
, LowSclkInterruptT
),
706 (u8
*)&low_sclk_interrupt_t
,
707 sizeof(u32
), pi
->sram_end
);
712 static int kv_program_bootup_state(struct radeon_device
*rdev
)
714 struct kv_power_info
*pi
= kv_get_pi(rdev
);
716 struct radeon_clock_voltage_dependency_table
*table
=
717 &rdev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
719 if (table
&& table
->count
) {
720 for (i
= pi
->graphics_dpm_level_count
- 1; i
> 0; i
--) {
721 if (table
->entries
[i
].clk
== pi
->boot_pl
.sclk
)
725 pi
->graphics_boot_level
= (u8
)i
;
726 kv_dpm_power_level_enable(rdev
, i
, true);
728 struct sumo_sclk_voltage_mapping_table
*table
=
729 &pi
->sys_info
.sclk_voltage_mapping_table
;
731 if (table
->num_max_dpm_entries
== 0)
734 for (i
= pi
->graphics_dpm_level_count
- 1; i
> 0; i
--) {
735 if (table
->entries
[i
].sclk_frequency
== pi
->boot_pl
.sclk
)
739 pi
->graphics_boot_level
= (u8
)i
;
740 kv_dpm_power_level_enable(rdev
, i
, true);
745 static int kv_enable_auto_thermal_throttling(struct radeon_device
*rdev
)
747 struct kv_power_info
*pi
= kv_get_pi(rdev
);
750 pi
->graphics_therm_throttle_enable
= 1;
752 ret
= kv_copy_bytes_to_smc(rdev
,
753 pi
->dpm_table_start
+
754 offsetof(SMU7_Fusion_DpmTable
, GraphicsThermThrottleEnable
),
755 &pi
->graphics_therm_throttle_enable
,
756 sizeof(u8
), pi
->sram_end
);
761 static int kv_upload_dpm_settings(struct radeon_device
*rdev
)
763 struct kv_power_info
*pi
= kv_get_pi(rdev
);
766 ret
= kv_copy_bytes_to_smc(rdev
,
767 pi
->dpm_table_start
+
768 offsetof(SMU7_Fusion_DpmTable
, GraphicsLevel
),
769 (u8
*)&pi
->graphics_level
,
770 sizeof(SMU7_Fusion_GraphicsLevel
) * SMU7_MAX_LEVELS_GRAPHICS
,
776 ret
= kv_copy_bytes_to_smc(rdev
,
777 pi
->dpm_table_start
+
778 offsetof(SMU7_Fusion_DpmTable
, GraphicsDpmLevelCount
),
779 &pi
->graphics_dpm_level_count
,
780 sizeof(u8
), pi
->sram_end
);
785 static u32
kv_get_clock_difference(u32 a
, u32 b
)
787 return (a
>= b
) ? a
- b
: b
- a
;
790 static u32
kv_get_clk_bypass(struct radeon_device
*rdev
, u32 clk
)
792 struct kv_power_info
*pi
= kv_get_pi(rdev
);
795 if (pi
->caps_enable_dfs_bypass
) {
796 if (kv_get_clock_difference(clk
, 40000) < 200)
798 else if (kv_get_clock_difference(clk
, 30000) < 200)
800 else if (kv_get_clock_difference(clk
, 20000) < 200)
802 else if (kv_get_clock_difference(clk
, 15000) < 200)
804 else if (kv_get_clock_difference(clk
, 10000) < 200)
815 static int kv_populate_uvd_table(struct radeon_device
*rdev
)
817 struct kv_power_info
*pi
= kv_get_pi(rdev
);
818 struct radeon_uvd_clock_voltage_dependency_table
*table
=
819 &rdev
->pm
.dpm
.dyn_state
.uvd_clock_voltage_dependency_table
;
820 struct atom_clock_dividers dividers
;
824 if (table
== NULL
|| table
->count
== 0)
827 pi
->uvd_level_count
= 0;
828 for (i
= 0; i
< table
->count
; i
++) {
829 if (pi
->high_voltage_t
&&
830 (pi
->high_voltage_t
< table
->entries
[i
].v
))
833 pi
->uvd_level
[i
].VclkFrequency
= cpu_to_be32(table
->entries
[i
].vclk
);
834 pi
->uvd_level
[i
].DclkFrequency
= cpu_to_be32(table
->entries
[i
].dclk
);
835 pi
->uvd_level
[i
].MinVddNb
= cpu_to_be16(table
->entries
[i
].v
);
837 pi
->uvd_level
[i
].VClkBypassCntl
=
838 (u8
)kv_get_clk_bypass(rdev
, table
->entries
[i
].vclk
);
839 pi
->uvd_level
[i
].DClkBypassCntl
=
840 (u8
)kv_get_clk_bypass(rdev
, table
->entries
[i
].dclk
);
842 ret
= radeon_atom_get_clock_dividers(rdev
, COMPUTE_ENGINE_PLL_PARAM
,
843 table
->entries
[i
].vclk
, false, ÷rs
);
846 pi
->uvd_level
[i
].VclkDivider
= (u8
)dividers
.post_div
;
848 ret
= radeon_atom_get_clock_dividers(rdev
, COMPUTE_ENGINE_PLL_PARAM
,
849 table
->entries
[i
].dclk
, false, ÷rs
);
852 pi
->uvd_level
[i
].DclkDivider
= (u8
)dividers
.post_div
;
854 pi
->uvd_level_count
++;
857 ret
= kv_copy_bytes_to_smc(rdev
,
858 pi
->dpm_table_start
+
859 offsetof(SMU7_Fusion_DpmTable
, UvdLevelCount
),
860 (u8
*)&pi
->uvd_level_count
,
861 sizeof(u8
), pi
->sram_end
);
865 pi
->uvd_interval
= 1;
867 ret
= kv_copy_bytes_to_smc(rdev
,
868 pi
->dpm_table_start
+
869 offsetof(SMU7_Fusion_DpmTable
, UVDInterval
),
871 sizeof(u8
), pi
->sram_end
);
875 ret
= kv_copy_bytes_to_smc(rdev
,
876 pi
->dpm_table_start
+
877 offsetof(SMU7_Fusion_DpmTable
, UvdLevel
),
878 (u8
*)&pi
->uvd_level
,
879 sizeof(SMU7_Fusion_UvdLevel
) * SMU7_MAX_LEVELS_UVD
,
886 static int kv_populate_vce_table(struct radeon_device
*rdev
)
888 struct kv_power_info
*pi
= kv_get_pi(rdev
);
891 struct radeon_vce_clock_voltage_dependency_table
*table
=
892 &rdev
->pm
.dpm
.dyn_state
.vce_clock_voltage_dependency_table
;
893 struct atom_clock_dividers dividers
;
895 if (table
== NULL
|| table
->count
== 0)
898 pi
->vce_level_count
= 0;
899 for (i
= 0; i
< table
->count
; i
++) {
900 if (pi
->high_voltage_t
&&
901 pi
->high_voltage_t
< table
->entries
[i
].v
)
904 pi
->vce_level
[i
].Frequency
= cpu_to_be32(table
->entries
[i
].evclk
);
905 pi
->vce_level
[i
].MinVoltage
= cpu_to_be16(table
->entries
[i
].v
);
907 pi
->vce_level
[i
].ClkBypassCntl
=
908 (u8
)kv_get_clk_bypass(rdev
, table
->entries
[i
].evclk
);
910 ret
= radeon_atom_get_clock_dividers(rdev
, COMPUTE_ENGINE_PLL_PARAM
,
911 table
->entries
[i
].evclk
, false, ÷rs
);
914 pi
->vce_level
[i
].Divider
= (u8
)dividers
.post_div
;
916 pi
->vce_level_count
++;
919 ret
= kv_copy_bytes_to_smc(rdev
,
920 pi
->dpm_table_start
+
921 offsetof(SMU7_Fusion_DpmTable
, VceLevelCount
),
922 (u8
*)&pi
->vce_level_count
,
928 pi
->vce_interval
= 1;
930 ret
= kv_copy_bytes_to_smc(rdev
,
931 pi
->dpm_table_start
+
932 offsetof(SMU7_Fusion_DpmTable
, VCEInterval
),
933 (u8
*)&pi
->vce_interval
,
939 ret
= kv_copy_bytes_to_smc(rdev
,
940 pi
->dpm_table_start
+
941 offsetof(SMU7_Fusion_DpmTable
, VceLevel
),
942 (u8
*)&pi
->vce_level
,
943 sizeof(SMU7_Fusion_ExtClkLevel
) * SMU7_MAX_LEVELS_VCE
,
949 static int kv_populate_samu_table(struct radeon_device
*rdev
)
951 struct kv_power_info
*pi
= kv_get_pi(rdev
);
952 struct radeon_clock_voltage_dependency_table
*table
=
953 &rdev
->pm
.dpm
.dyn_state
.samu_clock_voltage_dependency_table
;
954 struct atom_clock_dividers dividers
;
958 if (table
== NULL
|| table
->count
== 0)
961 pi
->samu_level_count
= 0;
962 for (i
= 0; i
< table
->count
; i
++) {
963 if (pi
->high_voltage_t
&&
964 pi
->high_voltage_t
< table
->entries
[i
].v
)
967 pi
->samu_level
[i
].Frequency
= cpu_to_be32(table
->entries
[i
].clk
);
968 pi
->samu_level
[i
].MinVoltage
= cpu_to_be16(table
->entries
[i
].v
);
970 pi
->samu_level
[i
].ClkBypassCntl
=
971 (u8
)kv_get_clk_bypass(rdev
, table
->entries
[i
].clk
);
973 ret
= radeon_atom_get_clock_dividers(rdev
, COMPUTE_ENGINE_PLL_PARAM
,
974 table
->entries
[i
].clk
, false, ÷rs
);
977 pi
->samu_level
[i
].Divider
= (u8
)dividers
.post_div
;
979 pi
->samu_level_count
++;
982 ret
= kv_copy_bytes_to_smc(rdev
,
983 pi
->dpm_table_start
+
984 offsetof(SMU7_Fusion_DpmTable
, SamuLevelCount
),
985 (u8
*)&pi
->samu_level_count
,
991 pi
->samu_interval
= 1;
993 ret
= kv_copy_bytes_to_smc(rdev
,
994 pi
->dpm_table_start
+
995 offsetof(SMU7_Fusion_DpmTable
, SAMUInterval
),
996 (u8
*)&pi
->samu_interval
,
1002 ret
= kv_copy_bytes_to_smc(rdev
,
1003 pi
->dpm_table_start
+
1004 offsetof(SMU7_Fusion_DpmTable
, SamuLevel
),
1005 (u8
*)&pi
->samu_level
,
1006 sizeof(SMU7_Fusion_ExtClkLevel
) * SMU7_MAX_LEVELS_SAMU
,
1015 static int kv_populate_acp_table(struct radeon_device
*rdev
)
1017 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1018 struct radeon_clock_voltage_dependency_table
*table
=
1019 &rdev
->pm
.dpm
.dyn_state
.acp_clock_voltage_dependency_table
;
1020 struct atom_clock_dividers dividers
;
1024 if (table
== NULL
|| table
->count
== 0)
1027 pi
->acp_level_count
= 0;
1028 for (i
= 0; i
< table
->count
; i
++) {
1029 pi
->acp_level
[i
].Frequency
= cpu_to_be32(table
->entries
[i
].clk
);
1030 pi
->acp_level
[i
].MinVoltage
= cpu_to_be16(table
->entries
[i
].v
);
1032 ret
= radeon_atom_get_clock_dividers(rdev
, COMPUTE_ENGINE_PLL_PARAM
,
1033 table
->entries
[i
].clk
, false, ÷rs
);
1036 pi
->acp_level
[i
].Divider
= (u8
)dividers
.post_div
;
1038 pi
->acp_level_count
++;
1041 ret
= kv_copy_bytes_to_smc(rdev
,
1042 pi
->dpm_table_start
+
1043 offsetof(SMU7_Fusion_DpmTable
, AcpLevelCount
),
1044 (u8
*)&pi
->acp_level_count
,
1050 pi
->acp_interval
= 1;
1052 ret
= kv_copy_bytes_to_smc(rdev
,
1053 pi
->dpm_table_start
+
1054 offsetof(SMU7_Fusion_DpmTable
, ACPInterval
),
1055 (u8
*)&pi
->acp_interval
,
1061 ret
= kv_copy_bytes_to_smc(rdev
,
1062 pi
->dpm_table_start
+
1063 offsetof(SMU7_Fusion_DpmTable
, AcpLevel
),
1064 (u8
*)&pi
->acp_level
,
1065 sizeof(SMU7_Fusion_ExtClkLevel
) * SMU7_MAX_LEVELS_ACP
,
1073 static void kv_calculate_dfs_bypass_settings(struct radeon_device
*rdev
)
1075 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1077 struct radeon_clock_voltage_dependency_table
*table
=
1078 &rdev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
1080 if (table
&& table
->count
) {
1081 for (i
= 0; i
< pi
->graphics_dpm_level_count
; i
++) {
1082 if (pi
->caps_enable_dfs_bypass
) {
1083 if (kv_get_clock_difference(table
->entries
[i
].clk
, 40000) < 200)
1084 pi
->graphics_level
[i
].ClkBypassCntl
= 3;
1085 else if (kv_get_clock_difference(table
->entries
[i
].clk
, 30000) < 200)
1086 pi
->graphics_level
[i
].ClkBypassCntl
= 2;
1087 else if (kv_get_clock_difference(table
->entries
[i
].clk
, 26600) < 200)
1088 pi
->graphics_level
[i
].ClkBypassCntl
= 7;
1089 else if (kv_get_clock_difference(table
->entries
[i
].clk
, 20000) < 200)
1090 pi
->graphics_level
[i
].ClkBypassCntl
= 6;
1091 else if (kv_get_clock_difference(table
->entries
[i
].clk
, 10000) < 200)
1092 pi
->graphics_level
[i
].ClkBypassCntl
= 8;
1094 pi
->graphics_level
[i
].ClkBypassCntl
= 0;
1096 pi
->graphics_level
[i
].ClkBypassCntl
= 0;
1100 struct sumo_sclk_voltage_mapping_table
*table
=
1101 &pi
->sys_info
.sclk_voltage_mapping_table
;
1102 for (i
= 0; i
< pi
->graphics_dpm_level_count
; i
++) {
1103 if (pi
->caps_enable_dfs_bypass
) {
1104 if (kv_get_clock_difference(table
->entries
[i
].sclk_frequency
, 40000) < 200)
1105 pi
->graphics_level
[i
].ClkBypassCntl
= 3;
1106 else if (kv_get_clock_difference(table
->entries
[i
].sclk_frequency
, 30000) < 200)
1107 pi
->graphics_level
[i
].ClkBypassCntl
= 2;
1108 else if (kv_get_clock_difference(table
->entries
[i
].sclk_frequency
, 26600) < 200)
1109 pi
->graphics_level
[i
].ClkBypassCntl
= 7;
1110 else if (kv_get_clock_difference(table
->entries
[i
].sclk_frequency
, 20000) < 200)
1111 pi
->graphics_level
[i
].ClkBypassCntl
= 6;
1112 else if (kv_get_clock_difference(table
->entries
[i
].sclk_frequency
, 10000) < 200)
1113 pi
->graphics_level
[i
].ClkBypassCntl
= 8;
1115 pi
->graphics_level
[i
].ClkBypassCntl
= 0;
1117 pi
->graphics_level
[i
].ClkBypassCntl
= 0;
/*
 * Enable or disable ULV (ultra-low voltage) via an SMU message.
 * Returns the SMU notification result.
 */
static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
}
/* Reset the cached ACP boot level to the "invalid/unset" sentinel (0xff). */
static void kv_reset_acp_boot_level(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->acp_boot_level = 0xff;
}
/*
 * Record @rps as the current power state.  Both the generic radeon_ps and
 * its KV-private payload are copied by value, and ps_priv is then repointed
 * at the embedded copy so the cached state stays valid independently of the
 * lifetime of *rps.
 */
static void kv_update_current_ps(struct radeon_device *rdev,
				 struct radeon_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
}
/*
 * Record @rps as the requested power state.  Same deep-copy-and-relink
 * pattern as kv_update_current_ps(): copy the generic state and its
 * KV-private payload, then point ps_priv at the embedded copy.
 */
static void kv_update_requested_ps(struct radeon_device *rdev,
				   struct radeon_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
}
1158 void kv_dpm_enable_bapm(struct radeon_device
*rdev
, bool enable
)
1160 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1163 if (pi
->bapm_enable
) {
1164 ret
= kv_smc_bapm_enable(rdev
, enable
);
1166 DRM_ERROR("kv_smc_bapm_enable failed\n");
1170 int kv_dpm_enable(struct radeon_device
*rdev
)
1172 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1175 ret
= kv_process_firmware_header(rdev
);
1177 DRM_ERROR("kv_process_firmware_header failed\n");
1180 kv_init_fps_limits(rdev
);
1181 kv_init_graphics_levels(rdev
);
1182 ret
= kv_program_bootup_state(rdev
);
1184 DRM_ERROR("kv_program_bootup_state failed\n");
1187 kv_calculate_dfs_bypass_settings(rdev
);
1188 ret
= kv_upload_dpm_settings(rdev
);
1190 DRM_ERROR("kv_upload_dpm_settings failed\n");
1193 ret
= kv_populate_uvd_table(rdev
);
1195 DRM_ERROR("kv_populate_uvd_table failed\n");
1198 ret
= kv_populate_vce_table(rdev
);
1200 DRM_ERROR("kv_populate_vce_table failed\n");
1203 ret
= kv_populate_samu_table(rdev
);
1205 DRM_ERROR("kv_populate_samu_table failed\n");
1208 ret
= kv_populate_acp_table(rdev
);
1210 DRM_ERROR("kv_populate_acp_table failed\n");
1213 kv_program_vc(rdev
);
1215 kv_initialize_hardware_cac_manager(rdev
);
1218 if (pi
->enable_auto_thermal_throttling
) {
1219 ret
= kv_enable_auto_thermal_throttling(rdev
);
1221 DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
1225 ret
= kv_enable_dpm_voltage_scaling(rdev
);
1227 DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
1230 ret
= kv_set_dpm_interval(rdev
);
1232 DRM_ERROR("kv_set_dpm_interval failed\n");
1235 ret
= kv_set_dpm_boot_state(rdev
);
1237 DRM_ERROR("kv_set_dpm_boot_state failed\n");
1240 ret
= kv_enable_ulv(rdev
, true);
1242 DRM_ERROR("kv_enable_ulv failed\n");
1246 ret
= kv_enable_didt(rdev
, true);
1248 DRM_ERROR("kv_enable_didt failed\n");
1251 ret
= kv_enable_smc_cac(rdev
, true);
1253 DRM_ERROR("kv_enable_smc_cac failed\n");
1257 kv_reset_acp_boot_level(rdev
);
1259 ret
= kv_smc_bapm_enable(rdev
, false);
1261 DRM_ERROR("kv_smc_bapm_enable failed\n");
1265 kv_update_current_ps(rdev
, rdev
->pm
.dpm
.boot_ps
);
1270 int kv_dpm_late_enable(struct radeon_device
*rdev
)
1274 if (rdev
->irq
.installed
&&
1275 r600_is_internal_thermal_sensor(rdev
->pm
.int_thermal_type
)) {
1276 ret
= kv_set_thermal_temperature_range(rdev
, R600_TEMP_RANGE_MIN
, R600_TEMP_RANGE_MAX
);
1278 DRM_ERROR("kv_set_thermal_temperature_range failed\n");
1281 rdev
->irq
.dpm_thermal
= true;
1282 radeon_irq_set(rdev
);
1285 /* powerdown unused blocks for now */
1286 kv_dpm_powergate_acp(rdev
, true);
1287 kv_dpm_powergate_samu(rdev
, true);
1288 kv_dpm_powergate_vce(rdev
, true);
1289 kv_dpm_powergate_uvd(rdev
, true);
1294 void kv_dpm_disable(struct radeon_device
*rdev
)
1296 kv_smc_bapm_enable(rdev
, false);
1298 /* powerup blocks */
1299 kv_dpm_powergate_acp(rdev
, false);
1300 kv_dpm_powergate_samu(rdev
, false);
1301 kv_dpm_powergate_vce(rdev
, false);
1302 kv_dpm_powergate_uvd(rdev
, false);
1304 kv_enable_smc_cac(rdev
, false);
1305 kv_enable_didt(rdev
, false);
1308 kv_enable_ulv(rdev
, false);
1311 kv_update_current_ps(rdev
, rdev
->pm
.dpm
.boot_ps
);
1315 static int kv_write_smc_soft_register(struct radeon_device
*rdev
,
1316 u16 reg_offset
, u32 value
)
1318 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1320 return kv_copy_bytes_to_smc(rdev
, pi
->soft_regs_start
+ reg_offset
,
1321 (u8
*)&value
, sizeof(u16
), pi
->sram_end
);
1324 static int kv_read_smc_soft_register(struct radeon_device
*rdev
,
1325 u16 reg_offset
, u32
*value
)
1327 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1329 return kv_read_smc_sram_dword(rdev
, pi
->soft_regs_start
+ reg_offset
,
1330 value
, pi
->sram_end
);
1334 static void kv_init_sclk_t(struct radeon_device
*rdev
)
1336 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1338 pi
->low_sclk_interrupt_t
= 0;
1341 static int kv_init_fps_limits(struct radeon_device
*rdev
)
1343 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1350 pi
->fps_high_t
= cpu_to_be16(tmp
);
1351 ret
= kv_copy_bytes_to_smc(rdev
,
1352 pi
->dpm_table_start
+
1353 offsetof(SMU7_Fusion_DpmTable
, FpsHighT
),
1354 (u8
*)&pi
->fps_high_t
,
1355 sizeof(u16
), pi
->sram_end
);
1358 pi
->fps_low_t
= cpu_to_be16(tmp
);
1360 ret
= kv_copy_bytes_to_smc(rdev
,
1361 pi
->dpm_table_start
+
1362 offsetof(SMU7_Fusion_DpmTable
, FpsLowT
),
1363 (u8
*)&pi
->fps_low_t
,
1364 sizeof(u16
), pi
->sram_end
);
1370 static void kv_init_powergate_state(struct radeon_device
*rdev
)
1372 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1374 pi
->uvd_power_gated
= false;
1375 pi
->vce_power_gated
= false;
1376 pi
->samu_power_gated
= false;
1377 pi
->acp_power_gated
= false;
1381 static int kv_enable_uvd_dpm(struct radeon_device
*rdev
, bool enable
)
1383 return kv_notify_message_to_smu(rdev
, enable
?
1384 PPSMC_MSG_UVDDPM_Enable
: PPSMC_MSG_UVDDPM_Disable
);
1387 static int kv_enable_vce_dpm(struct radeon_device
*rdev
, bool enable
)
1389 return kv_notify_message_to_smu(rdev
, enable
?
1390 PPSMC_MSG_VCEDPM_Enable
: PPSMC_MSG_VCEDPM_Disable
);
1393 static int kv_enable_samu_dpm(struct radeon_device
*rdev
, bool enable
)
1395 return kv_notify_message_to_smu(rdev
, enable
?
1396 PPSMC_MSG_SAMUDPM_Enable
: PPSMC_MSG_SAMUDPM_Disable
);
1399 static int kv_enable_acp_dpm(struct radeon_device
*rdev
, bool enable
)
1401 return kv_notify_message_to_smu(rdev
, enable
?
1402 PPSMC_MSG_ACPDPM_Enable
: PPSMC_MSG_ACPDPM_Disable
);
1405 static int kv_update_uvd_dpm(struct radeon_device
*rdev
, bool gate
)
1407 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1408 struct radeon_uvd_clock_voltage_dependency_table
*table
=
1409 &rdev
->pm
.dpm
.dyn_state
.uvd_clock_voltage_dependency_table
;
1415 pi
->uvd_boot_level
= table
->count
- 1;
1417 pi
->uvd_boot_level
= 0;
1419 if (!pi
->caps_uvd_dpm
|| pi
->caps_stable_p_state
) {
1420 mask
= 1 << pi
->uvd_boot_level
;
1425 ret
= kv_copy_bytes_to_smc(rdev
,
1426 pi
->dpm_table_start
+
1427 offsetof(SMU7_Fusion_DpmTable
, UvdBootLevel
),
1428 (uint8_t *)&pi
->uvd_boot_level
,
1429 sizeof(u8
), pi
->sram_end
);
1433 kv_send_msg_to_smc_with_parameter(rdev
,
1434 PPSMC_MSG_UVDDPM_SetEnabledMask
,
1438 return kv_enable_uvd_dpm(rdev
, !gate
);
1441 static u8
kv_get_vce_boot_level(struct radeon_device
*rdev
)
1444 struct radeon_vce_clock_voltage_dependency_table
*table
=
1445 &rdev
->pm
.dpm
.dyn_state
.vce_clock_voltage_dependency_table
;
1447 for (i
= 0; i
< table
->count
; i
++) {
1448 if (table
->entries
[i
].evclk
>= 0) /* XXX */
1455 static int kv_update_vce_dpm(struct radeon_device
*rdev
,
1456 struct radeon_ps
*radeon_new_state
,
1457 struct radeon_ps
*radeon_current_state
)
1459 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1460 struct radeon_vce_clock_voltage_dependency_table
*table
=
1461 &rdev
->pm
.dpm
.dyn_state
.vce_clock_voltage_dependency_table
;
1464 if (radeon_new_state
->evclk
> 0 && radeon_current_state
->evclk
== 0) {
1465 kv_dpm_powergate_vce(rdev
, false);
1466 /* turn the clocks on when encoding */
1467 cik_update_cg(rdev
, RADEON_CG_BLOCK_VCE
, false);
1468 if (pi
->caps_stable_p_state
)
1469 pi
->vce_boot_level
= table
->count
- 1;
1471 pi
->vce_boot_level
= kv_get_vce_boot_level(rdev
);
1473 ret
= kv_copy_bytes_to_smc(rdev
,
1474 pi
->dpm_table_start
+
1475 offsetof(SMU7_Fusion_DpmTable
, VceBootLevel
),
1476 (u8
*)&pi
->vce_boot_level
,
1482 if (pi
->caps_stable_p_state
)
1483 kv_send_msg_to_smc_with_parameter(rdev
,
1484 PPSMC_MSG_VCEDPM_SetEnabledMask
,
1485 (1 << pi
->vce_boot_level
));
1487 kv_enable_vce_dpm(rdev
, true);
1488 } else if (radeon_new_state
->evclk
== 0 && radeon_current_state
->evclk
> 0) {
1489 kv_enable_vce_dpm(rdev
, false);
1490 /* turn the clocks off when not encoding */
1491 cik_update_cg(rdev
, RADEON_CG_BLOCK_VCE
, true);
1492 kv_dpm_powergate_vce(rdev
, true);
1498 static int kv_update_samu_dpm(struct radeon_device
*rdev
, bool gate
)
1500 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1501 struct radeon_clock_voltage_dependency_table
*table
=
1502 &rdev
->pm
.dpm
.dyn_state
.samu_clock_voltage_dependency_table
;
1506 if (pi
->caps_stable_p_state
)
1507 pi
->samu_boot_level
= table
->count
- 1;
1509 pi
->samu_boot_level
= 0;
1511 ret
= kv_copy_bytes_to_smc(rdev
,
1512 pi
->dpm_table_start
+
1513 offsetof(SMU7_Fusion_DpmTable
, SamuBootLevel
),
1514 (u8
*)&pi
->samu_boot_level
,
1520 if (pi
->caps_stable_p_state
)
1521 kv_send_msg_to_smc_with_parameter(rdev
,
1522 PPSMC_MSG_SAMUDPM_SetEnabledMask
,
1523 (1 << pi
->samu_boot_level
));
1526 return kv_enable_samu_dpm(rdev
, !gate
);
1529 static u8
kv_get_acp_boot_level(struct radeon_device
*rdev
)
1532 struct radeon_clock_voltage_dependency_table
*table
=
1533 &rdev
->pm
.dpm
.dyn_state
.acp_clock_voltage_dependency_table
;
1535 for (i
= 0; i
< table
->count
; i
++) {
1536 if (table
->entries
[i
].clk
>= 0) /* XXX */
1540 if (i
>= table
->count
)
1541 i
= table
->count
- 1;
1546 static void kv_update_acp_boot_level(struct radeon_device
*rdev
)
1548 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1551 if (!pi
->caps_stable_p_state
) {
1552 acp_boot_level
= kv_get_acp_boot_level(rdev
);
1553 if (acp_boot_level
!= pi
->acp_boot_level
) {
1554 pi
->acp_boot_level
= acp_boot_level
;
1555 kv_send_msg_to_smc_with_parameter(rdev
,
1556 PPSMC_MSG_ACPDPM_SetEnabledMask
,
1557 (1 << pi
->acp_boot_level
));
1562 static int kv_update_acp_dpm(struct radeon_device
*rdev
, bool gate
)
1564 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1565 struct radeon_clock_voltage_dependency_table
*table
=
1566 &rdev
->pm
.dpm
.dyn_state
.acp_clock_voltage_dependency_table
;
1570 if (pi
->caps_stable_p_state
)
1571 pi
->acp_boot_level
= table
->count
- 1;
1573 pi
->acp_boot_level
= kv_get_acp_boot_level(rdev
);
1575 ret
= kv_copy_bytes_to_smc(rdev
,
1576 pi
->dpm_table_start
+
1577 offsetof(SMU7_Fusion_DpmTable
, AcpBootLevel
),
1578 (u8
*)&pi
->acp_boot_level
,
1584 if (pi
->caps_stable_p_state
)
1585 kv_send_msg_to_smc_with_parameter(rdev
,
1586 PPSMC_MSG_ACPDPM_SetEnabledMask
,
1587 (1 << pi
->acp_boot_level
));
1590 return kv_enable_acp_dpm(rdev
, !gate
);
1593 void kv_dpm_powergate_uvd(struct radeon_device
*rdev
, bool gate
)
1595 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1597 if (pi
->uvd_power_gated
== gate
)
1600 pi
->uvd_power_gated
= gate
;
1603 if (pi
->caps_uvd_pg
) {
1604 uvd_v1_0_stop(rdev
);
1605 cik_update_cg(rdev
, RADEON_CG_BLOCK_UVD
, false);
1607 kv_update_uvd_dpm(rdev
, gate
);
1608 if (pi
->caps_uvd_pg
)
1609 kv_notify_message_to_smu(rdev
, PPSMC_MSG_UVDPowerOFF
);
1611 if (pi
->caps_uvd_pg
) {
1612 kv_notify_message_to_smu(rdev
, PPSMC_MSG_UVDPowerON
);
1613 uvd_v4_2_resume(rdev
);
1614 uvd_v1_0_start(rdev
);
1615 cik_update_cg(rdev
, RADEON_CG_BLOCK_UVD
, true);
1617 kv_update_uvd_dpm(rdev
, gate
);
1621 static void kv_dpm_powergate_vce(struct radeon_device
*rdev
, bool gate
)
1623 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1625 if (pi
->vce_power_gated
== gate
)
1628 pi
->vce_power_gated
= gate
;
1631 if (pi
->caps_vce_pg
) {
1632 /* XXX do we need a vce_v1_0_stop() ? */
1633 kv_notify_message_to_smu(rdev
, PPSMC_MSG_VCEPowerOFF
);
1636 if (pi
->caps_vce_pg
) {
1637 kv_notify_message_to_smu(rdev
, PPSMC_MSG_VCEPowerON
);
1638 vce_v2_0_resume(rdev
);
1639 vce_v1_0_start(rdev
);
1644 static void kv_dpm_powergate_samu(struct radeon_device
*rdev
, bool gate
)
1646 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1648 if (pi
->samu_power_gated
== gate
)
1651 pi
->samu_power_gated
= gate
;
1654 kv_update_samu_dpm(rdev
, true);
1655 if (pi
->caps_samu_pg
)
1656 kv_notify_message_to_smu(rdev
, PPSMC_MSG_SAMPowerOFF
);
1658 if (pi
->caps_samu_pg
)
1659 kv_notify_message_to_smu(rdev
, PPSMC_MSG_SAMPowerON
);
1660 kv_update_samu_dpm(rdev
, false);
1664 static void kv_dpm_powergate_acp(struct radeon_device
*rdev
, bool gate
)
1666 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1668 if (pi
->acp_power_gated
== gate
)
1671 if (rdev
->family
== CHIP_KABINI
|| rdev
->family
== CHIP_MULLINS
)
1674 pi
->acp_power_gated
= gate
;
1677 kv_update_acp_dpm(rdev
, true);
1678 if (pi
->caps_acp_pg
)
1679 kv_notify_message_to_smu(rdev
, PPSMC_MSG_ACPPowerOFF
);
1681 if (pi
->caps_acp_pg
)
1682 kv_notify_message_to_smu(rdev
, PPSMC_MSG_ACPPowerON
);
1683 kv_update_acp_dpm(rdev
, false);
1687 static void kv_set_valid_clock_range(struct radeon_device
*rdev
,
1688 struct radeon_ps
*new_rps
)
1690 struct kv_ps
*new_ps
= kv_get_ps(new_rps
);
1691 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1693 struct radeon_clock_voltage_dependency_table
*table
=
1694 &rdev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
1696 if (table
&& table
->count
) {
1697 for (i
= 0; i
< pi
->graphics_dpm_level_count
; i
++) {
1698 if ((table
->entries
[i
].clk
>= new_ps
->levels
[0].sclk
) ||
1699 (i
== (pi
->graphics_dpm_level_count
- 1))) {
1700 pi
->lowest_valid
= i
;
1705 for (i
= pi
->graphics_dpm_level_count
- 1; i
> 0; i
--) {
1706 if (table
->entries
[i
].clk
<= new_ps
->levels
[new_ps
->num_levels
- 1].sclk
)
1709 pi
->highest_valid
= i
;
1711 if (pi
->lowest_valid
> pi
->highest_valid
) {
1712 if ((new_ps
->levels
[0].sclk
- table
->entries
[pi
->highest_valid
].clk
) >
1713 (table
->entries
[pi
->lowest_valid
].clk
- new_ps
->levels
[new_ps
->num_levels
- 1].sclk
))
1714 pi
->highest_valid
= pi
->lowest_valid
;
1716 pi
->lowest_valid
= pi
->highest_valid
;
1719 struct sumo_sclk_voltage_mapping_table
*table
=
1720 &pi
->sys_info
.sclk_voltage_mapping_table
;
1722 for (i
= 0; i
< (int)pi
->graphics_dpm_level_count
; i
++) {
1723 if (table
->entries
[i
].sclk_frequency
>= new_ps
->levels
[0].sclk
||
1724 i
== (int)(pi
->graphics_dpm_level_count
- 1)) {
1725 pi
->lowest_valid
= i
;
1730 for (i
= pi
->graphics_dpm_level_count
- 1; i
> 0; i
--) {
1731 if (table
->entries
[i
].sclk_frequency
<=
1732 new_ps
->levels
[new_ps
->num_levels
- 1].sclk
)
1735 pi
->highest_valid
= i
;
1737 if (pi
->lowest_valid
> pi
->highest_valid
) {
1738 if ((new_ps
->levels
[0].sclk
-
1739 table
->entries
[pi
->highest_valid
].sclk_frequency
) >
1740 (table
->entries
[pi
->lowest_valid
].sclk_frequency
-
1741 new_ps
->levels
[new_ps
->num_levels
-1].sclk
))
1742 pi
->highest_valid
= pi
->lowest_valid
;
1744 pi
->lowest_valid
= pi
->highest_valid
;
1749 static int kv_update_dfs_bypass_settings(struct radeon_device
*rdev
,
1750 struct radeon_ps
*new_rps
)
1752 struct kv_ps
*new_ps
= kv_get_ps(new_rps
);
1753 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1757 if (pi
->caps_enable_dfs_bypass
) {
1758 clk_bypass_cntl
= new_ps
->need_dfs_bypass
?
1759 pi
->graphics_level
[pi
->graphics_boot_level
].ClkBypassCntl
: 0;
1760 ret
= kv_copy_bytes_to_smc(rdev
,
1761 (pi
->dpm_table_start
+
1762 offsetof(SMU7_Fusion_DpmTable
, GraphicsLevel
) +
1763 (pi
->graphics_boot_level
* sizeof(SMU7_Fusion_GraphicsLevel
)) +
1764 offsetof(SMU7_Fusion_GraphicsLevel
, ClkBypassCntl
)),
1766 sizeof(u8
), pi
->sram_end
);
1772 static int kv_enable_nb_dpm(struct radeon_device
*rdev
)
1774 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1777 if (pi
->enable_nb_dpm
&& !pi
->nb_dpm_enabled
) {
1778 ret
= kv_notify_message_to_smu(rdev
, PPSMC_MSG_NBDPM_Enable
);
1780 pi
->nb_dpm_enabled
= true;
1786 int kv_dpm_force_performance_level(struct radeon_device
*rdev
,
1787 enum radeon_dpm_forced_level level
)
1791 if (level
== RADEON_DPM_FORCED_LEVEL_HIGH
) {
1792 ret
= kv_force_dpm_highest(rdev
);
1795 } else if (level
== RADEON_DPM_FORCED_LEVEL_LOW
) {
1796 ret
= kv_force_dpm_lowest(rdev
);
1799 } else if (level
== RADEON_DPM_FORCED_LEVEL_AUTO
) {
1800 ret
= kv_unforce_levels(rdev
);
1805 rdev
->pm
.dpm
.forced_level
= level
;
1810 int kv_dpm_pre_set_power_state(struct radeon_device
*rdev
)
1812 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1813 struct radeon_ps requested_ps
= *rdev
->pm
.dpm
.requested_ps
;
1814 struct radeon_ps
*new_ps
= &requested_ps
;
1816 kv_update_requested_ps(rdev
, new_ps
);
1818 kv_apply_state_adjust_rules(rdev
,
1825 int kv_dpm_set_power_state(struct radeon_device
*rdev
)
1827 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1828 struct radeon_ps
*new_ps
= &pi
->requested_rps
;
1829 struct radeon_ps
*old_ps
= &pi
->current_rps
;
1832 if (pi
->bapm_enable
) {
1833 ret
= kv_smc_bapm_enable(rdev
, rdev
->pm
.dpm
.ac_power
);
1835 DRM_ERROR("kv_smc_bapm_enable failed\n");
1840 if (rdev
->family
== CHIP_KABINI
|| rdev
->family
== CHIP_MULLINS
) {
1841 if (pi
->enable_dpm
) {
1842 kv_set_valid_clock_range(rdev
, new_ps
);
1843 kv_update_dfs_bypass_settings(rdev
, new_ps
);
1844 ret
= kv_calculate_ds_divider(rdev
);
1846 DRM_ERROR("kv_calculate_ds_divider failed\n");
1849 kv_calculate_nbps_level_settings(rdev
);
1850 kv_calculate_dpm_settings(rdev
);
1851 kv_force_lowest_valid(rdev
);
1852 kv_enable_new_levels(rdev
);
1853 kv_upload_dpm_settings(rdev
);
1854 kv_program_nbps_index_settings(rdev
, new_ps
);
1855 kv_unforce_levels(rdev
);
1856 kv_set_enabled_levels(rdev
);
1857 kv_force_lowest_valid(rdev
);
1858 kv_unforce_levels(rdev
);
1860 ret
= kv_update_vce_dpm(rdev
, new_ps
, old_ps
);
1862 DRM_ERROR("kv_update_vce_dpm failed\n");
1865 kv_update_sclk_t(rdev
);
1866 if (rdev
->family
== CHIP_MULLINS
)
1867 kv_enable_nb_dpm(rdev
);
1870 if (pi
->enable_dpm
) {
1871 kv_set_valid_clock_range(rdev
, new_ps
);
1872 kv_update_dfs_bypass_settings(rdev
, new_ps
);
1873 ret
= kv_calculate_ds_divider(rdev
);
1875 DRM_ERROR("kv_calculate_ds_divider failed\n");
1878 kv_calculate_nbps_level_settings(rdev
);
1879 kv_calculate_dpm_settings(rdev
);
1880 kv_freeze_sclk_dpm(rdev
, true);
1881 kv_upload_dpm_settings(rdev
);
1882 kv_program_nbps_index_settings(rdev
, new_ps
);
1883 kv_freeze_sclk_dpm(rdev
, false);
1884 kv_set_enabled_levels(rdev
);
1885 ret
= kv_update_vce_dpm(rdev
, new_ps
, old_ps
);
1887 DRM_ERROR("kv_update_vce_dpm failed\n");
1890 kv_update_acp_boot_level(rdev
);
1891 kv_update_sclk_t(rdev
);
1892 kv_enable_nb_dpm(rdev
);
1899 void kv_dpm_post_set_power_state(struct radeon_device
*rdev
)
1901 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1902 struct radeon_ps
*new_ps
= &pi
->requested_rps
;
1904 kv_update_current_ps(rdev
, new_ps
);
1907 void kv_dpm_setup_asic(struct radeon_device
*rdev
)
1909 sumo_take_smu_control(rdev
, true);
1910 kv_init_powergate_state(rdev
);
1911 kv_init_sclk_t(rdev
);
1914 void kv_dpm_reset_asic(struct radeon_device
*rdev
)
1916 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1918 if (rdev
->family
== CHIP_KABINI
|| rdev
->family
== CHIP_MULLINS
) {
1919 kv_force_lowest_valid(rdev
);
1920 kv_init_graphics_levels(rdev
);
1921 kv_program_bootup_state(rdev
);
1922 kv_upload_dpm_settings(rdev
);
1923 kv_force_lowest_valid(rdev
);
1924 kv_unforce_levels(rdev
);
1926 kv_init_graphics_levels(rdev
);
1927 kv_program_bootup_state(rdev
);
1928 kv_freeze_sclk_dpm(rdev
, true);
1929 kv_upload_dpm_settings(rdev
);
1930 kv_freeze_sclk_dpm(rdev
, false);
1931 kv_set_enabled_level(rdev
, pi
->graphics_boot_level
);
1935 //XXX use sumo_dpm_display_configuration_changed
1937 static void kv_construct_max_power_limits_table(struct radeon_device
*rdev
,
1938 struct radeon_clock_and_voltage_limits
*table
)
1940 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1942 if (pi
->sys_info
.sclk_voltage_mapping_table
.num_max_dpm_entries
> 0) {
1943 int idx
= pi
->sys_info
.sclk_voltage_mapping_table
.num_max_dpm_entries
- 1;
1945 pi
->sys_info
.sclk_voltage_mapping_table
.entries
[idx
].sclk_frequency
;
1947 kv_convert_2bit_index_to_voltage(rdev
,
1948 pi
->sys_info
.sclk_voltage_mapping_table
.entries
[idx
].vid_2bit
);
1951 table
->mclk
= pi
->sys_info
.nbp_memory_clock
[0];
1954 static void kv_patch_voltage_values(struct radeon_device
*rdev
)
1957 struct radeon_uvd_clock_voltage_dependency_table
*uvd_table
=
1958 &rdev
->pm
.dpm
.dyn_state
.uvd_clock_voltage_dependency_table
;
1959 struct radeon_vce_clock_voltage_dependency_table
*vce_table
=
1960 &rdev
->pm
.dpm
.dyn_state
.vce_clock_voltage_dependency_table
;
1961 struct radeon_clock_voltage_dependency_table
*samu_table
=
1962 &rdev
->pm
.dpm
.dyn_state
.samu_clock_voltage_dependency_table
;
1963 struct radeon_clock_voltage_dependency_table
*acp_table
=
1964 &rdev
->pm
.dpm
.dyn_state
.acp_clock_voltage_dependency_table
;
1966 if (uvd_table
->count
) {
1967 for (i
= 0; i
< uvd_table
->count
; i
++)
1968 uvd_table
->entries
[i
].v
=
1969 kv_convert_8bit_index_to_voltage(rdev
,
1970 uvd_table
->entries
[i
].v
);
1973 if (vce_table
->count
) {
1974 for (i
= 0; i
< vce_table
->count
; i
++)
1975 vce_table
->entries
[i
].v
=
1976 kv_convert_8bit_index_to_voltage(rdev
,
1977 vce_table
->entries
[i
].v
);
1980 if (samu_table
->count
) {
1981 for (i
= 0; i
< samu_table
->count
; i
++)
1982 samu_table
->entries
[i
].v
=
1983 kv_convert_8bit_index_to_voltage(rdev
,
1984 samu_table
->entries
[i
].v
);
1987 if (acp_table
->count
) {
1988 for (i
= 0; i
< acp_table
->count
; i
++)
1989 acp_table
->entries
[i
].v
=
1990 kv_convert_8bit_index_to_voltage(rdev
,
1991 acp_table
->entries
[i
].v
);
1996 static void kv_construct_boot_state(struct radeon_device
*rdev
)
1998 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2000 pi
->boot_pl
.sclk
= pi
->sys_info
.bootup_sclk
;
2001 pi
->boot_pl
.vddc_index
= pi
->sys_info
.bootup_nb_voltage_index
;
2002 pi
->boot_pl
.ds_divider_index
= 0;
2003 pi
->boot_pl
.ss_divider_index
= 0;
2004 pi
->boot_pl
.allow_gnb_slow
= 1;
2005 pi
->boot_pl
.force_nbp_state
= 0;
2006 pi
->boot_pl
.display_wm
= 0;
2007 pi
->boot_pl
.vce_wm
= 0;
2010 static int kv_force_dpm_highest(struct radeon_device
*rdev
)
2015 ret
= kv_dpm_get_enable_mask(rdev
, &enable_mask
);
2019 for (i
= SMU7_MAX_LEVELS_GRAPHICS
- 1; i
> 0; i
--) {
2020 if (enable_mask
& (1 << i
))
2024 if (rdev
->family
== CHIP_KABINI
|| rdev
->family
== CHIP_MULLINS
)
2025 return kv_send_msg_to_smc_with_parameter(rdev
, PPSMC_MSG_DPM_ForceState
, i
);
2027 return kv_set_enabled_level(rdev
, i
);
2030 static int kv_force_dpm_lowest(struct radeon_device
*rdev
)
2035 ret
= kv_dpm_get_enable_mask(rdev
, &enable_mask
);
2039 for (i
= 0; i
< SMU7_MAX_LEVELS_GRAPHICS
; i
++) {
2040 if (enable_mask
& (1 << i
))
2044 if (rdev
->family
== CHIP_KABINI
|| rdev
->family
== CHIP_MULLINS
)
2045 return kv_send_msg_to_smc_with_parameter(rdev
, PPSMC_MSG_DPM_ForceState
, i
);
2047 return kv_set_enabled_level(rdev
, i
);
2050 static u8
kv_get_sleep_divider_id_from_clock(struct radeon_device
*rdev
,
2051 u32 sclk
, u32 min_sclk_in_sr
)
2053 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2056 u32 min
= (min_sclk_in_sr
> KV_MINIMUM_ENGINE_CLOCK
) ?
2057 min_sclk_in_sr
: KV_MINIMUM_ENGINE_CLOCK
;
2062 if (!pi
->caps_sclk_ds
)
2065 for (i
= KV_MAX_DEEPSLEEP_DIVIDER_ID
; i
> 0; i
--) {
2066 temp
= sclk
/ sumo_get_sleep_divider_from_id(i
);
2074 static int kv_get_high_voltage_limit(struct radeon_device
*rdev
, int *limit
)
2076 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2077 struct radeon_clock_voltage_dependency_table
*table
=
2078 &rdev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
2081 if (table
&& table
->count
) {
2082 for (i
= table
->count
- 1; i
>= 0; i
--) {
2083 if (pi
->high_voltage_t
&&
2084 (kv_convert_8bit_index_to_voltage(rdev
, table
->entries
[i
].v
) <=
2085 pi
->high_voltage_t
)) {
2091 struct sumo_sclk_voltage_mapping_table
*table
=
2092 &pi
->sys_info
.sclk_voltage_mapping_table
;
2094 for (i
= table
->num_max_dpm_entries
- 1; i
>= 0; i
--) {
2095 if (pi
->high_voltage_t
&&
2096 (kv_convert_2bit_index_to_voltage(rdev
, table
->entries
[i
].vid_2bit
) <=
2097 pi
->high_voltage_t
)) {
2108 static void kv_apply_state_adjust_rules(struct radeon_device
*rdev
,
2109 struct radeon_ps
*new_rps
,
2110 struct radeon_ps
*old_rps
)
2112 struct kv_ps
*ps
= kv_get_ps(new_rps
);
2113 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2114 u32 min_sclk
= 10000; /* ??? */
2118 struct radeon_clock_voltage_dependency_table
*table
=
2119 &rdev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
2120 u32 stable_p_state_sclk
= 0;
2121 struct radeon_clock_and_voltage_limits
*max_limits
=
2122 &rdev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
;
2124 if (new_rps
->vce_active
) {
2125 new_rps
->evclk
= rdev
->pm
.dpm
.vce_states
[rdev
->pm
.dpm
.vce_level
].evclk
;
2126 new_rps
->ecclk
= rdev
->pm
.dpm
.vce_states
[rdev
->pm
.dpm
.vce_level
].ecclk
;
2132 mclk
= max_limits
->mclk
;
2135 if (pi
->caps_stable_p_state
) {
2136 stable_p_state_sclk
= (max_limits
->sclk
* 75) / 100;
2138 for (i
= table
->count
- 1; i
>= 0; i
++) {
2139 if (stable_p_state_sclk
>= table
->entries
[i
].clk
) {
2140 stable_p_state_sclk
= table
->entries
[i
].clk
;
2146 stable_p_state_sclk
= table
->entries
[0].clk
;
2148 sclk
= stable_p_state_sclk
;
2151 if (new_rps
->vce_active
) {
2152 if (sclk
< rdev
->pm
.dpm
.vce_states
[rdev
->pm
.dpm
.vce_level
].sclk
)
2153 sclk
= rdev
->pm
.dpm
.vce_states
[rdev
->pm
.dpm
.vce_level
].sclk
;
2156 ps
->need_dfs_bypass
= true;
2158 for (i
= 0; i
< ps
->num_levels
; i
++) {
2159 if (ps
->levels
[i
].sclk
< sclk
)
2160 ps
->levels
[i
].sclk
= sclk
;
2163 if (table
&& table
->count
) {
2164 for (i
= 0; i
< ps
->num_levels
; i
++) {
2165 if (pi
->high_voltage_t
&&
2166 (pi
->high_voltage_t
<
2167 kv_convert_8bit_index_to_voltage(rdev
, ps
->levels
[i
].vddc_index
))) {
2168 kv_get_high_voltage_limit(rdev
, &limit
);
2169 ps
->levels
[i
].sclk
= table
->entries
[limit
].clk
;
2173 struct sumo_sclk_voltage_mapping_table
*table
=
2174 &pi
->sys_info
.sclk_voltage_mapping_table
;
2176 for (i
= 0; i
< ps
->num_levels
; i
++) {
2177 if (pi
->high_voltage_t
&&
2178 (pi
->high_voltage_t
<
2179 kv_convert_8bit_index_to_voltage(rdev
, ps
->levels
[i
].vddc_index
))) {
2180 kv_get_high_voltage_limit(rdev
, &limit
);
2181 ps
->levels
[i
].sclk
= table
->entries
[limit
].sclk_frequency
;
2186 if (pi
->caps_stable_p_state
) {
2187 for (i
= 0; i
< ps
->num_levels
; i
++) {
2188 ps
->levels
[i
].sclk
= stable_p_state_sclk
;
2192 pi
->video_start
= new_rps
->dclk
|| new_rps
->vclk
||
2193 new_rps
->evclk
|| new_rps
->ecclk
;
2195 if ((new_rps
->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK
) ==
2196 ATOM_PPLIB_CLASSIFICATION_UI_BATTERY
)
2197 pi
->battery_state
= true;
2199 pi
->battery_state
= false;
2201 if (rdev
->family
== CHIP_KABINI
|| rdev
->family
== CHIP_MULLINS
) {
2202 ps
->dpm0_pg_nb_ps_lo
= 0x1;
2203 ps
->dpm0_pg_nb_ps_hi
= 0x0;
2204 ps
->dpmx_nb_ps_lo
= 0x1;
2205 ps
->dpmx_nb_ps_hi
= 0x0;
2207 ps
->dpm0_pg_nb_ps_lo
= 0x3;
2208 ps
->dpm0_pg_nb_ps_hi
= 0x0;
2209 ps
->dpmx_nb_ps_lo
= 0x3;
2210 ps
->dpmx_nb_ps_hi
= 0x0;
2212 if (pi
->sys_info
.nb_dpm_enable
) {
2213 force_high
= (mclk
>= pi
->sys_info
.nbp_memory_clock
[3]) ||
2214 pi
->video_start
|| (rdev
->pm
.dpm
.new_active_crtc_count
>= 3) ||
2215 pi
->disable_nb_ps3_in_battery
;
2216 ps
->dpm0_pg_nb_ps_lo
= force_high
? 0x2 : 0x3;
2217 ps
->dpm0_pg_nb_ps_hi
= 0x2;
2218 ps
->dpmx_nb_ps_lo
= force_high
? 0x2 : 0x3;
2219 ps
->dpmx_nb_ps_hi
= 0x2;
2224 static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device
*rdev
,
2225 u32 index
, bool enable
)
2227 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2229 pi
->graphics_level
[index
].EnabledForThrottle
= enable
? 1 : 0;
2232 static int kv_calculate_ds_divider(struct radeon_device
*rdev
)
2234 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2235 u32 sclk_in_sr
= 10000; /* ??? */
2238 if (pi
->lowest_valid
> pi
->highest_valid
)
2241 for (i
= pi
->lowest_valid
; i
<= pi
->highest_valid
; i
++) {
2242 pi
->graphics_level
[i
].DeepSleepDivId
=
2243 kv_get_sleep_divider_id_from_clock(rdev
,
2244 be32_to_cpu(pi
->graphics_level
[i
].SclkFrequency
),
2250 static int kv_calculate_nbps_level_settings(struct radeon_device
*rdev
)
2252 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2255 struct radeon_clock_and_voltage_limits
*max_limits
=
2256 &rdev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
;
2257 u32 mclk
= max_limits
->mclk
;
2259 if (pi
->lowest_valid
> pi
->highest_valid
)
2262 if (rdev
->family
== CHIP_KABINI
|| rdev
->family
== CHIP_MULLINS
) {
2263 for (i
= pi
->lowest_valid
; i
<= pi
->highest_valid
; i
++) {
2264 pi
->graphics_level
[i
].GnbSlow
= 1;
2265 pi
->graphics_level
[i
].ForceNbPs1
= 0;
2266 pi
->graphics_level
[i
].UpH
= 0;
2269 if (!pi
->sys_info
.nb_dpm_enable
)
2272 force_high
= ((mclk
>= pi
->sys_info
.nbp_memory_clock
[3]) ||
2273 (rdev
->pm
.dpm
.new_active_crtc_count
>= 3) || pi
->video_start
);
2276 for (i
= pi
->lowest_valid
; i
<= pi
->highest_valid
; i
++)
2277 pi
->graphics_level
[i
].GnbSlow
= 0;
2279 if (pi
->battery_state
)
2280 pi
->graphics_level
[0].ForceNbPs1
= 1;
2282 pi
->graphics_level
[1].GnbSlow
= 0;
2283 pi
->graphics_level
[2].GnbSlow
= 0;
2284 pi
->graphics_level
[3].GnbSlow
= 0;
2285 pi
->graphics_level
[4].GnbSlow
= 0;
2288 for (i
= pi
->lowest_valid
; i
<= pi
->highest_valid
; i
++) {
2289 pi
->graphics_level
[i
].GnbSlow
= 1;
2290 pi
->graphics_level
[i
].ForceNbPs1
= 0;
2291 pi
->graphics_level
[i
].UpH
= 0;
2294 if (pi
->sys_info
.nb_dpm_enable
&& pi
->battery_state
) {
2295 pi
->graphics_level
[pi
->lowest_valid
].UpH
= 0x28;
2296 pi
->graphics_level
[pi
->lowest_valid
].GnbSlow
= 0;
2297 if (pi
->lowest_valid
!= pi
->highest_valid
)
2298 pi
->graphics_level
[pi
->lowest_valid
].ForceNbPs1
= 1;
2304 static int kv_calculate_dpm_settings(struct radeon_device
*rdev
)
2306 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2309 if (pi
->lowest_valid
> pi
->highest_valid
)
2312 for (i
= pi
->lowest_valid
; i
<= pi
->highest_valid
; i
++)
2313 pi
->graphics_level
[i
].DisplayWatermark
= (i
== pi
->highest_valid
) ? 1 : 0;
2318 static void kv_init_graphics_levels(struct radeon_device
*rdev
)
2320 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2322 struct radeon_clock_voltage_dependency_table
*table
=
2323 &rdev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
2325 if (table
&& table
->count
) {
2328 pi
->graphics_dpm_level_count
= 0;
2329 for (i
= 0; i
< table
->count
; i
++) {
2330 if (pi
->high_voltage_t
&&
2331 (pi
->high_voltage_t
<
2332 kv_convert_8bit_index_to_voltage(rdev
, table
->entries
[i
].v
)))
2335 kv_set_divider_value(rdev
, i
, table
->entries
[i
].clk
);
2336 vid_2bit
= kv_convert_vid7_to_vid2(rdev
,
2337 &pi
->sys_info
.vid_mapping_table
,
2338 table
->entries
[i
].v
);
2339 kv_set_vid(rdev
, i
, vid_2bit
);
2340 kv_set_at(rdev
, i
, pi
->at
[i
]);
2341 kv_dpm_power_level_enabled_for_throttle(rdev
, i
, true);
2342 pi
->graphics_dpm_level_count
++;
2345 struct sumo_sclk_voltage_mapping_table
*table
=
2346 &pi
->sys_info
.sclk_voltage_mapping_table
;
2348 pi
->graphics_dpm_level_count
= 0;
2349 for (i
= 0; i
< table
->num_max_dpm_entries
; i
++) {
2350 if (pi
->high_voltage_t
&&
2351 pi
->high_voltage_t
<
2352 kv_convert_2bit_index_to_voltage(rdev
, table
->entries
[i
].vid_2bit
))
2355 kv_set_divider_value(rdev
, i
, table
->entries
[i
].sclk_frequency
);
2356 kv_set_vid(rdev
, i
, table
->entries
[i
].vid_2bit
);
2357 kv_set_at(rdev
, i
, pi
->at
[i
]);
2358 kv_dpm_power_level_enabled_for_throttle(rdev
, i
, true);
2359 pi
->graphics_dpm_level_count
++;
2363 for (i
= 0; i
< SMU7_MAX_LEVELS_GRAPHICS
; i
++)
2364 kv_dpm_power_level_enable(rdev
, i
, false);
2367 static void kv_enable_new_levels(struct radeon_device
*rdev
)
2369 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2372 for (i
= 0; i
< SMU7_MAX_LEVELS_GRAPHICS
; i
++) {
2373 if (i
>= pi
->lowest_valid
&& i
<= pi
->highest_valid
)
2374 kv_dpm_power_level_enable(rdev
, i
, true);
2378 static int kv_set_enabled_level(struct radeon_device
*rdev
, u32 level
)
2380 u32 new_mask
= (1 << level
);
2382 return kv_send_msg_to_smc_with_parameter(rdev
,
2383 PPSMC_MSG_SCLKDPM_SetEnabledMask
,
2387 static int kv_set_enabled_levels(struct radeon_device
*rdev
)
2389 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2390 u32 i
, new_mask
= 0;
2392 for (i
= pi
->lowest_valid
; i
<= pi
->highest_valid
; i
++)
2393 new_mask
|= (1 << i
);
2395 return kv_send_msg_to_smc_with_parameter(rdev
,
2396 PPSMC_MSG_SCLKDPM_SetEnabledMask
,
2400 static void kv_program_nbps_index_settings(struct radeon_device
*rdev
,
2401 struct radeon_ps
*new_rps
)
2403 struct kv_ps
*new_ps
= kv_get_ps(new_rps
);
2404 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2407 if (rdev
->family
== CHIP_KABINI
|| rdev
->family
== CHIP_MULLINS
)
2410 if (pi
->sys_info
.nb_dpm_enable
) {
2411 nbdpmconfig1
= RREG32_SMC(NB_DPM_CONFIG_1
);
2412 nbdpmconfig1
&= ~(Dpm0PgNbPsLo_MASK
| Dpm0PgNbPsHi_MASK
|
2413 DpmXNbPsLo_MASK
| DpmXNbPsHi_MASK
);
2414 nbdpmconfig1
|= (Dpm0PgNbPsLo(new_ps
->dpm0_pg_nb_ps_lo
) |
2415 Dpm0PgNbPsHi(new_ps
->dpm0_pg_nb_ps_hi
) |
2416 DpmXNbPsLo(new_ps
->dpmx_nb_ps_lo
) |
2417 DpmXNbPsHi(new_ps
->dpmx_nb_ps_hi
));
2418 WREG32_SMC(NB_DPM_CONFIG_1
, nbdpmconfig1
);
/*
 * Clamp the requested thermal interrupt range (millidegrees C) to the
 * hardware-supported window [0, 255000] and program the high/low thermal
 * interrupt thresholds into CG_THERMAL_INT_CTRL.
 *
 * Returns 0 on success, -EINVAL if the clamped range is inverted
 * (high below low).
 */
static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	/* Clamp the caller's range into the supported window. */
	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	/* Thresholds are in whole degrees; the +49 bias presumably maps
	 * degrees C into the register's native encoding - TODO confirm
	 * against the register spec. */
	tmp = RREG32_SMC(CG_THERMAL_INT_CTRL);
	tmp &= ~(DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK);
	tmp |= (DIG_THERM_INTH(49 + (high_temp / 1000)) |
		DIG_THERM_INTL(49 + (low_temp / 1000)));
	WREG32_SMC(CG_THERMAL_INT_CTRL, tmp);

	/* Remember the programmed range for the rest of the pm code. */
	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}
/* Overlay of every supported revision of the ATOM IntegratedSystemInfo
 * table; the parser picks the member matching the table's crev. */
union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
};
/*
 * Parse the vbios IntegratedSystemInfo table (revision 8 only) and fill
 * in pi->sys_info: bootup clocks, NB voltage index, HTC thermal limits,
 * per-NB-P-state clocks and the sclk voltage/VID mapping tables.
 *
 * Returns 0 on success, -EINVAL on an unsupported table revision or an
 * inconsistent HTC limit pair.
 */
static int kv_parse_sys_info_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	union igp_info *igp_info;
	u8 frev, crev;
	u16 data_offset;
	int i;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)(mode_info->atom_context->bios +
					      data_offset);

		/* Only table revision 8 is understood by this parser. */
		if (crev != 8) {
			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
			return -EINVAL;
		}
		pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
		pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
		pi->sys_info.bootup_nb_voltage_index =
			le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
		/* Substitute defaults when the vbios leaves the HTC
		 * (hardware thermal control) limits at zero. */
		if (igp_info->info_8.ucHtcTmpLmt == 0)
			pi->sys_info.htc_tmp_lmt = 203;
		else
			pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
		if (igp_info->info_8.ucHtcHystLmt == 0)
			pi->sys_info.htc_hyst_lmt = 5;
		else
			pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
		if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
			DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
			return -EINVAL;
		}

		/* Bit 3 of ulSystemConfig gates NB DPM support. */
		if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
			pi->sys_info.nb_dpm_enable = true;
		else
			pi->sys_info.nb_dpm_enable = false;

		/* Per NB-P-state memory and NCLK frequencies. */
		for (i = 0; i < KV_NUM_NBPSTATES; i++) {
			pi->sys_info.nbp_memory_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
			pi->sys_info.nbp_n_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
		}
		if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
		    SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
			pi->caps_enable_dfs_bypass = true;

		sumo_construct_sclk_voltage_mapping_table(rdev,
							  &pi->sys_info.sclk_voltage_mapping_table,
							  igp_info->info_8.sAvail_SCLK);

		sumo_construct_vid_mapping_table(rdev,
						 &pi->sys_info.vid_mapping_table,
						 igp_info->info_8.sAvail_SCLK);

		kv_construct_max_power_limits_table(rdev,
						    &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	}
	return 0;
}
/* Overlay of the supported revisions of the ATOM PowerPlayInfo table;
 * this driver reads the PPLIB variants via the 'pplib' members. */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};
/* Overlay of the per-ASIC-family PPLIB clock-info entry formats;
 * KV uses the 'sumo' layout. */
union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};
/* Overlay of the v1/v2 PPLIB power-state entry formats; the parser
 * below walks the v2 layout. */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
/*
 * Force a boot-classified power state down to a single level that is the
 * driver's cached boot performance level (pi->boot_pl).
 */
static void kv_patch_boot_state(struct radeon_device *rdev,
				struct kv_ps *ps)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	ps->num_levels = 1;
	ps->levels[0] = pi->boot_pl;
}
/*
 * Copy the non-clock portion of a PPLIB power-state entry (caps and
 * classification flags, UVD clocks) into the generic radeon_ps, and
 * record boot/UVD states in rdev->pm.dpm as they are encountered.
 */
static void kv_parse_pplib_non_clock_info(struct radeon_device *rdev,
					  struct radeon_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	struct kv_ps *ps = kv_get_ps(rps);

	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	/* UVD clocks only exist in table revisions newer than VER1. */
	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	/* The boot state additionally gets its levels patched to the
	 * cached boot performance level. */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		rdev->pm.dpm.boot_ps = rps;
		kv_patch_boot_state(rdev, ps);
	}
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		rdev->pm.dpm.uvd_ps = rps;
}
/*
 * Decode one PPLIB (sumo-layout) clock-info entry into power level
 * 'index' of the given state: engine clock (split low 16 / high 8 bits
 * in the table) and VDDC index. Also bumps ps->num_levels so it tracks
 * the highest index parsed so far.
 */
static void kv_parse_pplib_clock_info(struct radeon_device *rdev,
				      struct radeon_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct kv_ps *ps = kv_get_ps(rps);
	struct kv_pl *pl = &ps->levels[index];
	u32 sclk;

	/* sclk is stored as a 16-bit low part plus an 8-bit high part. */
	sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
	sclk |= clock_info->sumo.ucEngineClockHigh << 16;
	pl->sclk = sclk;
	pl->vddc_index = clock_info->sumo.vddcIndex;

	ps->num_levels = index + 1;

	/* Deep-sleep divider defaults when sclk deep sleep is supported;
	 * 5 matches KV_MAX_DEEPSLEEP_DIVIDER_ID. */
	if (pi->caps_sclk_ds) {
		pl->ds_divider_index = 5;
		pl->ss_divider_index = 5;
	}
}
2602 static int kv_parse_power_table(struct radeon_device
*rdev
)
2604 struct radeon_mode_info
*mode_info
= &rdev
->mode_info
;
2605 struct _ATOM_PPLIB_NONCLOCK_INFO
*non_clock_info
;
2606 union pplib_power_state
*power_state
;
2607 int i
, j
, k
, non_clock_array_index
, clock_array_index
;
2608 union pplib_clock_info
*clock_info
;
2609 struct _StateArray
*state_array
;
2610 struct _ClockInfoArray
*clock_info_array
;
2611 struct _NonClockInfoArray
*non_clock_info_array
;
2612 union power_info
*power_info
;
2613 int index
= GetIndexIntoMasterTable(DATA
, PowerPlayInfo
);
2616 u8
*power_state_offset
;
2619 if (!atom_parse_data_header(mode_info
->atom_context
, index
, NULL
,
2620 &frev
, &crev
, &data_offset
))
2622 power_info
= (union power_info
*)(mode_info
->atom_context
->bios
+ data_offset
);
2624 state_array
= (struct _StateArray
*)
2625 (mode_info
->atom_context
->bios
+ data_offset
+
2626 le16_to_cpu(power_info
->pplib
.usStateArrayOffset
));
2627 clock_info_array
= (struct _ClockInfoArray
*)
2628 (mode_info
->atom_context
->bios
+ data_offset
+
2629 le16_to_cpu(power_info
->pplib
.usClockInfoArrayOffset
));
2630 non_clock_info_array
= (struct _NonClockInfoArray
*)
2631 (mode_info
->atom_context
->bios
+ data_offset
+
2632 le16_to_cpu(power_info
->pplib
.usNonClockInfoArrayOffset
));
2634 rdev
->pm
.dpm
.ps
= kzalloc(sizeof(struct radeon_ps
) *
2635 state_array
->ucNumEntries
, GFP_KERNEL
);
2636 if (!rdev
->pm
.dpm
.ps
)
2638 power_state_offset
= (u8
*)state_array
->states
;
2639 for (i
= 0; i
< state_array
->ucNumEntries
; i
++) {
2641 power_state
= (union pplib_power_state
*)power_state_offset
;
2642 non_clock_array_index
= power_state
->v2
.nonClockInfoIndex
;
2643 non_clock_info
= (struct _ATOM_PPLIB_NONCLOCK_INFO
*)
2644 &non_clock_info_array
->nonClockInfo
[non_clock_array_index
];
2645 if (!rdev
->pm
.power_state
[i
].clock_info
)
2647 ps
= kzalloc(sizeof(struct kv_ps
), GFP_KERNEL
);
2649 kfree(rdev
->pm
.dpm
.ps
);
2652 rdev
->pm
.dpm
.ps
[i
].ps_priv
= ps
;
2654 idx
= (u8
*)&power_state
->v2
.clockInfoIndex
[0];
2655 for (j
= 0; j
< power_state
->v2
.ucNumDPMLevels
; j
++) {
2656 clock_array_index
= idx
[j
];
2657 if (clock_array_index
>= clock_info_array
->ucNumEntries
)
2659 if (k
>= SUMO_MAX_HARDWARE_POWERLEVELS
)
2661 clock_info
= (union pplib_clock_info
*)
2662 ((u8
*)&clock_info_array
->clockInfo
[0] +
2663 (clock_array_index
* clock_info_array
->ucEntrySize
));
2664 kv_parse_pplib_clock_info(rdev
,
2665 &rdev
->pm
.dpm
.ps
[i
], k
,
2669 kv_parse_pplib_non_clock_info(rdev
, &rdev
->pm
.dpm
.ps
[i
],
2671 non_clock_info_array
->ucEntrySize
);
2672 power_state_offset
+= 2 + power_state
->v2
.ucNumDPMLevels
;
2674 rdev
->pm
.dpm
.num_ps
= state_array
->ucNumEntries
;
2676 /* fill in the vce power states */
2677 for (i
= 0; i
< RADEON_MAX_VCE_LEVELS
; i
++) {
2679 clock_array_index
= rdev
->pm
.dpm
.vce_states
[i
].clk_idx
;
2680 clock_info
= (union pplib_clock_info
*)
2681 &clock_info_array
->clockInfo
[clock_array_index
* clock_info_array
->ucEntrySize
];
2682 sclk
= le16_to_cpu(clock_info
->sumo
.usEngineClockLow
);
2683 sclk
|= clock_info
->sumo
.ucEngineClockHigh
<< 16;
2684 rdev
->pm
.dpm
.vce_states
[i
].sclk
= sclk
;
2685 rdev
->pm
.dpm
.vce_states
[i
].mclk
= 0;
2691 int kv_dpm_init(struct radeon_device
*rdev
)
2693 struct kv_power_info
*pi
;
2696 pi
= kzalloc(sizeof(struct kv_power_info
), GFP_KERNEL
);
2699 rdev
->pm
.dpm
.priv
= pi
;
2701 ret
= r600_get_platform_caps(rdev
);
2705 ret
= r600_parse_extended_power_table(rdev
);
2709 for (i
= 0; i
< SUMO_MAX_HARDWARE_POWERLEVELS
; i
++)
2710 pi
->at
[i
] = TRINITY_AT_DFLT
;
2712 pi
->sram_end
= SMC_RAM_END
;
2714 pi
->enable_nb_dpm
= true;
2716 pi
->caps_power_containment
= true;
2717 pi
->caps_cac
= true;
2718 pi
->enable_didt
= false;
2719 if (pi
->enable_didt
) {
2720 pi
->caps_sq_ramping
= true;
2721 pi
->caps_db_ramping
= true;
2722 pi
->caps_td_ramping
= true;
2723 pi
->caps_tcp_ramping
= true;
2726 pi
->caps_sclk_ds
= true;
2727 pi
->enable_auto_thermal_throttling
= true;
2728 pi
->disable_nb_ps3_in_battery
= false;
2729 pi
->bapm_enable
= true;
2730 pi
->voltage_drop_t
= 0;
2731 pi
->caps_sclk_throttle_low_notification
= false;
2732 pi
->caps_fps
= false; /* true? */
2733 pi
->caps_uvd_pg
= true;
2734 pi
->caps_uvd_dpm
= true;
2735 pi
->caps_vce_pg
= false; /* XXX true */
2736 pi
->caps_samu_pg
= false;
2737 pi
->caps_acp_pg
= false;
2738 pi
->caps_stable_p_state
= false;
2740 ret
= kv_parse_sys_info_table(rdev
);
2744 kv_patch_voltage_values(rdev
);
2745 kv_construct_boot_state(rdev
);
2747 ret
= kv_parse_power_table(rdev
);
2751 pi
->enable_dpm
= true;
/*
 * Debugfs helper: print the currently selected SCLK DPM level, its
 * engine clock (from the SMC graphics-level table, big-endian) and the
 * current VDDC voltage to the given seq_file.
 */
void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
						    struct seq_file *m)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 current_index =
		(RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
		CURR_SCLK_INDEX_SHIFT;
	u32 sclk, tmp;
	u16 vddc;

	if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
		seq_printf(m, "invalid dpm profile %d\n", current_index);
	} else {
		/* SMC tables are big-endian. */
		sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
		tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
			SMU_VOLTAGE_CURRENT_LEVEL_SHIFT;
		vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp);
		seq_printf(m, "power level %d sclk: %u vddc: %u\n",
			   current_index, sclk, vddc);
	}
}
/*
 * Dump a power state to the kernel log: class/caps info, UVD clocks,
 * and each level's sclk and decoded VDDC voltage.
 */
void kv_dpm_print_power_state(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	int i;
	struct kv_ps *ps = kv_get_ps(rps);

	r600_dpm_print_class_info(rps->class, rps->class2);
	r600_dpm_print_cap_info(rps->caps);
	printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->num_levels; i++) {
		struct kv_pl *pl = &ps->levels[i];
		printk("\t\tpower level %d sclk: %u vddc: %u\n",
		       i, pl->sclk,
		       kv_convert_8bit_index_to_voltage(rdev, pl->vddc_index));
	}
	r600_dpm_print_ps_status(rdev, rps);
}
/*
 * Tear down everything kv_dpm_init() allocated: the per-state kv_ps
 * private structs, the power-state array, the kv_power_info private
 * struct, and the extended power table.
 */
void kv_dpm_fini(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
		kfree(rdev->pm.dpm.ps[i].ps_priv);
	}
	kfree(rdev->pm.dpm.ps);
	kfree(rdev->pm.dpm.priv);
	r600_free_extended_power_table(rdev);
}
/* Display-configuration-change hook; KV requires no action here. */
void kv_dpm_display_configuration_changed(struct radeon_device *rdev)
{

}
2813 u32
kv_dpm_get_sclk(struct radeon_device
*rdev
, bool low
)
2815 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2816 struct kv_ps
*requested_state
= kv_get_ps(&pi
->requested_rps
);
2819 return requested_state
->levels
[0].sclk
;
2821 return requested_state
->levels
[requested_state
->num_levels
- 1].sclk
;
2824 u32
kv_dpm_get_mclk(struct radeon_device
*rdev
, bool low
)
2826 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2828 return pi
->sys_info
.bootup_uma_clk
;