/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
29 #include "radeon_asic.h"
30 #include <linux/seq_file.h>
32 #define KV_MAX_DEEPSLEEP_DIVIDER_ID 5
33 #define KV_MINIMUM_ENGINE_CLOCK 800
34 #define SMC_RAM_END 0x40000
36 static void kv_init_graphics_levels(struct radeon_device
*rdev
);
37 static int kv_calculate_ds_divider(struct radeon_device
*rdev
);
38 static int kv_calculate_nbps_level_settings(struct radeon_device
*rdev
);
39 static int kv_calculate_dpm_settings(struct radeon_device
*rdev
);
40 static void kv_enable_new_levels(struct radeon_device
*rdev
);
41 static void kv_program_nbps_index_settings(struct radeon_device
*rdev
,
42 struct radeon_ps
*new_rps
);
43 static int kv_set_enabled_levels(struct radeon_device
*rdev
);
44 static int kv_force_dpm_highest(struct radeon_device
*rdev
);
45 static int kv_force_dpm_lowest(struct radeon_device
*rdev
);
46 static void kv_apply_state_adjust_rules(struct radeon_device
*rdev
,
47 struct radeon_ps
*new_rps
,
48 struct radeon_ps
*old_rps
);
49 static int kv_set_thermal_temperature_range(struct radeon_device
*rdev
,
50 int min_temp
, int max_temp
);
51 static int kv_init_fps_limits(struct radeon_device
*rdev
);
53 void kv_dpm_powergate_uvd(struct radeon_device
*rdev
, bool gate
);
54 static void kv_dpm_powergate_vce(struct radeon_device
*rdev
, bool gate
);
55 static void kv_dpm_powergate_samu(struct radeon_device
*rdev
, bool gate
);
56 static void kv_dpm_powergate_acp(struct radeon_device
*rdev
, bool gate
);
58 extern void cik_enter_rlc_safe_mode(struct radeon_device
*rdev
);
59 extern void cik_exit_rlc_safe_mode(struct radeon_device
*rdev
);
60 extern void cik_update_cg(struct radeon_device
*rdev
,
61 u32 block
, bool enable
);
63 static const struct kv_lcac_config_values sx_local_cac_cfg_kv
[] =
76 static const struct kv_lcac_config_values mc0_local_cac_cfg_kv
[] =
82 static const struct kv_lcac_config_values mc1_local_cac_cfg_kv
[] =
88 static const struct kv_lcac_config_values mc2_local_cac_cfg_kv
[] =
94 static const struct kv_lcac_config_values mc3_local_cac_cfg_kv
[] =
100 static const struct kv_lcac_config_values cpl_local_cac_cfg_kv
[] =
132 static const struct kv_lcac_config_reg sx0_cac_config_reg
[] =
134 { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
137 static const struct kv_lcac_config_reg mc0_cac_config_reg
[] =
139 { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
142 static const struct kv_lcac_config_reg mc1_cac_config_reg
[] =
144 { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
147 static const struct kv_lcac_config_reg mc2_cac_config_reg
[] =
149 { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
152 static const struct kv_lcac_config_reg mc3_cac_config_reg
[] =
154 { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
157 static const struct kv_lcac_config_reg cpl_cac_config_reg
[] =
159 { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
162 static const struct kv_pt_config_reg didt_config_kv
[] =
164 { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
165 { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
166 { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
167 { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
168 { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
169 { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
170 { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
171 { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
172 { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
173 { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
174 { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
175 { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
176 { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND
},
177 { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND
},
178 { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND
},
179 { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND
},
180 { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND
},
181 { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
182 { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
183 { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
184 { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
185 { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
186 { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
187 { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
188 { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
189 { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
190 { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
191 { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
192 { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
193 { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
194 { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND
},
195 { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND
},
196 { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND
},
197 { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND
},
198 { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND
},
199 { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
200 { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
201 { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
202 { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
203 { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
204 { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
205 { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
206 { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
207 { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
208 { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
209 { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
210 { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
211 { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
212 { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND
},
213 { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND
},
214 { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND
},
215 { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND
},
216 { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND
},
217 { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
218 { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
219 { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
220 { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
221 { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
222 { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
223 { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
224 { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
225 { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
226 { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
227 { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND
},
228 { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND
},
229 { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND
},
230 { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND
},
231 { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND
},
232 { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND
},
233 { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND
},
234 { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND
},
235 { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND
},
239 static struct kv_ps
*kv_get_ps(struct radeon_ps
*rps
)
241 struct kv_ps
*ps
= rps
->ps_priv
;
246 static struct kv_power_info
*kv_get_pi(struct radeon_device
*rdev
)
248 struct kv_power_info
*pi
= rdev
->pm
.dpm
.priv
;
254 static void kv_program_local_cac_table(struct radeon_device
*rdev
,
255 const struct kv_lcac_config_values
*local_cac_table
,
256 const struct kv_lcac_config_reg
*local_cac_reg
)
259 const struct kv_lcac_config_values
*values
= local_cac_table
;
261 while (values
->block_id
!= 0xffffffff) {
262 count
= values
->signal_id
;
263 for (i
= 0; i
< count
; i
++) {
264 data
= ((values
->block_id
<< local_cac_reg
->block_shift
) &
265 local_cac_reg
->block_mask
);
266 data
|= ((i
<< local_cac_reg
->signal_shift
) &
267 local_cac_reg
->signal_mask
);
268 data
|= ((values
->t
<< local_cac_reg
->t_shift
) &
269 local_cac_reg
->t_mask
);
270 data
|= ((1 << local_cac_reg
->enable_shift
) &
271 local_cac_reg
->enable_mask
);
272 WREG32_SMC(local_cac_reg
->cntl
, data
);
279 static int kv_program_pt_config_registers(struct radeon_device
*rdev
,
280 const struct kv_pt_config_reg
*cac_config_regs
)
282 const struct kv_pt_config_reg
*config_regs
= cac_config_regs
;
286 if (config_regs
== NULL
)
289 while (config_regs
->offset
!= 0xFFFFFFFF) {
290 if (config_regs
->type
== KV_CONFIGREG_CACHE
) {
291 cache
|= ((config_regs
->value
<< config_regs
->shift
) & config_regs
->mask
);
293 switch (config_regs
->type
) {
294 case KV_CONFIGREG_SMC_IND
:
295 data
= RREG32_SMC(config_regs
->offset
);
297 case KV_CONFIGREG_DIDT_IND
:
298 data
= RREG32_DIDT(config_regs
->offset
);
301 data
= RREG32(config_regs
->offset
<< 2);
305 data
&= ~config_regs
->mask
;
306 data
|= ((config_regs
->value
<< config_regs
->shift
) & config_regs
->mask
);
310 switch (config_regs
->type
) {
311 case KV_CONFIGREG_SMC_IND
:
312 WREG32_SMC(config_regs
->offset
, data
);
314 case KV_CONFIGREG_DIDT_IND
:
315 WREG32_DIDT(config_regs
->offset
, data
);
318 WREG32(config_regs
->offset
<< 2, data
);
328 static void kv_do_enable_didt(struct radeon_device
*rdev
, bool enable
)
330 struct kv_power_info
*pi
= kv_get_pi(rdev
);
333 if (pi
->caps_sq_ramping
) {
334 data
= RREG32_DIDT(DIDT_SQ_CTRL0
);
336 data
|= DIDT_CTRL_EN
;
338 data
&= ~DIDT_CTRL_EN
;
339 WREG32_DIDT(DIDT_SQ_CTRL0
, data
);
342 if (pi
->caps_db_ramping
) {
343 data
= RREG32_DIDT(DIDT_DB_CTRL0
);
345 data
|= DIDT_CTRL_EN
;
347 data
&= ~DIDT_CTRL_EN
;
348 WREG32_DIDT(DIDT_DB_CTRL0
, data
);
351 if (pi
->caps_td_ramping
) {
352 data
= RREG32_DIDT(DIDT_TD_CTRL0
);
354 data
|= DIDT_CTRL_EN
;
356 data
&= ~DIDT_CTRL_EN
;
357 WREG32_DIDT(DIDT_TD_CTRL0
, data
);
360 if (pi
->caps_tcp_ramping
) {
361 data
= RREG32_DIDT(DIDT_TCP_CTRL0
);
363 data
|= DIDT_CTRL_EN
;
365 data
&= ~DIDT_CTRL_EN
;
366 WREG32_DIDT(DIDT_TCP_CTRL0
, data
);
370 static int kv_enable_didt(struct radeon_device
*rdev
, bool enable
)
372 struct kv_power_info
*pi
= kv_get_pi(rdev
);
375 if (pi
->caps_sq_ramping
||
376 pi
->caps_db_ramping
||
377 pi
->caps_td_ramping
||
378 pi
->caps_tcp_ramping
) {
379 cik_enter_rlc_safe_mode(rdev
);
382 ret
= kv_program_pt_config_registers(rdev
, didt_config_kv
);
384 cik_exit_rlc_safe_mode(rdev
);
389 kv_do_enable_didt(rdev
, enable
);
391 cik_exit_rlc_safe_mode(rdev
);
398 static void kv_initialize_hardware_cac_manager(struct radeon_device
*rdev
)
400 struct kv_power_info
*pi
= kv_get_pi(rdev
);
403 WREG32_SMC(LCAC_SX0_OVR_SEL
, 0);
404 WREG32_SMC(LCAC_SX0_OVR_VAL
, 0);
405 kv_program_local_cac_table(rdev
, sx_local_cac_cfg_kv
, sx0_cac_config_reg
);
407 WREG32_SMC(LCAC_MC0_OVR_SEL
, 0);
408 WREG32_SMC(LCAC_MC0_OVR_VAL
, 0);
409 kv_program_local_cac_table(rdev
, mc0_local_cac_cfg_kv
, mc0_cac_config_reg
);
411 WREG32_SMC(LCAC_MC1_OVR_SEL
, 0);
412 WREG32_SMC(LCAC_MC1_OVR_VAL
, 0);
413 kv_program_local_cac_table(rdev
, mc1_local_cac_cfg_kv
, mc1_cac_config_reg
);
415 WREG32_SMC(LCAC_MC2_OVR_SEL
, 0);
416 WREG32_SMC(LCAC_MC2_OVR_VAL
, 0);
417 kv_program_local_cac_table(rdev
, mc2_local_cac_cfg_kv
, mc2_cac_config_reg
);
419 WREG32_SMC(LCAC_MC3_OVR_SEL
, 0);
420 WREG32_SMC(LCAC_MC3_OVR_VAL
, 0);
421 kv_program_local_cac_table(rdev
, mc3_local_cac_cfg_kv
, mc3_cac_config_reg
);
423 WREG32_SMC(LCAC_CPL_OVR_SEL
, 0);
424 WREG32_SMC(LCAC_CPL_OVR_VAL
, 0);
425 kv_program_local_cac_table(rdev
, cpl_local_cac_cfg_kv
, cpl_cac_config_reg
);
430 static int kv_enable_smc_cac(struct radeon_device
*rdev
, bool enable
)
432 struct kv_power_info
*pi
= kv_get_pi(rdev
);
437 ret
= kv_notify_message_to_smu(rdev
, PPSMC_MSG_EnableCac
);
439 pi
->cac_enabled
= false;
441 pi
->cac_enabled
= true;
442 } else if (pi
->cac_enabled
) {
443 kv_notify_message_to_smu(rdev
, PPSMC_MSG_DisableCac
);
444 pi
->cac_enabled
= false;
451 static int kv_process_firmware_header(struct radeon_device
*rdev
)
453 struct kv_power_info
*pi
= kv_get_pi(rdev
);
457 ret
= kv_read_smc_sram_dword(rdev
, SMU7_FIRMWARE_HEADER_LOCATION
+
458 offsetof(SMU7_Firmware_Header
, DpmTable
),
462 pi
->dpm_table_start
= tmp
;
464 ret
= kv_read_smc_sram_dword(rdev
, SMU7_FIRMWARE_HEADER_LOCATION
+
465 offsetof(SMU7_Firmware_Header
, SoftRegisters
),
469 pi
->soft_regs_start
= tmp
;
474 static int kv_enable_dpm_voltage_scaling(struct radeon_device
*rdev
)
476 struct kv_power_info
*pi
= kv_get_pi(rdev
);
479 pi
->graphics_voltage_change_enable
= 1;
481 ret
= kv_copy_bytes_to_smc(rdev
,
482 pi
->dpm_table_start
+
483 offsetof(SMU7_Fusion_DpmTable
, GraphicsVoltageChangeEnable
),
484 &pi
->graphics_voltage_change_enable
,
485 sizeof(u8
), pi
->sram_end
);
490 static int kv_set_dpm_interval(struct radeon_device
*rdev
)
492 struct kv_power_info
*pi
= kv_get_pi(rdev
);
495 pi
->graphics_interval
= 1;
497 ret
= kv_copy_bytes_to_smc(rdev
,
498 pi
->dpm_table_start
+
499 offsetof(SMU7_Fusion_DpmTable
, GraphicsInterval
),
500 &pi
->graphics_interval
,
501 sizeof(u8
), pi
->sram_end
);
506 static int kv_set_dpm_boot_state(struct radeon_device
*rdev
)
508 struct kv_power_info
*pi
= kv_get_pi(rdev
);
511 ret
= kv_copy_bytes_to_smc(rdev
,
512 pi
->dpm_table_start
+
513 offsetof(SMU7_Fusion_DpmTable
, GraphicsBootLevel
),
514 &pi
->graphics_boot_level
,
515 sizeof(u8
), pi
->sram_end
);
520 static void kv_program_vc(struct radeon_device
*rdev
)
522 WREG32_SMC(CG_FTV_0
, 0x3FFFC000);
525 static void kv_clear_vc(struct radeon_device
*rdev
)
527 WREG32_SMC(CG_FTV_0
, 0);
530 static int kv_set_divider_value(struct radeon_device
*rdev
,
533 struct kv_power_info
*pi
= kv_get_pi(rdev
);
534 struct atom_clock_dividers dividers
;
537 ret
= radeon_atom_get_clock_dividers(rdev
, COMPUTE_ENGINE_PLL_PARAM
,
538 sclk
, false, ÷rs
);
542 pi
->graphics_level
[index
].SclkDid
= (u8
)dividers
.post_div
;
543 pi
->graphics_level
[index
].SclkFrequency
= cpu_to_be32(sclk
);
548 static u16
kv_convert_8bit_index_to_voltage(struct radeon_device
*rdev
,
551 return 6200 - (voltage
* 25);
554 static u16
kv_convert_2bit_index_to_voltage(struct radeon_device
*rdev
,
557 struct kv_power_info
*pi
= kv_get_pi(rdev
);
558 u32 vid_8bit
= sumo_convert_vid2_to_vid7(rdev
,
559 &pi
->sys_info
.vid_mapping_table
,
562 return kv_convert_8bit_index_to_voltage(rdev
, (u16
)vid_8bit
);
566 static int kv_set_vid(struct radeon_device
*rdev
, u32 index
, u32 vid
)
568 struct kv_power_info
*pi
= kv_get_pi(rdev
);
570 pi
->graphics_level
[index
].VoltageDownH
= (u8
)pi
->voltage_drop_t
;
571 pi
->graphics_level
[index
].MinVddNb
=
572 cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev
, vid
));
577 static int kv_set_at(struct radeon_device
*rdev
, u32 index
, u32 at
)
579 struct kv_power_info
*pi
= kv_get_pi(rdev
);
581 pi
->graphics_level
[index
].AT
= cpu_to_be16((u16
)at
);
586 static void kv_dpm_power_level_enable(struct radeon_device
*rdev
,
587 u32 index
, bool enable
)
589 struct kv_power_info
*pi
= kv_get_pi(rdev
);
591 pi
->graphics_level
[index
].EnabledForActivity
= enable
? 1 : 0;
594 static void kv_start_dpm(struct radeon_device
*rdev
)
596 u32 tmp
= RREG32_SMC(GENERAL_PWRMGT
);
598 tmp
|= GLOBAL_PWRMGT_EN
;
599 WREG32_SMC(GENERAL_PWRMGT
, tmp
);
601 kv_smc_dpm_enable(rdev
, true);
604 static void kv_stop_dpm(struct radeon_device
*rdev
)
606 kv_smc_dpm_enable(rdev
, false);
609 static void kv_start_am(struct radeon_device
*rdev
)
611 u32 sclk_pwrmgt_cntl
= RREG32_SMC(SCLK_PWRMGT_CNTL
);
613 sclk_pwrmgt_cntl
&= ~(RESET_SCLK_CNT
| RESET_BUSY_CNT
);
614 sclk_pwrmgt_cntl
|= DYNAMIC_PM_EN
;
616 WREG32_SMC(SCLK_PWRMGT_CNTL
, sclk_pwrmgt_cntl
);
619 static void kv_reset_am(struct radeon_device
*rdev
)
621 u32 sclk_pwrmgt_cntl
= RREG32_SMC(SCLK_PWRMGT_CNTL
);
623 sclk_pwrmgt_cntl
|= (RESET_SCLK_CNT
| RESET_BUSY_CNT
);
625 WREG32_SMC(SCLK_PWRMGT_CNTL
, sclk_pwrmgt_cntl
);
628 static int kv_freeze_sclk_dpm(struct radeon_device
*rdev
, bool freeze
)
630 return kv_notify_message_to_smu(rdev
, freeze
?
631 PPSMC_MSG_SCLKDPM_FreezeLevel
: PPSMC_MSG_SCLKDPM_UnfreezeLevel
);
/* Force the lowest valid DPM level (alias of kv_force_dpm_lowest). */
static int kv_force_lowest_valid(struct radeon_device *rdev)
{
	return kv_force_dpm_lowest(rdev);
}
639 static int kv_unforce_levels(struct radeon_device
*rdev
)
641 return kv_notify_message_to_smu(rdev
, PPSMC_MSG_NoForcedLevel
);
644 static int kv_update_sclk_t(struct radeon_device
*rdev
)
646 struct kv_power_info
*pi
= kv_get_pi(rdev
);
647 u32 low_sclk_interrupt_t
= 0;
650 if (pi
->caps_sclk_throttle_low_notification
) {
651 low_sclk_interrupt_t
= cpu_to_be32(pi
->low_sclk_interrupt_t
);
653 ret
= kv_copy_bytes_to_smc(rdev
,
654 pi
->dpm_table_start
+
655 offsetof(SMU7_Fusion_DpmTable
, LowSclkInterruptT
),
656 (u8
*)&low_sclk_interrupt_t
,
657 sizeof(u32
), pi
->sram_end
);
662 static int kv_program_bootup_state(struct radeon_device
*rdev
)
664 struct kv_power_info
*pi
= kv_get_pi(rdev
);
666 struct radeon_clock_voltage_dependency_table
*table
=
667 &rdev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
669 if (table
&& table
->count
) {
670 for (i
= pi
->graphics_dpm_level_count
- 1; i
>= 0; i
--) {
671 if ((table
->entries
[i
].clk
== pi
->boot_pl
.sclk
) ||
676 pi
->graphics_boot_level
= (u8
)i
;
677 kv_dpm_power_level_enable(rdev
, i
, true);
679 struct sumo_sclk_voltage_mapping_table
*table
=
680 &pi
->sys_info
.sclk_voltage_mapping_table
;
682 if (table
->num_max_dpm_entries
== 0)
685 for (i
= pi
->graphics_dpm_level_count
- 1; i
>= 0; i
--) {
686 if ((table
->entries
[i
].sclk_frequency
== pi
->boot_pl
.sclk
) ||
691 pi
->graphics_boot_level
= (u8
)i
;
692 kv_dpm_power_level_enable(rdev
, i
, true);
697 static int kv_enable_auto_thermal_throttling(struct radeon_device
*rdev
)
699 struct kv_power_info
*pi
= kv_get_pi(rdev
);
702 pi
->graphics_therm_throttle_enable
= 1;
704 ret
= kv_copy_bytes_to_smc(rdev
,
705 pi
->dpm_table_start
+
706 offsetof(SMU7_Fusion_DpmTable
, GraphicsThermThrottleEnable
),
707 &pi
->graphics_therm_throttle_enable
,
708 sizeof(u8
), pi
->sram_end
);
713 static int kv_upload_dpm_settings(struct radeon_device
*rdev
)
715 struct kv_power_info
*pi
= kv_get_pi(rdev
);
718 ret
= kv_copy_bytes_to_smc(rdev
,
719 pi
->dpm_table_start
+
720 offsetof(SMU7_Fusion_DpmTable
, GraphicsLevel
),
721 (u8
*)&pi
->graphics_level
,
722 sizeof(SMU7_Fusion_GraphicsLevel
) * SMU7_MAX_LEVELS_GRAPHICS
,
728 ret
= kv_copy_bytes_to_smc(rdev
,
729 pi
->dpm_table_start
+
730 offsetof(SMU7_Fusion_DpmTable
, GraphicsDpmLevelCount
),
731 &pi
->graphics_dpm_level_count
,
732 sizeof(u8
), pi
->sram_end
);
737 static u32
kv_get_clock_difference(u32 a
, u32 b
)
739 return (a
>= b
) ? a
- b
: b
- a
;
742 static u32
kv_get_clk_bypass(struct radeon_device
*rdev
, u32 clk
)
744 struct kv_power_info
*pi
= kv_get_pi(rdev
);
747 if (pi
->caps_enable_dfs_bypass
) {
748 if (kv_get_clock_difference(clk
, 40000) < 200)
750 else if (kv_get_clock_difference(clk
, 30000) < 200)
752 else if (kv_get_clock_difference(clk
, 20000) < 200)
754 else if (kv_get_clock_difference(clk
, 15000) < 200)
756 else if (kv_get_clock_difference(clk
, 10000) < 200)
767 static int kv_populate_uvd_table(struct radeon_device
*rdev
)
769 struct kv_power_info
*pi
= kv_get_pi(rdev
);
770 struct radeon_uvd_clock_voltage_dependency_table
*table
=
771 &rdev
->pm
.dpm
.dyn_state
.uvd_clock_voltage_dependency_table
;
772 struct atom_clock_dividers dividers
;
776 if (table
== NULL
|| table
->count
== 0)
779 pi
->uvd_level_count
= 0;
780 for (i
= 0; i
< table
->count
; i
++) {
781 if (pi
->high_voltage_t
&&
782 (pi
->high_voltage_t
< table
->entries
[i
].v
))
785 pi
->uvd_level
[i
].VclkFrequency
= cpu_to_be32(table
->entries
[i
].vclk
);
786 pi
->uvd_level
[i
].DclkFrequency
= cpu_to_be32(table
->entries
[i
].dclk
);
787 pi
->uvd_level
[i
].MinVddNb
= cpu_to_be16(table
->entries
[i
].v
);
789 pi
->uvd_level
[i
].VClkBypassCntl
=
790 (u8
)kv_get_clk_bypass(rdev
, table
->entries
[i
].vclk
);
791 pi
->uvd_level
[i
].DClkBypassCntl
=
792 (u8
)kv_get_clk_bypass(rdev
, table
->entries
[i
].dclk
);
794 ret
= radeon_atom_get_clock_dividers(rdev
, COMPUTE_ENGINE_PLL_PARAM
,
795 table
->entries
[i
].vclk
, false, ÷rs
);
798 pi
->uvd_level
[i
].VclkDivider
= (u8
)dividers
.post_div
;
800 ret
= radeon_atom_get_clock_dividers(rdev
, COMPUTE_ENGINE_PLL_PARAM
,
801 table
->entries
[i
].dclk
, false, ÷rs
);
804 pi
->uvd_level
[i
].DclkDivider
= (u8
)dividers
.post_div
;
806 pi
->uvd_level_count
++;
809 ret
= kv_copy_bytes_to_smc(rdev
,
810 pi
->dpm_table_start
+
811 offsetof(SMU7_Fusion_DpmTable
, UvdLevelCount
),
812 (u8
*)&pi
->uvd_level_count
,
813 sizeof(u8
), pi
->sram_end
);
817 pi
->uvd_interval
= 1;
819 ret
= kv_copy_bytes_to_smc(rdev
,
820 pi
->dpm_table_start
+
821 offsetof(SMU7_Fusion_DpmTable
, UVDInterval
),
823 sizeof(u8
), pi
->sram_end
);
827 ret
= kv_copy_bytes_to_smc(rdev
,
828 pi
->dpm_table_start
+
829 offsetof(SMU7_Fusion_DpmTable
, UvdLevel
),
830 (u8
*)&pi
->uvd_level
,
831 sizeof(SMU7_Fusion_UvdLevel
) * SMU7_MAX_LEVELS_UVD
,
838 static int kv_populate_vce_table(struct radeon_device
*rdev
)
840 struct kv_power_info
*pi
= kv_get_pi(rdev
);
843 struct radeon_vce_clock_voltage_dependency_table
*table
=
844 &rdev
->pm
.dpm
.dyn_state
.vce_clock_voltage_dependency_table
;
845 struct atom_clock_dividers dividers
;
847 if (table
== NULL
|| table
->count
== 0)
850 pi
->vce_level_count
= 0;
851 for (i
= 0; i
< table
->count
; i
++) {
852 if (pi
->high_voltage_t
&&
853 pi
->high_voltage_t
< table
->entries
[i
].v
)
856 pi
->vce_level
[i
].Frequency
= cpu_to_be32(table
->entries
[i
].evclk
);
857 pi
->vce_level
[i
].MinVoltage
= cpu_to_be16(table
->entries
[i
].v
);
859 pi
->vce_level
[i
].ClkBypassCntl
=
860 (u8
)kv_get_clk_bypass(rdev
, table
->entries
[i
].evclk
);
862 ret
= radeon_atom_get_clock_dividers(rdev
, COMPUTE_ENGINE_PLL_PARAM
,
863 table
->entries
[i
].evclk
, false, ÷rs
);
866 pi
->vce_level
[i
].Divider
= (u8
)dividers
.post_div
;
868 pi
->vce_level_count
++;
871 ret
= kv_copy_bytes_to_smc(rdev
,
872 pi
->dpm_table_start
+
873 offsetof(SMU7_Fusion_DpmTable
, VceLevelCount
),
874 (u8
*)&pi
->vce_level_count
,
880 pi
->vce_interval
= 1;
882 ret
= kv_copy_bytes_to_smc(rdev
,
883 pi
->dpm_table_start
+
884 offsetof(SMU7_Fusion_DpmTable
, VCEInterval
),
885 (u8
*)&pi
->vce_interval
,
891 ret
= kv_copy_bytes_to_smc(rdev
,
892 pi
->dpm_table_start
+
893 offsetof(SMU7_Fusion_DpmTable
, VceLevel
),
894 (u8
*)&pi
->vce_level
,
895 sizeof(SMU7_Fusion_ExtClkLevel
) * SMU7_MAX_LEVELS_VCE
,
901 static int kv_populate_samu_table(struct radeon_device
*rdev
)
903 struct kv_power_info
*pi
= kv_get_pi(rdev
);
904 struct radeon_clock_voltage_dependency_table
*table
=
905 &rdev
->pm
.dpm
.dyn_state
.samu_clock_voltage_dependency_table
;
906 struct atom_clock_dividers dividers
;
910 if (table
== NULL
|| table
->count
== 0)
913 pi
->samu_level_count
= 0;
914 for (i
= 0; i
< table
->count
; i
++) {
915 if (pi
->high_voltage_t
&&
916 pi
->high_voltage_t
< table
->entries
[i
].v
)
919 pi
->samu_level
[i
].Frequency
= cpu_to_be32(table
->entries
[i
].clk
);
920 pi
->samu_level
[i
].MinVoltage
= cpu_to_be16(table
->entries
[i
].v
);
922 pi
->samu_level
[i
].ClkBypassCntl
=
923 (u8
)kv_get_clk_bypass(rdev
, table
->entries
[i
].clk
);
925 ret
= radeon_atom_get_clock_dividers(rdev
, COMPUTE_ENGINE_PLL_PARAM
,
926 table
->entries
[i
].clk
, false, ÷rs
);
929 pi
->samu_level
[i
].Divider
= (u8
)dividers
.post_div
;
931 pi
->samu_level_count
++;
934 ret
= kv_copy_bytes_to_smc(rdev
,
935 pi
->dpm_table_start
+
936 offsetof(SMU7_Fusion_DpmTable
, SamuLevelCount
),
937 (u8
*)&pi
->samu_level_count
,
943 pi
->samu_interval
= 1;
945 ret
= kv_copy_bytes_to_smc(rdev
,
946 pi
->dpm_table_start
+
947 offsetof(SMU7_Fusion_DpmTable
, SAMUInterval
),
948 (u8
*)&pi
->samu_interval
,
954 ret
= kv_copy_bytes_to_smc(rdev
,
955 pi
->dpm_table_start
+
956 offsetof(SMU7_Fusion_DpmTable
, SamuLevel
),
957 (u8
*)&pi
->samu_level
,
958 sizeof(SMU7_Fusion_ExtClkLevel
) * SMU7_MAX_LEVELS_SAMU
,
967 static int kv_populate_acp_table(struct radeon_device
*rdev
)
969 struct kv_power_info
*pi
= kv_get_pi(rdev
);
970 struct radeon_clock_voltage_dependency_table
*table
=
971 &rdev
->pm
.dpm
.dyn_state
.acp_clock_voltage_dependency_table
;
972 struct atom_clock_dividers dividers
;
976 if (table
== NULL
|| table
->count
== 0)
979 pi
->acp_level_count
= 0;
980 for (i
= 0; i
< table
->count
; i
++) {
981 pi
->acp_level
[i
].Frequency
= cpu_to_be32(table
->entries
[i
].clk
);
982 pi
->acp_level
[i
].MinVoltage
= cpu_to_be16(table
->entries
[i
].v
);
984 ret
= radeon_atom_get_clock_dividers(rdev
, COMPUTE_ENGINE_PLL_PARAM
,
985 table
->entries
[i
].clk
, false, ÷rs
);
988 pi
->acp_level
[i
].Divider
= (u8
)dividers
.post_div
;
990 pi
->acp_level_count
++;
993 ret
= kv_copy_bytes_to_smc(rdev
,
994 pi
->dpm_table_start
+
995 offsetof(SMU7_Fusion_DpmTable
, AcpLevelCount
),
996 (u8
*)&pi
->acp_level_count
,
1002 pi
->acp_interval
= 1;
1004 ret
= kv_copy_bytes_to_smc(rdev
,
1005 pi
->dpm_table_start
+
1006 offsetof(SMU7_Fusion_DpmTable
, ACPInterval
),
1007 (u8
*)&pi
->acp_interval
,
1013 ret
= kv_copy_bytes_to_smc(rdev
,
1014 pi
->dpm_table_start
+
1015 offsetof(SMU7_Fusion_DpmTable
, AcpLevel
),
1016 (u8
*)&pi
->acp_level
,
1017 sizeof(SMU7_Fusion_ExtClkLevel
) * SMU7_MAX_LEVELS_ACP
,
1025 static void kv_calculate_dfs_bypass_settings(struct radeon_device
*rdev
)
1027 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1029 struct radeon_clock_voltage_dependency_table
*table
=
1030 &rdev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
1032 if (table
&& table
->count
) {
1033 for (i
= 0; i
< pi
->graphics_dpm_level_count
; i
++) {
1034 if (pi
->caps_enable_dfs_bypass
) {
1035 if (kv_get_clock_difference(table
->entries
[i
].clk
, 40000) < 200)
1036 pi
->graphics_level
[i
].ClkBypassCntl
= 3;
1037 else if (kv_get_clock_difference(table
->entries
[i
].clk
, 30000) < 200)
1038 pi
->graphics_level
[i
].ClkBypassCntl
= 2;
1039 else if (kv_get_clock_difference(table
->entries
[i
].clk
, 26600) < 200)
1040 pi
->graphics_level
[i
].ClkBypassCntl
= 7;
1041 else if (kv_get_clock_difference(table
->entries
[i
].clk
, 20000) < 200)
1042 pi
->graphics_level
[i
].ClkBypassCntl
= 6;
1043 else if (kv_get_clock_difference(table
->entries
[i
].clk
, 10000) < 200)
1044 pi
->graphics_level
[i
].ClkBypassCntl
= 8;
1046 pi
->graphics_level
[i
].ClkBypassCntl
= 0;
1048 pi
->graphics_level
[i
].ClkBypassCntl
= 0;
1052 struct sumo_sclk_voltage_mapping_table
*table
=
1053 &pi
->sys_info
.sclk_voltage_mapping_table
;
1054 for (i
= 0; i
< pi
->graphics_dpm_level_count
; i
++) {
1055 if (pi
->caps_enable_dfs_bypass
) {
1056 if (kv_get_clock_difference(table
->entries
[i
].sclk_frequency
, 40000) < 200)
1057 pi
->graphics_level
[i
].ClkBypassCntl
= 3;
1058 else if (kv_get_clock_difference(table
->entries
[i
].sclk_frequency
, 30000) < 200)
1059 pi
->graphics_level
[i
].ClkBypassCntl
= 2;
1060 else if (kv_get_clock_difference(table
->entries
[i
].sclk_frequency
, 26600) < 200)
1061 pi
->graphics_level
[i
].ClkBypassCntl
= 7;
1062 else if (kv_get_clock_difference(table
->entries
[i
].sclk_frequency
, 20000) < 200)
1063 pi
->graphics_level
[i
].ClkBypassCntl
= 6;
1064 else if (kv_get_clock_difference(table
->entries
[i
].sclk_frequency
, 10000) < 200)
1065 pi
->graphics_level
[i
].ClkBypassCntl
= 8;
1067 pi
->graphics_level
[i
].ClkBypassCntl
= 0;
1069 pi
->graphics_level
[i
].ClkBypassCntl
= 0;
1075 static int kv_enable_ulv(struct radeon_device
*rdev
, bool enable
)
1077 return kv_notify_message_to_smu(rdev
, enable
?
1078 PPSMC_MSG_EnableULV
: PPSMC_MSG_DisableULV
);
1081 static void kv_update_current_ps(struct radeon_device
*rdev
,
1082 struct radeon_ps
*rps
)
1084 struct kv_ps
*new_ps
= kv_get_ps(rps
);
1085 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1087 pi
->current_rps
= *rps
;
1088 pi
->current_ps
= *new_ps
;
1089 pi
->current_rps
.ps_priv
= &pi
->current_ps
;
1092 static void kv_update_requested_ps(struct radeon_device
*rdev
,
1093 struct radeon_ps
*rps
)
1095 struct kv_ps
*new_ps
= kv_get_ps(rps
);
1096 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1098 pi
->requested_rps
= *rps
;
1099 pi
->requested_ps
= *new_ps
;
1100 pi
->requested_rps
.ps_priv
= &pi
->requested_ps
;
1103 int kv_dpm_enable(struct radeon_device
*rdev
)
1105 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1108 cik_update_cg(rdev
, (RADEON_CG_BLOCK_GFX
|
1109 RADEON_CG_BLOCK_SDMA
|
1110 RADEON_CG_BLOCK_BIF
|
1111 RADEON_CG_BLOCK_HDP
), false);
1113 ret
= kv_process_firmware_header(rdev
);
1115 DRM_ERROR("kv_process_firmware_header failed\n");
1118 kv_init_fps_limits(rdev
);
1119 kv_init_graphics_levels(rdev
);
1120 ret
= kv_program_bootup_state(rdev
);
1122 DRM_ERROR("kv_program_bootup_state failed\n");
1125 kv_calculate_dfs_bypass_settings(rdev
);
1126 ret
= kv_upload_dpm_settings(rdev
);
1128 DRM_ERROR("kv_upload_dpm_settings failed\n");
1131 ret
= kv_populate_uvd_table(rdev
);
1133 DRM_ERROR("kv_populate_uvd_table failed\n");
1136 ret
= kv_populate_vce_table(rdev
);
1138 DRM_ERROR("kv_populate_vce_table failed\n");
1141 ret
= kv_populate_samu_table(rdev
);
1143 DRM_ERROR("kv_populate_samu_table failed\n");
1146 ret
= kv_populate_acp_table(rdev
);
1148 DRM_ERROR("kv_populate_acp_table failed\n");
1151 kv_program_vc(rdev
);
1153 kv_initialize_hardware_cac_manager(rdev
);
1156 if (pi
->enable_auto_thermal_throttling
) {
1157 ret
= kv_enable_auto_thermal_throttling(rdev
);
1159 DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
1163 ret
= kv_enable_dpm_voltage_scaling(rdev
);
1165 DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
1168 ret
= kv_set_dpm_interval(rdev
);
1170 DRM_ERROR("kv_set_dpm_interval failed\n");
1173 ret
= kv_set_dpm_boot_state(rdev
);
1175 DRM_ERROR("kv_set_dpm_boot_state failed\n");
1178 ret
= kv_enable_ulv(rdev
, true);
1180 DRM_ERROR("kv_enable_ulv failed\n");
1184 ret
= kv_enable_didt(rdev
, true);
1186 DRM_ERROR("kv_enable_didt failed\n");
1189 ret
= kv_enable_smc_cac(rdev
, true);
1191 DRM_ERROR("kv_enable_smc_cac failed\n");
1195 if (rdev
->irq
.installed
&&
1196 r600_is_internal_thermal_sensor(rdev
->pm
.int_thermal_type
)) {
1197 ret
= kv_set_thermal_temperature_range(rdev
, R600_TEMP_RANGE_MIN
, R600_TEMP_RANGE_MAX
);
1199 DRM_ERROR("kv_set_thermal_temperature_range failed\n");
1202 rdev
->irq
.dpm_thermal
= true;
1203 radeon_irq_set(rdev
);
1206 /* powerdown unused blocks for now */
1207 kv_dpm_powergate_acp(rdev
, true);
1208 kv_dpm_powergate_samu(rdev
, true);
1209 kv_dpm_powergate_vce(rdev
, true);
1210 kv_dpm_powergate_uvd(rdev
, true);
1212 cik_update_cg(rdev
, (RADEON_CG_BLOCK_GFX
|
1213 RADEON_CG_BLOCK_SDMA
|
1214 RADEON_CG_BLOCK_BIF
|
1215 RADEON_CG_BLOCK_HDP
), true);
1217 kv_update_current_ps(rdev
, rdev
->pm
.dpm
.boot_ps
);
1222 void kv_dpm_disable(struct radeon_device
*rdev
)
1224 cik_update_cg(rdev
, (RADEON_CG_BLOCK_GFX
|
1225 RADEON_CG_BLOCK_SDMA
|
1226 RADEON_CG_BLOCK_BIF
|
1227 RADEON_CG_BLOCK_HDP
), false);
1229 /* powerup blocks */
1230 kv_dpm_powergate_acp(rdev
, false);
1231 kv_dpm_powergate_samu(rdev
, false);
1232 kv_dpm_powergate_vce(rdev
, false);
1233 kv_dpm_powergate_uvd(rdev
, false);
1235 kv_enable_smc_cac(rdev
, false);
1236 kv_enable_didt(rdev
, false);
1239 kv_enable_ulv(rdev
, false);
1242 kv_update_current_ps(rdev
, rdev
->pm
.dpm
.boot_ps
);
1246 static int kv_write_smc_soft_register(struct radeon_device
*rdev
,
1247 u16 reg_offset
, u32 value
)
1249 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1251 return kv_copy_bytes_to_smc(rdev
, pi
->soft_regs_start
+ reg_offset
,
1252 (u8
*)&value
, sizeof(u16
), pi
->sram_end
);
1255 static int kv_read_smc_soft_register(struct radeon_device
*rdev
,
1256 u16 reg_offset
, u32
*value
)
1258 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1260 return kv_read_smc_sram_dword(rdev
, pi
->soft_regs_start
+ reg_offset
,
1261 value
, pi
->sram_end
);
1265 static void kv_init_sclk_t(struct radeon_device
*rdev
)
1267 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1269 pi
->low_sclk_interrupt_t
= 0;
1272 static int kv_init_fps_limits(struct radeon_device
*rdev
)
1274 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1281 pi
->fps_high_t
= cpu_to_be16(tmp
);
1282 ret
= kv_copy_bytes_to_smc(rdev
,
1283 pi
->dpm_table_start
+
1284 offsetof(SMU7_Fusion_DpmTable
, FpsHighT
),
1285 (u8
*)&pi
->fps_high_t
,
1286 sizeof(u16
), pi
->sram_end
);
1289 pi
->fps_low_t
= cpu_to_be16(tmp
);
1291 ret
= kv_copy_bytes_to_smc(rdev
,
1292 pi
->dpm_table_start
+
1293 offsetof(SMU7_Fusion_DpmTable
, FpsLowT
),
1294 (u8
*)&pi
->fps_low_t
,
1295 sizeof(u16
), pi
->sram_end
);
1301 static void kv_init_powergate_state(struct radeon_device
*rdev
)
1303 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1305 pi
->uvd_power_gated
= false;
1306 pi
->vce_power_gated
= false;
1307 pi
->samu_power_gated
= false;
1308 pi
->acp_power_gated
= false;
1312 static int kv_enable_uvd_dpm(struct radeon_device
*rdev
, bool enable
)
1314 return kv_notify_message_to_smu(rdev
, enable
?
1315 PPSMC_MSG_UVDDPM_Enable
: PPSMC_MSG_UVDDPM_Disable
);
1319 static int kv_enable_vce_dpm(struct radeon_device
*rdev
, bool enable
)
1321 return kv_notify_message_to_smu(rdev
, enable
?
1322 PPSMC_MSG_VCEDPM_Enable
: PPSMC_MSG_VCEDPM_Disable
);
1326 static int kv_enable_samu_dpm(struct radeon_device
*rdev
, bool enable
)
1328 return kv_notify_message_to_smu(rdev
, enable
?
1329 PPSMC_MSG_SAMUDPM_Enable
: PPSMC_MSG_SAMUDPM_Disable
);
1332 static int kv_enable_acp_dpm(struct radeon_device
*rdev
, bool enable
)
1334 return kv_notify_message_to_smu(rdev
, enable
?
1335 PPSMC_MSG_ACPDPM_Enable
: PPSMC_MSG_ACPDPM_Disable
);
1338 static int kv_update_uvd_dpm(struct radeon_device
*rdev
, bool gate
)
1340 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1341 struct radeon_uvd_clock_voltage_dependency_table
*table
=
1342 &rdev
->pm
.dpm
.dyn_state
.uvd_clock_voltage_dependency_table
;
1346 if (!pi
->caps_uvd_dpm
|| table
->count
|| pi
->caps_stable_p_state
)
1347 pi
->uvd_boot_level
= table
->count
- 1;
1349 pi
->uvd_boot_level
= 0;
1351 ret
= kv_copy_bytes_to_smc(rdev
,
1352 pi
->dpm_table_start
+
1353 offsetof(SMU7_Fusion_DpmTable
, UvdBootLevel
),
1354 (uint8_t *)&pi
->uvd_boot_level
,
1355 sizeof(u8
), pi
->sram_end
);
1359 if (!pi
->caps_uvd_dpm
||
1360 pi
->caps_stable_p_state
)
1361 kv_send_msg_to_smc_with_parameter(rdev
,
1362 PPSMC_MSG_UVDDPM_SetEnabledMask
,
1363 (1 << pi
->uvd_boot_level
));
1366 return kv_enable_uvd_dpm(rdev
, !gate
);
1370 static u8
kv_get_vce_boot_level(struct radeon_device
*rdev
)
1373 struct radeon_vce_clock_voltage_dependency_table
*table
=
1374 &rdev
->pm
.dpm
.dyn_state
.vce_clock_voltage_dependency_table
;
1376 for (i
= 0; i
< table
->count
; i
++) {
1377 if (table
->entries
[i
].evclk
>= 0) /* XXX */
1384 static int kv_update_vce_dpm(struct radeon_device
*rdev
,
1385 struct radeon_ps
*radeon_new_state
,
1386 struct radeon_ps
*radeon_current_state
)
1388 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1389 struct radeon_vce_clock_voltage_dependency_table
*table
=
1390 &rdev
->pm
.dpm
.dyn_state
.vce_clock_voltage_dependency_table
;
1393 if (radeon_new_state
->evclk
> 0 && radeon_current_state
->evclk
== 0) {
1394 if (pi
->caps_stable_p_state
)
1395 pi
->vce_boot_level
= table
->count
- 1;
1397 pi
->vce_boot_level
= kv_get_vce_boot_level(rdev
);
1399 ret
= kv_copy_bytes_to_smc(rdev
,
1400 pi
->dpm_table_start
+
1401 offsetof(SMU7_Fusion_DpmTable
, VceBootLevel
),
1402 (u8
*)&pi
->vce_boot_level
,
1408 if (pi
->caps_stable_p_state
)
1409 kv_send_msg_to_smc_with_parameter(rdev
,
1410 PPSMC_MSG_VCEDPM_SetEnabledMask
,
1411 (1 << pi
->vce_boot_level
));
1413 kv_enable_vce_dpm(rdev
, true);
1414 } else if (radeon_new_state
->evclk
== 0 && radeon_current_state
->evclk
> 0) {
1415 kv_enable_vce_dpm(rdev
, false);
1422 static int kv_update_samu_dpm(struct radeon_device
*rdev
, bool gate
)
1424 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1425 struct radeon_clock_voltage_dependency_table
*table
=
1426 &rdev
->pm
.dpm
.dyn_state
.samu_clock_voltage_dependency_table
;
1430 if (pi
->caps_stable_p_state
)
1431 pi
->samu_boot_level
= table
->count
- 1;
1433 pi
->samu_boot_level
= 0;
1435 ret
= kv_copy_bytes_to_smc(rdev
,
1436 pi
->dpm_table_start
+
1437 offsetof(SMU7_Fusion_DpmTable
, SamuBootLevel
),
1438 (u8
*)&pi
->samu_boot_level
,
1444 if (pi
->caps_stable_p_state
)
1445 kv_send_msg_to_smc_with_parameter(rdev
,
1446 PPSMC_MSG_SAMUDPM_SetEnabledMask
,
1447 (1 << pi
->samu_boot_level
));
1450 return kv_enable_samu_dpm(rdev
, !gate
);
1453 static int kv_update_acp_dpm(struct radeon_device
*rdev
, bool gate
)
1455 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1456 struct radeon_clock_voltage_dependency_table
*table
=
1457 &rdev
->pm
.dpm
.dyn_state
.acp_clock_voltage_dependency_table
;
1461 if (pi
->caps_stable_p_state
)
1462 pi
->acp_boot_level
= table
->count
- 1;
1464 pi
->acp_boot_level
= 0;
1466 ret
= kv_copy_bytes_to_smc(rdev
,
1467 pi
->dpm_table_start
+
1468 offsetof(SMU7_Fusion_DpmTable
, AcpBootLevel
),
1469 (u8
*)&pi
->acp_boot_level
,
1475 if (pi
->caps_stable_p_state
)
1476 kv_send_msg_to_smc_with_parameter(rdev
,
1477 PPSMC_MSG_ACPDPM_SetEnabledMask
,
1478 (1 << pi
->acp_boot_level
));
1481 return kv_enable_acp_dpm(rdev
, !gate
);
1484 void kv_dpm_powergate_uvd(struct radeon_device
*rdev
, bool gate
)
1486 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1488 if (pi
->uvd_power_gated
== gate
)
1491 pi
->uvd_power_gated
= gate
;
1494 if (pi
->caps_uvd_pg
) {
1495 uvd_v1_0_stop(rdev
);
1496 cik_update_cg(rdev
, RADEON_CG_BLOCK_UVD
, false);
1498 kv_update_uvd_dpm(rdev
, gate
);
1499 if (pi
->caps_uvd_pg
)
1500 kv_notify_message_to_smu(rdev
, PPSMC_MSG_UVDPowerOFF
);
1502 if (pi
->caps_uvd_pg
) {
1503 kv_notify_message_to_smu(rdev
, PPSMC_MSG_UVDPowerON
);
1504 uvd_v4_2_resume(rdev
);
1505 uvd_v1_0_start(rdev
);
1506 cik_update_cg(rdev
, RADEON_CG_BLOCK_UVD
, true);
1508 kv_update_uvd_dpm(rdev
, gate
);
1512 static void kv_dpm_powergate_vce(struct radeon_device
*rdev
, bool gate
)
1514 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1516 if (pi
->vce_power_gated
== gate
)
1519 pi
->vce_power_gated
= gate
;
1522 if (pi
->caps_vce_pg
)
1523 kv_notify_message_to_smu(rdev
, PPSMC_MSG_VCEPowerOFF
);
1525 if (pi
->caps_vce_pg
)
1526 kv_notify_message_to_smu(rdev
, PPSMC_MSG_VCEPowerON
);
1530 static void kv_dpm_powergate_samu(struct radeon_device
*rdev
, bool gate
)
1532 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1534 if (pi
->samu_power_gated
== gate
)
1537 pi
->samu_power_gated
= gate
;
1540 kv_update_samu_dpm(rdev
, true);
1541 if (pi
->caps_samu_pg
)
1542 kv_notify_message_to_smu(rdev
, PPSMC_MSG_SAMPowerOFF
);
1544 if (pi
->caps_samu_pg
)
1545 kv_notify_message_to_smu(rdev
, PPSMC_MSG_SAMPowerON
);
1546 kv_update_samu_dpm(rdev
, false);
1550 static void kv_dpm_powergate_acp(struct radeon_device
*rdev
, bool gate
)
1552 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1554 if (pi
->acp_power_gated
== gate
)
1557 if (rdev
->family
== CHIP_KABINI
)
1560 pi
->acp_power_gated
= gate
;
1563 kv_update_acp_dpm(rdev
, true);
1564 if (pi
->caps_acp_pg
)
1565 kv_notify_message_to_smu(rdev
, PPSMC_MSG_ACPPowerOFF
);
1567 if (pi
->caps_acp_pg
)
1568 kv_notify_message_to_smu(rdev
, PPSMC_MSG_ACPPowerON
);
1569 kv_update_acp_dpm(rdev
, false);
1573 static void kv_set_valid_clock_range(struct radeon_device
*rdev
,
1574 struct radeon_ps
*new_rps
)
1576 struct kv_ps
*new_ps
= kv_get_ps(new_rps
);
1577 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1579 struct radeon_clock_voltage_dependency_table
*table
=
1580 &rdev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
1582 if (table
&& table
->count
) {
1583 for (i
= 0; i
< pi
->graphics_dpm_level_count
; i
++) {
1584 if ((table
->entries
[i
].clk
>= new_ps
->levels
[0].sclk
) ||
1585 (i
== (pi
->graphics_dpm_level_count
- 1))) {
1586 pi
->lowest_valid
= i
;
1591 for (i
= pi
->graphics_dpm_level_count
- 1; i
>= 0; i
--) {
1592 if ((table
->entries
[i
].clk
<= new_ps
->levels
[new_ps
->num_levels
-1].sclk
) ||
1594 pi
->highest_valid
= i
;
1599 if (pi
->lowest_valid
> pi
->highest_valid
) {
1600 if ((new_ps
->levels
[0].sclk
- table
->entries
[pi
->highest_valid
].clk
) >
1601 (table
->entries
[pi
->lowest_valid
].clk
- new_ps
->levels
[new_ps
->num_levels
- 1].sclk
))
1602 pi
->highest_valid
= pi
->lowest_valid
;
1604 pi
->lowest_valid
= pi
->highest_valid
;
1607 struct sumo_sclk_voltage_mapping_table
*table
=
1608 &pi
->sys_info
.sclk_voltage_mapping_table
;
1610 for (i
= 0; i
< (int)pi
->graphics_dpm_level_count
; i
++) {
1611 if (table
->entries
[i
].sclk_frequency
>= new_ps
->levels
[0].sclk
||
1612 i
== (int)(pi
->graphics_dpm_level_count
- 1)) {
1613 pi
->lowest_valid
= i
;
1618 for (i
= pi
->graphics_dpm_level_count
- 1; i
>= 0; i
--) {
1619 if (table
->entries
[i
].sclk_frequency
<=
1620 new_ps
->levels
[new_ps
->num_levels
- 1].sclk
||
1622 pi
->highest_valid
= i
;
1627 if (pi
->lowest_valid
> pi
->highest_valid
) {
1628 if ((new_ps
->levels
[0].sclk
-
1629 table
->entries
[pi
->highest_valid
].sclk_frequency
) >
1630 (table
->entries
[pi
->lowest_valid
].sclk_frequency
-
1631 new_ps
->levels
[new_ps
->num_levels
-1].sclk
))
1632 pi
->highest_valid
= pi
->lowest_valid
;
1634 pi
->lowest_valid
= pi
->highest_valid
;
1639 static int kv_update_dfs_bypass_settings(struct radeon_device
*rdev
,
1640 struct radeon_ps
*new_rps
)
1642 struct kv_ps
*new_ps
= kv_get_ps(new_rps
);
1643 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1647 if (pi
->caps_enable_dfs_bypass
) {
1648 clk_bypass_cntl
= new_ps
->need_dfs_bypass
?
1649 pi
->graphics_level
[pi
->graphics_boot_level
].ClkBypassCntl
: 0;
1650 ret
= kv_copy_bytes_to_smc(rdev
,
1651 (pi
->dpm_table_start
+
1652 offsetof(SMU7_Fusion_DpmTable
, GraphicsLevel
) +
1653 (pi
->graphics_boot_level
* sizeof(SMU7_Fusion_GraphicsLevel
)) +
1654 offsetof(SMU7_Fusion_GraphicsLevel
, ClkBypassCntl
)),
1656 sizeof(u8
), pi
->sram_end
);
1662 static int kv_enable_nb_dpm(struct radeon_device
*rdev
)
1664 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1667 if (pi
->enable_nb_dpm
&& !pi
->nb_dpm_enabled
) {
1668 ret
= kv_notify_message_to_smu(rdev
, PPSMC_MSG_NBDPM_Enable
);
1670 pi
->nb_dpm_enabled
= true;
1676 int kv_dpm_force_performance_level(struct radeon_device
*rdev
,
1677 enum radeon_dpm_forced_level level
)
1681 if (level
== RADEON_DPM_FORCED_LEVEL_HIGH
) {
1682 ret
= kv_force_dpm_highest(rdev
);
1685 } else if (level
== RADEON_DPM_FORCED_LEVEL_LOW
) {
1686 ret
= kv_force_dpm_lowest(rdev
);
1689 } else if (level
== RADEON_DPM_FORCED_LEVEL_AUTO
) {
1690 ret
= kv_unforce_levels(rdev
);
1695 rdev
->pm
.dpm
.forced_level
= level
;
1700 int kv_dpm_pre_set_power_state(struct radeon_device
*rdev
)
1702 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1703 struct radeon_ps requested_ps
= *rdev
->pm
.dpm
.requested_ps
;
1704 struct radeon_ps
*new_ps
= &requested_ps
;
1706 kv_update_requested_ps(rdev
, new_ps
);
1708 kv_apply_state_adjust_rules(rdev
,
1715 int kv_dpm_set_power_state(struct radeon_device
*rdev
)
1717 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1718 struct radeon_ps
*new_ps
= &pi
->requested_rps
;
1719 /*struct radeon_ps *old_ps = &pi->current_rps;*/
1722 cik_update_cg(rdev
, (RADEON_CG_BLOCK_GFX
|
1723 RADEON_CG_BLOCK_SDMA
|
1724 RADEON_CG_BLOCK_BIF
|
1725 RADEON_CG_BLOCK_HDP
), false);
1727 if (rdev
->family
== CHIP_KABINI
) {
1728 if (pi
->enable_dpm
) {
1729 kv_set_valid_clock_range(rdev
, new_ps
);
1730 kv_update_dfs_bypass_settings(rdev
, new_ps
);
1731 ret
= kv_calculate_ds_divider(rdev
);
1733 DRM_ERROR("kv_calculate_ds_divider failed\n");
1736 kv_calculate_nbps_level_settings(rdev
);
1737 kv_calculate_dpm_settings(rdev
);
1738 kv_force_lowest_valid(rdev
);
1739 kv_enable_new_levels(rdev
);
1740 kv_upload_dpm_settings(rdev
);
1741 kv_program_nbps_index_settings(rdev
, new_ps
);
1742 kv_unforce_levels(rdev
);
1743 kv_set_enabled_levels(rdev
);
1744 kv_force_lowest_valid(rdev
);
1745 kv_unforce_levels(rdev
);
1747 ret
= kv_update_vce_dpm(rdev
, new_ps
, old_ps
);
1749 DRM_ERROR("kv_update_vce_dpm failed\n");
1753 kv_update_sclk_t(rdev
);
1756 if (pi
->enable_dpm
) {
1757 kv_set_valid_clock_range(rdev
, new_ps
);
1758 kv_update_dfs_bypass_settings(rdev
, new_ps
);
1759 ret
= kv_calculate_ds_divider(rdev
);
1761 DRM_ERROR("kv_calculate_ds_divider failed\n");
1764 kv_calculate_nbps_level_settings(rdev
);
1765 kv_calculate_dpm_settings(rdev
);
1766 kv_freeze_sclk_dpm(rdev
, true);
1767 kv_upload_dpm_settings(rdev
);
1768 kv_program_nbps_index_settings(rdev
, new_ps
);
1769 kv_freeze_sclk_dpm(rdev
, false);
1770 kv_set_enabled_levels(rdev
);
1772 ret
= kv_update_vce_dpm(rdev
, new_ps
, old_ps
);
1774 DRM_ERROR("kv_update_vce_dpm failed\n");
1778 kv_update_sclk_t(rdev
);
1779 kv_enable_nb_dpm(rdev
);
1783 cik_update_cg(rdev
, (RADEON_CG_BLOCK_GFX
|
1784 RADEON_CG_BLOCK_SDMA
|
1785 RADEON_CG_BLOCK_BIF
|
1786 RADEON_CG_BLOCK_HDP
), true);
1788 rdev
->pm
.dpm
.forced_level
= RADEON_DPM_FORCED_LEVEL_AUTO
;
1792 void kv_dpm_post_set_power_state(struct radeon_device
*rdev
)
1794 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1795 struct radeon_ps
*new_ps
= &pi
->requested_rps
;
1797 kv_update_current_ps(rdev
, new_ps
);
1800 void kv_dpm_setup_asic(struct radeon_device
*rdev
)
1802 sumo_take_smu_control(rdev
, true);
1803 kv_init_powergate_state(rdev
);
1804 kv_init_sclk_t(rdev
);
/*
 * kv_dpm_reset_asic - reprogram bootup DPM state: drop to the lowest
 * valid level, rebuild and upload the graphics levels, then release
 * any forced level.
 */
void kv_dpm_reset_asic(struct radeon_device *rdev)
{
	kv_force_lowest_valid(rdev);
	kv_init_graphics_levels(rdev);
	kv_program_bootup_state(rdev);
	kv_upload_dpm_settings(rdev);
	kv_force_lowest_valid(rdev);
	kv_unforce_levels(rdev);
}
1817 //XXX use sumo_dpm_display_configuration_changed
1819 static void kv_construct_max_power_limits_table(struct radeon_device
*rdev
,
1820 struct radeon_clock_and_voltage_limits
*table
)
1822 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1824 if (pi
->sys_info
.sclk_voltage_mapping_table
.num_max_dpm_entries
> 0) {
1825 int idx
= pi
->sys_info
.sclk_voltage_mapping_table
.num_max_dpm_entries
- 1;
1827 pi
->sys_info
.sclk_voltage_mapping_table
.entries
[idx
].sclk_frequency
;
1829 kv_convert_2bit_index_to_voltage(rdev
,
1830 pi
->sys_info
.sclk_voltage_mapping_table
.entries
[idx
].vid_2bit
);
1833 table
->mclk
= pi
->sys_info
.nbp_memory_clock
[0];
1836 static void kv_patch_voltage_values(struct radeon_device
*rdev
)
1839 struct radeon_uvd_clock_voltage_dependency_table
*table
=
1840 &rdev
->pm
.dpm
.dyn_state
.uvd_clock_voltage_dependency_table
;
1843 for (i
= 0; i
< table
->count
; i
++)
1844 table
->entries
[i
].v
=
1845 kv_convert_8bit_index_to_voltage(rdev
,
1846 table
->entries
[i
].v
);
1851 static void kv_construct_boot_state(struct radeon_device
*rdev
)
1853 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1855 pi
->boot_pl
.sclk
= pi
->sys_info
.bootup_sclk
;
1856 pi
->boot_pl
.vddc_index
= pi
->sys_info
.bootup_nb_voltage_index
;
1857 pi
->boot_pl
.ds_divider_index
= 0;
1858 pi
->boot_pl
.ss_divider_index
= 0;
1859 pi
->boot_pl
.allow_gnb_slow
= 1;
1860 pi
->boot_pl
.force_nbp_state
= 0;
1861 pi
->boot_pl
.display_wm
= 0;
1862 pi
->boot_pl
.vce_wm
= 0;
1865 static int kv_force_dpm_highest(struct radeon_device
*rdev
)
1870 ret
= kv_dpm_get_enable_mask(rdev
, &enable_mask
);
1874 for (i
= SMU7_MAX_LEVELS_GRAPHICS
- 1; i
>= 0; i
--) {
1875 if (enable_mask
& (1 << i
))
1879 return kv_send_msg_to_smc_with_parameter(rdev
, PPSMC_MSG_DPM_ForceState
, i
);
1882 static int kv_force_dpm_lowest(struct radeon_device
*rdev
)
1887 ret
= kv_dpm_get_enable_mask(rdev
, &enable_mask
);
1891 for (i
= 0; i
< SMU7_MAX_LEVELS_GRAPHICS
; i
++) {
1892 if (enable_mask
& (1 << i
))
1896 return kv_send_msg_to_smc_with_parameter(rdev
, PPSMC_MSG_DPM_ForceState
, i
);
1899 static u8
kv_get_sleep_divider_id_from_clock(struct radeon_device
*rdev
,
1900 u32 sclk
, u32 min_sclk_in_sr
)
1902 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1905 u32 min
= (min_sclk_in_sr
> KV_MINIMUM_ENGINE_CLOCK
) ?
1906 min_sclk_in_sr
: KV_MINIMUM_ENGINE_CLOCK
;
1911 if (!pi
->caps_sclk_ds
)
1914 for (i
= KV_MAX_DEEPSLEEP_DIVIDER_ID
; i
<= 0; i
--) {
1915 temp
= sclk
/ sumo_get_sleep_divider_from_id(i
);
1916 if ((temp
>= min
) || (i
== 0))
1923 static int kv_get_high_voltage_limit(struct radeon_device
*rdev
, int *limit
)
1925 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1926 struct radeon_clock_voltage_dependency_table
*table
=
1927 &rdev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
1930 if (table
&& table
->count
) {
1931 for (i
= table
->count
- 1; i
>= 0; i
--) {
1932 if (pi
->high_voltage_t
&&
1933 (kv_convert_8bit_index_to_voltage(rdev
, table
->entries
[i
].v
) <=
1934 pi
->high_voltage_t
)) {
1940 struct sumo_sclk_voltage_mapping_table
*table
=
1941 &pi
->sys_info
.sclk_voltage_mapping_table
;
1943 for (i
= table
->num_max_dpm_entries
- 1; i
>= 0; i
--) {
1944 if (pi
->high_voltage_t
&&
1945 (kv_convert_2bit_index_to_voltage(rdev
, table
->entries
[i
].vid_2bit
) <=
1946 pi
->high_voltage_t
)) {
1957 static void kv_apply_state_adjust_rules(struct radeon_device
*rdev
,
1958 struct radeon_ps
*new_rps
,
1959 struct radeon_ps
*old_rps
)
1961 struct kv_ps
*ps
= kv_get_ps(new_rps
);
1962 struct kv_power_info
*pi
= kv_get_pi(rdev
);
1963 u32 min_sclk
= 10000; /* ??? */
1967 struct radeon_clock_voltage_dependency_table
*table
=
1968 &rdev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
1969 u32 stable_p_state_sclk
= 0;
1970 struct radeon_clock_and_voltage_limits
*max_limits
=
1971 &rdev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
;
1973 mclk
= max_limits
->mclk
;
1976 if (pi
->caps_stable_p_state
) {
1977 stable_p_state_sclk
= (max_limits
->sclk
* 75) / 100;
1979 for (i
= table
->count
- 1; i
>= 0; i
++) {
1980 if (stable_p_state_sclk
>= table
->entries
[i
].clk
) {
1981 stable_p_state_sclk
= table
->entries
[i
].clk
;
1987 stable_p_state_sclk
= table
->entries
[0].clk
;
1989 sclk
= stable_p_state_sclk
;
1992 ps
->need_dfs_bypass
= true;
1994 for (i
= 0; i
< ps
->num_levels
; i
++) {
1995 if (ps
->levels
[i
].sclk
< sclk
)
1996 ps
->levels
[i
].sclk
= sclk
;
1999 if (table
&& table
->count
) {
2000 for (i
= 0; i
< ps
->num_levels
; i
++) {
2001 if (pi
->high_voltage_t
&&
2002 (pi
->high_voltage_t
<
2003 kv_convert_8bit_index_to_voltage(rdev
, ps
->levels
[i
].vddc_index
))) {
2004 kv_get_high_voltage_limit(rdev
, &limit
);
2005 ps
->levels
[i
].sclk
= table
->entries
[limit
].clk
;
2009 struct sumo_sclk_voltage_mapping_table
*table
=
2010 &pi
->sys_info
.sclk_voltage_mapping_table
;
2012 for (i
= 0; i
< ps
->num_levels
; i
++) {
2013 if (pi
->high_voltage_t
&&
2014 (pi
->high_voltage_t
<
2015 kv_convert_8bit_index_to_voltage(rdev
, ps
->levels
[i
].vddc_index
))) {
2016 kv_get_high_voltage_limit(rdev
, &limit
);
2017 ps
->levels
[i
].sclk
= table
->entries
[limit
].sclk_frequency
;
2022 if (pi
->caps_stable_p_state
) {
2023 for (i
= 0; i
< ps
->num_levels
; i
++) {
2024 ps
->levels
[i
].sclk
= stable_p_state_sclk
;
2028 pi
->video_start
= new_rps
->dclk
|| new_rps
->vclk
;
2030 if ((new_rps
->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK
) ==
2031 ATOM_PPLIB_CLASSIFICATION_UI_BATTERY
)
2032 pi
->battery_state
= true;
2034 pi
->battery_state
= false;
2036 if (rdev
->family
== CHIP_KABINI
) {
2037 ps
->dpm0_pg_nb_ps_lo
= 0x1;
2038 ps
->dpm0_pg_nb_ps_hi
= 0x0;
2039 ps
->dpmx_nb_ps_lo
= 0x1;
2040 ps
->dpmx_nb_ps_hi
= 0x0;
2042 ps
->dpm0_pg_nb_ps_lo
= 0x1;
2043 ps
->dpm0_pg_nb_ps_hi
= 0x0;
2044 ps
->dpmx_nb_ps_lo
= 0x2;
2045 ps
->dpmx_nb_ps_hi
= 0x1;
2047 if (pi
->sys_info
.nb_dpm_enable
&& pi
->battery_state
) {
2048 force_high
= (mclk
>= pi
->sys_info
.nbp_memory_clock
[3]) ||
2049 pi
->video_start
|| (rdev
->pm
.dpm
.new_active_crtc_count
>= 3) ||
2050 pi
->disable_nb_ps3_in_battery
;
2051 ps
->dpm0_pg_nb_ps_lo
= force_high
? 0x2 : 0x3;
2052 ps
->dpm0_pg_nb_ps_hi
= 0x2;
2053 ps
->dpmx_nb_ps_lo
= force_high
? 0x2 : 0x3;
2054 ps
->dpmx_nb_ps_hi
= 0x2;
2059 static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device
*rdev
,
2060 u32 index
, bool enable
)
2062 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2064 pi
->graphics_level
[index
].EnabledForThrottle
= enable
? 1 : 0;
2067 static int kv_calculate_ds_divider(struct radeon_device
*rdev
)
2069 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2070 u32 sclk_in_sr
= 10000; /* ??? */
2073 if (pi
->lowest_valid
> pi
->highest_valid
)
2076 for (i
= pi
->lowest_valid
; i
<= pi
->highest_valid
; i
++) {
2077 pi
->graphics_level
[i
].DeepSleepDivId
=
2078 kv_get_sleep_divider_id_from_clock(rdev
,
2079 be32_to_cpu(pi
->graphics_level
[i
].SclkFrequency
),
2085 static int kv_calculate_nbps_level_settings(struct radeon_device
*rdev
)
2087 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2090 struct radeon_clock_and_voltage_limits
*max_limits
=
2091 &rdev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
;
2092 u32 mclk
= max_limits
->mclk
;
2094 if (pi
->lowest_valid
> pi
->highest_valid
)
2097 if (rdev
->family
== CHIP_KABINI
) {
2098 for (i
= pi
->lowest_valid
; i
<= pi
->highest_valid
; i
++) {
2099 pi
->graphics_level
[i
].GnbSlow
= 1;
2100 pi
->graphics_level
[i
].ForceNbPs1
= 0;
2101 pi
->graphics_level
[i
].UpH
= 0;
2104 if (!pi
->sys_info
.nb_dpm_enable
)
2107 force_high
= ((mclk
>= pi
->sys_info
.nbp_memory_clock
[3]) ||
2108 (rdev
->pm
.dpm
.new_active_crtc_count
>= 3) || pi
->video_start
);
2111 for (i
= pi
->lowest_valid
; i
<= pi
->highest_valid
; i
++)
2112 pi
->graphics_level
[i
].GnbSlow
= 0;
2114 if (pi
->battery_state
)
2115 pi
->graphics_level
[0].ForceNbPs1
= 1;
2117 pi
->graphics_level
[1].GnbSlow
= 0;
2118 pi
->graphics_level
[2].GnbSlow
= 0;
2119 pi
->graphics_level
[3].GnbSlow
= 0;
2120 pi
->graphics_level
[4].GnbSlow
= 0;
2123 for (i
= pi
->lowest_valid
; i
<= pi
->highest_valid
; i
++) {
2124 pi
->graphics_level
[i
].GnbSlow
= 1;
2125 pi
->graphics_level
[i
].ForceNbPs1
= 0;
2126 pi
->graphics_level
[i
].UpH
= 0;
2129 if (pi
->sys_info
.nb_dpm_enable
&& pi
->battery_state
) {
2130 pi
->graphics_level
[pi
->lowest_valid
].UpH
= 0x28;
2131 pi
->graphics_level
[pi
->lowest_valid
].GnbSlow
= 0;
2132 if (pi
->lowest_valid
!= pi
->highest_valid
)
2133 pi
->graphics_level
[pi
->lowest_valid
].ForceNbPs1
= 1;
2139 static int kv_calculate_dpm_settings(struct radeon_device
*rdev
)
2141 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2144 if (pi
->lowest_valid
> pi
->highest_valid
)
2147 for (i
= pi
->lowest_valid
; i
<= pi
->highest_valid
; i
++)
2148 pi
->graphics_level
[i
].DisplayWatermark
= (i
== pi
->highest_valid
) ? 1 : 0;
2153 static void kv_init_graphics_levels(struct radeon_device
*rdev
)
2155 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2157 struct radeon_clock_voltage_dependency_table
*table
=
2158 &rdev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
;
2160 if (table
&& table
->count
) {
2163 pi
->graphics_dpm_level_count
= 0;
2164 for (i
= 0; i
< table
->count
; i
++) {
2165 if (pi
->high_voltage_t
&&
2166 (pi
->high_voltage_t
<
2167 kv_convert_8bit_index_to_voltage(rdev
, table
->entries
[i
].v
)))
2170 kv_set_divider_value(rdev
, i
, table
->entries
[i
].clk
);
2171 vid_2bit
= sumo_convert_vid7_to_vid2(rdev
,
2172 &pi
->sys_info
.vid_mapping_table
,
2173 table
->entries
[i
].v
);
2174 kv_set_vid(rdev
, i
, vid_2bit
);
2175 kv_set_at(rdev
, i
, pi
->at
[i
]);
2176 kv_dpm_power_level_enabled_for_throttle(rdev
, i
, true);
2177 pi
->graphics_dpm_level_count
++;
2180 struct sumo_sclk_voltage_mapping_table
*table
=
2181 &pi
->sys_info
.sclk_voltage_mapping_table
;
2183 pi
->graphics_dpm_level_count
= 0;
2184 for (i
= 0; i
< table
->num_max_dpm_entries
; i
++) {
2185 if (pi
->high_voltage_t
&&
2186 pi
->high_voltage_t
<
2187 kv_convert_2bit_index_to_voltage(rdev
, table
->entries
[i
].vid_2bit
))
2190 kv_set_divider_value(rdev
, i
, table
->entries
[i
].sclk_frequency
);
2191 kv_set_vid(rdev
, i
, table
->entries
[i
].vid_2bit
);
2192 kv_set_at(rdev
, i
, pi
->at
[i
]);
2193 kv_dpm_power_level_enabled_for_throttle(rdev
, i
, true);
2194 pi
->graphics_dpm_level_count
++;
2198 for (i
= 0; i
< SMU7_MAX_LEVELS_GRAPHICS
; i
++)
2199 kv_dpm_power_level_enable(rdev
, i
, false);
2202 static void kv_enable_new_levels(struct radeon_device
*rdev
)
2204 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2207 for (i
= 0; i
< SMU7_MAX_LEVELS_GRAPHICS
; i
++) {
2208 if (i
>= pi
->lowest_valid
&& i
<= pi
->highest_valid
)
2209 kv_dpm_power_level_enable(rdev
, i
, true);
2213 static int kv_set_enabled_levels(struct radeon_device
*rdev
)
2215 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2216 u32 i
, new_mask
= 0;
2218 for (i
= pi
->lowest_valid
; i
<= pi
->highest_valid
; i
++)
2219 new_mask
|= (1 << i
);
2221 return kv_send_msg_to_smc_with_parameter(rdev
,
2222 PPSMC_MSG_SCLKDPM_SetEnabledMask
,
2226 static void kv_program_nbps_index_settings(struct radeon_device
*rdev
,
2227 struct radeon_ps
*new_rps
)
2229 struct kv_ps
*new_ps
= kv_get_ps(new_rps
);
2230 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2233 if (rdev
->family
== CHIP_KABINI
)
2236 if (pi
->sys_info
.nb_dpm_enable
) {
2237 nbdpmconfig1
= RREG32_SMC(NB_DPM_CONFIG_1
);
2238 nbdpmconfig1
&= ~(Dpm0PgNbPsLo_MASK
| Dpm0PgNbPsHi_MASK
|
2239 DpmXNbPsLo_MASK
| DpmXNbPsHi_MASK
);
2240 nbdpmconfig1
|= (Dpm0PgNbPsLo(new_ps
->dpm0_pg_nb_ps_lo
) |
2241 Dpm0PgNbPsHi(new_ps
->dpm0_pg_nb_ps_hi
) |
2242 DpmXNbPsLo(new_ps
->dpmx_nb_ps_lo
) |
2243 DpmXNbPsHi(new_ps
->dpmx_nb_ps_hi
));
2244 WREG32_SMC(NB_DPM_CONFIG_1
, nbdpmconfig1
);
2248 static int kv_set_thermal_temperature_range(struct radeon_device
*rdev
,
2249 int min_temp
, int max_temp
)
2251 int low_temp
= 0 * 1000;
2252 int high_temp
= 255 * 1000;
2255 if (low_temp
< min_temp
)
2256 low_temp
= min_temp
;
2257 if (high_temp
> max_temp
)
2258 high_temp
= max_temp
;
2259 if (high_temp
< low_temp
) {
2260 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp
, high_temp
);
2264 tmp
= RREG32_SMC(CG_THERMAL_INT_CTRL
);
2265 tmp
&= ~(DIG_THERM_INTH_MASK
| DIG_THERM_INTL_MASK
);
2266 tmp
|= (DIG_THERM_INTH(49 + (high_temp
/ 1000)) |
2267 DIG_THERM_INTL(49 + (low_temp
/ 1000)));
2268 WREG32_SMC(CG_THERMAL_INT_CTRL
, tmp
);
2270 rdev
->pm
.dpm
.thermal
.min_temp
= low_temp
;
2271 rdev
->pm
.dpm
.thermal
.max_temp
= high_temp
;
2277 struct _ATOM_INTEGRATED_SYSTEM_INFO info
;
2278 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2
;
2279 struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5
;
2280 struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6
;
2281 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7
;
2282 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8
;
2285 static int kv_parse_sys_info_table(struct radeon_device
*rdev
)
2287 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2288 struct radeon_mode_info
*mode_info
= &rdev
->mode_info
;
2289 int index
= GetIndexIntoMasterTable(DATA
, IntegratedSystemInfo
);
2290 union igp_info
*igp_info
;
2295 if (atom_parse_data_header(mode_info
->atom_context
, index
, NULL
,
2296 &frev
, &crev
, &data_offset
)) {
2297 igp_info
= (union igp_info
*)(mode_info
->atom_context
->bios
+
2301 DRM_ERROR("Unsupported IGP table: %d %d\n", frev
, crev
);
2304 pi
->sys_info
.bootup_sclk
= le32_to_cpu(igp_info
->info_8
.ulBootUpEngineClock
);
2305 pi
->sys_info
.bootup_uma_clk
= le32_to_cpu(igp_info
->info_8
.ulBootUpUMAClock
);
2306 pi
->sys_info
.bootup_nb_voltage_index
=
2307 le16_to_cpu(igp_info
->info_8
.usBootUpNBVoltage
);
2308 if (igp_info
->info_8
.ucHtcTmpLmt
== 0)
2309 pi
->sys_info
.htc_tmp_lmt
= 203;
2311 pi
->sys_info
.htc_tmp_lmt
= igp_info
->info_8
.ucHtcTmpLmt
;
2312 if (igp_info
->info_8
.ucHtcHystLmt
== 0)
2313 pi
->sys_info
.htc_hyst_lmt
= 5;
2315 pi
->sys_info
.htc_hyst_lmt
= igp_info
->info_8
.ucHtcHystLmt
;
2316 if (pi
->sys_info
.htc_tmp_lmt
<= pi
->sys_info
.htc_hyst_lmt
) {
2317 DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
2320 if (le32_to_cpu(igp_info
->info_8
.ulSystemConfig
) & (1 << 3))
2321 pi
->sys_info
.nb_dpm_enable
= true;
2323 pi
->sys_info
.nb_dpm_enable
= false;
2325 for (i
= 0; i
< KV_NUM_NBPSTATES
; i
++) {
2326 pi
->sys_info
.nbp_memory_clock
[i
] =
2327 le32_to_cpu(igp_info
->info_8
.ulNbpStateMemclkFreq
[i
]);
2328 pi
->sys_info
.nbp_n_clock
[i
] =
2329 le32_to_cpu(igp_info
->info_8
.ulNbpStateNClkFreq
[i
]);
2331 if (le32_to_cpu(igp_info
->info_8
.ulGPUCapInfo
) &
2332 SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS
)
2333 pi
->caps_enable_dfs_bypass
= true;
2335 sumo_construct_sclk_voltage_mapping_table(rdev
,
2336 &pi
->sys_info
.sclk_voltage_mapping_table
,
2337 igp_info
->info_8
.sAvail_SCLK
);
2339 sumo_construct_vid_mapping_table(rdev
,
2340 &pi
->sys_info
.vid_mapping_table
,
2341 igp_info
->info_8
.sAvail_SCLK
);
2343 kv_construct_max_power_limits_table(rdev
,
2344 &rdev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
);
2350 struct _ATOM_POWERPLAY_INFO info
;
2351 struct _ATOM_POWERPLAY_INFO_V2 info_2
;
2352 struct _ATOM_POWERPLAY_INFO_V3 info_3
;
2353 struct _ATOM_PPLIB_POWERPLAYTABLE pplib
;
2354 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2
;
2355 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3
;
2358 union pplib_clock_info
{
2359 struct _ATOM_PPLIB_R600_CLOCK_INFO r600
;
2360 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780
;
2361 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen
;
2362 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo
;
2365 union pplib_power_state
{
2366 struct _ATOM_PPLIB_STATE v1
;
2367 struct _ATOM_PPLIB_STATE_V2 v2
;
2370 static void kv_patch_boot_state(struct radeon_device
*rdev
,
2373 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2376 ps
->levels
[0] = pi
->boot_pl
;
2379 static void kv_parse_pplib_non_clock_info(struct radeon_device
*rdev
,
2380 struct radeon_ps
*rps
,
2381 struct _ATOM_PPLIB_NONCLOCK_INFO
*non_clock_info
,
2384 struct kv_ps
*ps
= kv_get_ps(rps
);
2386 rps
->caps
= le32_to_cpu(non_clock_info
->ulCapsAndSettings
);
2387 rps
->class = le16_to_cpu(non_clock_info
->usClassification
);
2388 rps
->class2
= le16_to_cpu(non_clock_info
->usClassification2
);
2390 if (ATOM_PPLIB_NONCLOCKINFO_VER1
< table_rev
) {
2391 rps
->vclk
= le32_to_cpu(non_clock_info
->ulVCLK
);
2392 rps
->dclk
= le32_to_cpu(non_clock_info
->ulDCLK
);
2398 if (rps
->class & ATOM_PPLIB_CLASSIFICATION_BOOT
) {
2399 rdev
->pm
.dpm
.boot_ps
= rps
;
2400 kv_patch_boot_state(rdev
, ps
);
2402 if (rps
->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE
)
2403 rdev
->pm
.dpm
.uvd_ps
= rps
;
2406 static void kv_parse_pplib_clock_info(struct radeon_device
*rdev
,
2407 struct radeon_ps
*rps
, int index
,
2408 union pplib_clock_info
*clock_info
)
2410 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2411 struct kv_ps
*ps
= kv_get_ps(rps
);
2412 struct kv_pl
*pl
= &ps
->levels
[index
];
2415 sclk
= le16_to_cpu(clock_info
->sumo
.usEngineClockLow
);
2416 sclk
|= clock_info
->sumo
.ucEngineClockHigh
<< 16;
2418 pl
->vddc_index
= clock_info
->sumo
.vddcIndex
;
2420 ps
->num_levels
= index
+ 1;
2422 if (pi
->caps_sclk_ds
) {
2423 pl
->ds_divider_index
= 5;
2424 pl
->ss_divider_index
= 5;
/*
 * kv_parse_power_table() - walk the ATOM PowerPlay tables in the video
 * BIOS and build rdev->pm.dpm.ps[], one radeon_ps (with a kv_ps attached
 * via ps_priv) per BIOS power state, each with up to
 * SUMO_MAX_HARDWARE_POWERLEVELS clock levels.
 *
 * NOTE(review): mangled chunk; several original lines are missing from
 * this view (declarations of frev/crev/data_offset/ps/idx/k, the error
 * returns after the header/alloc checks, loop 'continue'/'break' lines,
 * kfree of earlier ps_priv entries on the alloc-failure path, and the
 * final 'return 0'). Comments below describe only the visible code.
 */
2428 static int kv_parse_power_table(struct radeon_device
*rdev
)
2430 struct radeon_mode_info
*mode_info
= &rdev
->mode_info
;
2431 struct _ATOM_PPLIB_NONCLOCK_INFO
*non_clock_info
;
2432 union pplib_power_state
*power_state
;
2433 int i
, j
, k
, non_clock_array_index
, clock_array_index
;
2434 union pplib_clock_info
*clock_info
;
2435 struct _StateArray
*state_array
;
2436 struct _ClockInfoArray
*clock_info_array
;
2437 struct _NonClockInfoArray
*non_clock_info_array
;
2438 union power_info
*power_info
;
/* Locate the PowerPlayInfo data table in the ATOM master table. */
2439 int index
= GetIndexIntoMasterTable(DATA
, PowerPlayInfo
);
2442 u8
*power_state_offset
;
/* Bail out if the BIOS has no parseable PowerPlay header. */
2445 if (!atom_parse_data_header(mode_info
->atom_context
, index
, NULL
,
2446 &frev
, &crev
, &data_offset
))
2448 power_info
= (union power_info
*)(mode_info
->atom_context
->bios
+ data_offset
);
/* The three sub-arrays are addressed by little-endian offsets relative
 * to the PowerPlay data header. */
2450 state_array
= (struct _StateArray
*)
2451 (mode_info
->atom_context
->bios
+ data_offset
+
2452 le16_to_cpu(power_info
->pplib
.usStateArrayOffset
));
2453 clock_info_array
= (struct _ClockInfoArray
*)
2454 (mode_info
->atom_context
->bios
+ data_offset
+
2455 le16_to_cpu(power_info
->pplib
.usClockInfoArrayOffset
));
2456 non_clock_info_array
= (struct _NonClockInfoArray
*)
2457 (mode_info
->atom_context
->bios
+ data_offset
+
2458 le16_to_cpu(power_info
->pplib
.usNonClockInfoArrayOffset
));
/* One zeroed radeon_ps per BIOS state entry. */
2460 rdev
->pm
.dpm
.ps
= kzalloc(sizeof(struct radeon_ps
) *
2461 state_array
->ucNumEntries
, GFP_KERNEL
);
2462 if (!rdev
->pm
.dpm
.ps
)
2464 power_state_offset
= (u8
*)state_array
->states
;
2465 rdev
->pm
.dpm
.platform_caps
= le32_to_cpu(power_info
->pplib
.ulPlatformCaps
);
2466 rdev
->pm
.dpm
.backbias_response_time
= le16_to_cpu(power_info
->pplib
.usBackbiasTime
);
2467 rdev
->pm
.dpm
.voltage_response_time
= le16_to_cpu(power_info
->pplib
.usVoltageTime
);
/* Outer loop: one iteration per BIOS power state. The state entries are
 * variable-length, so power_state_offset is advanced manually below. */
2468 for (i
= 0; i
< state_array
->ucNumEntries
; i
++) {
2470 power_state
= (union pplib_power_state
*)power_state_offset
;
2471 non_clock_array_index
= power_state
->v2
.nonClockInfoIndex
;
2472 non_clock_info
= (struct _ATOM_PPLIB_NONCLOCK_INFO
*)
2473 &non_clock_info_array
->nonClockInfo
[non_clock_array_index
];
2474 if (!rdev
->pm
.power_state
[i
].clock_info
)
/* Per-state KV-private data, freed in kv_dpm_fini(). */
2476 ps
= kzalloc(sizeof(struct kv_ps
), GFP_KERNEL
);
/* Allocation-failure cleanup path (error return not visible here). */
2478 kfree(rdev
->pm
.dpm
.ps
);
2481 rdev
->pm
.dpm
.ps
[i
].ps_priv
= ps
;
/* Inner loop: decode each DPM level referenced by this state, skipping
 * out-of-range indices and capping at SUMO_MAX_HARDWARE_POWERLEVELS. */
2483 idx
= (u8
*)&power_state
->v2
.clockInfoIndex
[0];
2484 for (j
= 0; j
< power_state
->v2
.ucNumDPMLevels
; j
++) {
2485 clock_array_index
= idx
[j
];
2486 if (clock_array_index
>= clock_info_array
->ucNumEntries
)
2488 if (k
>= SUMO_MAX_HARDWARE_POWERLEVELS
)
/* Entries are ucEntrySize bytes each; index into the raw byte array. */
2490 clock_info
= (union pplib_clock_info
*)
2491 ((u8
*)&clock_info_array
->clockInfo
[0] +
2492 (clock_array_index
* clock_info_array
->ucEntrySize
));
2493 kv_parse_pplib_clock_info(rdev
,
2494 &rdev
->pm
.dpm
.ps
[i
], k
,
2498 kv_parse_pplib_non_clock_info(rdev
, &rdev
->pm
.dpm
.ps
[i
],
2500 non_clock_info_array
->ucEntrySize
);
/* v2 state entry: 2 header bytes + one index byte per DPM level. */
2501 power_state_offset
+= 2 + power_state
->v2
.ucNumDPMLevels
;
2503 rdev
->pm
.dpm
.num_ps
= state_array
->ucNumEntries
;
/*
 * kv_dpm_init() - allocate and initialize the KV dynamic power management
 * state: set feature defaults, then parse the BIOS power/system tables
 * and construct the boot state.
 *
 * NOTE(review): mangled chunk; missing from this view are the
 * declarations of ret/i, NULL checks after kzalloc, the error returns
 * after each parse call, and the final 'return 0'.
 */
2507 int kv_dpm_init(struct radeon_device
*rdev
)
2509 struct kv_power_info
*pi
;
2512 pi
= kzalloc(sizeof(struct kv_power_info
), GFP_KERNEL
);
/* pi is owned by rdev->pm.dpm.priv and freed in kv_dpm_fini(). */
2515 rdev
->pm
.dpm
.priv
= pi
;
2517 ret
= r600_parse_extended_power_table(rdev
);
/* Default per-level transition thresholds. */
2521 for (i
= 0; i
< SUMO_MAX_HARDWARE_POWERLEVELS
; i
++)
2522 pi
->at
[i
] = TRINITY_AT_DFLT
;
2524 pi
->sram_end
= SMC_RAM_END
;
/* Kabini-specific high-voltage transition threshold (units per the
 * voltage tables used elsewhere in this driver). */
2526 if (rdev
->family
== CHIP_KABINI
)
2527 pi
->high_voltage_t
= 4001;
2529 pi
->enable_nb_dpm
= true;
/* Feature-capability defaults; DIDT is compiled out (enable_didt is
 * hard-set false, so the ramping caps below are never applied). */
2531 pi
->caps_power_containment
= true;
2532 pi
->caps_cac
= true;
2533 pi
->enable_didt
= false;
2534 if (pi
->enable_didt
) {
2535 pi
->caps_sq_ramping
= true;
2536 pi
->caps_db_ramping
= true;
2537 pi
->caps_td_ramping
= true;
2538 pi
->caps_tcp_ramping
= true;
2541 pi
->caps_sclk_ds
= true;
2542 pi
->enable_auto_thermal_throttling
= true;
2543 pi
->disable_nb_ps3_in_battery
= false;
2544 pi
->bapm_enable
= true;
2545 pi
->voltage_drop_t
= 0;
2546 pi
->caps_sclk_throttle_low_notification
= false;
2547 pi
->caps_fps
= false; /* true? */
2548 pi
->caps_uvd_pg
= true;
2549 pi
->caps_uvd_dpm
= true;
2550 pi
->caps_vce_pg
= false;
2551 pi
->caps_samu_pg
= false;
2552 pi
->caps_acp_pg
= false;
2553 pi
->caps_stable_p_state
= false;
/* Parse BIOS tables, patch voltages, and build the boot/power states. */
2555 ret
= kv_parse_sys_info_table(rdev
);
2559 kv_patch_voltage_values(rdev
);
2560 kv_construct_boot_state(rdev
);
2562 ret
= kv_parse_power_table(rdev
);
2566 pi
->enable_dpm
= true;
/*
 * kv_dpm_debugfs_print_current_performance_level() - print the currently
 * selected sclk DPM level, its engine clock and its voltage to a seq_file
 * (debugfs).
 *
 * NOTE(review): mangled chunk; missing lines include the seq_file
 * parameter 'm', the declarations of current_index/sclk/tmp/vddc, and the
 * assignment target for the TARGET_AND_CURRENT_PROFILE_INDEX read.
 */
2571 void kv_dpm_debugfs_print_current_performance_level(struct radeon_device
*rdev
,
2574 struct kv_power_info
*pi
= kv_get_pi(rdev
);
/* Read the hardware's current sclk DPM index from the SMC register. */
2576 (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX
) & CURR_SCLK_INDEX_MASK
) >>
2577 CURR_SCLK_INDEX_SHIFT
;
/* Guard against a bogus index before dereferencing graphics_level[]. */
2581 if (current_index
>= SMU__NUM_SCLK_DPM_STATE
) {
2582 seq_printf(m
, "invalid dpm profile %d\n", current_index
);
/* SMU table values are big-endian, hence be32_to_cpu here (unlike the
 * little-endian ATOM tables elsewhere in this file). */
2584 sclk
= be32_to_cpu(pi
->graphics_level
[current_index
].SclkFrequency
);
2585 tmp
= (RREG32_SMC(SMU_VOLTAGE_STATUS
) & SMU_VOLTAGE_CURRENT_LEVEL_MASK
) >>
2586 SMU_VOLTAGE_CURRENT_LEVEL_SHIFT
;
2587 vddc
= kv_convert_8bit_index_to_voltage(rdev
, (u16
)tmp
);
2588 seq_printf(m
, "power level %d sclk: %u vddc: %u\n",
2589 current_index
, sclk
, vddc
);
/*
 * kv_dpm_print_power_state() - dump one power state (class/caps, UVD
 * clocks, and every DPM level's sclk + voltage) to the kernel log.
 *
 * NOTE(review): mangled chunk; the declaration of loop variable 'i', the
 * first printk arguments on the per-level line (level index and pl->sclk,
 * presumably), and braces are not visible here.
 */
2593 void kv_dpm_print_power_state(struct radeon_device
*rdev
,
2594 struct radeon_ps
*rps
)
2597 struct kv_ps
*ps
= kv_get_ps(rps
);
/* Shared r600 helpers render the generic classification/caps fields. */
2599 r600_dpm_print_class_info(rps
->class, rps
->class2
);
2600 r600_dpm_print_cap_info(rps
->caps
);
2601 printk("\tuvd vclk: %d dclk: %d\n", rps
->vclk
, rps
->dclk
);
/* One log line per DPM level, converting the voltage index to mV-style
 * units via the 8-bit-index lookup. */
2602 for (i
= 0; i
< ps
->num_levels
; i
++) {
2603 struct kv_pl
*pl
= &ps
->levels
[i
];
2604 printk("\t\tpower level %d sclk: %u vddc: %u\n",
2606 kv_convert_8bit_index_to_voltage(rdev
, pl
->vddc_index
));
2608 r600_dpm_print_ps_status(rdev
, rps
);
/*
 * kv_dpm_fini() - tear down everything kv_dpm_init()/kv_parse_power_table()
 * allocated: each per-state kv_ps (ps_priv), the ps array, the private
 * kv_power_info, and the extended power table.
 *
 * NOTE(review): mangled chunk; loop variable declaration and braces are
 * not visible here.
 */
2611 void kv_dpm_fini(struct radeon_device
*rdev
)
/* Free the kv_ps attached to each state before freeing the array. */
2615 for (i
= 0; i
< rdev
->pm
.dpm
.num_ps
; i
++) {
2616 kfree(rdev
->pm
.dpm
.ps
[i
].ps_priv
);
2618 kfree(rdev
->pm
.dpm
.ps
);
2619 kfree(rdev
->pm
.dpm
.priv
);
2620 r600_free_extended_power_table(rdev
);
/*
 * kv_dpm_display_configuration_changed() - dpm callback invoked when the
 * display configuration changes. No body is visible in this chunk —
 * presumably an intentionally empty stub on KV; confirm against the full
 * file.
 */
2623 void kv_dpm_display_configuration_changed(struct radeon_device
*rdev
)
/*
 * kv_dpm_get_sclk() - return the engine clock of the requested power
 * state: level 0 when 'low' selects the lowest level, otherwise the last
 * (highest) level.
 *
 * NOTE(review): mangled chunk; the 'if (low)' line selecting between the
 * two returns is not visible here but is implied by the two return
 * statements and the 'low' parameter.
 */
2628 u32
kv_dpm_get_sclk(struct radeon_device
*rdev
, bool low
)
2630 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2631 struct kv_ps
*requested_state
= kv_get_ps(&pi
->requested_rps
);
2634 return requested_state
->levels
[0].sclk
;
2636 return requested_state
->levels
[requested_state
->num_levels
- 1].sclk
;
/*
 * kv_dpm_get_mclk() - return the memory clock. KV is an APU using UMA
 * memory, so the bootup UMA clock from the parsed system-info table is
 * returned regardless of 'low'.
 */
2639 u32
kv_dpm_get_mclk(struct radeon_device
*rdev
, bool low
)
2641 struct kv_power_info
*pi
= kv_get_pi(rdev
);
2643 return pi
->sys_info
.bootup_uma_clk
;