/*
 * drivers/gpu/drm/radeon/kv_dpm.c
 * DPM (dynamic power management) support for KB/KV (Kabini/Kaveri) APUs.
 */
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "drmP.h"
25#include "radeon.h"
26#include "cikd.h"
27#include "r600_dpm.h"
28#include "kv_dpm.h"
29
30#define KV_MAX_DEEPSLEEP_DIVIDER_ID 5
31#define KV_MINIMUM_ENGINE_CLOCK 800
32#define SMC_RAM_END 0x40000
33
34static void kv_init_graphics_levels(struct radeon_device *rdev);
35static int kv_calculate_ds_divider(struct radeon_device *rdev);
36static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
37static int kv_calculate_dpm_settings(struct radeon_device *rdev);
38static void kv_enable_new_levels(struct radeon_device *rdev);
39static void kv_program_nbps_index_settings(struct radeon_device *rdev,
40 struct radeon_ps *new_rps);
41static int kv_set_enabled_levels(struct radeon_device *rdev);
42static int kv_force_dpm_lowest(struct radeon_device *rdev);
43static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
44 struct radeon_ps *new_rps,
45 struct radeon_ps *old_rps);
46static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
47 int min_temp, int max_temp);
48static int kv_init_fps_limits(struct radeon_device *rdev);
49
50static void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
51static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate);
52static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate);
53static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate);
54
55extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
56extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
57extern void cik_update_cg(struct radeon_device *rdev,
58 u32 block, bool enable);
59
/*
 * Per-block local CAC (capacitance/leakage) configuration tables.
 * Entry layout is { block_id, signal_id, t }; kv_program_local_cac_table()
 * uses signal_id as the number of signals to program for the block.
 * Each table is terminated by a 0xffffffff block_id sentinel.
 */
static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 2 },
	{ 4, 1, 1 },
	{ 5, 5, 2 },
	{ 6, 6, 1 },
	{ 7, 9, 2 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 1 },
	{ 4, 1, 1 },
	{ 5, 5, 1 },
	{ 6, 6, 1 },
	{ 7, 9, 1 },
	{ 8, 4, 1 },
	{ 9, 2, 1 },
	{ 10, 3, 1 },
	{ 11, 6, 1 },
	{ 12, 8, 2 },
	{ 13, 1, 1 },
	{ 14, 2, 1 },
	{ 15, 3, 1 },
	{ 16, 1, 1 },
	{ 17, 4, 1 },
	{ 18, 3, 1 },
	{ 19, 1, 1 },
	{ 20, 8, 1 },
	{ 21, 5, 1 },
	{ 22, 1, 1 },
	{ 23, 1, 1 },
	{ 24, 4, 1 },
	{ 27, 6, 1 },
	{ 28, 1, 1 },
	{ 0xffffffff }
};
128
/*
 * Register descriptions for the local CAC blocks above.  Field order
 * presumably { cntl, block_mask, block_shift, signal_mask, signal_shift,
 * t_mask, t_shift, enable_mask, enable_shift } per struct
 * kv_lcac_config_reg -- TODO(review): confirm against kv_dpm.h.  The
 * tables differ only in the control register address (SMC space).
 */
static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
{
	{ 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
{
	{ 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
{
	{ 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
{
	{ 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
{
	{ 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
{
	{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
158
/*
 * DIDT (di/dt current-ramping limiter) register programming table,
 * consumed by kv_program_pt_config_registers().  Entry layout is
 * { offset, mask, shift, value, type } (field order grounded in that
 * function's accesses).  All entries here are in the DIDT indirect
 * register space; the table is terminated by offset 0xFFFFFFFF.
 */
static const struct kv_pt_config_reg didt_config_kv[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};
235
236static struct kv_ps *kv_get_ps(struct radeon_ps *rps)
237{
238 struct kv_ps *ps = rps->ps_priv;
239
240 return ps;
241}
242
243static struct kv_power_info *kv_get_pi(struct radeon_device *rdev)
244{
245 struct kv_power_info *pi = rdev->pm.dpm.priv;
246
247 return pi;
248}
249
#if 0
/*
 * Program one local CAC block table into the SMC.  For every table
 * entry, one control-register write is issued per signal (signal_id
 * doubles as the per-block signal count), packing block id, signal
 * index, the "t" weight and an enable bit using the mask/shift layout
 * described by @local_cac_reg.  Currently compiled out (unused).
 */
static void kv_program_local_cac_table(struct radeon_device *rdev,
				       const struct kv_lcac_config_values *local_cac_table,
				       const struct kv_lcac_config_reg *local_cac_reg)
{
	u32 i, count, data;
	const struct kv_lcac_config_values *values = local_cac_table;

	/* tables are terminated by a 0xffffffff block_id sentinel */
	while (values->block_id != 0xffffffff) {
		count = values->signal_id;
		for (i = 0; i < count; i++) {
			data = ((values->block_id << local_cac_reg->block_shift) &
				local_cac_reg->block_mask);
			data |= ((i << local_cac_reg->signal_shift) &
				 local_cac_reg->signal_mask);
			data |= ((values->t << local_cac_reg->t_shift) &
				 local_cac_reg->t_mask);
			/* always program the enable bit along with the fields */
			data |= ((1 << local_cac_reg->enable_shift) &
				 local_cac_reg->enable_mask);
			WREG32_SMC(local_cac_reg->cntl, data);
		}
		values++;
	}
}
#endif
275
/*
 * Walk a 0xFFFFFFFF-terminated table of {offset, mask, shift, value, type}
 * entries and program them into the appropriate register space (SMC
 * indirect, DIDT indirect, or direct MMIO with a dword offset).
 *
 * CACHE-type entries are not written immediately: their shifted value is
 * accumulated into 'cache' and OR'ed into the next non-cache write.  This
 * lets several fields of a single register be described by consecutive
 * table rows and committed with one write.
 *
 * Returns -EINVAL if @cac_config_regs is NULL, 0 otherwise.
 */
static int kv_program_pt_config_registers(struct radeon_device *rdev,
					  const struct kv_pt_config_reg *cac_config_regs)
{
	const struct kv_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == KV_CONFIGREG_CACHE) {
			/* defer: accumulate field for the next real write */
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case KV_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				/* direct MMIO: table stores dword offsets */
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			/* fold in any previously cached fields, then reset */
			data |= cache;
			cache = 0;

			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case KV_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
		}
		config_regs++;
	}

	return 0;
}
324
325static void kv_do_enable_didt(struct radeon_device *rdev, bool enable)
326{
327 struct kv_power_info *pi = kv_get_pi(rdev);
328 u32 data;
329
330 if (pi->caps_sq_ramping) {
331 data = RREG32_DIDT(DIDT_SQ_CTRL0);
332 if (enable)
333 data |= DIDT_CTRL_EN;
334 else
335 data &= ~DIDT_CTRL_EN;
336 WREG32_DIDT(DIDT_SQ_CTRL0, data);
337 }
338
339 if (pi->caps_db_ramping) {
340 data = RREG32_DIDT(DIDT_DB_CTRL0);
341 if (enable)
342 data |= DIDT_CTRL_EN;
343 else
344 data &= ~DIDT_CTRL_EN;
345 WREG32_DIDT(DIDT_DB_CTRL0, data);
346 }
347
348 if (pi->caps_td_ramping) {
349 data = RREG32_DIDT(DIDT_TD_CTRL0);
350 if (enable)
351 data |= DIDT_CTRL_EN;
352 else
353 data &= ~DIDT_CTRL_EN;
354 WREG32_DIDT(DIDT_TD_CTRL0, data);
355 }
356
357 if (pi->caps_tcp_ramping) {
358 data = RREG32_DIDT(DIDT_TCP_CTRL0);
359 if (enable)
360 data |= DIDT_CTRL_EN;
361 else
362 data &= ~DIDT_CTRL_EN;
363 WREG32_DIDT(DIDT_TCP_CTRL0, data);
364 }
365}
366
367static int kv_enable_didt(struct radeon_device *rdev, bool enable)
368{
369 struct kv_power_info *pi = kv_get_pi(rdev);
370 int ret;
371
372 if (pi->caps_sq_ramping ||
373 pi->caps_db_ramping ||
374 pi->caps_td_ramping ||
375 pi->caps_tcp_ramping) {
376 cik_enter_rlc_safe_mode(rdev);
377
378 if (enable) {
379 ret = kv_program_pt_config_registers(rdev, didt_config_kv);
380 if (ret) {
381 cik_exit_rlc_safe_mode(rdev);
382 return ret;
383 }
384 }
385
386 kv_do_enable_didt(rdev, enable);
387
388 cik_exit_rlc_safe_mode(rdev);
389 }
390
391 return 0;
392}
393
#if 0
/*
 * Initialize the hardware CAC manager: clear the override select/value
 * registers for each local CAC block (SX0, MC0-3, CPL) and program its
 * per-signal table.  Only runs when the CAC capability is present.
 * Currently compiled out (unused).
 */
static void kv_initialize_hardware_cac_manager(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	if (pi->caps_cac) {
		WREG32_SMC(LCAC_SX0_OVR_SEL, 0);
		WREG32_SMC(LCAC_SX0_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, sx_local_cac_cfg_kv, sx0_cac_config_reg);

		WREG32_SMC(LCAC_MC0_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC0_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);

		WREG32_SMC(LCAC_MC1_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC1_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);

		WREG32_SMC(LCAC_MC2_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC2_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);

		WREG32_SMC(LCAC_MC3_OVR_SEL, 0);
		WREG32_SMC(LCAC_MC3_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);

		WREG32_SMC(LCAC_CPL_OVR_SEL, 0);
		WREG32_SMC(LCAC_CPL_OVR_VAL, 0);
		kv_program_local_cac_table(rdev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
	}
}
#endif
426
427static int kv_enable_smc_cac(struct radeon_device *rdev, bool enable)
428{
429 struct kv_power_info *pi = kv_get_pi(rdev);
430 int ret = 0;
431
432 if (pi->caps_cac) {
433 if (enable) {
434 ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableCac);
435 if (ret)
436 pi->cac_enabled = false;
437 else
438 pi->cac_enabled = true;
439 } else if (pi->cac_enabled) {
440 kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableCac);
441 pi->cac_enabled = false;
442 }
443 }
444
445 return ret;
446}
447
448static int kv_process_firmware_header(struct radeon_device *rdev)
449{
450 struct kv_power_info *pi = kv_get_pi(rdev);
451 u32 tmp;
452 int ret;
453
454 ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
455 offsetof(SMU7_Firmware_Header, DpmTable),
456 &tmp, pi->sram_end);
457
458 if (ret == 0)
459 pi->dpm_table_start = tmp;
460
461 ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
462 offsetof(SMU7_Firmware_Header, SoftRegisters),
463 &tmp, pi->sram_end);
464
465 if (ret == 0)
466 pi->soft_regs_start = tmp;
467
468 return ret;
469}
470
471static int kv_enable_dpm_voltage_scaling(struct radeon_device *rdev)
472{
473 struct kv_power_info *pi = kv_get_pi(rdev);
474 int ret;
475
476 pi->graphics_voltage_change_enable = 1;
477
478 ret = kv_copy_bytes_to_smc(rdev,
479 pi->dpm_table_start +
480 offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
481 &pi->graphics_voltage_change_enable,
482 sizeof(u8), pi->sram_end);
483
484 return ret;
485}
486
487static int kv_set_dpm_interval(struct radeon_device *rdev)
488{
489 struct kv_power_info *pi = kv_get_pi(rdev);
490 int ret;
491
492 pi->graphics_interval = 1;
493
494 ret = kv_copy_bytes_to_smc(rdev,
495 pi->dpm_table_start +
496 offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
497 &pi->graphics_interval,
498 sizeof(u8), pi->sram_end);
499
500 return ret;
501}
502
503static int kv_set_dpm_boot_state(struct radeon_device *rdev)
504{
505 struct kv_power_info *pi = kv_get_pi(rdev);
506 int ret;
507
508 ret = kv_copy_bytes_to_smc(rdev,
509 pi->dpm_table_start +
510 offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
511 &pi->graphics_boot_level,
512 sizeof(u8), pi->sram_end);
513
514 return ret;
515}
516
/*
 * Program the voltage-change FTV register.  0x3FFFC000 is the magic
 * enable pattern used on this platform (NOTE(review): value taken
 * as-is from AMD; individual bit meanings not documented here).
 */
static void kv_program_vc(struct radeon_device *rdev)
{
	WREG32_SMC(CG_FTV_0, 0x3FFFC000);
}
521
/* Clear the voltage-change FTV register (inverse of kv_program_vc). */
static void kv_clear_vc(struct radeon_device *rdev)
{
	WREG32_SMC(CG_FTV_0, 0);
}
526
527static int kv_set_divider_value(struct radeon_device *rdev,
528 u32 index, u32 sclk)
529{
530 struct kv_power_info *pi = kv_get_pi(rdev);
531 struct atom_clock_dividers dividers;
532 int ret;
533
534 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
535 sclk, false, &dividers);
536 if (ret)
537 return ret;
538
539 pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
540 pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);
541
542 return 0;
543}
544
545static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
546 u16 voltage)
547{
548 return 6200 - (voltage * 25);
549}
550
551static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
552 u32 vid_2bit)
553{
554 struct kv_power_info *pi = kv_get_pi(rdev);
555 u32 vid_8bit = sumo_convert_vid2_to_vid7(rdev,
556 &pi->sys_info.vid_mapping_table,
557 vid_2bit);
558
559 return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
560}
561
562
563static int kv_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
564{
565 struct kv_power_info *pi = kv_get_pi(rdev);
566
567 pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
568 pi->graphics_level[index].MinVddNb =
569 cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev, vid));
570
571 return 0;
572}
573
574static int kv_set_at(struct radeon_device *rdev, u32 index, u32 at)
575{
576 struct kv_power_info *pi = kv_get_pi(rdev);
577
578 pi->graphics_level[index].AT = cpu_to_be16((u16)at);
579
580 return 0;
581}
582
/* Set or clear the EnabledForActivity flag of graphics DPM level @index. */
static void kv_dpm_power_level_enable(struct radeon_device *rdev,
				      u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
}
590
591static void kv_start_dpm(struct radeon_device *rdev)
592{
593 u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
594
595 tmp |= GLOBAL_PWRMGT_EN;
596 WREG32_SMC(GENERAL_PWRMGT, tmp);
597
598 kv_smc_dpm_enable(rdev, true);
599}
600
/* Ask the SMC to stop dynamic power management. */
static void kv_stop_dpm(struct radeon_device *rdev)
{
	kv_smc_dpm_enable(rdev, false);
}
605
606static void kv_start_am(struct radeon_device *rdev)
607{
608 u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);
609
610 sclk_pwrmgt_cntl &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
611 sclk_pwrmgt_cntl |= DYNAMIC_PM_EN;
612
613 WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
614}
615
616static void kv_reset_am(struct radeon_device *rdev)
617{
618 u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);
619
620 sclk_pwrmgt_cntl |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
621
622 WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
623}
624
625static int kv_freeze_sclk_dpm(struct radeon_device *rdev, bool freeze)
626{
627 return kv_notify_message_to_smu(rdev, freeze ?
628 PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
629}
630
/* Force the lowest valid DPM level (thin wrapper over kv_force_dpm_lowest). */
static int kv_force_lowest_valid(struct radeon_device *rdev)
{
	return kv_force_dpm_lowest(rdev);
}
635
/* Release any forced DPM level so the SMU picks levels on its own again. */
static int kv_unforce_levels(struct radeon_device *rdev)
{
	return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
}
640
641static int kv_update_sclk_t(struct radeon_device *rdev)
642{
643 struct kv_power_info *pi = kv_get_pi(rdev);
644 u32 low_sclk_interrupt_t = 0;
645 int ret = 0;
646
647 if (pi->caps_sclk_throttle_low_notification) {
648 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
649
650 ret = kv_copy_bytes_to_smc(rdev,
651 pi->dpm_table_start +
652 offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
653 (u8 *)&low_sclk_interrupt_t,
654 sizeof(u32), pi->sram_end);
655 }
656 return ret;
657}
658
659static int kv_program_bootup_state(struct radeon_device *rdev)
660{
661 struct kv_power_info *pi = kv_get_pi(rdev);
662 u32 i;
663 struct radeon_clock_voltage_dependency_table *table =
664 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
665
666 if (table && table->count) {
667 for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
668 if ((table->entries[i].clk == pi->boot_pl.sclk) ||
669 (i == 0))
670 break;
671 }
672
673 pi->graphics_boot_level = (u8)i;
674 kv_dpm_power_level_enable(rdev, i, true);
675 } else {
676 struct sumo_sclk_voltage_mapping_table *table =
677 &pi->sys_info.sclk_voltage_mapping_table;
678
679 if (table->num_max_dpm_entries == 0)
680 return -EINVAL;
681
682 for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
683 if ((table->entries[i].sclk_frequency == pi->boot_pl.sclk) ||
684 (i == 0))
685 break;
686 }
687
688 pi->graphics_boot_level = (u8)i;
689 kv_dpm_power_level_enable(rdev, i, true);
690 }
691 return 0;
692}
693
694static int kv_enable_auto_thermal_throttling(struct radeon_device *rdev)
695{
696 struct kv_power_info *pi = kv_get_pi(rdev);
697 int ret;
698
699 pi->graphics_therm_throttle_enable = 1;
700
701 ret = kv_copy_bytes_to_smc(rdev,
702 pi->dpm_table_start +
703 offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
704 &pi->graphics_therm_throttle_enable,
705 sizeof(u8), pi->sram_end);
706
707 return ret;
708}
709
710static int kv_upload_dpm_settings(struct radeon_device *rdev)
711{
712 struct kv_power_info *pi = kv_get_pi(rdev);
713 int ret;
714
715 ret = kv_copy_bytes_to_smc(rdev,
716 pi->dpm_table_start +
717 offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
718 (u8 *)&pi->graphics_level,
719 sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
720 pi->sram_end);
721
722 if (ret)
723 return ret;
724
725 ret = kv_copy_bytes_to_smc(rdev,
726 pi->dpm_table_start +
727 offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
728 &pi->graphics_dpm_level_count,
729 sizeof(u8), pi->sram_end);
730
731 return ret;
732}
733
734static u32 kv_get_clock_difference(u32 a, u32 b)
735{
736 return (a >= b) ? a - b : b - a;
737}
738
739static u32 kv_get_clk_bypass(struct radeon_device *rdev, u32 clk)
740{
741 struct kv_power_info *pi = kv_get_pi(rdev);
742 u32 value;
743
744 if (pi->caps_enable_dfs_bypass) {
745 if (kv_get_clock_difference(clk, 40000) < 200)
746 value = 3;
747 else if (kv_get_clock_difference(clk, 30000) < 200)
748 value = 2;
749 else if (kv_get_clock_difference(clk, 20000) < 200)
750 value = 7;
751 else if (kv_get_clock_difference(clk, 15000) < 200)
752 value = 6;
753 else if (kv_get_clock_difference(clk, 10000) < 200)
754 value = 8;
755 else
756 value = 0;
757 } else {
758 value = 0;
759 }
760
761 return value;
762}
763
/*
 * Build the UVD clock level table from the BIOS dependency table and
 * upload it (level count, sampling interval, level array) to SMC SRAM.
 * Returns 0 if there is no table to program, or the first error from
 * the ATOM divider lookup / SMC copies.
 */
static int kv_populate_uvd_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_uvd_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	/* nothing to do without a table - not an error */
	if (table == NULL || table->count == 0)
		return 0;

	pi->uvd_level_count = 0;
	for (i = 0; i < table->count; i++) {
		/* stop at entries above the high-voltage cutoff, if set */
		if (pi->high_voltage_t &&
		    (pi->high_voltage_t < table->entries[i].v))
			break;

		/* SMU wants multi-byte values big-endian */
		pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
		pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
		pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);

		pi->uvd_level[i].VClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].vclk);
		pi->uvd_level[i].DClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].dclk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].vclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].dclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;

		pi->uvd_level_count++;
	}

	/* upload the number of valid levels */
	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
				   (u8 *)&pi->uvd_level_count,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->uvd_interval = 1;

	/* then the sampling interval */
	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UVDInterval),
				   &pi->uvd_interval,
				   sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	/* and finally the full level array */
	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, UvdLevel),
				   (u8 *)&pi->uvd_level,
				   sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
				   pi->sram_end);

	return ret;

}
834
/*
 * Build the VCE clock level table from the BIOS dependency table and
 * upload it (level count, sampling interval, level array) to SMC SRAM.
 * Returns 0 if there is no table to program, or the first error from
 * the ATOM divider lookup / SMC copies.
 */
static int kv_populate_vce_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;
	u32 i;
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;

	/* nothing to do without a table - not an error */
	if (table == NULL || table->count == 0)
		return 0;

	pi->vce_level_count = 0;
	for (i = 0; i < table->count; i++) {
		/* stop at entries above the high-voltage cutoff, if set */
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		/* SMU wants multi-byte values big-endian */
		pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
		pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->vce_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(rdev, table->entries[i].evclk);

		ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
						     table->entries[i].evclk, false, &dividers);
		if (ret)
			return ret;
		pi->vce_level[i].Divider = (u8)dividers.post_div;

		pi->vce_level_count++;
	}

	/* upload the number of valid levels */
	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
				   (u8 *)&pi->vce_level_count,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	pi->vce_interval = 1;

	/* then the sampling interval */
	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VCEInterval),
				   (u8 *)&pi->vce_interval,
				   sizeof(u8),
				   pi->sram_end);
	if (ret)
		return ret;

	/* and finally the full level array */
	ret = kv_copy_bytes_to_smc(rdev,
				   pi->dpm_table_start +
				   offsetof(SMU7_Fusion_DpmTable, VceLevel),
				   (u8 *)&pi->vce_level,
				   sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
				   pi->sram_end);

	return ret;
}
897
898static int kv_populate_samu_table(struct radeon_device *rdev)
899{
900 struct kv_power_info *pi = kv_get_pi(rdev);
901 struct radeon_clock_voltage_dependency_table *table =
902 &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
903 struct atom_clock_dividers dividers;
904 int ret;
905 u32 i;
906
907 if (table == NULL || table->count == 0)
908 return 0;
909
910 pi->samu_level_count = 0;
911 for (i = 0; i < table->count; i++) {
912 if (pi->high_voltage_t &&
913 pi->high_voltage_t < table->entries[i].v)
914 break;
915
916 pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
917 pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
918
919 pi->samu_level[i].ClkBypassCntl =
920 (u8)kv_get_clk_bypass(rdev, table->entries[i].clk);
921
922 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
923 table->entries[i].clk, false, &dividers);
924 if (ret)
925 return ret;
926 pi->samu_level[i].Divider = (u8)dividers.post_div;
927
928 pi->samu_level_count++;
929 }
930
931 ret = kv_copy_bytes_to_smc(rdev,
932 pi->dpm_table_start +
933 offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
934 (u8 *)&pi->samu_level_count,
935 sizeof(u8),
936 pi->sram_end);
937 if (ret)
938 return ret;
939
940 pi->samu_interval = 1;
941
942 ret = kv_copy_bytes_to_smc(rdev,
943 pi->dpm_table_start +
944 offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
945 (u8 *)&pi->samu_interval,
946 sizeof(u8),
947 pi->sram_end);
948 if (ret)
949 return ret;
950
951 ret = kv_copy_bytes_to_smc(rdev,
952 pi->dpm_table_start +
953 offsetof(SMU7_Fusion_DpmTable, SamuLevel),
954 (u8 *)&pi->samu_level,
955 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
956 pi->sram_end);
957 if (ret)
958 return ret;
959
960 return ret;
961}
962
963
964static int kv_populate_acp_table(struct radeon_device *rdev)
965{
966 struct kv_power_info *pi = kv_get_pi(rdev);
967 struct radeon_clock_voltage_dependency_table *table =
968 &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
969 struct atom_clock_dividers dividers;
970 int ret;
971 u32 i;
972
973 if (table == NULL || table->count == 0)
974 return 0;
975
976 pi->acp_level_count = 0;
977 for (i = 0; i < table->count; i++) {
978 pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
979 pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
980
981 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
982 table->entries[i].clk, false, &dividers);
983 if (ret)
984 return ret;
985 pi->acp_level[i].Divider = (u8)dividers.post_div;
986
987 pi->acp_level_count++;
988 }
989
990 ret = kv_copy_bytes_to_smc(rdev,
991 pi->dpm_table_start +
992 offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
993 (u8 *)&pi->acp_level_count,
994 sizeof(u8),
995 pi->sram_end);
996 if (ret)
997 return ret;
998
999 pi->acp_interval = 1;
1000
1001 ret = kv_copy_bytes_to_smc(rdev,
1002 pi->dpm_table_start +
1003 offsetof(SMU7_Fusion_DpmTable, ACPInterval),
1004 (u8 *)&pi->acp_interval,
1005 sizeof(u8),
1006 pi->sram_end);
1007 if (ret)
1008 return ret;
1009
1010 ret = kv_copy_bytes_to_smc(rdev,
1011 pi->dpm_table_start +
1012 offsetof(SMU7_Fusion_DpmTable, AcpLevel),
1013 (u8 *)&pi->acp_level,
1014 sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
1015 pi->sram_end);
1016 if (ret)
1017 return ret;
1018
1019 return ret;
1020}
1021
1022static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev)
1023{
1024 struct kv_power_info *pi = kv_get_pi(rdev);
1025 u32 i;
1026 struct radeon_clock_voltage_dependency_table *table =
1027 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1028
1029 if (table && table->count) {
1030 for (i = 0; i < pi->graphics_dpm_level_count; i++) {
1031 if (pi->caps_enable_dfs_bypass) {
1032 if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
1033 pi->graphics_level[i].ClkBypassCntl = 3;
1034 else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
1035 pi->graphics_level[i].ClkBypassCntl = 2;
1036 else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
1037 pi->graphics_level[i].ClkBypassCntl = 7;
1038 else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200)
1039 pi->graphics_level[i].ClkBypassCntl = 6;
1040 else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200)
1041 pi->graphics_level[i].ClkBypassCntl = 8;
1042 else
1043 pi->graphics_level[i].ClkBypassCntl = 0;
1044 } else {
1045 pi->graphics_level[i].ClkBypassCntl = 0;
1046 }
1047 }
1048 } else {
1049 struct sumo_sclk_voltage_mapping_table *table =
1050 &pi->sys_info.sclk_voltage_mapping_table;
1051 for (i = 0; i < pi->graphics_dpm_level_count; i++) {
1052 if (pi->caps_enable_dfs_bypass) {
1053 if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
1054 pi->graphics_level[i].ClkBypassCntl = 3;
1055 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
1056 pi->graphics_level[i].ClkBypassCntl = 2;
1057 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
1058 pi->graphics_level[i].ClkBypassCntl = 7;
1059 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
1060 pi->graphics_level[i].ClkBypassCntl = 6;
1061 else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
1062 pi->graphics_level[i].ClkBypassCntl = 8;
1063 else
1064 pi->graphics_level[i].ClkBypassCntl = 0;
1065 } else {
1066 pi->graphics_level[i].ClkBypassCntl = 0;
1067 }
1068 }
1069 }
1070}
1071
1072static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
1073{
1074 return kv_notify_message_to_smu(rdev, enable ?
1075 PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
1076}
1077
1078static void kv_update_current_ps(struct radeon_device *rdev,
1079 struct radeon_ps *rps)
1080{
1081 struct kv_ps *new_ps = kv_get_ps(rps);
1082 struct kv_power_info *pi = kv_get_pi(rdev);
1083
1084 pi->current_rps = *rps;
1085 pi->current_ps = *new_ps;
1086 pi->current_rps.ps_priv = &pi->current_ps;
1087}
1088
1089static void kv_update_requested_ps(struct radeon_device *rdev,
1090 struct radeon_ps *rps)
1091{
1092 struct kv_ps *new_ps = kv_get_ps(rps);
1093 struct kv_power_info *pi = kv_get_pi(rdev);
1094
1095 pi->requested_rps = *rps;
1096 pi->requested_ps = *new_ps;
1097 pi->requested_rps.ps_priv = &pi->requested_ps;
1098}
1099
/**
 * kv_dpm_enable - bring up dynamic power management on KV/KB
 * @rdev: radeon device
 *
 * Programs the SMU DPM tables (graphics/UVD/VCE/SAMU/ACP), starts the
 * auto manager and DPM, enables DIDT/CAC/ULV, sets up the thermal
 * interrupt range, and power-gates currently unused IP blocks.
 * The call order follows the SMU bring-up sequence and must not be
 * rearranged.
 *
 * Returns 0 on success, negative error code on failure.
 */
int kv_dpm_enable(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	int ret;

	/* locate the SMU DPM/soft-register tables in SMC RAM */
	ret = kv_process_firmware_header(rdev);
	if (ret) {
		DRM_ERROR("kv_process_firmware_header failed\n");
		return ret;
	}
	kv_init_fps_limits(rdev);
	kv_init_graphics_levels(rdev);
	ret = kv_program_bootup_state(rdev);
	if (ret) {
		DRM_ERROR("kv_program_bootup_state failed\n");
		return ret;
	}
	kv_calculate_dfs_bypass_settings(rdev);
	/* push the graphics level table to the SMU */
	ret = kv_upload_dpm_settings(rdev);
	if (ret) {
		DRM_ERROR("kv_upload_dpm_settings failed\n");
		return ret;
	}
	ret = kv_populate_uvd_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_uvd_table failed\n");
		return ret;
	}
	ret = kv_populate_vce_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_vce_table failed\n");
		return ret;
	}
	ret = kv_populate_samu_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_samu_table failed\n");
		return ret;
	}
	ret = kv_populate_acp_table(rdev);
	if (ret) {
		DRM_ERROR("kv_populate_acp_table failed\n");
		return ret;
	}
	kv_program_vc(rdev);
#if 0
	kv_initialize_hardware_cac_manager(rdev);
#endif
	/* start the SMU auto manager before enabling throttling/DPM */
	kv_start_am(rdev);
	if (pi->enable_auto_thermal_throttling) {
		ret = kv_enable_auto_thermal_throttling(rdev);
		if (ret) {
			DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
			return ret;
		}
	}
	ret = kv_enable_dpm_voltage_scaling(rdev);
	if (ret) {
		DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
		return ret;
	}
	ret = kv_set_dpm_interval(rdev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_interval failed\n");
		return ret;
	}
	ret = kv_set_dpm_boot_state(rdev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_boot_state failed\n");
		return ret;
	}
	ret = kv_enable_ulv(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_ulv failed\n");
		return ret;
	}
	kv_start_dpm(rdev);
	ret = kv_enable_didt(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_didt failed\n");
		return ret;
	}
	ret = kv_enable_smc_cac(rdev, true);
	if (ret) {
		DRM_ERROR("kv_enable_smc_cac failed\n");
		return ret;
	}

	/* hook up the thermal interrupt if an internal sensor is present */
	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret) {
			DRM_ERROR("kv_set_thermal_temperature_range failed\n");
			return ret;
		}
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
	}

	/* powerdown unused blocks for now */
	kv_dpm_powergate_acp(rdev, true);
	kv_dpm_powergate_samu(rdev, true);
	kv_dpm_powergate_vce(rdev, true);

	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);

	return ret;
}
1207
/**
 * kv_dpm_disable - tear down dynamic power management
 * @rdev: radeon device
 *
 * Reverses kv_dpm_enable() in roughly the opposite order (CAC/DIDT off,
 * stop DPM, disable ULV, reset the auto manager) and records the bootup
 * state as the current power state.
 */
void kv_dpm_disable(struct radeon_device *rdev)
{
	kv_enable_smc_cac(rdev, false);
	kv_enable_didt(rdev, false);
	kv_clear_vc(rdev);
	kv_stop_dpm(rdev);
	kv_enable_ulv(rdev, false);
	kv_reset_am(rdev);

	kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
}
1219
#if 0
/* Currently unused: write a 32-bit value into an SMU soft register.
 * NOTE(review): copies only sizeof(u16) bytes of a u32 @value -- on a
 * big-endian host this would copy the wrong half; confirm intent before
 * enabling this code. */
static int kv_write_smc_soft_register(struct radeon_device *rdev,
				      u16 reg_offset, u32 value)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	return kv_copy_bytes_to_smc(rdev, pi->soft_regs_start + reg_offset,
				    (u8 *)&value, sizeof(u16), pi->sram_end);
}

/* Currently unused: read a 32-bit SMU soft register into @value. */
static int kv_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	return kv_read_smc_sram_dword(rdev, pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
#endif
1239
1240static void kv_init_sclk_t(struct radeon_device *rdev)
1241{
1242 struct kv_power_info *pi = kv_get_pi(rdev);
1243
1244 pi->low_sclk_interrupt_t = 0;
1245}
1246
1247static int kv_init_fps_limits(struct radeon_device *rdev)
1248{
1249 struct kv_power_info *pi = kv_get_pi(rdev);
1250 int ret = 0;
1251
1252 if (pi->caps_fps) {
1253 u16 tmp;
1254
1255 tmp = 45;
1256 pi->fps_high_t = cpu_to_be16(tmp);
1257 ret = kv_copy_bytes_to_smc(rdev,
1258 pi->dpm_table_start +
1259 offsetof(SMU7_Fusion_DpmTable, FpsHighT),
1260 (u8 *)&pi->fps_high_t,
1261 sizeof(u16), pi->sram_end);
1262
1263 tmp = 30;
1264 pi->fps_low_t = cpu_to_be16(tmp);
1265
1266 ret = kv_copy_bytes_to_smc(rdev,
1267 pi->dpm_table_start +
1268 offsetof(SMU7_Fusion_DpmTable, FpsLowT),
1269 (u8 *)&pi->fps_low_t,
1270 sizeof(u16), pi->sram_end);
1271
1272 }
1273 return ret;
1274}
1275
1276static void kv_init_powergate_state(struct radeon_device *rdev)
1277{
1278 struct kv_power_info *pi = kv_get_pi(rdev);
1279
1280 pi->uvd_power_gated = false;
1281 pi->vce_power_gated = false;
1282 pi->samu_power_gated = false;
1283 pi->acp_power_gated = false;
1284
1285}
1286
1287static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
1288{
1289 return kv_notify_message_to_smu(rdev, enable ?
1290 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
1291}
1292
#if 0
/* Currently unused: enable or disable VCE DPM via an SMU message. */
static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable)
{
	return kv_notify_message_to_smu(rdev, enable ?
					PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
}
#endif
1300
1301static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable)
1302{
1303 return kv_notify_message_to_smu(rdev, enable ?
1304 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
1305}
1306
1307static int kv_enable_acp_dpm(struct radeon_device *rdev, bool enable)
1308{
1309 return kv_notify_message_to_smu(rdev, enable ?
1310 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
1311}
1312
1313static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
1314{
1315 struct kv_power_info *pi = kv_get_pi(rdev);
1316 struct radeon_uvd_clock_voltage_dependency_table *table =
1317 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
1318 int ret;
1319
1320 if (!gate) {
1321 if (!pi->caps_uvd_dpm || table->count || pi->caps_stable_p_state)
1322 pi->uvd_boot_level = table->count - 1;
1323 else
1324 pi->uvd_boot_level = 0;
1325
1326 ret = kv_copy_bytes_to_smc(rdev,
1327 pi->dpm_table_start +
1328 offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
1329 (uint8_t *)&pi->uvd_boot_level,
1330 sizeof(u8), pi->sram_end);
1331 if (ret)
1332 return ret;
1333
1334 if (!pi->caps_uvd_dpm ||
1335 pi->caps_stable_p_state)
1336 kv_send_msg_to_smc_with_parameter(rdev,
1337 PPSMC_MSG_UVDDPM_SetEnabledMask,
1338 (1 << pi->uvd_boot_level));
1339 }
1340
1341 return kv_enable_uvd_dpm(rdev, !gate);
1342}
1343
#if 0
/* Currently unused: return the index of the first VCE table entry.
 * NOTE(review): evclk is presumably unsigned, so "evclk >= 0" is always
 * true and this returns 0 whenever the table is non-empty (the XXX below
 * marks the placeholder comparison). */
static u8 kv_get_vce_boot_level(struct radeon_device *rdev)
{
	u8 i;
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].evclk >= 0) /* XXX */
			break;
	}

	return i;
}

/* Currently unused: enable VCE DPM on a 0 -> nonzero evclk transition and
 * disable it on the reverse transition, programming the VCE boot level
 * (and pinning the enabled mask in stable-p-state) before enabling. */
static int kv_update_vce_dpm(struct radeon_device *rdev,
			     struct radeon_ps *radeon_new_state,
			     struct radeon_ps *radeon_current_state)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_vce_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	int ret;

	if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) {
		if (pi->caps_stable_p_state)
			pi->vce_boot_level = table->count - 1;
		else
			pi->vce_boot_level = kv_get_vce_boot_level(rdev);

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
					   (u8 *)&pi->vce_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_VCEDPM_SetEnabledMask,
							  (1 << pi->vce_boot_level));

		kv_enable_vce_dpm(rdev, true);
	} else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) {
		kv_enable_vce_dpm(rdev, false);
	}

	return 0;
}
#endif
1396
/* Program the SAMU boot level and (un)gate SAMU DPM.
 * NOTE(review): with caps_stable_p_state set and table->count == 0,
 * "table->count - 1" underflows to 0xff -- confirm the table is always
 * non-empty on parts reporting stable-p-state.
 * Returns 0 on success, negative error code on failure. */
static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		/* stable-p-state: highest table entry; otherwise level 0 */
		if (pi->caps_stable_p_state)
			pi->samu_boot_level = table->count - 1;
		else
			pi->samu_boot_level = 0;

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
					   (u8 *)&pi->samu_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		/* pin the enabled-level mask to the boot level */
		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_SAMUDPM_SetEnabledMask,
							  (1 << pi->samu_boot_level));
	}

	return kv_enable_samu_dpm(rdev, !gate);
}
1427
/* Program the ACP boot level and (un)gate ACP DPM.
 * NOTE(review): same table->count == 0 underflow risk as
 * kv_update_samu_dpm() when caps_stable_p_state is set -- confirm.
 * Returns 0 on success, negative error code on failure. */
static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		/* stable-p-state: highest table entry; otherwise level 0 */
		if (pi->caps_stable_p_state)
			pi->acp_boot_level = table->count - 1;
		else
			pi->acp_boot_level = 0;

		ret = kv_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
					   (u8 *)&pi->acp_boot_level,
					   sizeof(u8),
					   pi->sram_end);
		if (ret)
			return ret;

		/* pin the enabled-level mask to the boot level */
		if (pi->caps_stable_p_state)
			kv_send_msg_to_smc_with_parameter(rdev,
							  PPSMC_MSG_ACPDPM_SetEnabledMask,
							  (1 << pi->acp_boot_level));
	}

	return kv_enable_acp_dpm(rdev, !gate);
}
1458
/* Power gate or ungate the UVD block.
 * Ordering matters: when gating, UVD DPM is stopped before power-off;
 * when ungating, power is restored before UVD DPM restarts. */
static void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	/* nothing to do if already in the requested state */
	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	if (gate) {
		kv_update_uvd_dpm(rdev, true);
		if (pi->caps_uvd_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF);
	} else {
		if (pi->caps_uvd_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON);
		kv_update_uvd_dpm(rdev, false);
	}
}
1478
1479static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate)
1480{
1481 struct kv_power_info *pi = kv_get_pi(rdev);
1482
1483 if (pi->vce_power_gated == gate)
1484 return;
1485
1486 pi->vce_power_gated = gate;
1487
1488 if (gate) {
1489 if (pi->caps_vce_pg)
1490 kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF);
1491 } else {
1492 if (pi->caps_vce_pg)
1493 kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON);
1494 }
1495}
1496
/* Power gate or ungate the SAMU block.
 * Ordering matters: DPM is stopped before power-off and restarted only
 * after power-on. */
static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	/* nothing to do if already in the requested state */
	if (pi->samu_power_gated == gate)
		return;

	pi->samu_power_gated = gate;

	if (gate) {
		kv_update_samu_dpm(rdev, true);
		if (pi->caps_samu_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerOFF);
	} else {
		if (pi->caps_samu_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerON);
		kv_update_samu_dpm(rdev, false);
	}
}
1516
/* Power gate or ungate the ACP block.
 * Kabini has no ACP, so it bails out early there. Ordering matters:
 * DPM is stopped before power-off and restarted only after power-on. */
static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	/* nothing to do if already in the requested state */
	if (pi->acp_power_gated == gate)
		return;

	/* no ACP block on Kabini */
	if (rdev->family == CHIP_KABINI)
		return;

	pi->acp_power_gated = gate;

	if (gate) {
		kv_update_acp_dpm(rdev, true);
		if (pi->caps_acp_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerOFF);
	} else {
		if (pi->caps_acp_pg)
			kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerON);
		kv_update_acp_dpm(rdev, false);
	}
}
1539
/* Compute pi->lowest_valid / pi->highest_valid: the window of graphics
 * DPM levels that brackets the new state's sclk range, using the vddc
 * dependency table if present, else the sysinfo mapping table. If the
 * window would be empty (lowest > highest), it is collapsed to whichever
 * single level is closer to the requested range.
 * NOTE(review): the downward loops use an unsigned index, so "i >= 0" is
 * always true -- termination relies entirely on the "(i == 0)" break
 * inside the condition. */
static void kv_set_valid_clock_range(struct radeon_device *rdev,
				     struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		/* first level whose clock reaches the lowest requested sclk */
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
			    (i == (pi->graphics_dpm_level_count - 1))) {
				pi->lowest_valid = i;
				break;
			}
		}

		/* last level whose clock fits under the highest requested sclk */
		for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
			if ((table->entries[i].clk <= new_ps->levels[new_ps->num_levels -1].sclk) ||
			    (i == 0)) {
				pi->highest_valid = i;
				break;
			}
		}

		/* empty window: pick the single closest level */
		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
			    (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid =  pi->highest_valid;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		/* same walk against the sysinfo sclk/voltage mapping table */
		for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
			if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
			    i == (int)(pi->graphics_dpm_level_count - 1)) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
			if (table->entries[i].sclk_frequency <=
			    new_ps->levels[new_ps->num_levels - 1].sclk ||
			    i == 0) {
				pi->highest_valid = i;
				break;
			}
		}

		/* empty window: pick the single closest level */
		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk -
			     table->entries[pi->highest_valid].sclk_frequency) >
			    (table->entries[pi->lowest_valid].sclk_frequency -
			     new_ps->levels[new_ps->num_levels -1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid =  pi->highest_valid;
		}
	}
}
1605
1606static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
1607 struct radeon_ps *new_rps)
1608{
1609 struct kv_ps *new_ps = kv_get_ps(new_rps);
1610 struct kv_power_info *pi = kv_get_pi(rdev);
1611 int ret = 0;
1612 u8 clk_bypass_cntl;
1613
1614 if (pi->caps_enable_dfs_bypass) {
1615 clk_bypass_cntl = new_ps->need_dfs_bypass ?
1616 pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
1617 ret = kv_copy_bytes_to_smc(rdev,
1618 (pi->dpm_table_start +
1619 offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
1620 (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
1621 offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
1622 &clk_bypass_cntl,
1623 sizeof(u8), pi->sram_end);
1624 }
1625
1626 return ret;
1627}
1628
1629static int kv_enable_nb_dpm(struct radeon_device *rdev)
1630{
1631 struct kv_power_info *pi = kv_get_pi(rdev);
1632 int ret = 0;
1633
1634 if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
1635 ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
1636 if (ret == 0)
1637 pi->nb_dpm_enabled = true;
1638 }
1639
1640 return ret;
1641}
1642
/**
 * kv_dpm_pre_set_power_state - prepare the requested power state
 * @rdev: radeon device
 *
 * Copies the requested state into pi->requested_rps/requested_ps and
 * applies the KV/KB state adjustment rules against the current state.
 *
 * Returns 0 (always succeeds).
 */
int kv_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	/* local copy so adjustments don't touch the caller's state */
	struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
	struct radeon_ps *new_ps = &requested_ps;

	kv_update_requested_ps(rdev, new_ps);

	kv_apply_state_adjust_rules(rdev,
				    &pi->requested_rps,
				    &pi->current_rps);

	return 0;
}
1657
/**
 * kv_dpm_set_power_state - program the requested power state into the SMU
 * @rdev: radeon device
 *
 * Recomputes the valid level window, dividers and NB settings for the
 * requested state and uploads them. Kabini forces the lowest level while
 * reprogramming (it lacks the sclk DPM freeze used on other parts, which
 * freeze/unfreeze around the upload instead). Sequence is SMU-mandated;
 * do not reorder.
 *
 * Returns 0 on success, negative error code on failure.
 */
int kv_dpm_set_power_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_ps *new_ps = &pi->requested_rps;
	/*struct radeon_ps *old_ps = &pi->current_rps;*/
	int ret;

	if (rdev->family == CHIP_KABINI) {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(rdev, new_ps);
			kv_update_dfs_bypass_settings(rdev, new_ps);
			ret = kv_calculate_ds_divider(rdev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(rdev);
			kv_calculate_dpm_settings(rdev);
			/* pin to lowest level while the tables are rewritten */
			kv_force_lowest_valid(rdev);
			kv_enable_new_levels(rdev);
			kv_upload_dpm_settings(rdev);
			kv_program_nbps_index_settings(rdev, new_ps);
			kv_unforce_levels(rdev);
			kv_set_enabled_levels(rdev);
			kv_force_lowest_valid(rdev);
			kv_unforce_levels(rdev);
#if 0
			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
#endif
			kv_update_uvd_dpm(rdev, false);
			kv_update_sclk_t(rdev);
		}
	} else {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(rdev, new_ps);
			kv_update_dfs_bypass_settings(rdev, new_ps);
			ret = kv_calculate_ds_divider(rdev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(rdev);
			kv_calculate_dpm_settings(rdev);
			/* freeze sclk DPM while the tables are rewritten */
			kv_freeze_sclk_dpm(rdev, true);
			kv_upload_dpm_settings(rdev);
			kv_program_nbps_index_settings(rdev, new_ps);
			kv_freeze_sclk_dpm(rdev, false);
			kv_set_enabled_levels(rdev);
#if 0
			ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
#endif
			kv_update_uvd_dpm(rdev, false);
			kv_update_sclk_t(rdev);
			kv_enable_nb_dpm(rdev);
		}
	}
	return 0;
}
1724
1725void kv_dpm_post_set_power_state(struct radeon_device *rdev)
1726{
1727 struct kv_power_info *pi = kv_get_pi(rdev);
1728 struct radeon_ps *new_ps = &pi->requested_rps;
1729
1730 kv_update_current_ps(rdev, new_ps);
1731}
1732
/* One-time asic setup for dpm: hand clock control to the SMU, then reset
 * the powergate bookkeeping and sclk threshold. */
void kv_dpm_setup_asic(struct radeon_device *rdev)
{
	sumo_take_smu_control(rdev, true);
	kv_init_powergate_state(rdev);
	kv_init_sclk_t(rdev);
}
1739
/* Reset dpm state: drop to the lowest level, reprogram the graphics level
 * tables from scratch, and re-upload them to the SMU. */
void kv_dpm_reset_asic(struct radeon_device *rdev)
{
	kv_force_lowest_valid(rdev);
	kv_init_graphics_levels(rdev);
	kv_program_bootup_state(rdev);
	kv_upload_dpm_settings(rdev);
	kv_force_lowest_valid(rdev);
	kv_unforce_levels(rdev);
}
1749
1750//XXX use sumo_dpm_display_configuration_changed
1751
1752static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
1753 struct radeon_clock_and_voltage_limits *table)
1754{
1755 struct kv_power_info *pi = kv_get_pi(rdev);
1756
1757 if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
1758 int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
1759 table->sclk =
1760 pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
1761 table->vddc =
1762 kv_convert_2bit_index_to_voltage(rdev,
1763 pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
1764 }
1765
1766 table->mclk = pi->sys_info.nbp_memory_clock[0];
1767}
1768
1769static void kv_patch_voltage_values(struct radeon_device *rdev)
1770{
1771 int i;
1772 struct radeon_uvd_clock_voltage_dependency_table *table =
1773 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
1774
1775 if (table->count) {
1776 for (i = 0; i < table->count; i++)
1777 table->entries[i].v =
1778 kv_convert_8bit_index_to_voltage(rdev,
1779 table->entries[i].v);
1780 }
1781
1782}
1783
/* Build the bootup power level from the VBIOS sysinfo values; no deep
 * sleep / spread spectrum dividers, GNB allowed to run slow, no forced
 * NB p-state, and zeroed watermarks. */
static void kv_construct_boot_state(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);

	pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
	pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
	pi->boot_pl.ds_divider_index = 0;
	pi->boot_pl.ss_divider_index = 0;
	pi->boot_pl.allow_gnb_slow = 1;
	pi->boot_pl.force_nbp_state = 0;
	pi->boot_pl.display_wm = 0;
	pi->boot_pl.vce_wm = 0;
}
1797
/* Force the SMU to the lowest currently-enabled graphics DPM level.
 * NOTE(review): if the enable mask is empty, i ends up equal to
 * SMU7_MAX_LEVELS_GRAPHICS and that out-of-range level is forced --
 * presumably the mask is never empty here; confirm against callers. */
static int kv_force_dpm_lowest(struct radeon_device *rdev)
{
	int ret;
	u32 enable_mask, i;

	ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
	if (ret)
		return ret;

	/* lowest set bit == lowest enabled level */
	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (enable_mask & (1 << i))
			break;
	}

	return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
}
1814
1815static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
1816 u32 sclk, u32 min_sclk_in_sr)
1817{
1818 struct kv_power_info *pi = kv_get_pi(rdev);
1819 u32 i;
1820 u32 temp;
1821 u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ?
1822 min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK;
1823
1824 if (sclk < min)
1825 return 0;
1826
1827 if (!pi->caps_sclk_ds)
1828 return 0;
1829
1830 for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i <= 0; i--) {
1831 temp = sclk / sumo_get_sleep_divider_from_id(i);
1832 if ((temp >= min) || (i == 0))
1833 break;
1834 }
1835
1836 return (u8)i;
1837}
1838
1839static int kv_get_high_voltage_limit(struct radeon_device *rdev, int *limit)
1840{
1841 struct kv_power_info *pi = kv_get_pi(rdev);
1842 struct radeon_clock_voltage_dependency_table *table =
1843 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1844 int i;
1845
1846 if (table && table->count) {
1847 for (i = table->count - 1; i >= 0; i--) {
1848 if (pi->high_voltage_t &&
1849 (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <=
1850 pi->high_voltage_t)) {
1851 *limit = i;
1852 return 0;
1853 }
1854 }
1855 } else {
1856 struct sumo_sclk_voltage_mapping_table *table =
1857 &pi->sys_info.sclk_voltage_mapping_table;
1858
1859 for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
1860 if (pi->high_voltage_t &&
1861 (kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <=
1862 pi->high_voltage_t)) {
1863 *limit = i;
1864 return 0;
1865 }
1866 }
1867 }
1868
1869 *limit = 0;
1870 return 0;
1871}
1872
1873static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
1874 struct radeon_ps *new_rps,
1875 struct radeon_ps *old_rps)
1876{
1877 struct kv_ps *ps = kv_get_ps(new_rps);
1878 struct kv_power_info *pi = kv_get_pi(rdev);
1879 u32 min_sclk = 10000; /* ??? */
1880 u32 sclk, mclk = 0;
1881 int i, limit;
1882 bool force_high;
1883 struct radeon_clock_voltage_dependency_table *table =
1884 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
1885 u32 stable_p_state_sclk = 0;
1886 struct radeon_clock_and_voltage_limits *max_limits =
1887 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
1888
1889 mclk = max_limits->mclk;
1890 sclk = min_sclk;
1891
1892 if (pi->caps_stable_p_state) {
1893 stable_p_state_sclk = (max_limits->sclk * 75) / 100;
1894
1895 for (i = table->count - 1; i >= 0; i++) {
1896 if (stable_p_state_sclk >= table->entries[i].clk) {
1897 stable_p_state_sclk = table->entries[i].clk;
1898 break;
1899 }
1900 }
1901
1902 if (i > 0)
1903 stable_p_state_sclk = table->entries[0].clk;
1904
1905 sclk = stable_p_state_sclk;
1906 }
1907
1908 ps->need_dfs_bypass = true;
1909
1910 for (i = 0; i < ps->num_levels; i++) {
1911 if (ps->levels[i].sclk < sclk)
1912 ps->levels[i].sclk = sclk;
1913 }
1914
1915 if (table && table->count) {
1916 for (i = 0; i < ps->num_levels; i++) {
1917 if (pi->high_voltage_t &&
1918 (pi->high_voltage_t <
1919 kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
1920 kv_get_high_voltage_limit(rdev, &limit);
1921 ps->levels[i].sclk = table->entries[limit].clk;
1922 }
1923 }
1924 } else {
1925 struct sumo_sclk_voltage_mapping_table *table =
1926 &pi->sys_info.sclk_voltage_mapping_table;
1927
1928 for (i = 0; i < ps->num_levels; i++) {
1929 if (pi->high_voltage_t &&
1930 (pi->high_voltage_t <
1931 kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
1932 kv_get_high_voltage_limit(rdev, &limit);
1933 ps->levels[i].sclk = table->entries[limit].sclk_frequency;
1934 }
1935 }
1936 }
1937
1938 if (pi->caps_stable_p_state) {
1939 for (i = 0; i < ps->num_levels; i++) {
1940 ps->levels[i].sclk = stable_p_state_sclk;
1941 }
1942 }
1943
1944 pi->video_start = new_rps->dclk || new_rps->vclk;
1945
1946 if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
1947 ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
1948 pi->battery_state = true;
1949 else
1950 pi->battery_state = false;
1951
1952 if (rdev->family == CHIP_KABINI) {
1953 ps->dpm0_pg_nb_ps_lo = 0x1;
1954 ps->dpm0_pg_nb_ps_hi = 0x0;
1955 ps->dpmx_nb_ps_lo = 0x1;
1956 ps->dpmx_nb_ps_hi = 0x0;
1957 } else {
1958 ps->dpm0_pg_nb_ps_lo = 0x1;
1959 ps->dpm0_pg_nb_ps_hi = 0x0;
1960 ps->dpmx_nb_ps_lo = 0x2;
1961 ps->dpmx_nb_ps_hi = 0x1;
1962
1963 if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
1964 force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
1965 pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
1966 pi->disable_nb_ps3_in_battery;
1967 ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
1968 ps->dpm0_pg_nb_ps_hi = 0x2;
1969 ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
1970 ps->dpmx_nb_ps_hi = 0x2;
1971 }
1972 }
1973}
1974
1975static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device *rdev,
1976 u32 index, bool enable)
1977{
1978 struct kv_power_info *pi = kv_get_pi(rdev);
1979
1980 pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
1981}
1982
/* Compute the deep-sleep divider id for every level in the valid window
 * from its (big-endian, SMU-format) sclk.
 * Returns 0 on success, -EINVAL if the valid window is empty. */
static int kv_calculate_ds_divider(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 sclk_in_sr = 10000; /* ??? */
	u32 i;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
		pi->graphics_level[i].DeepSleepDivId =
			kv_get_sleep_divider_id_from_clock(rdev,
							   be32_to_cpu(pi->graphics_level[i].SclkFrequency),
							   sclk_in_sr);
	}
	return 0;
}
2000
/* Set the per-level NB p-state hints (GnbSlow / ForceNbPs1 / UpH) for the
 * valid window, with family-specific policy for Kabini vs others.
 * NOTE(review): the Kabini branch writes levels 1..4 unconditionally --
 * assumes at least 5 graphics levels exist there; confirm.
 * Returns 0 on success, -EINVAL if the valid window is empty. */
static int kv_calculate_nbps_level_settings(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	bool force_high;
	struct radeon_clock_and_voltage_limits *max_limits =
		&rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	u32 mclk = max_limits->mclk;

	if (pi->lowest_valid > pi->highest_valid)
		return -EINVAL;

	if (rdev->family == CHIP_KABINI) {
		/* defaults: GNB may run slow, no forced NB PS1, no hysteresis */
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		if (!pi->sys_info.nb_dpm_enable)
			return 0;

		force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
			      (rdev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);

		if (force_high) {
			for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
				pi->graphics_level[i].GnbSlow = 0;
		} else {
			if (pi->battery_state)
				pi->graphics_level[0].ForceNbPs1 = 1;

			pi->graphics_level[1].GnbSlow = 0;
			pi->graphics_level[2].GnbSlow = 0;
			pi->graphics_level[3].GnbSlow = 0;
			pi->graphics_level[4].GnbSlow = 0;
		}
	} else {
		for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
			pi->graphics_level[i].GnbSlow = 1;
			pi->graphics_level[i].ForceNbPs1 = 0;
			pi->graphics_level[i].UpH = 0;
		}

		/* on battery, bias the lowest level toward NB PS1 */
		if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
			pi->graphics_level[pi->lowest_valid].UpH = 0x28;
			pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
			if (pi->lowest_valid != pi->highest_valid)
				pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
		}
	}
	return 0;
}
2054
2055static int kv_calculate_dpm_settings(struct radeon_device *rdev)
2056{
2057 struct kv_power_info *pi = kv_get_pi(rdev);
2058 u32 i;
2059
2060 if (pi->lowest_valid > pi->highest_valid)
2061 return -EINVAL;
2062
2063 for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2064 pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;
2065
2066 return 0;
2067}
2068
/* Build the graphics DPM level table (dividers, VIDs, activity targets,
 * throttle flags) from the vddc/sclk dependency table if present, else
 * from the sysinfo mapping table, stopping at the high-voltage limit.
 * All SMU levels are then disabled; kv_enable_new_levels() re-enables
 * the valid window later. */
static void kv_init_graphics_levels(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 i;
	struct radeon_clock_voltage_dependency_table *table =
		&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		u32 vid_2bit;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->count; i++) {
			/* stop at entries above the high-voltage threshold */
			if (pi->high_voltage_t &&
			    (pi->high_voltage_t <
			     kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v)))
				break;

			kv_set_divider_value(rdev, i, table->entries[i].clk);
			vid_2bit = sumo_convert_vid7_to_vid2(rdev,
							     &pi->sys_info.vid_mapping_table,
							     table->entries[i].v);
			kv_set_vid(rdev, i, vid_2bit);
			kv_set_at(rdev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
			pi->graphics_dpm_level_count++;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		pi->graphics_dpm_level_count = 0;
		for (i = 0; i < table->num_max_dpm_entries; i++) {
			/* stop at entries above the high-voltage threshold */
			if (pi->high_voltage_t &&
			    pi->high_voltage_t <
			    kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit))
				break;

			kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency);
			kv_set_vid(rdev, i, table->entries[i].vid_2bit);
			kv_set_at(rdev, i, pi->at[i]);
			kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
			pi->graphics_dpm_level_count++;
		}
	}

	/* start with every SMU level disabled */
	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
		kv_dpm_power_level_enable(rdev, i, false);
}
2117
2118static void kv_enable_new_levels(struct radeon_device *rdev)
2119{
2120 struct kv_power_info *pi = kv_get_pi(rdev);
2121 u32 i;
2122
2123 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
2124 if (i >= pi->lowest_valid && i <= pi->highest_valid)
2125 kv_dpm_power_level_enable(rdev, i, true);
2126 }
2127}
2128
2129static int kv_set_enabled_levels(struct radeon_device *rdev)
2130{
2131 struct kv_power_info *pi = kv_get_pi(rdev);
2132 u32 i, new_mask = 0;
2133
2134 for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2135 new_mask |= (1 << i);
2136
2137 return kv_send_msg_to_smc_with_parameter(rdev,
2138 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2139 new_mask);
2140}
2141
/* Program the NB p-state lo/hi indices of the new state into the
 * NB_DPM_CONFIG_1 SMC register (read-modify-write of just those fields).
 * Kabini has no NB DPM register interface and is skipped. */
static void kv_program_nbps_index_settings(struct radeon_device *rdev,
					   struct radeon_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(rdev);
	u32 nbdpmconfig1;

	if (rdev->family == CHIP_KABINI)
		return;

	if (pi->sys_info.nb_dpm_enable) {
		nbdpmconfig1 = RREG32_SMC(NB_DPM_CONFIG_1);
		nbdpmconfig1 &= ~(Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK |
				  DpmXNbPsLo_MASK | DpmXNbPsHi_MASK);
		nbdpmconfig1 |= (Dpm0PgNbPsLo(new_ps->dpm0_pg_nb_ps_lo) |
				 Dpm0PgNbPsHi(new_ps->dpm0_pg_nb_ps_hi) |
				 DpmXNbPsLo(new_ps->dpmx_nb_ps_lo) |
				 DpmXNbPsHi(new_ps->dpmx_nb_ps_hi));
		WREG32_SMC(NB_DPM_CONFIG_1, nbdpmconfig1);
	}
}
2163
/* Clamp the thermal interrupt window to [min_temp, max_temp] (millidegrees
 * C, hardware range 0..255 C) and program the high/low trip points.
 * NOTE(review): the +49 added to each trip point is presumably a
 * hardware calibration offset for this sensor -- confirm against the
 * register spec.
 * Returns 0 on success, -EINVAL for an inverted range. */
static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	/* read-modify-write only the trip point fields */
	tmp = RREG32_SMC(CG_THERMAL_INT_CTRL);
	tmp &= ~(DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK);
	tmp |= (DIG_THERM_INTH(49 + (high_temp / 1000)) |
		DIG_THERM_INTL(49 + (low_temp / 1000)));
	WREG32_SMC(CG_THERMAL_INT_CTRL, tmp);

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}
2191
/* Overlay of every IntegratedSystemInfo vbios table revision; the
 * parser below only accepts crev 8 (info_8). */
union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
};
2200
/* Parse the vbios IntegratedSystemInfo table (revision 8 only) and fill
 * pi->sys_info: bootup clocks, thermal limits, NB p-state clocks and the
 * sclk/vid mapping tables.  Returns 0 on success — including when no
 * table is present at all — or -EINVAL for an unsupported revision.
 */
static int kv_parse_sys_info_table(struct radeon_device *rdev)
{
	struct kv_power_info *pi = kv_get_pi(rdev);
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	union igp_info *igp_info;
	u8 frev, crev;
	u16 data_offset;
	int i;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)(mode_info->atom_context->bios +
					      data_offset);

		/* only table revision 8 is understood here */
		if (crev != 8) {
			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
			return -EINVAL;
		}
		pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
		pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
		pi->sys_info.bootup_nb_voltage_index =
			le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
		/* fall back to fixed defaults when the vbios leaves the
		 * thermal limit/hysteresis fields zero */
		if (igp_info->info_8.ucHtcTmpLmt == 0)
			pi->sys_info.htc_tmp_lmt = 203;
		else
			pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
		if (igp_info->info_8.ucHtcHystLmt == 0)
			pi->sys_info.htc_hyst_lmt = 5;
		else
			pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
		/* warn only; the values are still used as-is */
		if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
			DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
		}

		/* bit 3 of ulSystemConfig gates NB DPM support */
		if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
			pi->sys_info.nb_dpm_enable = true;
		else
			pi->sys_info.nb_dpm_enable = false;

		for (i = 0; i < KV_NUM_NBPSTATES; i++) {
			pi->sys_info.nbp_memory_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
			pi->sys_info.nbp_n_clock[i] =
				le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
		}
		if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
		    SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
			pi->caps_enable_dfs_bypass = true;

		/* both mapping tables are derived from the same
		 * available-sclk array in the vbios */
		sumo_construct_sclk_voltage_mapping_table(rdev,
							  &pi->sys_info.sclk_voltage_mapping_table,
							  igp_info->info_8.sAvail_SCLK);

		sumo_construct_vid_mapping_table(rdev,
						 &pi->sys_info.vid_mapping_table,
						 igp_info->info_8.sAvail_SCLK);

		kv_construct_max_power_limits_table(rdev,
						    &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	}
	return 0;
}
2264
/* Overlay of the PowerPlay table layouts; only the pplib variants are
 * used by kv_parse_power_table() below. */
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};
2273
/* Per-ASIC clock info layouts; KV/KB uses the sumo variant. */
union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
};
2280
/* Power state entry layouts; the parser below uses the v2 form. */
union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
2285
2286static void kv_patch_boot_state(struct radeon_device *rdev,
2287 struct kv_ps *ps)
2288{
2289 struct kv_power_info *pi = kv_get_pi(rdev);
2290
2291 ps->num_levels = 1;
2292 ps->levels[0] = pi->boot_pl;
2293}
2294
2295static void kv_parse_pplib_non_clock_info(struct radeon_device *rdev,
2296 struct radeon_ps *rps,
2297 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
2298 u8 table_rev)
2299{
2300 struct kv_ps *ps = kv_get_ps(rps);
2301
2302 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
2303 rps->class = le16_to_cpu(non_clock_info->usClassification);
2304 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
2305
2306 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
2307 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
2308 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
2309 } else {
2310 rps->vclk = 0;
2311 rps->dclk = 0;
2312 }
2313
2314 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
2315 rdev->pm.dpm.boot_ps = rps;
2316 kv_patch_boot_state(rdev, ps);
2317 }
2318 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
2319 rdev->pm.dpm.uvd_ps = rps;
2320}
2321
2322static void kv_parse_pplib_clock_info(struct radeon_device *rdev,
2323 struct radeon_ps *rps, int index,
2324 union pplib_clock_info *clock_info)
2325{
2326 struct kv_power_info *pi = kv_get_pi(rdev);
2327 struct kv_ps *ps = kv_get_ps(rps);
2328 struct kv_pl *pl = &ps->levels[index];
2329 u32 sclk;
2330
2331 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
2332 sclk |= clock_info->sumo.ucEngineClockHigh << 16;
2333 pl->sclk = sclk;
2334 pl->vddc_index = clock_info->sumo.vddcIndex;
2335
2336 ps->num_levels = index + 1;
2337
2338 if (pi->caps_sclk_ds) {
2339 pl->ds_divider_index = 5;
2340 pl->ss_divider_index = 5;
2341 }
2342}
2343
2344static int kv_parse_power_table(struct radeon_device *rdev)
2345{
2346 struct radeon_mode_info *mode_info = &rdev->mode_info;
2347 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
2348 union pplib_power_state *power_state;
2349 int i, j, k, non_clock_array_index, clock_array_index;
2350 union pplib_clock_info *clock_info;
2351 struct _StateArray *state_array;
2352 struct _ClockInfoArray *clock_info_array;
2353 struct _NonClockInfoArray *non_clock_info_array;
2354 union power_info *power_info;
2355 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
2356 u16 data_offset;
2357 u8 frev, crev;
2358 u8 *power_state_offset;
2359 struct kv_ps *ps;
2360
2361 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
2362 &frev, &crev, &data_offset))
2363 return -EINVAL;
2364 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
2365
2366 state_array = (struct _StateArray *)
2367 (mode_info->atom_context->bios + data_offset +
2368 le16_to_cpu(power_info->pplib.usStateArrayOffset));
2369 clock_info_array = (struct _ClockInfoArray *)
2370 (mode_info->atom_context->bios + data_offset +
2371 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
2372 non_clock_info_array = (struct _NonClockInfoArray *)
2373 (mode_info->atom_context->bios + data_offset +
2374 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
2375
2376 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
2377 state_array->ucNumEntries, GFP_KERNEL);
2378 if (!rdev->pm.dpm.ps)
2379 return -ENOMEM;
2380 power_state_offset = (u8 *)state_array->states;
2381 rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
2382 rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
2383 rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
2384 for (i = 0; i < state_array->ucNumEntries; i++) {
2385 power_state = (union pplib_power_state *)power_state_offset;
2386 non_clock_array_index = power_state->v2.nonClockInfoIndex;
2387 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
2388 &non_clock_info_array->nonClockInfo[non_clock_array_index];
2389 if (!rdev->pm.power_state[i].clock_info)
2390 return -EINVAL;
2391 ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
2392 if (ps == NULL) {
2393 kfree(rdev->pm.dpm.ps);
2394 return -ENOMEM;
2395 }
2396 rdev->pm.dpm.ps[i].ps_priv = ps;
2397 k = 0;
2398 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
2399 clock_array_index = power_state->v2.clockInfoIndex[j];
2400 if (clock_array_index >= clock_info_array->ucNumEntries)
2401 continue;
2402 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
2403 break;
2404 clock_info = (union pplib_clock_info *)
2405 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
2406 kv_parse_pplib_clock_info(rdev,
2407 &rdev->pm.dpm.ps[i], k,
2408 clock_info);
2409 k++;
2410 }
2411 kv_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
2412 non_clock_info,
2413 non_clock_info_array->ucEntrySize);
2414 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
2415 }
2416 rdev->pm.dpm.num_ps = state_array->ucNumEntries;
2417 return 0;
2418}
2419
2420int kv_dpm_init(struct radeon_device *rdev)
2421{
2422 struct kv_power_info *pi;
2423 int ret, i;
2424
2425 pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
2426 if (pi == NULL)
2427 return -ENOMEM;
2428 rdev->pm.dpm.priv = pi;
2429
2430 ret = r600_parse_extended_power_table(rdev);
2431 if (ret)
2432 return ret;
2433
2434 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
2435 pi->at[i] = TRINITY_AT_DFLT;
2436
2437 pi->sram_end = SMC_RAM_END;
2438
2439 if (rdev->family == CHIP_KABINI)
2440 pi->high_voltage_t = 4001;
2441
2442 pi->enable_nb_dpm = true;
2443
2444 pi->caps_power_containment = true;
2445 pi->caps_cac = true;
2446 pi->enable_didt = false;
2447 if (pi->enable_didt) {
2448 pi->caps_sq_ramping = true;
2449 pi->caps_db_ramping = true;
2450 pi->caps_td_ramping = true;
2451 pi->caps_tcp_ramping = true;
2452 }
2453
2454 pi->caps_sclk_ds = true;
2455 pi->enable_auto_thermal_throttling = true;
2456 pi->disable_nb_ps3_in_battery = false;
2457 pi->bapm_enable = true;
2458 pi->voltage_drop_t = 0;
2459 pi->caps_sclk_throttle_low_notification = false;
2460 pi->caps_fps = false; /* true? */
2461 pi->caps_uvd_pg = false; /* XXX */
2462 pi->caps_uvd_dpm = true;
2463 pi->caps_vce_pg = false;
2464 pi->caps_samu_pg = false;
2465 pi->caps_acp_pg = false;
2466 pi->caps_stable_p_state = false;
2467
2468 ret = kv_parse_sys_info_table(rdev);
2469 if (ret)
2470 return ret;
2471
2472 kv_patch_voltage_values(rdev);
2473 kv_construct_boot_state(rdev);
2474
2475 ret = kv_parse_power_table(rdev);
2476 if (ret)
2477 return ret;
2478
2479 pi->enable_dpm = true;
2480
2481 return 0;
2482}
2483
2484void kv_dpm_print_power_state(struct radeon_device *rdev,
2485 struct radeon_ps *rps)
2486{
2487 int i;
2488 struct kv_ps *ps = kv_get_ps(rps);
2489
2490 r600_dpm_print_class_info(rps->class, rps->class2);
2491 r600_dpm_print_cap_info(rps->caps);
2492 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
2493 for (i = 0; i < ps->num_levels; i++) {
2494 struct kv_pl *pl = &ps->levels[i];
2495 printk("\t\tpower level %d sclk: %u vddc: %u\n",
2496 i, pl->sclk,
2497 kv_convert_8bit_index_to_voltage(rdev, pl->vddc_index));
2498 }
2499 r600_dpm_print_ps_status(rdev, rps);
2500}
2501
2502void kv_dpm_fini(struct radeon_device *rdev)
2503{
2504 int i;
2505
2506 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
2507 kfree(rdev->pm.dpm.ps[i].ps_priv);
2508 }
2509 kfree(rdev->pm.dpm.ps);
2510 kfree(rdev->pm.dpm.priv);
2511 r600_free_extended_power_table(rdev);
2512}
2513
/* Display configuration change hook: intentionally a no-op on KV/KB. */
void kv_dpm_display_configuration_changed(struct radeon_device *rdev)
{

}
2518
2519u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low)
2520{
2521 struct kv_power_info *pi = kv_get_pi(rdev);
2522 struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
2523
2524 if (low)
2525 return requested_state->levels[0].sclk;
2526 else
2527 return requested_state->levels[requested_state->num_levels - 1].sclk;
2528}
2529
2530u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low)
2531{
2532 struct kv_power_info *pi = kv_get_pi(rdev);
2533
2534 return pi->sys_info.bootup_uma_clk;
2535}
2536
This page took 0.115775 seconds and 5 git commands to generate.