/* drivers/gpu/drm/radeon/ci_dpm.c */
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "drmP.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_ucode.h"
#include "cikd.h"
#include "r600_dpm.h"
#include "ci_dpm.h"
#include "atom.h"
#include <linux/seq_file.h>

#define MC_CG_ARB_FREQ_F0	0x0a
#define MC_CG_ARB_FREQ_F1	0x0b
#define MC_CG_ARB_FREQ_F2	0x0c
#define MC_CG_ARB_FREQ_F3	0x0d

#define SMC_RAM_END	0x40000

#define VOLTAGE_SCALE	4
#define VOLTAGE_VID_OFFSET_SCALE1	625
#define VOLTAGE_VID_OFFSET_SCALE2	100

static const struct ci_pt_defaults defaults_hawaii_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

static const struct ci_pt_defaults defaults_bonaire_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
	{ 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};

static const struct ci_pt_defaults defaults_saturn_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_saturn_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
	{ 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};

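/* DIDT (di/dt throttling) register settings, applied via
 * ci_program_pt_config_registers(); each entry is
 * { offset, mask, shift, value, type }.
 */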
static const struct ci_pt_config_reg didt_config_ci[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
				       u32 arb_freq_src, u32 arb_freq_dest);
extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
						     u32 max_voltage_steps,
						     struct atom_voltage_table *voltage_table);
extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
extern int ci_mc_load_microcode(struct radeon_device *rdev);
extern void cik_update_cg(struct radeon_device *rdev,
			  u32 block, bool enable);

static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
				       u32 target_tdp);
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);

static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
{
	struct ci_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
{
	struct ci_ps *ps = rps->ps_priv;

	return ps;
}

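/* Pick the per-SKU powertune defaults from the PCI device ID and set the
 * baseline power-containment capabilities (note BAPM stays disabled on
 * Hawaii).
 */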
static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	switch (rdev->pdev->device) {
	case 0x6649:
	case 0x6650:
	case 0x6651:
	case 0x6658:
	case 0x665C:
	case 0x665D:
	default:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		pi->powertune_defaults = &defaults_saturn_xt;
		break;
	case 0x67B8:
	case 0x67B0:
		pi->powertune_defaults = &defaults_hawaii_xt;
		break;
	case 0x67BA:
	case 0x67B1:
		pi->powertune_defaults = &defaults_hawaii_pro;
		break;
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	}

	pi->dte_tj_offset = 0;

	pi->caps_power_containment = true;
	pi->caps_cac = false;
	pi->caps_sq_ramping = false;
	pi->caps_db_ramping = false;
	pi->caps_td_ramping = false;
	pi->caps_tcp_ramping = false;

	if (pi->caps_power_containment) {
		pi->caps_cac = true;
		if (rdev->family == CHIP_HAWAII)
			pi->enable_bapm_feature = false;
		else
			pi->enable_bapm_feature = true;
		pi->enable_tdc_limit_feature = true;
		pi->enable_pkg_pwr_tracking_feature = true;
	}
}

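/* Convert a VDDC value (mV) to a VID code, presumably the SVI2 encoding
 * VID = (1550 mV - vddc) / 6.25 mV; the math is done in quarter-mV units
 * (VOLTAGE_SCALE = 4), so 6200 = 4 * 1550 and 25 = 4 * 6.25.
 */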
static u8 ci_convert_to_vid(u16 vddc)
{
	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}

static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
	u32 i;

	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
	    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
		return -EINVAL;

	for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
			hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
			hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
		} else {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
			hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
		}
	}
	return 0;
}

static int ci_populate_vddc_vid(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *vid = pi->smc_powertune_table.VddCVid;
	u32 i;

	if (pi->vddc_voltage_table.count > 8)
		return -EINVAL;

	for (i = 0; i < pi->vddc_voltage_table.count; i++)
		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

	return 0;
}

static int ci_populate_svi_load_line(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}

static int ci_populate_tdc_limit(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	u16 tdc_limit;

	tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
		pt_defaults->tdc_vddc_throttle_release_limit_perc;
	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

	return 0;
}

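/* Fetch TdcWaterfallCtl from the SMC PM fuse table; note that on success
 * the value read back is immediately replaced with the per-SKU default,
 * so the read mostly serves to validate SMC SRAM access.
 */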
static int ci_populate_dw8(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
				     pi->sram_end);
	if (ret)
		return -EINVAL;
	else
		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}

static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
	u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;

	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

	return 0;
}

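/* Fill in the BAPM/DTE section of the SMC DPM table: TDP targets (in
 * 1/256 W units), thermal trip points, and the BAPMTI R/RC coefficient
 * matrices from the per-SKU defaults.
 */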
static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}

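/* Build the PM fuse table (VID tables, load line, TDC limit, etc.) and
 * upload it to the SMC at the offset advertised in the firmware header.
 */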
static int ci_populate_pm_base(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 pm_fuse_table_offset;
	int ret;

	if (pi->caps_power_containment) {
		ret = ci_read_smc_sram_dword(rdev,
					     SMU7_FIRMWARE_HEADER_LOCATION +
					     offsetof(SMU7_Firmware_Header, PmFuseTable),
					     &pm_fuse_table_offset, pi->sram_end);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_vid_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_populate_vddc_vid(rdev);
		if (ret)
			return ret;
		ret = ci_populate_svi_load_line(rdev);
		if (ret)
			return ret;
		ret = ci_populate_tdc_limit(rdev);
		if (ret)
			return ret;
		ret = ci_populate_dw8(rdev);
		if (ret)
			return ret;
		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
					   (u8 *)&pi->smc_powertune_table,
					   sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}

static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(DIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(DIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(DIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(DIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TCP_CTRL0, data);
	}
}

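/* Apply a 0xFFFFFFFF-terminated list of read-modify-write register updates;
 * CACHE-type entries are accumulated and OR'd into the next non-cache write.
 */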
static int ci_program_pt_config_registers(struct radeon_device *rdev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}

static int ci_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		cik_enter_rlc_safe_mode(rdev);

		if (enable) {
			ret = ci_program_pt_config_registers(rdev, didt_config_ci);
			if (ret) {
				cik_exit_rlc_safe_mode(rdev);
				return ret;
			}
		}

		ci_do_enable_didt(rdev, enable);

		cik_exit_rlc_safe_mode(rdev);
	}

	return 0;
}

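/* Enable/disable the SMC power containment features (DTE/BAPM, TDC limit,
 * package power limit), tracking which ones actually took effect in
 * pi->power_containment_features.
 */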
static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					struct radeon_cac_tdp_table *cac_tdp_table =
						rdev->pm.dpm.dyn_state.cac_tdp_table;
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(rdev, default_pwr_limit);
				}
			}
		}
	} else {
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}

static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
			if (smc_result != PPSMC_Result_OK) {
				ret = -EINVAL;
				pi->cac_enabled = false;
			} else {
				pi->cac_enabled = true;
			}
		} else if (pi->cac_enabled) {
			ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

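/* Scale the configurable TDP by the user's tdp_adjustment percentage and
 * hand the result to the SMC:
 *   target_tdp = (100 +/- adjust_percent) * configurable_tdp / 100
 */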
static int ci_power_control_set_level(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	s32 adjust_percent;
	s32 target_tdp;
	int ret = 0;
	bool adjust_polarity = false; /* ??? */

	if (pi->caps_power_containment) {
		adjust_percent = adjust_polarity ?
			rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
		target_tdp = ((100 + adjust_percent) *
			      (s32)cac_tdp_table->configurable_tdp) / 100;

		ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
	}

	return ret;
}

void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	ci_update_uvd_dpm(rdev, gate);
}

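/* An mclk switch has to fit inside the vblank period; the higher 450 us
 * threshold for GDDR5 (vs. 300 us) presumably reflects the longer link
 * retraining time.
 */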
bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
	u32 switch_limit = pi->mem_gddr5 ? 450 : 300;

	if (vblank_time < switch_limit)
		return true;
	else
		return false;
}

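/* Clamp the requested power state against the AC/DC limits, bump clocks
 * for VCE, and force the high mclk when mclk switching is unsafe (multiple
 * displays or a too-short vblank).
 */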
static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	int i;

	if (rps->vce_active) {
		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
		rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(rdev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (rdev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	if (rps->vce_active) {
		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
		if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk)
			mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}

static int ci_thermal_set_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(CG_THERMAL_INT);
	tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
	tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
		CI_DIG_THERM_INTL(low_temp / 1000);
	WREG32_SMC(CG_THERMAL_INT, tmp);

#if 0
	/* XXX: need to figure out how to handle this properly */
	tmp = RREG32_SMC(CG_THERMAL_CTRL);
	tmp &= DIG_THERM_DPM_MASK;
	tmp |= DIG_THERM_DPM(high_temp / 1000);
	WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

static int ci_thermal_enable_alert(struct radeon_device *rdev,
				   bool enable)
{
	u32 thermal_int = RREG32_SMC(CG_THERMAL_INT);
	PPSMC_Result result;

	if (enable) {
		thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
		rdev->irq.dpm_thermal = false;
		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Enable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
			return -EINVAL;
		}
	} else {
		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
		rdev->irq.dpm_thermal = true;
		result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Disable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
			return -EINVAL;
		}
	}

	WREG32_SMC(CG_THERMAL_INT, thermal_int);

	return 0;
}

#if 0
static int ci_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_read_smc_sram_dword(rdev,
				      pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
#endif

static int ci_write_smc_soft_register(struct radeon_device *rdev,
				      u16 reg_offset, u32 value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_write_smc_sram_dword(rdev,
				       pi->soft_regs_start + reg_offset,
				       value, pi->sram_end);
}

static void ci_init_fps_limits(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		table->FpsHighT = cpu_to_be16(tmp);

		tmp = 30;
		table->FpsLowT = cpu_to_be16(tmp);
	}
}

static int ci_update_sclk_t(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret = 0;
	u32 low_sclk_interrupt_t = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = ci_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
					   (u8 *)&low_sclk_interrupt_t,
					   sizeof(u32), pi->sram_end);
	}

	return ret;
}

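/* Resolve virtual (leakage) voltage IDs to real voltages, either via EVV
 * or via the leakage parameters stored in the vbios.
 */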
static void ci_get_leakage_voltages(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 leakage_id, virtual_voltage_id;
	u16 vddc, vddci;
	int i;

	pi->vddc_leakage.count = 0;
	pi->vddci_leakage.count = 0;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_voltage_evv(rdev, virtual_voltage_id, &vddc) != 0)
				continue;
			if (vddc != 0 && vddc != virtual_voltage_id) {
				pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
				pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
				pi->vddc_leakage.count++;
			}
		}
	} else if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
										 virtual_voltage_id,
										 leakage_id) == 0) {
				if (vddc != 0 && vddc != virtual_voltage_id) {
					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
					pi->vddc_leakage.count++;
				}
				if (vddci != 0 && vddci != virtual_voltage_id) {
					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
					pi->vddci_leakage.count++;
				}
			}
		}
	}
}

static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	bool want_thermal_protection;
	enum radeon_dpm_event_src dpm_event_src;
	u32 tmp;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
		break;
	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
#if 0
		/* XXX: need to figure out how to handle this properly */
		tmp = RREG32_SMC(CG_THERMAL_CTRL);
		tmp &= DPM_EVENT_SRC_MASK;
		tmp |= DPM_EVENT_SRC(dpm_event_src);
		WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

		tmp = RREG32_SMC(GENERAL_PWRMGT);
		if (pi->thermal_protection)
			tmp &= ~THERMAL_PROTECTION_DIS;
		else
			tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	} else {
		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}

static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
					   enum radeon_dpm_auto_throttle_src source,
					   bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (enable) {
		if (!(pi->active_auto_throttle_sources & (1 << source))) {
			pi->active_auto_throttle_sources |= 1 << source;
			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
		}
	} else {
		if (pi->active_auto_throttle_sources & (1 << source)) {
			pi->active_auto_throttle_sources &= ~(1 << source);
			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
		}
	}
}

static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
{
	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
		ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
}

static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->need_update_smu7_dpm_table = 0;
	return 0;
}

static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (enable) {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;

			WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);

			WREG32_SMC(LCAC_MC0_CNTL, 0x05);
			WREG32_SMC(LCAC_MC1_CNTL, 0x05);
			WREG32_SMC(LCAC_CPL_CNTL, 0x100005);

			udelay(10);

			WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
			WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
			WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
		}
	} else {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}

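/* Turn on dynamic power management: global/sclk PM enables, voltage
 * control, sclk/mclk DPM and, if permitted, PCIe DPM.
 */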
static int ci_start_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);

	WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	ret = ci_enable_sclk_mclk_dpm(rdev, true);
	if (ret)
		return ret;

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_stop_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp &= ~GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	ret = ci_enable_sclk_mclk_dpm(rdev, false);
	if (ret)
		return ret;

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	return 0;
}

static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
	u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);

	if (enable)
		tmp &= ~SCLK_PWRMGT_OFF;
	else
		tmp |= SCLK_PWRMGT_OFF;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
}

#if 0
static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
					bool ac_power)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	u32 power_limit;

	if (ac_power)
		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
	else
		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);

	ci_set_power_limit(rdev, power_limit);

	if (pi->caps_automatic_dc_transition) {
		if (ac_power)
			ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
		else
			ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
	}

	return 0;
}
#endif

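/* SMC mailbox helpers: an optional argument is written to SMC_MSG_ARG_0
 * before the message is sent, and a returned value is read back from the
 * same register once the SMC acknowledges.
 */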
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter)
{
	WREG32(SMC_MSG_ARG_0, parameter);
	return ci_send_msg_to_smc(rdev, msg);
}

static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
							 PPSMC_Msg msg, u32 *parameter)
{
	PPSMC_Result smc_result;

	smc_result = ci_send_msg_to_smc(rdev, msg);

	if ((smc_result == PPSMC_Result_OK) && parameter)
		*parameter = RREG32(SMC_MSG_ARG_0);

	return smc_result;
}

static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->sclk_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->mclk_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->pcie_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
				       u32 target_tdp)
{
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;
	return 0;
}

static int ci_set_boot_state(struct radeon_device *rdev)
{
	return ci_enable_sclk_mclk_dpm(rdev, false);
}

static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
{
	u32 sclk_freq;
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_return_parameter(rdev,
						    PPSMC_MSG_API_GetSclkFrequency,
						    &sclk_freq);
	if (smc_result != PPSMC_Result_OK)
		sclk_freq = 0;

	return sclk_freq;
}

static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
{
	u32 mclk_freq;
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_return_parameter(rdev,
						    PPSMC_MSG_API_GetMclkFrequency,
						    &mclk_freq);
	if (smc_result != PPSMC_Result_OK)
		mclk_freq = 0;

	return mclk_freq;
}

static void ci_dpm_start_smc(struct radeon_device *rdev)
{
	int i;

	ci_program_jump_on_start(rdev);
	ci_start_smc_clock(rdev);
	ci_start_smc(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
			break;
	}
}

static void ci_dpm_stop_smc(struct radeon_device *rdev)
{
	ci_reset_smc(rdev);
	ci_stop_smc_clock(rdev);
}

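/* Cache the SMC SRAM offsets of the DPM, soft register, MC register, fan
 * and arb tables from the firmware header so later uploads know where to
 * write.
 */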
static int ci_process_firmware_header(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, DpmTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->dpm_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, SoftRegisters),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->soft_regs_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, mcRegisterTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->mc_reg_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, FanTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->fan_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->arb_table_start = tmp;

	return 0;
}

static void ci_read_clock_registers(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->clock_registers.cg_spll_func_cntl =
		RREG32_SMC(CG_SPLL_FUNC_CNTL);
	pi->clock_registers.cg_spll_func_cntl_2 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
	pi->clock_registers.cg_spll_func_cntl_3 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
	pi->clock_registers.cg_spll_func_cntl_4 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
	pi->clock_registers.cg_spll_spread_spectrum =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
	pi->clock_registers.cg_spll_spread_spectrum_2 =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
	pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
	pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
	pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
	pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
	pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
	pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
}

static void ci_init_sclk_t(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->low_sclk_interrupt_t = 0;
}

static void ci_enable_thermal_protection(struct radeon_device *rdev,
					 bool enable)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	if (enable)
		tmp &= ~THERMAL_PROTECTION_DIS;
	else
		tmp |= THERMAL_PROTECTION_DIS;
	WREG32_SMC(GENERAL_PWRMGT, tmp);
}

static void ci_enable_acpi_power_management(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	tmp |= STATIC_PM_EN;

	WREG32_SMC(GENERAL_PWRMGT, tmp);
}

#if 0
static int ci_enter_ulp_state(struct radeon_device *rdev)
{
	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);

	udelay(25000);

	return 0;
}

static int ci_exit_ulp_state(struct radeon_device *rdev)
{
	int i;

	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);

	udelay(7000);

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(SMC_RESP_0) == 1)
			break;
		udelay(1000);
	}

	return 0;
}
#endif

static int ci_notify_smc_display_change(struct radeon_device *rdev,
					bool has_display)
{
	PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;

	return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
}

static int ci_enable_ds_master_switch(struct radeon_device *rdev,
				      bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (enable) {
		if (pi->caps_sclk_ds) {
			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
				return -EINVAL;
		} else {
			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
				return -EINVAL;
		}
	} else {
		if (pi->caps_sclk_ds) {
			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}

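/* Program the display gap heuristics: the frame time comes from the
 * refresh rate (1000000 us / rate), and the pre-vblank window is what is
 * left of the frame after a 200 us guard band and the vblank itself.
 */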
static void ci_program_display_gap(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
	u32 pre_vbi_time_in_us;
	u32 frame_time_in_us;
	u32 ref_clock = rdev->clock.spll.reference_freq;
	u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);

	tmp &= ~DISP_GAP_MASK;
	if (rdev->pm.dpm.new_active_crtc_count > 0)
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
	else
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);

	if (refresh_rate == 0)
		refresh_rate = 60;
	if (vblank_time == 0xffffffff)
		vblank_time = 500;
	frame_time_in_us = 1000000 / refresh_rate;
	pre_vbi_time_in_us =
		frame_time_in_us - 200 - vblank_time;
	tmp = pre_vbi_time_in_us * (ref_clock / 100);

	WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));

	ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));
}

static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (enable) {
		if (pi->caps_sclk_ss_support) {
			tmp = RREG32_SMC(GENERAL_PWRMGT);
			tmp |= DYN_SPREAD_SPECTRUM_EN;
			WREG32_SMC(GENERAL_PWRMGT, tmp);
		}
	} else {
		tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
		tmp &= ~SSEN;
		WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);

		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp &= ~DYN_SPREAD_SPECTRUM_EN;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}

static void ci_program_sstp(struct radeon_device *rdev)
{
	WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
}

static void ci_enable_display_gap(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);

	tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
	tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
		DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));

	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
}

static void ci_program_vc(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
	WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
	WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
	WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
	WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
	WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
	WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
	WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
}

static void ci_clear_vc(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, 0);
	WREG32_SMC(CG_FTV_1, 0);
	WREG32_SMC(CG_FTV_2, 0);
	WREG32_SMC(CG_FTV_3, 0);
	WREG32_SMC(CG_FTV_4, 0);
	WREG32_SMC(CG_FTV_5, 0);
	WREG32_SMC(CG_FTV_6, 0);
	WREG32_SMC(CG_FTV_7, 0);
}

static int ci_upload_firmware(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int i, ret;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
			break;
	}
	WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);

	ci_stop_smc_clock(rdev);
	ci_reset_smc(rdev);

	ret = ci_load_smc_ucode(rdev, pi->sram_end);

	return ret;
}

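/* For SVI2 regulators the voltage table is derived directly from the
 * clock/voltage dependency table; there are no SMIO (GPIO) levels.
 */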
static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
				     struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
				     struct atom_voltage_table *voltage_table)
{
	u32 i;

	if (voltage_dependency_table == NULL)
		return -EINVAL;

	voltage_table->mask_low = 0;
	voltage_table->phase_delay = 0;

	voltage_table->count = voltage_dependency_table->count;
	for (i = 0; i < voltage_table->count; i++) {
		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
		voltage_table->entries[i].smio_low = 0;
	}

	return 0;
}

static int ci_construct_voltage_tables(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
						    VOLTAGE_OBJ_GPIO_LUT,
						    &pi->vddc_voltage_table);
		if (ret)
			return ret;
	} else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(rdev,
						&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						&pi->vddc_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
							 &pi->vddc_voltage_table);

	if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
						    VOLTAGE_OBJ_GPIO_LUT,
						    &pi->vddci_voltage_table);
		if (ret)
			return ret;
	} else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(rdev,
						&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						&pi->vddci_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
							 &pi->vddci_voltage_table);

	if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
						    VOLTAGE_OBJ_GPIO_LUT,
						    &pi->mvdd_voltage_table);
		if (ret)
			return ret;
	} else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(rdev,
						&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
						&pi->mvdd_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
							 &pi->mvdd_voltage_table);

	return 0;
}

1776static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
1777 struct atom_voltage_table_entry *voltage_table,
1778 SMU7_Discrete_VoltageLevel *smc_voltage_table)
1779{
1780 int ret;
1781
1782 ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
1783 &smc_voltage_table->StdVoltageHiSidd,
1784 &smc_voltage_table->StdVoltageLoSidd);
1785
1786 if (ret) {
1787 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
1788 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
1789 }
1790
1791 smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
1792 smc_voltage_table->StdVoltageHiSidd =
1793 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
1794 smc_voltage_table->StdVoltageLoSidd =
1795 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
1796}
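
/*
 * Every multi-byte field headed for the SMC is converted to big-endian as a
 * final step, after all arithmetic is done in CPU byte order. A freestanding
 * sketch of what cpu_to_be16() amounts to on a little-endian CPU (the kernel
 * helper is a no-op on big-endian hosts):
 */
#include <stdint.h>

static uint16_t sketch_to_be16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8)); /* unconditional byte swap */
}
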
1797
1798static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
1799 SMU7_Discrete_DpmTable *table)
1800{
1801 struct ci_power_info *pi = ci_get_pi(rdev);
1802 unsigned int count;
1803
1804 table->VddcLevelCount = pi->vddc_voltage_table.count;
1805 for (count = 0; count < table->VddcLevelCount; count++) {
1806 ci_populate_smc_voltage_table(rdev,
1807 &pi->vddc_voltage_table.entries[count],
1808 &table->VddcLevel[count]);
1809
1810 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1811 table->VddcLevel[count].Smio |=
1812 pi->vddc_voltage_table.entries[count].smio_low;
1813 else
1814 table->VddcLevel[count].Smio = 0;
1815 }
1816 table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
1817
1818 return 0;
1819}
1820
1821static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
1822 SMU7_Discrete_DpmTable *table)
1823{
1824 unsigned int count;
1825 struct ci_power_info *pi = ci_get_pi(rdev);
1826
1827 table->VddciLevelCount = pi->vddci_voltage_table.count;
1828 for (count = 0; count < table->VddciLevelCount; count++) {
1829 ci_populate_smc_voltage_table(rdev,
1830 &pi->vddci_voltage_table.entries[count],
1831 &table->VddciLevel[count]);
1832
1833 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1834 table->VddciLevel[count].Smio |=
1835 pi->vddci_voltage_table.entries[count].smio_low;
1836 else
1837 table->VddciLevel[count].Smio = 0;
1838 }
1839 table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
1840
1841 return 0;
1842}
1843
1844static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
1845 SMU7_Discrete_DpmTable *table)
1846{
1847 struct ci_power_info *pi = ci_get_pi(rdev);
1848 unsigned int count;
1849
1850 table->MvddLevelCount = pi->mvdd_voltage_table.count;
1851 for (count = 0; count < table->MvddLevelCount; count++) {
1852 ci_populate_smc_voltage_table(rdev,
1853 &pi->mvdd_voltage_table.entries[count],
1854 &table->MvddLevel[count]);
1855
1856 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1857 table->MvddLevel[count].Smio |=
1858 pi->mvdd_voltage_table.entries[count].smio_low;
1859 else
1860 table->MvddLevel[count].Smio = 0;
1861 }
1862 table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
1863
1864 return 0;
1865}
1866
1867static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
1868 SMU7_Discrete_DpmTable *table)
1869{
1870 int ret;
1871
1872 ret = ci_populate_smc_vddc_table(rdev, table);
1873 if (ret)
1874 return ret;
1875
1876 ret = ci_populate_smc_vddci_table(rdev, table);
1877 if (ret)
1878 return ret;
1879
1880 ret = ci_populate_smc_mvdd_table(rdev, table);
1881 if (ret)
1882 return ret;
1883
1884 return 0;
1885}
1886
1887static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
1888 SMU7_Discrete_VoltageLevel *voltage)
1889{
1890 struct ci_power_info *pi = ci_get_pi(rdev);
1891 u32 i = 0;
1892
1893 if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
1894 for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
1895 if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
1896 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
1897 break;
1898 }
1899 }
1900
1901 if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
1902 return -EINVAL;
1903 return 0; /* found an MVDD level for this mclk */
1904 }
1905 return -EINVAL; /* no MVDD control; caller falls back to 0 */
1906}
1907
1908static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
1909 struct atom_voltage_table_entry *voltage_table,
1910 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
1911{
1912 u16 v_index, idx;
1913 bool voltage_found = false;
1914 *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
1915 *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
1916
1917 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
1918 return -EINVAL;
1919
1920 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
1921 for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
1922 if (voltage_table->value ==
1923 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
1924 voltage_found = true;
1925 if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
1926 idx = v_index;
1927 else
1928 idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
1929 *std_voltage_lo_sidd =
1930 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
1931 *std_voltage_hi_sidd =
1932 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
1933 break;
1934 }
1935 }
1936
1937 if (!voltage_found) {
1938 for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
1939 if (voltage_table->value <=
1940 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
1941 voltage_found = true;
1942 if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
1943 idx = v_index;
1944 else
1945 idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
1946 *std_voltage_lo_sidd =
1947 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
1948 *std_voltage_hi_sidd =
1949 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
1950 break;
1951 }
1952 }
1953 }
1954 }
1955
1956 return 0;
1957}
1958
1959static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
1960 const struct radeon_phase_shedding_limits_table *limits,
1961 u32 sclk,
1962 u32 *phase_shedding)
1963{
1964 unsigned int i;
1965
1966 *phase_shedding = 1;
1967
1968 for (i = 0; i < limits->count; i++) {
1969 if (sclk < limits->entries[i].sclk) {
1970 *phase_shedding = i;
1971 break;
1972 }
1973 }
1974}
1975
1976static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
1977 const struct radeon_phase_shedding_limits_table *limits,
1978 u32 mclk,
1979 u32 *phase_shedding)
1980{
1981 unsigned int i;
1982
1983 *phase_shedding = 1;
1984
1985 for (i = 0; i < limits->count; i++) {
1986 if (mclk < limits->entries[i].mclk) {
1987 *phase_shedding = i;
1988 break;
1989 }
1990 }
1991}
1992
1993static int ci_init_arb_table_index(struct radeon_device *rdev)
1994{
1995 struct ci_power_info *pi = ci_get_pi(rdev);
1996 u32 tmp;
1997 int ret;
1998
1999 ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
2000 &tmp, pi->sram_end);
2001 if (ret)
2002 return ret;
2003
2004 tmp &= 0x00FFFFFF;
2005 tmp |= MC_CG_ARB_FREQ_F1 << 24;
2006
2007 return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
2008 tmp, pi->sram_end);
2009}
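
/*
 * ci_init_arb_table_index() above is a read-modify-write of the top byte of
 * a single SMC SRAM dword. A freestanding sketch of the same bit surgery,
 * assuming the arb set index lives in bits 31:24:
 */
#include <stdint.h>

static uint32_t set_arb_index(uint32_t dword, uint8_t index)
{
	dword &= 0x00FFFFFFu;           /* clear bits 31:24 */
	dword |= (uint32_t)index << 24; /* install the new arb set index */
	return dword;
}
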
2010
2011static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
2012 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
2013 u32 clock, u32 *voltage)
2014{
2015 u32 i = 0;
2016
2017 if (allowed_clock_voltage_table->count == 0)
2018 return -EINVAL;
2019
2020 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
2021 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2022 *voltage = allowed_clock_voltage_table->entries[i].v;
2023 return 0;
2024 }
2025 }
2026
2027 *voltage = allowed_clock_voltage_table->entries[i-1].v;
2028
2029 return 0;
2030}
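
/*
 * ci_get_dependency_volt_by_clk() and the MVDD lookup above share one rule:
 * scan a table sorted by ascending clock, take the first entry whose clock
 * covers the request, and clamp to the last entry when the request exceeds
 * the table. A freestanding sketch with a hypothetical entry type; callers
 * must guarantee n > 0, as the driver does:
 */
#include <stddef.h>
#include <stdint.h>

struct clk_volt { uint32_t clk; uint16_t v; };

static uint16_t volt_for_clk(const struct clk_volt *t, size_t n, uint32_t clk)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (t[i].clk >= clk)
			return t[i].v;  /* first level fast enough */
	}
	return t[n - 1].v;              /* above table max: clamp */
}
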
2031
2032static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
2033 u32 sclk, u32 min_sclk_in_sr)
2034{
2035 u32 i;
2036 u32 tmp;
2037 u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
2038 min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
2039
2040 if (sclk < min)
2041 return 0;
2042
2043 for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
2044 tmp = sclk / (1 << i);
2045 if (tmp >= min || i == 0)
2046 break;
2047 }
2048
2049 return (u8)i;
2050}
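
/*
 * The deep-sleep divider search above picks the largest power-of-two divide
 * that keeps sclk at or above a floor. A freestanding sketch; MAX_DIV_ID is
 * an illustrative stand-in for CISLAND_MAX_DEEPSLEEP_DIVIDER_ID. For
 * example, sclk = 80000 with floor = 5000 yields id 4 (80000 >> 4 == 5000).
 */
#include <stdint.h>

#define MAX_DIV_ID 5 /* assumed maximum divider id, for illustration */

static uint8_t sleep_divider_id(uint32_t sclk, uint32_t floor)
{
	uint32_t i;

	if (sclk < floor)
		return 0;               /* no divide fits at all */

	for (i = MAX_DIV_ID; i > 0; i--) {
		if ((sclk >> i) >= floor)
			break;          /* deepest divide still above floor */
	}
	return (uint8_t)i;
}
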
2051
2052static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
2053{
2054 return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2055}
2056
2057static int ci_reset_to_default(struct radeon_device *rdev)
2058{
2059 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2060 0 : -EINVAL;
2061}
2062
2063static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
2064{
2065 u32 tmp;
2066
2067 tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
2068
2069 if (tmp == MC_CG_ARB_FREQ_F0)
2070 return 0;
2071
2072 return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
2073}
2074
2075static void ci_register_patching_mc_arb(struct radeon_device *rdev,
2076 const u32 engine_clock,
2077 const u32 memory_clock,
2078 u32 *dram_timing2)
2079{
2080 bool patch;
2081 u32 tmp, tmp2;
2082
2083 tmp = RREG32(MC_SEQ_MISC0);
2084 patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
2085
2086 if (patch &&
2087 ((rdev->pdev->device == 0x67B0) ||
2088 (rdev->pdev->device == 0x67B1))) {
2089 if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2090 tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
2091 *dram_timing2 &= ~0x00ff0000;
2092 *dram_timing2 |= tmp2 << 16;
2093 } else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2094 tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
2095 *dram_timing2 &= ~0x00ff0000;
2096 *dram_timing2 |= tmp2 << 16;
2097 }
2098 }
2099}
2100
2101
2102static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
2103 u32 sclk,
2104 u32 mclk,
2105 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2106{
2107 u32 dram_timing;
2108 u32 dram_timing2;
2109 u32 burst_time;
2110
2111 radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);
2112
2113 dram_timing = RREG32(MC_ARB_DRAM_TIMING);
2114 dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
2115 burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
2116
2117 ci_register_patching_mc_arb(rdev, sclk, mclk, &dram_timing2);
2118
2119 arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
2120 arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2121 arb_regs->McArbBurstTime = (u8)burst_time;
2122
2123 return 0;
2124}
2125
2126static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
2127{
2128 struct ci_power_info *pi = ci_get_pi(rdev);
2129 SMU7_Discrete_MCArbDramTimingTable arb_regs;
2130 u32 i, j;
2131 int ret = 0;
2132
2133 memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2134
2135 for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2136 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2137 ret = ci_populate_memory_timing_parameters(rdev,
2138 pi->dpm_table.sclk_table.dpm_levels[i].value,
2139 pi->dpm_table.mclk_table.dpm_levels[j].value,
2140 &arb_regs.entries[i][j]);
2141 if (ret)
2142 break;
2143 }
2144 }
2145
2146 if (ret == 0)
2147 ret = ci_copy_bytes_to_smc(rdev,
2148 pi->arb_table_start,
2149 (u8 *)&arb_regs,
2150 sizeof(SMU7_Discrete_MCArbDramTimingTable),
2151 pi->sram_end);
2152
2153 return ret;
2154}
2155
2156static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
2157{
2158 struct ci_power_info *pi = ci_get_pi(rdev);
2159
2160 if (pi->need_update_smu7_dpm_table == 0)
2161 return 0;
2162
2163 return ci_do_program_memory_timing_parameters(rdev);
2164}
2165
2166static void ci_populate_smc_initial_state(struct radeon_device *rdev,
2167 struct radeon_ps *radeon_boot_state)
2168{
2169 struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
2170 struct ci_power_info *pi = ci_get_pi(rdev);
2171 u32 level = 0;
2172
2173 for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2174 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2175 boot_state->performance_levels[0].sclk) {
2176 pi->smc_state_table.GraphicsBootLevel = level;
2177 break;
2178 }
2179 }
2180
2181 for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2182 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2183 boot_state->performance_levels[0].mclk) {
2184 pi->smc_state_table.MemoryBootLevel = level;
2185 break;
2186 }
2187 }
2188}
2189
2190static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2191{
2192 u32 i;
2193 u32 mask_value = 0;
2194
2195 for (i = dpm_table->count; i > 0; i--) {
2196 mask_value = mask_value << 1;
2197 if (dpm_table->dpm_levels[i-1].enabled)
2198 mask_value |= 0x1;
2199 else
2200 mask_value &= 0xFFFFFFFE;
2201 }
2202
2203 return mask_value;
2204}
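
/*
 * The enable mask above packs one bit per DPM level, level 0 in bit 0,
 * scanning from the top so the shift naturally orders the bits. The same
 * idea over a plain bool array; with levels {0, 1, 3} of 4 enabled the
 * result is 0xB (0b1011):
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static uint32_t level_enable_mask(const bool *enabled, size_t count)
{
	uint32_t mask = 0;
	size_t i;

	for (i = count; i > 0; i--) {
		mask <<= 1;             /* make room for the next lower level */
		if (enabled[i - 1])
			mask |= 0x1;
	}
	return mask;
}
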
2205
2206static void ci_populate_smc_link_level(struct radeon_device *rdev,
2207 SMU7_Discrete_DpmTable *table)
2208{
2209 struct ci_power_info *pi = ci_get_pi(rdev);
2210 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2211 u32 i;
2212
2213 for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2214 table->LinkLevel[i].PcieGenSpeed =
2215 (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2216 table->LinkLevel[i].PcieLaneCount =
2217 r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2218 table->LinkLevel[i].EnabledForActivity = 1;
2219 table->LinkLevel[i].DownT = cpu_to_be32(5);
2220 table->LinkLevel[i].UpT = cpu_to_be32(30);
2221 }
2222
2223 pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2224 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2225 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2226}
2227
2228static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
2229 SMU7_Discrete_DpmTable *table)
2230{
2231 u32 count;
2232 struct atom_clock_dividers dividers;
2233 int ret = -EINVAL;
2234
2235 table->UvdLevelCount =
2236 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2237
2238 for (count = 0; count < table->UvdLevelCount; count++) {
2239 table->UvdLevel[count].VclkFrequency =
2240 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2241 table->UvdLevel[count].DclkFrequency =
2242 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2243 table->UvdLevel[count].MinVddc =
2244 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2245 table->UvdLevel[count].MinVddcPhases = 1;
2246
2247 ret = radeon_atom_get_clock_dividers(rdev,
2248 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2249 table->UvdLevel[count].VclkFrequency, false, &dividers);
2250 if (ret)
2251 return ret;
2252
2253 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2254
2255 ret = radeon_atom_get_clock_dividers(rdev,
2256 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2257 table->UvdLevel[count].DclkFrequency, false, &dividers);
2258 if (ret)
2259 return ret;
2260
2261 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2262
2263 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2264 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2265 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2266 }
2267
2268 return ret;
2269}
2270
2271static int ci_populate_smc_vce_level(struct radeon_device *rdev,
2272 SMU7_Discrete_DpmTable *table)
2273{
2274 u32 count;
2275 struct atom_clock_dividers dividers;
2276 int ret = -EINVAL;
2277
2278 table->VceLevelCount =
2279 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2280
2281 for (count = 0; count < table->VceLevelCount; count++) {
2282 table->VceLevel[count].Frequency =
2283 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2284 table->VceLevel[count].MinVoltage =
2285 (u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2286 table->VceLevel[count].MinPhases = 1;
2287
2288 ret = radeon_atom_get_clock_dividers(rdev,
2289 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2290 table->VceLevel[count].Frequency, false, &dividers);
2291 if (ret)
2292 return ret;
2293
2294 table->VceLevel[count].Divider = (u8)dividers.post_divider;
2295
2296 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2297 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2298 }
2299
2300 return ret;
2301
2302}
2303
2304static int ci_populate_smc_acp_level(struct radeon_device *rdev,
2305 SMU7_Discrete_DpmTable *table)
2306{
2307 u32 count;
2308 struct atom_clock_dividers dividers;
2309 int ret = -EINVAL;
2310
2311 table->AcpLevelCount = (u8)
2312 (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2313
2314 for (count = 0; count < table->AcpLevelCount; count++) {
2315 table->AcpLevel[count].Frequency =
2316 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2317 table->AcpLevel[count].MinVoltage =
2318 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2319 table->AcpLevel[count].MinPhases = 1;
2320
2321 ret = radeon_atom_get_clock_dividers(rdev,
2322 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2323 table->AcpLevel[count].Frequency, false, &dividers);
2324 if (ret)
2325 return ret;
2326
2327 table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2328
2329 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2330 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2331 }
2332
2333 return ret;
2334}
2335
2336static int ci_populate_smc_samu_level(struct radeon_device *rdev,
2337 SMU7_Discrete_DpmTable *table)
2338{
2339 u32 count;
2340 struct atom_clock_dividers dividers;
2341 int ret = -EINVAL;
2342
2343 table->SamuLevelCount =
2344 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2345
2346 for (count = 0; count < table->SamuLevelCount; count++) {
2347 table->SamuLevel[count].Frequency =
2348 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2349 table->SamuLevel[count].MinVoltage =
2350 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2351 table->SamuLevel[count].MinPhases = 1;
2352
2353 ret = radeon_atom_get_clock_dividers(rdev,
2354 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2355 table->SamuLevel[count].Frequency, false, &dividers);
2356 if (ret)
2357 return ret;
2358
2359 table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2360
2361 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2362 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2363 }
2364
2365 return ret;
2366}
2367
2368static int ci_calculate_mclk_params(struct radeon_device *rdev,
2369 u32 memory_clock,
2370 SMU7_Discrete_MemoryLevel *mclk,
2371 bool strobe_mode,
2372 bool dll_state_on)
2373{
2374 struct ci_power_info *pi = ci_get_pi(rdev);
2375 u32 dll_cntl = pi->clock_registers.dll_cntl;
2376 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2377 u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2378 u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2379 u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2380 u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2381 u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2382 u32 mpll_ss1 = pi->clock_registers.mpll_ss1;
2383 u32 mpll_ss2 = pi->clock_registers.mpll_ss2;
2384 struct atom_mpll_param mpll_param;
2385 int ret;
2386
2387 ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
2388 if (ret)
2389 return ret;
2390
2391 mpll_func_cntl &= ~BWCTRL_MASK;
2392 mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
2393
2394 mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
2395 mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
2396 CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
2397
2398 mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
2399 mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
2400
2401 if (pi->mem_gddr5) {
2402 mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
2403 mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
2404 YCLK_POST_DIV(mpll_param.post_div);
2405 }
2406
2407 if (pi->caps_mclk_ss_support) {
2408 struct radeon_atom_ss ss;
2409 u32 freq_nom;
2410 u32 tmp;
2411 u32 reference_clock = rdev->clock.mpll.reference_freq;
2412
2413 if (mpll_param.qdr == 1)
2414 freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
2415 else
2416 freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
2417
2418 tmp = (freq_nom / reference_clock);
2419 tmp = tmp * tmp;
2420 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2421 ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2422 u32 clks = reference_clock * 5 / ss.rate;
2423 u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2424
2425 mpll_ss1 &= ~CLKV_MASK;
2426 mpll_ss1 |= CLKV(clkv);
2427
2428 mpll_ss2 &= ~CLKS_MASK;
2429 mpll_ss2 |= CLKS(clks);
2430 }
2431 }
2432
2433 mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
2434 mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
2435
2436 if (dll_state_on)
2437 mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
2438 else
2439 mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2440
2441 mclk->MclkFrequency = memory_clock;
2442 mclk->MpllFuncCntl = mpll_func_cntl;
2443 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
2444 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
2445 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
2446 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
2447 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
2448 mclk->DllCntl = dll_cntl;
2449 mclk->MpllSs1 = mpll_ss1;
2450 mclk->MpllSs2 = mpll_ss2;
2451
2452 return 0;
2453}
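
/*
 * The spread-spectrum branch above boils down to two register fields: CLKS
 * (spread step) and CLKV (spread amount), derived from the nominal VCO
 * frequency and the ATOM SS table's rate/percentage values. A freestanding
 * sketch of the same integer math; the 64-bit intermediate is a defensive
 * assumption on top of the driver's 32-bit arithmetic:
 */
#include <stdint.h>

static void mpll_ss_params(uint32_t ref_clk, uint32_t freq_nom,
			   uint32_t rate, uint32_t percentage,
			   uint32_t *clk_s, uint32_t *clk_v)
{
	uint32_t tmp = freq_nom / ref_clk;

	tmp = tmp * tmp;                 /* (freq_nom / ref)^2 */
	*clk_s = ref_clk * 5 / rate;
	*clk_v = (uint32_t)((((131ull * percentage * rate) / 100) * tmp) /
			    freq_nom);
}
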
2454
2455static int ci_populate_single_memory_level(struct radeon_device *rdev,
2456 u32 memory_clock,
2457 SMU7_Discrete_MemoryLevel *memory_level)
2458{
2459 struct ci_power_info *pi = ci_get_pi(rdev);
2460 int ret;
2461 bool dll_state_on;
2462
2463 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
2464 ret = ci_get_dependency_volt_by_clk(rdev,
2465 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2466 memory_clock, &memory_level->MinVddc);
2467 if (ret)
2468 return ret;
2469 }
2470
2471 if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
2472 ret = ci_get_dependency_volt_by_clk(rdev,
2473 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2474 memory_clock, &memory_level->MinVddci);
2475 if (ret)
2476 return ret;
2477 }
2478
2479 if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
2480 ret = ci_get_dependency_volt_by_clk(rdev,
2481 &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2482 memory_clock, &memory_level->MinMvdd);
2483 if (ret)
2484 return ret;
2485 }
2486
2487 memory_level->MinVddcPhases = 1;
2488
2489 if (pi->vddc_phase_shed_control)
2490 ci_populate_phase_value_based_on_mclk(rdev,
2491 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2492 memory_clock,
2493 &memory_level->MinVddcPhases);
2494
2495 memory_level->EnabledForThrottle = 1;
2496 memory_level->UpH = 0;
2497 memory_level->DownH = 100;
2498 memory_level->VoltageDownH = 0;
2499 memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
2500
2501 memory_level->StutterEnable = false;
2502 memory_level->StrobeEnable = false;
2503 memory_level->EdcReadEnable = false;
2504 memory_level->EdcWriteEnable = false;
2505 memory_level->RttEnable = false;
2506
2507 memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2508
2509 if (pi->mclk_stutter_mode_threshold &&
2510 (memory_clock <= pi->mclk_stutter_mode_threshold) &&
2511 (pi->uvd_enabled == false) &&
2512 (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
2513 (rdev->pm.dpm.new_active_crtc_count <= 2))
2514 memory_level->StutterEnable = true;
2515
2516 if (pi->mclk_strobe_mode_threshold &&
2517 (memory_clock <= pi->mclk_strobe_mode_threshold))
2518 memory_level->StrobeEnable = 1;
2519
2520 if (pi->mem_gddr5) {
2521 memory_level->StrobeRatio =
2522 si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
2523 if (pi->mclk_edc_enable_threshold &&
2524 (memory_clock > pi->mclk_edc_enable_threshold))
2525 memory_level->EdcReadEnable = true;
2526
2527 if (pi->mclk_edc_wr_enable_threshold &&
2528 (memory_clock > pi->mclk_edc_wr_enable_threshold))
2529 memory_level->EdcWriteEnable = true;
2530
2531 if (memory_level->StrobeEnable) {
2532 if (si_get_mclk_frequency_ratio(memory_clock, true) >=
2533 ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
2534 dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
2535 else
2536 dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
2537 } else {
2538 dll_state_on = pi->dll_default_on;
2539 }
2540 } else {
2541 memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
2542 dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
2543 }
2544
2545 ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
2546 if (ret)
2547 return ret;
2548
2549 memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
2550 memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
2551 memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
2552 memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
2553
2554 memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
2555 memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
2556 memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
2557 memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
2558 memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
2559 memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
2560 memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
2561 memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
2562 memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
2563 memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
2564 memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
2565
2566 return 0;
2567}
2568
2569static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
2570 SMU7_Discrete_DpmTable *table)
2571{
2572 struct ci_power_info *pi = ci_get_pi(rdev);
2573 struct atom_clock_dividers dividers;
2574 SMU7_Discrete_VoltageLevel voltage_level;
2575 u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
2576 u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
2577 u32 dll_cntl = pi->clock_registers.dll_cntl;
2578 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2579 int ret;
2580
2581 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
2582
2583 if (pi->acpi_vddc)
2584 table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
2585 else
2586 table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
2587
2588 table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
2589
2590 table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;
2591
2592 ret = radeon_atom_get_clock_dividers(rdev,
2593 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2594 table->ACPILevel.SclkFrequency, false, &dividers);
2595 if (ret)
2596 return ret;
2597
2598 table->ACPILevel.SclkDid = (u8)dividers.post_divider;
2599 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2600 table->ACPILevel.DeepSleepDivId = 0;
2601
2602 spll_func_cntl &= ~SPLL_PWRON;
2603 spll_func_cntl |= SPLL_RESET;
2604
2605 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
2606 spll_func_cntl_2 |= SCLK_MUX_SEL(4);
2607
2608 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
2609 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
2610 table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
2611 table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
2612 table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
2613 table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
2614 table->ACPILevel.CcPwrDynRm = 0;
2615 table->ACPILevel.CcPwrDynRm1 = 0;
2616
2617 table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
2618 table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
2619 table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
2620 table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
2621 table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
2622 table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
2623 table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
2624 table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
2625 table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
2626 table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
2627 table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
2628
2629 table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
2630 table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
2631
2632 if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2633 if (pi->acpi_vddci)
2634 table->MemoryACPILevel.MinVddci =
2635 cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
2636 else
2637 table->MemoryACPILevel.MinVddci =
2638 cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
2639 }
2640
2641 if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
2642 table->MemoryACPILevel.MinMvdd = 0;
2643 else
2644 table->MemoryACPILevel.MinMvdd =
2645 cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
2646
2647 mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
2648 mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2649
2650 dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
2651
2652 table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
2653 table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
2654 table->MemoryACPILevel.MpllAdFuncCntl =
2655 cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
2656 table->MemoryACPILevel.MpllDqFuncCntl =
2657 cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
2658 table->MemoryACPILevel.MpllFuncCntl =
2659 cpu_to_be32(pi->clock_registers.mpll_func_cntl);
2660 table->MemoryACPILevel.MpllFuncCntl_1 =
2661 cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
2662 table->MemoryACPILevel.MpllFuncCntl_2 =
2663 cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
2664 table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
2665 table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
2666
2667 table->MemoryACPILevel.EnabledForThrottle = 0;
2668 table->MemoryACPILevel.EnabledForActivity = 0;
2669 table->MemoryACPILevel.UpH = 0;
2670 table->MemoryACPILevel.DownH = 100;
2671 table->MemoryACPILevel.VoltageDownH = 0;
2672 table->MemoryACPILevel.ActivityLevel =
2673 cpu_to_be16((u16)pi->mclk_activity_target);
2674
2675 table->MemoryACPILevel.StutterEnable = false;
2676 table->MemoryACPILevel.StrobeEnable = false;
2677 table->MemoryACPILevel.EdcReadEnable = false;
2678 table->MemoryACPILevel.EdcWriteEnable = false;
2679 table->MemoryACPILevel.RttEnable = false;
2680
2681 return 0;
2682}
2683
2684
2685static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
2686{
2687 struct ci_power_info *pi = ci_get_pi(rdev);
2688 struct ci_ulv_parm *ulv = &pi->ulv;
2689
2690 if (ulv->supported) {
2691 if (enable)
2692 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
2693 0 : -EINVAL;
2694 else
2695 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
2696 0 : -EINVAL;
2697 }
2698
2699 return 0;
2700}
2701
2702static int ci_populate_ulv_level(struct radeon_device *rdev,
2703 SMU7_Discrete_Ulv *state)
2704{
2705 struct ci_power_info *pi = ci_get_pi(rdev);
2706 u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;
2707
2708 state->CcPwrDynRm = 0;
2709 state->CcPwrDynRm1 = 0;
2710
2711 if (ulv_voltage == 0) {
2712 pi->ulv.supported = false;
2713 return 0;
2714 }
2715
2716 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2717 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
2718 state->VddcOffset = 0;
2719 else
2720 state->VddcOffset =
2721 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
2722 } else {
2723 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
2724 state->VddcOffsetVid = 0;
2725 else
2726 state->VddcOffsetVid = (u8)
2727 ((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
2728 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
2729 }
2730 state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
2731
2732 state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
2733 state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
2734 state->VddcOffset = cpu_to_be16(state->VddcOffset);
2735
2736 return 0;
2737}
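
/*
 * The SVI2 branch above converts a millivolt offset into VID steps: the
 * scale pair (625, 100) implies one VID step per 6.25 mV, done in integer
 * math as offset * 100 / 625. A freestanding sketch; e.g. a 50 mV offset
 * becomes 8 VID steps:
 */
#include <stdint.h>

#define VID_OFFSET_SCALE1 625
#define VID_OFFSET_SCALE2 100

static uint8_t mv_offset_to_vid(uint16_t offset_mv)
{
	return (uint8_t)(offset_mv * VID_OFFSET_SCALE2 / VID_OFFSET_SCALE1);
}
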
2738
2739static int ci_calculate_sclk_params(struct radeon_device *rdev,
2740 u32 engine_clock,
2741 SMU7_Discrete_GraphicsLevel *sclk)
2742{
2743 struct ci_power_info *pi = ci_get_pi(rdev);
2744 struct atom_clock_dividers dividers;
2745 u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
2746 u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
2747 u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
2748 u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
2749 u32 reference_clock = rdev->clock.spll.reference_freq;
2750 u32 reference_divider;
2751 u32 fbdiv;
2752 int ret;
2753
2754 ret = radeon_atom_get_clock_dividers(rdev,
2755 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2756 engine_clock, false, &dividers);
2757 if (ret)
2758 return ret;
2759
2760 reference_divider = 1 + dividers.ref_div;
2761 fbdiv = dividers.fb_div & 0x3FFFFFF;
2762
2763 spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
2764 spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
2765 spll_func_cntl_3 |= SPLL_DITHEN;
2766
2767 if (pi->caps_sclk_ss_support) {
2768 struct radeon_atom_ss ss;
2769 u32 vco_freq = engine_clock * dividers.post_div;
2770
2771 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2772 ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
2773 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
2774 u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
2775
2776 cg_spll_spread_spectrum &= ~CLK_S_MASK;
2777 cg_spll_spread_spectrum |= CLK_S(clk_s);
2778 cg_spll_spread_spectrum |= SSEN;
2779
2780 cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
2781 cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
2782 }
2783 }
2784
2785 sclk->SclkFrequency = engine_clock;
2786 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
2787 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
2788 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
2789 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
2790 sclk->SclkDid = (u8)dividers.post_divider;
2791
2792 return 0;
2793}
2794
2795static int ci_populate_single_graphic_level(struct radeon_device *rdev,
2796 u32 engine_clock,
2797 u16 sclk_activity_level_t,
2798 SMU7_Discrete_GraphicsLevel *graphic_level)
2799{
2800 struct ci_power_info *pi = ci_get_pi(rdev);
2801 int ret;
2802
2803 ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
2804 if (ret)
2805 return ret;
2806
2807 ret = ci_get_dependency_volt_by_clk(rdev,
2808 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2809 engine_clock, &graphic_level->MinVddc);
2810 if (ret)
2811 return ret;
2812
2813 graphic_level->SclkFrequency = engine_clock;
2814
2815 graphic_level->Flags = 0;
2816 graphic_level->MinVddcPhases = 1;
2817
2818 if (pi->vddc_phase_shed_control)
2819 ci_populate_phase_value_based_on_sclk(rdev,
2820 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2821 engine_clock,
2822 &graphic_level->MinVddcPhases);
2823
2824 graphic_level->ActivityLevel = sclk_activity_level_t;
2825
2826 graphic_level->CcPwrDynRm = 0;
2827 graphic_level->CcPwrDynRm1 = 0;
2828 graphic_level->EnabledForThrottle = 1;
2829 graphic_level->UpH = 0;
2830 graphic_level->DownH = 0;
2831 graphic_level->VoltageDownH = 0;
2832 graphic_level->PowerThrottle = 0;
2833
2834 if (pi->caps_sclk_ds)
2835 graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
2836 engine_clock,
2837 CISLAND_MINIMUM_ENGINE_CLOCK);
2838
2839 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2840
2841 graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
2842 graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
2843 graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
2844 graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
2845 graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
2846 graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
2847 graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
2848 graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
2849 graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
2850 graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
2851 graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
2852
2853 return 0;
2854}
2855
2856static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
2857{
2858 struct ci_power_info *pi = ci_get_pi(rdev);
2859 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2860 u32 level_array_address = pi->dpm_table_start +
2861 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
2862 u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
2863 SMU7_MAX_LEVELS_GRAPHICS;
2864 SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
2865 u32 i; int ret;
2866
2867 memset(levels, 0, level_array_size);
2868
2869 for (i = 0; i < dpm_table->sclk_table.count; i++) {
2870 ret = ci_populate_single_graphic_level(rdev,
2871 dpm_table->sclk_table.dpm_levels[i].value,
2872 (u16)pi->activity_target[i],
2873 &pi->smc_state_table.GraphicsLevel[i]);
2874 if (ret)
2875 return ret;
2876 if (i > 1)
2877 pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
2878 if (i == (dpm_table->sclk_table.count - 1))
2879 pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
2880 PPSMC_DISPLAY_WATERMARK_HIGH;
2881 }
2882 pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
2883
2884 pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
2885 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
2886 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
2887
2888 ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2889 (u8 *)levels, level_array_size,
2890 pi->sram_end);
2891 if (ret)
2892 return ret;
2893
2894 return 0;
2895}
2896
2897static int ci_populate_ulv_state(struct radeon_device *rdev,
2898 SMU7_Discrete_Ulv *ulv_level)
2899{
2900 return ci_populate_ulv_level(rdev, ulv_level);
2901}
2902
2903static int ci_populate_all_memory_levels(struct radeon_device *rdev)
2904{
2905 struct ci_power_info *pi = ci_get_pi(rdev);
2906 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2907 u32 level_array_address = pi->dpm_table_start +
2908 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
2909 u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
2910 SMU7_MAX_LEVELS_MEMORY;
2911 SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
2912 u32 i; int ret;
2913
2914 memset(levels, 0, level_array_size);
2915
2916 for (i = 0; i < dpm_table->mclk_table.count; i++) {
2917 if (dpm_table->mclk_table.dpm_levels[i].value == 0)
2918 return -EINVAL;
2919 ret = ci_populate_single_memory_level(rdev,
2920 dpm_table->mclk_table.dpm_levels[i].value,
2921 &pi->smc_state_table.MemoryLevel[i]);
2922 if (ret)
2923 return ret;
2924 }
2925
2926 pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
2927
2928 if ((dpm_table->mclk_table.count >= 2) &&
2929 ((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) {
2930 pi->smc_state_table.MemoryLevel[1].MinVddc =
2931 pi->smc_state_table.MemoryLevel[0].MinVddc;
2932 pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
2933 pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
2934 }
2935
2936 pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
2937
2938 pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
2939 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
2940 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
2941
2942 pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
2943 PPSMC_DISPLAY_WATERMARK_HIGH;
2944
2945 ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2946 (u8 *)levels, level_array_size,
2947 pi->sram_end);
2948 if (ret)
2949 return ret;
2950
2951 return 0;
2952}
2953
2954static void ci_reset_single_dpm_table(struct radeon_device *rdev,
2955 struct ci_single_dpm_table* dpm_table,
2956 u32 count)
2957{
2958 u32 i;
2959
2960 dpm_table->count = count;
2961 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
2962 dpm_table->dpm_levels[i].enabled = false;
2963}
2964
2965static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
2966 u32 index, u32 pcie_gen, u32 pcie_lanes)
2967{
2968 dpm_table->dpm_levels[index].value = pcie_gen;
2969 dpm_table->dpm_levels[index].param1 = pcie_lanes;
2970 dpm_table->dpm_levels[index].enabled = true;
2971}
2972
2973static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
2974{
2975 struct ci_power_info *pi = ci_get_pi(rdev);
2976
2977 if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
2978 return -EINVAL;
2979
2980 if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
2981 pi->pcie_gen_powersaving = pi->pcie_gen_performance;
2982 pi->pcie_lane_powersaving = pi->pcie_lane_performance;
2983 } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
2984 pi->pcie_gen_performance = pi->pcie_gen_powersaving;
2985 pi->pcie_lane_performance = pi->pcie_lane_powersaving;
2986 }
2987
2988 ci_reset_single_dpm_table(rdev,
2989 &pi->dpm_table.pcie_speed_table,
2990 SMU7_MAX_LEVELS_LINK);
2991
2992 if (rdev->family == CHIP_BONAIRE)
2993 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
2994 pi->pcie_gen_powersaving.min,
2995 pi->pcie_lane_powersaving.max);
2996 else
2997 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
2998 pi->pcie_gen_powersaving.min,
2999 pi->pcie_lane_powersaving.min);
3000 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
3001 pi->pcie_gen_performance.min,
3002 pi->pcie_lane_performance.min);
3003 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
3004 pi->pcie_gen_powersaving.min,
3005 pi->pcie_lane_powersaving.max);
3006 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
3007 pi->pcie_gen_performance.min,
3008 pi->pcie_lane_performance.max);
3009 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
3010 pi->pcie_gen_powersaving.max,
3011 pi->pcie_lane_powersaving.max);
3012 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
3013 pi->pcie_gen_performance.max,
3014 pi->pcie_lane_performance.max);
3015
3016 pi->dpm_table.pcie_speed_table.count = 6;
3017
3018 return 0;
3019}
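
/*
 * The default PCIe table above is a fixed six-entry ladder mixing the
 * powersaving and performance (gen, lane) ranges; only entry 0 differs per
 * chip (Bonaire takes the max lane count). The same layout as a
 * freestanding sketch with hypothetical range/level types:
 */
#include <stdint.h>

struct pcie_range { uint32_t min, max; };
struct pcie_level { uint32_t gen, lanes; };

static void fill_pcie_ladder(struct pcie_level lvl[6],
			     struct pcie_range ps_gen, struct pcie_range ps_lane,
			     struct pcie_range perf_gen, struct pcie_range perf_lane)
{
	lvl[0] = (struct pcie_level){ ps_gen.min,   ps_lane.min };   /* lowest */
	lvl[1] = (struct pcie_level){ perf_gen.min, perf_lane.min };
	lvl[2] = (struct pcie_level){ ps_gen.min,   ps_lane.max };
	lvl[3] = (struct pcie_level){ perf_gen.min, perf_lane.max };
	lvl[4] = (struct pcie_level){ ps_gen.max,   ps_lane.max };
	lvl[5] = (struct pcie_level){ perf_gen.max, perf_lane.max }; /* highest */
}
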
3020
3021static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
3022{
3023 struct ci_power_info *pi = ci_get_pi(rdev);
3024 struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
3025 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3026 struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
3027 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
3028 struct radeon_cac_leakage_table *std_voltage_table =
3029 &rdev->pm.dpm.dyn_state.cac_leakage_table;
3030 u32 i;
3031
3032 if (allowed_sclk_vddc_table == NULL)
3033 return -EINVAL;
3034 if (allowed_sclk_vddc_table->count < 1)
3035 return -EINVAL;
3036 if (allowed_mclk_table == NULL)
3037 return -EINVAL;
3038 if (allowed_mclk_table->count < 1)
3039 return -EINVAL;
3040
3041 memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
3042
3043 ci_reset_single_dpm_table(rdev,
3044 &pi->dpm_table.sclk_table,
3045 SMU7_MAX_LEVELS_GRAPHICS);
3046 ci_reset_single_dpm_table(rdev,
3047 &pi->dpm_table.mclk_table,
3048 SMU7_MAX_LEVELS_MEMORY);
3049 ci_reset_single_dpm_table(rdev,
3050 &pi->dpm_table.vddc_table,
3051 SMU7_MAX_LEVELS_VDDC);
3052 ci_reset_single_dpm_table(rdev,
3053 &pi->dpm_table.vddci_table,
3054 SMU7_MAX_LEVELS_VDDCI);
3055 ci_reset_single_dpm_table(rdev,
3056 &pi->dpm_table.mvdd_table,
3057 SMU7_MAX_LEVELS_MVDD);
3058
3059 pi->dpm_table.sclk_table.count = 0;
3060 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3061 if ((i == 0) ||
3062 (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
3063 allowed_sclk_vddc_table->entries[i].clk)) {
3064 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
3065 allowed_sclk_vddc_table->entries[i].clk;
3066 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
3067 (i == 0) ? true : false;
3068 pi->dpm_table.sclk_table.count++;
3069 }
3070 }
3071
3072 pi->dpm_table.mclk_table.count = 0;
3073 for (i = 0; i < allowed_mclk_table->count; i++) {
3074 if ((i == 0) ||
3075 (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3076 allowed_mclk_table->entries[i].clk)) {
3077 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3078 allowed_mclk_table->entries[i].clk;
3079 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
3080 (i == 0) ? true : false;
3081 pi->dpm_table.mclk_table.count++;
3082 }
3083 }
3084
3085 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3086 pi->dpm_table.vddc_table.dpm_levels[i].value =
3087 allowed_sclk_vddc_table->entries[i].v;
3088 pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3089 std_voltage_table->entries[i].leakage;
3090 pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3091 }
3092 pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3093
3094 allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3095 if (allowed_mclk_table) {
3096 for (i = 0; i < allowed_mclk_table->count; i++) {
3097 pi->dpm_table.vddci_table.dpm_levels[i].value =
3098 allowed_mclk_table->entries[i].v;
3099 pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3100 }
3101 pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3102 }
3103
3104 allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3105 if (allowed_mclk_table) {
3106 for (i = 0; i < allowed_mclk_table->count; i++) {
3107 pi->dpm_table.mvdd_table.dpm_levels[i].value =
3108 allowed_mclk_table->entries[i].v;
3109 pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3110 }
3111 pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3112 }
3113
3114 ci_setup_default_pcie_tables(rdev);
3115
3116 return 0;
3117}
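
/*
 * The sclk/mclk table builds above compress duplicate consecutive clocks:
 * an entry is appended only when it differs from the last one kept. The
 * dedup pass as a freestanding sketch; {300, 300, 600, 900, 900} comes out
 * as {300, 600, 900}:
 */
#include <stddef.h>
#include <stdint.h>

static size_t dedup_clocks(const uint32_t *in, size_t n, uint32_t *out)
{
	size_t i, count = 0;

	for (i = 0; i < n; i++) {
		if (count == 0 || out[count - 1] != in[i])
			out[count++] = in[i];
	}
	return count;
}
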
3118
3119static int ci_find_boot_level(struct ci_single_dpm_table *table,
3120 u32 value, u32 *boot_level)
3121{
3122 u32 i;
3123 int ret = -EINVAL;
3124
3125 for (i = 0; i < table->count; i++) {
3126 if (value == table->dpm_levels[i].value) {
3127 *boot_level = i;
3128 ret = 0;
3129 }
3130 }
3131
3132 return ret;
3133}
3134
3135static int ci_init_smc_table(struct radeon_device *rdev)
3136{
3137 struct ci_power_info *pi = ci_get_pi(rdev);
3138 struct ci_ulv_parm *ulv = &pi->ulv;
3139 struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
3140 SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3141 int ret;
3142
3143 ret = ci_setup_default_dpm_tables(rdev);
3144 if (ret)
3145 return ret;
3146
3147 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3148 ci_populate_smc_voltage_tables(rdev, table);
3149
3150 ci_init_fps_limits(rdev);
3151
3152 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3153 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3154
3155 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3156 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3157
3158 if (pi->mem_gddr5)
3159 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3160
3161 if (ulv->supported) {
3162 ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
3163 if (ret)
3164 return ret;
3165 WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3166 }
3167
3168 ret = ci_populate_all_graphic_levels(rdev);
3169 if (ret)
3170 return ret;
3171
3172 ret = ci_populate_all_memory_levels(rdev);
3173 if (ret)
3174 return ret;
3175
3176 ci_populate_smc_link_level(rdev, table);
3177
3178 ret = ci_populate_smc_acpi_level(rdev, table);
3179 if (ret)
3180 return ret;
3181
3182 ret = ci_populate_smc_vce_level(rdev, table);
3183 if (ret)
3184 return ret;
3185
3186 ret = ci_populate_smc_acp_level(rdev, table);
3187 if (ret)
3188 return ret;
3189
3190 ret = ci_populate_smc_samu_level(rdev, table);
3191 if (ret)
3192 return ret;
3193
3194 ret = ci_do_program_memory_timing_parameters(rdev);
3195 if (ret)
3196 return ret;
3197
3198 ret = ci_populate_smc_uvd_level(rdev, table);
3199 if (ret)
3200 return ret;
3201
3202 table->UvdBootLevel = 0;
3203 table->VceBootLevel = 0;
3204 table->AcpBootLevel = 0;
3205 table->SamuBootLevel = 0;
3206 table->GraphicsBootLevel = 0;
3207 table->MemoryBootLevel = 0;
3208
3209 ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3210 pi->vbios_boot_state.sclk_bootup_value,
3211 (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3212
3213 ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3214 pi->vbios_boot_state.mclk_bootup_value,
3215 (u32 *)&pi->smc_state_table.MemoryBootLevel);
3216
3217 table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3218 table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3219 table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3220
3221 ci_populate_smc_initial_state(rdev, radeon_boot_state);
3222
3223 ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
3224 if (ret)
3225 return ret;
3226
3227 table->UVDInterval = 1;
3228 table->VCEInterval = 1;
3229 table->ACPInterval = 1;
3230 table->SAMUInterval = 1;
3231 table->GraphicsVoltageChangeEnable = 1;
3232 table->GraphicsThermThrottleEnable = 1;
3233 table->GraphicsInterval = 1;
3234 table->VoltageInterval = 1;
3235 table->ThermalInterval = 1;
3236 table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3237 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3238 table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3239 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3240 table->MemoryVoltageChangeEnable = 1;
3241 table->MemoryInterval = 1;
3242 table->VoltageResponseTime = 0;
3243 table->VddcVddciDelta = 4000;
3244 table->PhaseResponseTime = 0;
3245 table->MemoryThermThrottleEnable = 1;
3246 table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
3247 table->PCIeGenInterval = 1;
3248 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3249 table->SVI2Enable = 1;
3250 else
3251 table->SVI2Enable = 0;
3252
3253 table->ThermGpio = 17;
3254 table->SclkStepSize = 0x4000;
3255
3256 table->SystemFlags = cpu_to_be32(table->SystemFlags);
3257 table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3258 table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3259 table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3260 table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3261 table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3262 table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3263 table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3264 table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3265 table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3266 table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3267 table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3268 table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3269 table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3270
3271 ret = ci_copy_bytes_to_smc(rdev,
3272 pi->dpm_table_start +
3273 offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3274 (u8 *)&table->SystemFlags,
3275 sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3276 pi->sram_end);
3277 if (ret)
3278 return ret;
3279
3280 return 0;
3281}
3282
3283static void ci_trim_single_dpm_states(struct radeon_device *rdev,
3284 struct ci_single_dpm_table *dpm_table,
3285 u32 low_limit, u32 high_limit)
3286{
3287 u32 i;
3288
3289 for (i = 0; i < dpm_table->count; i++) {
3290 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3291 (dpm_table->dpm_levels[i].value > high_limit))
3292 dpm_table->dpm_levels[i].enabled = false;
3293 else
3294 dpm_table->dpm_levels[i].enabled = true;
3295 }
3296}
3297
3298static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
3299 u32 speed_low, u32 lanes_low,
3300 u32 speed_high, u32 lanes_high)
3301{
3302 struct ci_power_info *pi = ci_get_pi(rdev);
3303 struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3304 u32 i, j;
3305
3306 for (i = 0; i < pcie_table->count; i++) {
3307 if ((pcie_table->dpm_levels[i].value < speed_low) ||
3308 (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3309 (pcie_table->dpm_levels[i].value > speed_high) ||
3310 (pcie_table->dpm_levels[i].param1 > lanes_high))
3311 pcie_table->dpm_levels[i].enabled = false;
3312 else
3313 pcie_table->dpm_levels[i].enabled = true;
3314 }
3315
3316 for (i = 0; i < pcie_table->count; i++) {
3317 if (pcie_table->dpm_levels[i].enabled) {
3318 for (j = i + 1; j < pcie_table->count; j++) {
3319 if (pcie_table->dpm_levels[j].enabled) {
3320 if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3321 (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3322 pcie_table->dpm_levels[j].enabled = false;
3323 }
3324 }
3325 }
3326 }
3327}
3328
3329static int ci_trim_dpm_states(struct radeon_device *rdev,
3330 struct radeon_ps *radeon_state)
3331{
3332 struct ci_ps *state = ci_get_ps(radeon_state);
3333 struct ci_power_info *pi = ci_get_pi(rdev);
3334 u32 high_limit_count;
3335
3336 if (state->performance_level_count < 1)
3337 return -EINVAL;
3338
3339 if (state->performance_level_count == 1)
3340 high_limit_count = 0;
3341 else
3342 high_limit_count = 1;
3343
3344 ci_trim_single_dpm_states(rdev,
3345 &pi->dpm_table.sclk_table,
3346 state->performance_levels[0].sclk,
3347 state->performance_levels[high_limit_count].sclk);
3348
3349 ci_trim_single_dpm_states(rdev,
3350 &pi->dpm_table.mclk_table,
3351 state->performance_levels[0].mclk,
3352 state->performance_levels[high_limit_count].mclk);
3353
3354 ci_trim_pcie_dpm_states(rdev,
3355 state->performance_levels[0].pcie_gen,
3356 state->performance_levels[0].pcie_lane,
3357 state->performance_levels[high_limit_count].pcie_gen,
3358 state->performance_levels[high_limit_count].pcie_lane);
3359
3360 return 0;
3361}
3362
3363static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
3364{
3365 struct radeon_clock_voltage_dependency_table *disp_voltage_table =
3366 &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3367 struct radeon_clock_voltage_dependency_table *vddc_table =
3368 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3369 u32 requested_voltage = 0;
3370 u32 i;
3371
3372 if (disp_voltage_table == NULL)
3373 return -EINVAL;
3374 if (!disp_voltage_table->count)
3375 return -EINVAL;
3376
3377 for (i = 0; i < disp_voltage_table->count; i++) {
3378 if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3379 requested_voltage = disp_voltage_table->entries[i].v;
3380 }
3381
3382 for (i = 0; i < vddc_table->count; i++) {
3383 if (requested_voltage <= vddc_table->entries[i].v) {
3384 requested_voltage = vddc_table->entries[i].v;
3385 return (ci_send_msg_to_smc_with_parameter(rdev,
3386 PPSMC_MSG_VddC_Request,
3387 requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3388 0 : -EINVAL;
3389 }
3390 }
3391
3392 return -EINVAL;
3393}
3394
3395static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
3396{
3397 struct ci_power_info *pi = ci_get_pi(rdev);
3398 PPSMC_Result result;
3399
3400 if (!pi->sclk_dpm_key_disabled) {
3401 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3402 result = ci_send_msg_to_smc_with_parameter(rdev,
3403 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3404 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3405 if (result != PPSMC_Result_OK)
3406 return -EINVAL;
3407 }
3408 }
3409
3410 if (!pi->mclk_dpm_key_disabled) {
3411 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3412 result = ci_send_msg_to_smc_with_parameter(rdev,
3413 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3414 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3415 if (result != PPSMC_Result_OK)
3416 return -EINVAL;
3417 }
3418 }
3419
3420 if (!pi->pcie_dpm_key_disabled) {
3421 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3422 result = ci_send_msg_to_smc_with_parameter(rdev,
3423 PPSMC_MSG_PCIeDPM_SetEnabledMask,
3424 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3425 if (result != PPSMC_Result_OK)
3426 return -EINVAL;
3427 }
3428 }
3429
3430 ci_apply_disp_minimum_voltage_request(rdev);
3431
3432 return 0;
3433}
3434
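/* Compare the requested state's top sclk/mclk against the current DPM
 * tables and flag which tables must be regenerated, e.g. after an
 * overdrive change or a change in the active crtc count.
 */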
3435static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
3436 struct radeon_ps *radeon_state)
3437{
3438 struct ci_power_info *pi = ci_get_pi(rdev);
3439 struct ci_ps *state = ci_get_ps(radeon_state);
3440 struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3441 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3442 struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3443 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3444 u32 i;
3445
3446 pi->need_update_smu7_dpm_table = 0;
3447
3448 for (i = 0; i < sclk_table->count; i++) {
3449 if (sclk == sclk_table->dpm_levels[i].value)
3450 break;
3451 }
3452
3453 if (i >= sclk_table->count) {
3454 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3455 } else {
3456 /* XXX check display min clock requirements */
3457 if (0 != CISLAND_MINIMUM_ENGINE_CLOCK)
3458 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3459 }
3460
3461 for (i = 0; i < mclk_table->count; i++) {
3462 if (mclk == mclk_table->dpm_levels[i].value)
3463 break;
3464 }
3465
3466 if (i >= mclk_table->count)
3467 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3468
3469 if (rdev->pm.dpm.current_active_crtc_count !=
3470 rdev->pm.dpm.new_active_crtc_count)
3471 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3472}
3473
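/* Patch the top sclk/mclk DPM levels with the requested clocks and
 * re-upload whichever level tables were flagged for update above.
 */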
3474static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
3475 struct radeon_ps *radeon_state)
3476{
3477 struct ci_power_info *pi = ci_get_pi(rdev);
3478 struct ci_ps *state = ci_get_ps(radeon_state);
3479 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3480 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3481 struct ci_dpm_table *dpm_table = &pi->dpm_table;
3482 int ret;
3483
3484 if (!pi->need_update_smu7_dpm_table)
3485 return 0;
3486
3487 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
3488 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
3489
3490 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
3491 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
3492
3493 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
3494 ret = ci_populate_all_graphic_levels(rdev);
3495 if (ret)
3496 return ret;
3497 }
3498
3499 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
3500 ret = ci_populate_all_memory_levels(rdev);
3501 if (ret)
3502 return ret;
3503 }
3504
3505 return 0;
3506}
3507
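/* Build the UVD level enable mask, capped by the AC/DC VDDC limit.
 * MCLK DPM level 0 is masked off while UVD runs, presumably to avoid
 * memory clock switches during decode.
 */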
3508static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
3509{
3510 struct ci_power_info *pi = ci_get_pi(rdev);
3511 const struct radeon_clock_and_voltage_limits *max_limits;
3512 int i;
3513
3514 if (rdev->pm.dpm.ac_power)
3515 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3516 else
3517 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3518
3519 if (enable) {
3520 pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
3521
3522 for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3523 if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3524 pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
3525
3526 if (!pi->caps_uvd_dpm)
3527 break;
3528 }
3529 }
3530
3531 ci_send_msg_to_smc_with_parameter(rdev,
3532 PPSMC_MSG_UVDDPM_SetEnabledMask,
3533 pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
3534
3535 if (pi->last_mclk_dpm_enable_mask & 0x1) {
3536 pi->uvd_enabled = true;
3537 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3538 ci_send_msg_to_smc_with_parameter(rdev,
3539 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3540 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3541 }
3542 } else {
3543 if (pi->last_mclk_dpm_enable_mask & 0x1) {
3544 pi->uvd_enabled = false;
3545 pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
3546 ci_send_msg_to_smc_with_parameter(rdev,
3547 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3548 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3549 }
3550 }
3551
3552 return (ci_send_msg_to_smc(rdev, enable ?
3553 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
3554 0 : -EINVAL;
3555}
3556
3557static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
3558{
3559 struct ci_power_info *pi = ci_get_pi(rdev);
3560 const struct radeon_clock_and_voltage_limits *max_limits;
3561 int i;
3562
3563 if (rdev->pm.dpm.ac_power)
3564 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3565 else
3566 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3567
3568 if (enable) {
3569 pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
3570 for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3571 if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3572 pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
3573
3574 if (!pi->caps_vce_dpm)
3575 break;
3576 }
3577 }
3578
3579 ci_send_msg_to_smc_with_parameter(rdev,
3580 PPSMC_MSG_VCEDPM_SetEnabledMask,
3581 pi->dpm_level_enable_mask.vce_dpm_enable_mask);
3582 }
3583
3584 return (ci_send_msg_to_smc(rdev, enable ?
3585 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
3586 0 : -EINVAL;
3587}
3588
3589#if 0
3590static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
3591{
3592 struct ci_power_info *pi = ci_get_pi(rdev);
3593 const struct radeon_clock_and_voltage_limits *max_limits;
3594 int i;
3595
3596 if (rdev->pm.dpm.ac_power)
3597 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3598 else
3599 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3600
3601 if (enable) {
3602 pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
3603 for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3604 if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3605 pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
3606
3607 if (!pi->caps_samu_dpm)
3608 break;
3609 }
3610 }
3611
3612 ci_send_msg_to_smc_with_parameter(rdev,
3613 PPSMC_MSG_SAMUDPM_SetEnabledMask,
3614 pi->dpm_level_enable_mask.samu_dpm_enable_mask);
3615 }
3616 return (ci_send_msg_to_smc(rdev, enable ?
3617 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
3618 0 : -EINVAL;
3619}
3620
3621static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
3622{
3623 struct ci_power_info *pi = ci_get_pi(rdev);
3624 const struct radeon_clock_and_voltage_limits *max_limits;
3625 int i;
3626
3627 if (rdev->pm.dpm.ac_power)
3628 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3629 else
3630 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3631
3632 if (enable) {
3633 pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
3634 for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3635 if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3636 pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
3637
3638 if (!pi->caps_acp_dpm)
3639 break;
3640 }
3641 }
3642
3643 ci_send_msg_to_smc_with_parameter(rdev,
3644 PPSMC_MSG_ACPDPM_SetEnabledMask,
3645 pi->dpm_level_enable_mask.acp_dpm_enable_mask);
3646 }
3647
3648 return (ci_send_msg_to_smc(rdev, enable ?
3649 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
3650 0 : -EINVAL;
3651}
3652#endif
3653
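/* When ungating UVD, pick a boot level: level 0 if UVD DPM is usable,
 * otherwise the highest entry in the UVD dependency table.
 */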
3654static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
3655{
3656 struct ci_power_info *pi = ci_get_pi(rdev);
3657 u32 tmp;
3658
3659 if (!gate) {
3660 if (pi->caps_uvd_dpm ||
3661 (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
3662 pi->smc_state_table.UvdBootLevel = 0;
3663 else
3664 pi->smc_state_table.UvdBootLevel =
3665 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
3666
3667 tmp = RREG32_SMC(DPM_TABLE_475);
3668 tmp &= ~UvdBootLevel_MASK;
3669 tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
3670 WREG32_SMC(DPM_TABLE_475, tmp);
3671 }
3672
3673 return ci_enable_uvd_dpm(rdev, !gate);
3674}
3675
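/* Return the lowest VCE table index whose evclk meets the min_evclk
 * threshold (the "???" suggests the value is provisional); used below
 * as the VCE boot level when encoding starts.
 */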
3676static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
3677{
3678 u8 i;
3679 u32 min_evclk = 30000; /* ??? */
3680 struct radeon_vce_clock_voltage_dependency_table *table =
3681 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
3682
3683 for (i = 0; i < table->count; i++) {
3684 if (table->entries[i].evclk >= min_evclk)
3685 return i;
3686 }
3687
3688 return table->count - 1;
3689}
3690
3691static int ci_update_vce_dpm(struct radeon_device *rdev,
3692 struct radeon_ps *radeon_new_state,
3693 struct radeon_ps *radeon_current_state)
3694{
3695 struct ci_power_info *pi = ci_get_pi(rdev);
3696 int ret = 0;
3697 u32 tmp;
3698
3699 if (radeon_current_state->evclk != radeon_new_state->evclk) {
3700 if (radeon_new_state->evclk) {
3701 /* turn the clocks on when encoding */
3702 cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, false);
3703
3704 pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
3705 tmp = RREG32_SMC(DPM_TABLE_475);
3706 tmp &= ~VceBootLevel_MASK;
3707 tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
3708 WREG32_SMC(DPM_TABLE_475, tmp);
3709
3710 ret = ci_enable_vce_dpm(rdev, true);
3711 } else {
3712 /* turn the clocks off when not encoding */
3713 cik_update_cg(rdev, RADEON_CG_BLOCK_VCE, true);
3714
3715 ret = ci_enable_vce_dpm(rdev, false);
3716 }
3717 }
3718 return ret;
3719}
3720
3721#if 0
3722static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
3723{
3724 return ci_enable_samu_dpm(rdev, gate);
3725}
3726
3727static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
3728{
3729 struct ci_power_info *pi = ci_get_pi(rdev);
3730 u32 tmp;
3731
3732 if (!gate) {
3733 pi->smc_state_table.AcpBootLevel = 0;
3734
3735 tmp = RREG32_SMC(DPM_TABLE_475);
3736 tmp &= ~AcpBootLevel_MASK;
3737 tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
3738 WREG32_SMC(DPM_TABLE_475, tmp);
3739 }
3740
3741 return ci_enable_acp_dpm(rdev, !gate);
3742}
3743#endif
3744
3745static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
3746 struct radeon_ps *radeon_state)
3747{
3748 struct ci_power_info *pi = ci_get_pi(rdev);
3749 int ret;
3750
3751 ret = ci_trim_dpm_states(rdev, radeon_state);
3752 if (ret)
3753 return ret;
3754
3755 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3756 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
3757 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3758 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
3759 pi->last_mclk_dpm_enable_mask =
3760 pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3761 if (pi->uvd_enabled) {
3762 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
3763 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3764 }
3765 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
3766 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
3767
3768 return 0;
3769}
3770
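/* Index of the lowest set bit, e.g. a mask of 0x6 yields level 1.
 * Callers must pass a non-zero mask or this loop never terminates.
 */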
3771static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
3772 u32 level_mask)
3773{
3774 u32 level = 0;
3775
3776 while ((level_mask & (1 << level)) == 0)
3777 level++;
3778
3779 return level;
3780}
3781
3782
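/* Force DPM to the highest or lowest enabled level, or return it to
 * automatic selection, then busy-wait (up to rdev->usec_timeout) until
 * the SMC reports the new index in TARGET_AND_CURRENT_PROFILE_INDEX.
 */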
3783int ci_dpm_force_performance_level(struct radeon_device *rdev,
3784 enum radeon_dpm_forced_level level)
3785{
3786 struct ci_power_info *pi = ci_get_pi(rdev);
3787 u32 tmp, levels, i;
3788 int ret;
3789
3790 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
3791 if ((!pi->sclk_dpm_key_disabled) &&
3792 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3793 levels = 0;
3794 tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
3795 while (tmp >>= 1)
3796 levels++;
3797 if (levels) {
3798 ret = ci_dpm_force_state_sclk(rdev, levels);
3799 if (ret)
3800 return ret;
3801 for (i = 0; i < rdev->usec_timeout; i++) {
3802 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3803 CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3804 if (tmp == levels)
3805 break;
3806 udelay(1);
3807 }
3808 }
3809 }
3810 if ((!pi->mclk_dpm_key_disabled) &&
3811 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3812 levels = 0;
3813 tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3814 while (tmp >>= 1)
3815 levels++;
3816 if (levels) {
3817 ret = ci_dpm_force_state_mclk(rdev, levels);
3818 if (ret)
3819 return ret;
3820 for (i = 0; i < rdev->usec_timeout; i++) {
3821 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3822 CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3823 if (tmp == levels)
3824 break;
3825 udelay(1);
3826 }
3827 }
3828 }
3829 if ((!pi->pcie_dpm_key_disabled) &&
3830 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3831 levels = 0;
3832 tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
3833 while (tmp >>= 1)
3834 levels++;
3835 if (levels) {
3836 ret = ci_dpm_force_state_pcie(rdev, levels);
3837 if (ret)
3838 return ret;
3839 for (i = 0; i < rdev->usec_timeout; i++) {
3840 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3841 CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3842 if (tmp == levels)
3843 break;
3844 udelay(1);
3845 }
3846 }
3847 }
3848 } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
3849 if ((!pi->sclk_dpm_key_disabled) &&
3850 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3851 levels = ci_get_lowest_enabled_level(rdev,
3852 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3853 ret = ci_dpm_force_state_sclk(rdev, levels);
3854 if (ret)
3855 return ret;
3856 for (i = 0; i < rdev->usec_timeout; i++) {
3857 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3858 CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3859 if (tmp == levels)
3860 break;
3861 udelay(1);
3862 }
3863 }
3864 if ((!pi->mclk_dpm_key_disabled) &&
3865 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3866 levels = ci_get_lowest_enabled_level(rdev,
3867 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3868 ret = ci_dpm_force_state_mclk(rdev, levels);
3869 if (ret)
3870 return ret;
3871 for (i = 0; i < rdev->usec_timeout; i++) {
3872 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3873 CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3874 if (tmp == levels)
3875 break;
3876 udelay(1);
3877 }
3878 }
3879 if ((!pi->pcie_dpm_key_disabled) &&
3880 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3881 levels = ci_get_lowest_enabled_level(rdev,
3882 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3883 ret = ci_dpm_force_state_pcie(rdev, levels);
3884 if (ret)
3885 return ret;
3886 for (i = 0; i < rdev->usec_timeout; i++) {
3887 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3888 CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3889 if (tmp == levels)
3890 break;
3891 udelay(1);
3892 }
3893 }
3894 } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
3895 ret = ci_upload_dpm_level_enable_mask(rdev);
3896 if (ret)
3897 return ret;
3898 }
3899
3900 rdev->pm.dpm.forced_level = level;
3901
3902 return 0;
3903}
3904
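/* Append derived MC register entries (EMRS/MRS/MRS1 shadows, plus an
 * auto-command entry for non-GDDR5 boards) so the SMC can replay them
 * on memory clock changes.
 */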
3905static int ci_set_mc_special_registers(struct radeon_device *rdev,
3906 struct ci_mc_reg_table *table)
3907{
3908 struct ci_power_info *pi = ci_get_pi(rdev);
3909 u8 i, j, k;
3910 u32 temp_reg;
3911
3912 for (i = 0, j = table->last; i < table->last; i++) {
3913 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3914 return -EINVAL;
3915 switch(table->mc_reg_address[i].s1 << 2) {
3916 case MC_SEQ_MISC1:
3917 temp_reg = RREG32(MC_PMG_CMD_EMRS);
3918 table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
3919 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
3920 for (k = 0; k < table->num_entries; k++) {
3921 table->mc_reg_table_entry[k].mc_data[j] =
3922 ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
3923 }
3924 j++;
3925 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3926 return -EINVAL;
3927
3928 temp_reg = RREG32(MC_PMG_CMD_MRS);
3929 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
3930 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
3931 for (k = 0; k < table->num_entries; k++) {
3932 table->mc_reg_table_entry[k].mc_data[j] =
3933 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3934 if (!pi->mem_gddr5)
3935 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
3936 }
3937 j++;
3938 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3939 return -EINVAL;
3940
3941 if (!pi->mem_gddr5) {
3942 table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
3943 table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
3944 for (k = 0; k < table->num_entries; k++) {
3945 table->mc_reg_table_entry[k].mc_data[j] =
3946 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
3947 }
3948 j++;
3949 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3950 return -EINVAL;
3951 }
3952 break;
3953 case MC_SEQ_RESERVE_M:
3954 temp_reg = RREG32(MC_PMG_CMD_MRS1);
3955 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
3956 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
3957 for (k = 0; k < table->num_entries; k++) {
3958 table->mc_reg_table_entry[k].mc_data[j] =
3959 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3960 }
3961 j++;
3962 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3963 return -EINVAL;
3964 break;
3965 default:
3966 break;
3967 }
3968
3969 }
3970
3971 table->last = j;
3972
3973 return 0;
3974}
3975
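/* Map an MC register to the _LP shadow the SMC sequencer writes;
 * returns false when the register has no shadow.
 */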
3976static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
3977{
3978 bool result = true;
3979
3980 switch(in_reg) {
3981 case MC_SEQ_RAS_TIMING >> 2:
3982 *out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
3983 break;
3984 case MC_SEQ_DLL_STBY >> 2:
3985 *out_reg = MC_SEQ_DLL_STBY_LP >> 2;
3986 break;
3987 case MC_SEQ_G5PDX_CMD0 >> 2:
3988 *out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
3989 break;
3990 case MC_SEQ_G5PDX_CMD1 >> 2:
3991 *out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
3992 break;
3993 case MC_SEQ_G5PDX_CTRL >> 2:
3994 *out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
3995 break;
3996 case MC_SEQ_CAS_TIMING >> 2:
3997 *out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
3998 break;
3999 case MC_SEQ_MISC_TIMING >> 2:
4000 *out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
4001 break;
4002 case MC_SEQ_MISC_TIMING2 >> 2:
4003 *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
4004 break;
4005 case MC_SEQ_PMG_DVS_CMD >> 2:
4006 *out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
4007 break;
4008 case MC_SEQ_PMG_DVS_CTL >> 2:
4009 *out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
4010 break;
4011 case MC_SEQ_RD_CTL_D0 >> 2:
4012 *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
4013 break;
4014 case MC_SEQ_RD_CTL_D1 >> 2:
4015 *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
4016 break;
4017 case MC_SEQ_WR_CTL_D0 >> 2:
4018 *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
4019 break;
4020 case MC_SEQ_WR_CTL_D1 >> 2:
4021 *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
4022 break;
4023 case MC_PMG_CMD_EMRS >> 2:
4024 *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
4025 break;
4026 case MC_PMG_CMD_MRS >> 2:
4027 *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
4028 break;
4029 case MC_PMG_CMD_MRS1 >> 2:
4030 *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
4031 break;
4032 case MC_SEQ_PMG_TIMING >> 2:
4033 *out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
4034 break;
4035 case MC_PMG_CMD_MRS2 >> 2:
4036 *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
4037 break;
4038 case MC_SEQ_WR_CTL_2 >> 2:
4039 *out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
4040 break;
4041 default:
4042 result = false;
4043 break;
4044 }
4045
4046 return result;
4047}
4048
4049static void ci_set_valid_flag(struct ci_mc_reg_table *table)
4050{
4051 u8 i, j;
4052
4053 for (i = 0; i < table->last; i++) {
4054 for (j = 1; j < table->num_entries; j++) {
4055 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4056 table->mc_reg_table_entry[j].mc_data[i]) {
4057 table->valid_flag |= 1 << i;
4058 break;
4059 }
4060 }
4061 }
4062}
4063
4064static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4065{
4066 u32 i;
4067 u16 address;
4068
4069 for (i = 0; i < table->last; i++) {
4070 table->mc_reg_address[i].s0 =
4071 ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4072 address : table->mc_reg_address[i].s1;
4073 }
4074}
4075
4076static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4077 struct ci_mc_reg_table *ci_table)
4078{
4079 u8 i, j;
4080
4081 if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4082 return -EINVAL;
4083 if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4084 return -EINVAL;
4085
4086 for (i = 0; i < table->last; i++)
4087 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4088
4089 ci_table->last = table->last;
4090
4091 for (i = 0; i < table->num_entries; i++) {
4092 ci_table->mc_reg_table_entry[i].mclk_max =
4093 table->mc_reg_table_entry[i].mclk_max;
4094 for (j = 0; j < table->last; j++)
4095 ci_table->mc_reg_table_entry[i].mc_data[j] =
4096 table->mc_reg_table_entry[i].mc_data[j];
4097 }
4098 ci_table->num_entries = table->num_entries;
4099
4100 return 0;
4101}
4102
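/* Board-specific MC sequence fixups for Hawaii SKUs 0x67B0/0x67B1 at
 * the 125000/137500 (10 kHz units) memory clock entries.
 */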
4103static int ci_register_patching_mc_seq(struct radeon_device *rdev,
4104 struct ci_mc_reg_table *table)
4105{
4106 u8 i, k;
4107 u32 tmp;
4108 bool patch;
4109
4110 tmp = RREG32(MC_SEQ_MISC0);
4111 patch = (tmp & 0x0000f00) == 0x300;
4112
4113 if (patch &&
4114 ((rdev->pdev->device == 0x67B0) ||
4115 (rdev->pdev->device == 0x67B1))) {
4116 for (i = 0; i < table->last; i++) {
4117 if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4118 return -EINVAL;
4119 switch(table->mc_reg_address[i].s1 >> 2) {
4120 case MC_SEQ_MISC1:
4121 for (k = 0; k < table->num_entries; k++) {
4122 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4123 (table->mc_reg_table_entry[k].mclk_max == 137500))
4124 table->mc_reg_table_entry[k].mc_data[i] =
4125 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
4126 0x00000007;
4127 }
4128 break;
4129 case MC_SEQ_WR_CTL_D0:
4130 for (k = 0; k < table->num_entries; k++) {
4131 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4132 (table->mc_reg_table_entry[k].mclk_max == 137500))
4133 table->mc_reg_table_entry[k].mc_data[i] =
4134 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4135 0x0000D0DD;
4136 }
4137 break;
4138 case MC_SEQ_WR_CTL_D1:
4139 for (k = 0; k < table->num_entries; k++) {
4140 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4141 (table->mc_reg_table_entry[k].mclk_max == 137500))
4142 table->mc_reg_table_entry[k].mc_data[i] =
4143 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4144 0x0000D0DD;
4145 }
4146 break;
4147 case MC_SEQ_WR_CTL_2:
4148 for (k = 0; k < table->num_entries; k++) {
4149 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4150 (table->mc_reg_table_entry[k].mclk_max == 137500))
4151 table->mc_reg_table_entry[k].mc_data[i] = 0;
4152 }
4153 break;
4154 case MC_SEQ_CAS_TIMING:
4155 for (k = 0; k < table->num_entries; k++) {
4156 if (table->mc_reg_table_entry[k].mclk_max == 125000)
4157 table->mc_reg_table_entry[k].mc_data[i] =
4158 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4159 0x000C0140;
4160 else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4161 table->mc_reg_table_entry[k].mc_data[i] =
4162 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4163 0x000C0150;
4164 }
4165 break;
4166 case MC_SEQ_MISC_TIMING:
4167 for (k = 0; k < table->num_entries; k++) {
4168 if (table->mc_reg_table_entry[k].mclk_max == 125000)
4169 table->mc_reg_table_entry[k].mc_data[i] =
4170 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4171 0x00000030;
4172 else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4173 table->mc_reg_table_entry[k].mc_data[i] =
4174 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4175 0x00000035;
4176 }
4177 break;
4178 default:
4179 break;
4180 }
4181 }
4182
4183 WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
4184 tmp = RREG32(MC_SEQ_IO_DEBUG_DATA);
4185 tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
4186 WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
4187 WREG32(MC_SEQ_IO_DEBUG_DATA, tmp);
4188 }
4189
4190 return 0;
4191}
4192
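/* Seed the _LP shadow registers from the live MC registers, fetch the
 * AC timing table from the VBIOS, and post-process it for the SMC.
 */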
4193static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
4194{
4195 struct ci_power_info *pi = ci_get_pi(rdev);
4196 struct atom_mc_reg_table *table;
4197 struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4198 u8 module_index = rv770_get_memory_module_index(rdev);
4199 int ret;
4200
4201 table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4202 if (!table)
4203 return -ENOMEM;
4204
4205 WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
4206 WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
4207 WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
4208 WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
4209 WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
4210 WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
4211 WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
4212 WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
4213 WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
4214 WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
4215 WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
4216 WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
4217 WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
4218 WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
4219 WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
4220 WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
4221 WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
4222 WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
4223 WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
4224 WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
4225
4226 ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
4227 if (ret)
4228 goto init_mc_done;
4229
4230 ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4231 if (ret)
4232 goto init_mc_done;
4233
4234 ci_set_s0_mc_reg_index(ci_table);
4235
4236 ret = ci_register_patching_mc_seq(rdev, ci_table);
4237 if (ret)
4238 goto init_mc_done;
4239
4240 ret = ci_set_mc_special_registers(rdev, ci_table);
4241 if (ret)
4242 goto init_mc_done;
4243
4244 ci_set_valid_flag(ci_table);
4245
4246init_mc_done:
4247 kfree(table);
4248
4249 return ret;
4250}
4251
4252static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
4253 SMU7_Discrete_MCRegisters *mc_reg_table)
4254{
4255 struct ci_power_info *pi = ci_get_pi(rdev);
4256 u32 i, j;
4257
4258 for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4259 if (pi->mc_reg_table.valid_flag & (1 << j)) {
4260 if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4261 return -EINVAL;
4262 mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4263 mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4264 i++;
4265 }
4266 }
4267
4268 mc_reg_table->last = (u8)i;
4269
4270 return 0;
4271}
4272
4273static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4274 SMU7_Discrete_MCRegisterSet *data,
4275 u32 num_entries, u32 valid_flag)
4276{
4277 u32 i, j;
4278
4279 for (i = 0, j = 0; j < num_entries; j++) {
4280 if (valid_flag & (1 << j)) {
4281 data->value[i] = cpu_to_be32(entry->mc_data[j]);
4282 i++;
4283 }
4284 }
4285}
4286
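/* Pick the first entry whose mclk_max covers memory_clock, falling back
 * to the last entry, and byte-swap it for the SMC.
 */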
4287static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
4288 const u32 memory_clock,
4289 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4290{
4291 struct ci_power_info *pi = ci_get_pi(rdev);
4292 u32 i = 0;
4293
4294 for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
4295 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4296 break;
4297 }
4298
4299 if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4300 --i;
4301
4302 ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4303 mc_reg_table_data, pi->mc_reg_table.last,
4304 pi->mc_reg_table.valid_flag);
4305}
4306
4307static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
4308 SMU7_Discrete_MCRegisters *mc_reg_table)
4309{
4310 struct ci_power_info *pi = ci_get_pi(rdev);
4311 u32 i;
4312
4313 for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4314 ci_convert_mc_reg_table_entry_to_smc(rdev,
4315 pi->dpm_table.mclk_table.dpm_levels[i].value,
4316 &mc_reg_table->data[i]);
4317}
4318
4319static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
4320{
4321 struct ci_power_info *pi = ci_get_pi(rdev);
4322 int ret;
4323
4324 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4325
4326 ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
4327 if (ret)
4328 return ret;
4329 ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4330
4331 return ci_copy_bytes_to_smc(rdev,
4332 pi->mc_reg_table_start,
4333 (u8 *)&pi->smc_mc_reg_table,
4334 sizeof(SMU7_Discrete_MCRegisters),
4335 pi->sram_end);
4336}
4337
4338static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
4339{
4340 struct ci_power_info *pi = ci_get_pi(rdev);
4341
4342 if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4343 return 0;
4344
4345 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4346
4347 ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4348
4349 return ci_copy_bytes_to_smc(rdev,
4350 pi->mc_reg_table_start +
4351 offsetof(SMU7_Discrete_MCRegisters, data[0]),
4352 (u8 *)&pi->smc_mc_reg_table.data[0],
4353 sizeof(SMU7_Discrete_MCRegisterSet) *
4354 pi->dpm_table.mclk_table.count,
4355 pi->sram_end);
4356}
4357
4358static void ci_enable_voltage_control(struct radeon_device *rdev)
4359{
4360 u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
4361
4362 tmp |= VOLT_PWRMGT_EN;
4363 WREG32_SMC(GENERAL_PWRMGT, tmp);
4364}
4365
4366static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
4367 struct radeon_ps *radeon_state)
4368{
4369 struct ci_ps *state = ci_get_ps(radeon_state);
4370 int i;
4371 u16 pcie_speed, max_speed = 0;
4372
4373 for (i = 0; i < state->performance_level_count; i++) {
4374 pcie_speed = state->performance_levels[i].pcie_gen;
4375 if (max_speed < pcie_speed)
4376 max_speed = pcie_speed;
4377 }
4378
4379 return max_speed;
4380}
4381
4382static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
4383{
4384 u32 speed_cntl = 0;
4385
4386 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
4387 speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
4388
4389 return (u16)speed_cntl;
4390}
4391
4392static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
4393{
4394 u32 link_width = 0;
4395
4396 link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
4397 link_width >>= LC_LINK_WIDTH_RD_SHIFT;
4398
4399 switch (link_width) {
4400 case RADEON_PCIE_LC_LINK_WIDTH_X1:
4401 return 1;
4402 case RADEON_PCIE_LC_LINK_WIDTH_X2:
4403 return 2;
4404 case RADEON_PCIE_LC_LINK_WIDTH_X4:
4405 return 4;
4406 case RADEON_PCIE_LC_LINK_WIDTH_X8:
4407 return 8;
4408 case RADEON_PCIE_LC_LINK_WIDTH_X12:
4409 /* not actually supported */
4410 return 12;
4411 case RADEON_PCIE_LC_LINK_WIDTH_X0:
4412 case RADEON_PCIE_LC_LINK_WIDTH_X16:
4413 default:
4414 return 16;
4415 }
4416}
4417
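/* Negotiate a PCIe link-speed change through the ACPI PSPP interface
 * before the state switch; if the platform refuses gen3, fall back to
 * gen2, and defer downgrade notification until after the switch.
 */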
4418static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
4419 struct radeon_ps *radeon_new_state,
4420 struct radeon_ps *radeon_current_state)
4421{
4422 struct ci_power_info *pi = ci_get_pi(rdev);
4423 enum radeon_pcie_gen target_link_speed =
4424 ci_get_maximum_link_speed(rdev, radeon_new_state);
4425 enum radeon_pcie_gen current_link_speed;
4426
4427 if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
4428 current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
4429 else
4430 current_link_speed = pi->force_pcie_gen;
4431
4432 pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
4433 pi->pspp_notify_required = false;
4434 if (target_link_speed > current_link_speed) {
4435 switch (target_link_speed) {
4436#ifdef CONFIG_ACPI
4437 case RADEON_PCIE_GEN3:
4438 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
4439 break;
4440 pi->force_pcie_gen = RADEON_PCIE_GEN2;
4441 if (current_link_speed == RADEON_PCIE_GEN2)
4442 break;
4443 case RADEON_PCIE_GEN2:
4444 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
4445 break;
4446#endif
4447 default:
4448 pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
4449 break;
4450 }
4451 } else {
4452 if (target_link_speed < current_link_speed)
4453 pi->pspp_notify_required = true;
4454 }
4455}
4456
4457static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
4458 struct radeon_ps *radeon_new_state,
4459 struct radeon_ps *radeon_current_state)
4460{
4461 struct ci_power_info *pi = ci_get_pi(rdev);
4462 enum radeon_pcie_gen target_link_speed =
4463 ci_get_maximum_link_speed(rdev, radeon_new_state);
4464 u8 request;
4465
4466 if (pi->pspp_notify_required) {
4467 if (target_link_speed == RADEON_PCIE_GEN3)
4468 request = PCIE_PERF_REQ_PECI_GEN3;
4469 else if (target_link_speed == RADEON_PCIE_GEN2)
4470 request = PCIE_PERF_REQ_PECI_GEN2;
4471 else
4472 request = PCIE_PERF_REQ_PECI_GEN1;
4473
4474 if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
4475 (ci_get_current_pcie_speed(rdev) > 0))
4476 return;
4477
4478#ifdef CONFIG_ACPI
4479 radeon_acpi_pcie_performance_request(rdev, request, false);
4480#endif
4481 }
4482}
4483
4484static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
4485{
4486 struct ci_power_info *pi = ci_get_pi(rdev);
4487 struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
4488 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
4489 struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
4490 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
4491 struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
4492 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
4493
4494 if (allowed_sclk_vddc_table == NULL)
4495 return -EINVAL;
4496 if (allowed_sclk_vddc_table->count < 1)
4497 return -EINVAL;
4498 if (allowed_mclk_vddc_table == NULL)
4499 return -EINVAL;
4500 if (allowed_mclk_vddc_table->count < 1)
4501 return -EINVAL;
4502 if (allowed_mclk_vddci_table == NULL)
4503 return -EINVAL;
4504 if (allowed_mclk_vddci_table->count < 1)
4505 return -EINVAL;
4506
4507 pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
4508 pi->max_vddc_in_pp_table =
4509 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4510
4511 pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
4512 pi->max_vddci_in_pp_table =
4513 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4514
4515 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
4516 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4517 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
4518 allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4519 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
4520 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4521 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
4522 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4523
4524 return 0;
4525}
4526
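/* The VBIOS may store a leakage record id in place of a real voltage;
 * the helpers below translate such ids back to actual voltages.
 */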
4527static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
4528{
4529 struct ci_power_info *pi = ci_get_pi(rdev);
4530 struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
4531 u32 leakage_index;
4532
4533 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4534 if (leakage_table->leakage_id[leakage_index] == *vddc) {
4535 *vddc = leakage_table->actual_voltage[leakage_index];
4536 break;
4537 }
4538 }
4539}
4540
4541static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
4542{
4543 struct ci_power_info *pi = ci_get_pi(rdev);
4544 struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
4545 u32 leakage_index;
4546
4547 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4548 if (leakage_table->leakage_id[leakage_index] == *vddci) {
4549 *vddci = leakage_table->actual_voltage[leakage_index];
4550 break;
4551 }
4552 }
4553}
4554
4555static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4556 struct radeon_clock_voltage_dependency_table *table)
4557{
4558 u32 i;
4559
4560 if (table) {
4561 for (i = 0; i < table->count; i++)
4562 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4563 }
4564}
4565
4566static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
4567 struct radeon_clock_voltage_dependency_table *table)
4568{
4569 u32 i;
4570
4571 if (table) {
4572 for (i = 0; i < table->count; i++)
4573 ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
4574 }
4575}
4576
4577static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4578 struct radeon_vce_clock_voltage_dependency_table *table)
4579{
4580 u32 i;
4581
4582 if (table) {
4583 for (i = 0; i < table->count; i++)
4584 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4585 }
4586}
4587
4588static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4589 struct radeon_uvd_clock_voltage_dependency_table *table)
4590{
4591 u32 i;
4592
4593 if (table) {
4594 for (i = 0; i < table->count; i++)
4595 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4596 }
4597}
4598
4599static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
4600 struct radeon_phase_shedding_limits_table *table)
4601{
4602 u32 i;
4603
4604 if (table) {
4605 for (i = 0; i < table->count; i++)
4606 ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
4607 }
4608}
4609
4610static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
4611 struct radeon_clock_and_voltage_limits *table)
4612{
4613 if (table) {
4614 ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
4615 ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
4616 }
4617}
4618
4619static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
4620 struct radeon_cac_leakage_table *table)
4621{
4622 u32 i;
4623
4624 if (table) {
4625 for (i = 0; i < table->count; i++)
4626 ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
4627 }
4628}
4629
4630static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
4631{
4632
4633 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4634 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
4635 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4636 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
4637 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4638 &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
4639 ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
4640 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
4641 ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4642 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
4643 ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4644 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
4645 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4646 &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
4647 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4648 &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
4649 ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
4650 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
4651 ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
4652 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
4653 ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
4654 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
4655 ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
4656 &rdev->pm.dpm.dyn_state.cac_leakage_table);
4657
4658}
4659
4660static void ci_get_memory_type(struct radeon_device *rdev)
4661{
4662 struct ci_power_info *pi = ci_get_pi(rdev);
4663 u32 tmp;
4664
4665 tmp = RREG32(MC_SEQ_MISC0);
4666
4667 if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
4668 MC_SEQ_MISC0_GDDR5_VALUE)
4669 pi->mem_gddr5 = true;
4670 else
4671 pi->mem_gddr5 = false;
4672
4673}
4674
4675static void ci_update_current_ps(struct radeon_device *rdev,
4676 struct radeon_ps *rps)
4677{
4678 struct ci_ps *new_ps = ci_get_ps(rps);
4679 struct ci_power_info *pi = ci_get_pi(rdev);
4680
4681 pi->current_rps = *rps;
4682 pi->current_ps = *new_ps;
4683 pi->current_rps.ps_priv = &pi->current_ps;
4684}
4685
4686static void ci_update_requested_ps(struct radeon_device *rdev,
4687 struct radeon_ps *rps)
4688{
4689 struct ci_ps *new_ps = ci_get_ps(rps);
4690 struct ci_power_info *pi = ci_get_pi(rdev);
4691
4692 pi->requested_rps = *rps;
4693 pi->requested_ps = *new_ps;
4694 pi->requested_rps.ps_priv = &pi->requested_ps;
4695}
4696
4697int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
4698{
4699 struct ci_power_info *pi = ci_get_pi(rdev);
4700 struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
4701 struct radeon_ps *new_ps = &requested_ps;
4702
4703 ci_update_requested_ps(rdev, new_ps);
4704
4705 ci_apply_state_adjust_rules(rdev, &pi->requested_rps);
4706
4707 return 0;
4708}
4709
4710void ci_dpm_post_set_power_state(struct radeon_device *rdev)
4711{
4712 struct ci_power_info *pi = ci_get_pi(rdev);
4713 struct radeon_ps *new_ps = &pi->requested_rps;
4714
4715 ci_update_current_ps(rdev, new_ps);
4716}
4717
4718
4719void ci_dpm_setup_asic(struct radeon_device *rdev)
4720{
4721 int r;
4722
4723 r = ci_mc_load_microcode(rdev);
4724 if (r)
4725 DRM_ERROR("Failed to load MC firmware!\n");
4726 ci_read_clock_registers(rdev);
4727 ci_get_memory_type(rdev);
4728 ci_enable_acpi_power_management(rdev);
4729 ci_init_sclk_t(rdev);
4730}
4731
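/* Bring-up sequence: construct voltage/AC-timing tables, upload and
 * start the SMC firmware, then enable the individual DPM features,
 * bailing out on the first failure.
 */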
4732int ci_dpm_enable(struct radeon_device *rdev)
4733{
4734 struct ci_power_info *pi = ci_get_pi(rdev);
4735 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
4736 int ret;
4737
4738 if (ci_is_smc_running(rdev))
4739 return -EINVAL;
4740 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
4741 ci_enable_voltage_control(rdev);
4742 ret = ci_construct_voltage_tables(rdev);
4743 if (ret) {
4744 DRM_ERROR("ci_construct_voltage_tables failed\n");
4745 return ret;
4746 }
4747 }
4748 if (pi->caps_dynamic_ac_timing) {
4749 ret = ci_initialize_mc_reg_table(rdev);
4750 if (ret)
4751 pi->caps_dynamic_ac_timing = false;
4752 }
4753 if (pi->dynamic_ss)
4754 ci_enable_spread_spectrum(rdev, true);
4755 if (pi->thermal_protection)
4756 ci_enable_thermal_protection(rdev, true);
4757 ci_program_sstp(rdev);
4758 ci_enable_display_gap(rdev);
4759 ci_program_vc(rdev);
4760 ret = ci_upload_firmware(rdev);
4761 if (ret) {
4762 DRM_ERROR("ci_upload_firmware failed\n");
4763 return ret;
4764 }
4765 ret = ci_process_firmware_header(rdev);
4766 if (ret) {
4767 DRM_ERROR("ci_process_firmware_header failed\n");
4768 return ret;
4769 }
4770 ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
4771 if (ret) {
4772 DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
4773 return ret;
4774 }
4775 ret = ci_init_smc_table(rdev);
4776 if (ret) {
4777 DRM_ERROR("ci_init_smc_table failed\n");
4778 return ret;
4779 }
4780 ret = ci_init_arb_table_index(rdev);
4781 if (ret) {
4782 DRM_ERROR("ci_init_arb_table_index failed\n");
4783 return ret;
4784 }
4785 if (pi->caps_dynamic_ac_timing) {
4786 ret = ci_populate_initial_mc_reg_table(rdev);
4787 if (ret) {
4788 DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
4789 return ret;
4790 }
4791 }
4792 ret = ci_populate_pm_base(rdev);
4793 if (ret) {
4794 DRM_ERROR("ci_populate_pm_base failed\n");
4795 return ret;
4796 }
4797 ci_dpm_start_smc(rdev);
4798 ci_enable_vr_hot_gpio_interrupt(rdev);
4799 ret = ci_notify_smc_display_change(rdev, false);
4800 if (ret) {
4801 DRM_ERROR("ci_notify_smc_display_change failed\n");
4802 return ret;
4803 }
4804 ci_enable_sclk_control(rdev, true);
4805 ret = ci_enable_ulv(rdev, true);
4806 if (ret) {
4807 DRM_ERROR("ci_enable_ulv failed\n");
4808 return ret;
4809 }
4810 ret = ci_enable_ds_master_switch(rdev, true);
4811 if (ret) {
4812 DRM_ERROR("ci_enable_ds_master_switch failed\n");
4813 return ret;
4814 }
4815 ret = ci_start_dpm(rdev);
4816 if (ret) {
4817 DRM_ERROR("ci_start_dpm failed\n");
4818 return ret;
4819 }
4820 ret = ci_enable_didt(rdev, true);
4821 if (ret) {
4822 DRM_ERROR("ci_enable_didt failed\n");
4823 return ret;
4824 }
4825 ret = ci_enable_smc_cac(rdev, true);
4826 if (ret) {
4827 DRM_ERROR("ci_enable_smc_cac failed\n");
4828 return ret;
4829 }
4830 ret = ci_enable_power_containment(rdev, true);
4831 if (ret) {
4832 DRM_ERROR("ci_enable_power_containment failed\n");
4833 return ret;
4834 }
4835
4836 ret = ci_power_control_set_level(rdev);
4837 if (ret) {
4838 DRM_ERROR("ci_power_control_set_level failed\n");
4839 return ret;
4840 }
4841
4842 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
4843
4844 ci_update_current_ps(rdev, boot_ps);
4845
4846 return 0;
4847}
4848
4849static int ci_set_temperature_range(struct radeon_device *rdev)
4850{
4851 int ret;
4852
4853 ret = ci_thermal_enable_alert(rdev, false);
4854 if (ret)
4855 return ret;
4856 ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
4857 if (ret)
4858 return ret;
4859 ret = ci_thermal_enable_alert(rdev, true);
4860 if (ret)
4861 return ret;
4862
4863 return ret;
4864}
4865
4866int ci_dpm_late_enable(struct radeon_device *rdev)
4867{
4868 int ret;
4869
4870 ret = ci_set_temperature_range(rdev);
4871 if (ret)
4872 return ret;
4873
4874 ci_dpm_powergate_uvd(rdev, true);
4875
4876 return 0;
4877}
4878
4879void ci_dpm_disable(struct radeon_device *rdev)
4880{
4881 struct ci_power_info *pi = ci_get_pi(rdev);
4882 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
4883
4884 ci_dpm_powergate_uvd(rdev, false);
4885
4886 if (!ci_is_smc_running(rdev))
4887 return;
4888
4889 if (pi->thermal_protection)
4890 ci_enable_thermal_protection(rdev, false);
4891 ci_enable_power_containment(rdev, false);
4892 ci_enable_smc_cac(rdev, false);
4893 ci_enable_didt(rdev, false);
4894 ci_enable_spread_spectrum(rdev, false);
4895 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
4896 ci_stop_dpm(rdev);
4897 ci_enable_ds_master_switch(rdev, false);
4898 ci_enable_ulv(rdev, false);
4899 ci_clear_vc(rdev);
4900 ci_reset_to_default(rdev);
4901 ci_dpm_stop_smc(rdev);
4902 ci_force_switch_to_arb_f0(rdev);
4903
4904 ci_update_current_ps(rdev, boot_ps);
4905}
4906
4907int ci_dpm_set_power_state(struct radeon_device *rdev)
4908{
4909 struct ci_power_info *pi = ci_get_pi(rdev);
4910 struct radeon_ps *new_ps = &pi->requested_rps;
4911 struct radeon_ps *old_ps = &pi->current_rps;
4912 int ret;
4913
4914 ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
4915 if (pi->pcie_performance_request)
4916 ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
4917 ret = ci_freeze_sclk_mclk_dpm(rdev);
4918 if (ret) {
4919 DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
4920 return ret;
4921 }
4922 ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
4923 if (ret) {
4924 DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
4925 return ret;
4926 }
4927 ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
4928 if (ret) {
4929 DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
4930 return ret;
4931 }
4932
4933 ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
4934 if (ret) {
4935 DRM_ERROR("ci_update_vce_dpm failed\n");
4936 return ret;
4937 }
4938
4939 ret = ci_update_sclk_t(rdev);
4940 if (ret) {
4941 DRM_ERROR("ci_update_sclk_t failed\n");
4942 return ret;
4943 }
4944 if (pi->caps_dynamic_ac_timing) {
4945 ret = ci_update_and_upload_mc_reg_table(rdev);
4946 if (ret) {
4947 DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
4948 return ret;
4949 }
4950 }
4951 ret = ci_program_memory_timing_parameters(rdev);
4952 if (ret) {
4953 DRM_ERROR("ci_program_memory_timing_parameters failed\n");
4954 return ret;
4955 }
4956 ret = ci_unfreeze_sclk_mclk_dpm(rdev);
4957 if (ret) {
4958 DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
4959 return ret;
4960 }
4961 ret = ci_upload_dpm_level_enable_mask(rdev);
4962 if (ret) {
4963 DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
4964 return ret;
4965 }
4966 if (pi->pcie_performance_request)
4967 ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
4968
4969 return 0;
4970}
4971
4972void ci_dpm_reset_asic(struct radeon_device *rdev)
4973{
4974 ci_set_boot_state(rdev);
4975}
4976
4977void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
4978{
4979 ci_program_display_gap(rdev);
4980}
4981
4982union power_info {
4983 struct _ATOM_POWERPLAY_INFO info;
4984 struct _ATOM_POWERPLAY_INFO_V2 info_2;
4985 struct _ATOM_POWERPLAY_INFO_V3 info_3;
4986 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
4987 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
4988 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
4989};
4990
4991union pplib_clock_info {
4992 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
4993 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
4994 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
4995 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
4996 struct _ATOM_PPLIB_SI_CLOCK_INFO si;
4997 struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
4998};
4999
5000union pplib_power_state {
5001 struct _ATOM_PPLIB_STATE v1;
5002 struct _ATOM_PPLIB_STATE_V2 v2;
5003};
5004
5005static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
5006 struct radeon_ps *rps,
5007 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
5008 u8 table_rev)
5009{
5010 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
5011 rps->class = le16_to_cpu(non_clock_info->usClassification);
5012 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
5013
5014 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
5015 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
5016 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
5017 } else {
5018 rps->vclk = 0;
5019 rps->dclk = 0;
5020 }
5021
5022 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
5023 rdev->pm.dpm.boot_ps = rps;
5024 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
5025 rdev->pm.dpm.uvd_ps = rps;
5026}
5027
5028static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
5029 struct radeon_ps *rps, int index,
5030 union pplib_clock_info *clock_info)
5031{
5032 struct ci_power_info *pi = ci_get_pi(rdev);
5033 struct ci_ps *ps = ci_get_ps(rps);
5034 struct ci_pl *pl = &ps->performance_levels[index];
5035
5036 ps->performance_level_count = index + 1;
5037
5038 pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5039 pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
5040 pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5041 pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5042
5043 pl->pcie_gen = r600_get_pcie_gen_support(rdev,
5044 pi->sys_pcie_mask,
5045 pi->vbios_boot_state.pcie_gen_bootup_value,
5046 clock_info->ci.ucPCIEGen);
5047 pl->pcie_lane = r600_get_pcie_lane_support(rdev,
5048 pi->vbios_boot_state.pcie_lane_bootup_value,
5049 le16_to_cpu(clock_info->ci.usPCIELane));
5050
5051 if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
5052 pi->acpi_pcie_gen = pl->pcie_gen;
5053 }
5054
5055 if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
5056 pi->ulv.supported = true;
5057 pi->ulv.pl = *pl;
5058 pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
5059 }
5060
5061 /* patch up boot state */
5062 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
5063 pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
5064 pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
5065 pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
5066 pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
5067 }
5068
5069 switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
5070 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
5071 pi->use_pcie_powersaving_levels = true;
5072 if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
5073 pi->pcie_gen_powersaving.max = pl->pcie_gen;
5074 if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
5075 pi->pcie_gen_powersaving.min = pl->pcie_gen;
5076 if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
5077 pi->pcie_lane_powersaving.max = pl->pcie_lane;
5078 if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
5079 pi->pcie_lane_powersaving.min = pl->pcie_lane;
5080 break;
5081 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
5082 pi->use_pcie_performance_levels = true;
5083 if (pi->pcie_gen_performance.max < pl->pcie_gen)
5084 pi->pcie_gen_performance.max = pl->pcie_gen;
5085 if (pi->pcie_gen_performance.min > pl->pcie_gen)
5086 pi->pcie_gen_performance.min = pl->pcie_gen;
5087 if (pi->pcie_lane_performance.max < pl->pcie_lane)
5088 pi->pcie_lane_performance.max = pl->pcie_lane;
5089 if (pi->pcie_lane_performance.min > pl->pcie_lane)
5090 pi->pcie_lane_performance.min = pl->pcie_lane;
5091 break;
5092 default:
5093 break;
5094 }
5095}
5096
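/* Walk the ATOM PowerPlay state array and build the driver's power
 * state list, one ci_ps with up to CISLANDS_MAX_HARDWARE_POWERLEVELS
 * levels per entry, then record the clocks for the VCE states.
 */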
5097static int ci_parse_power_table(struct radeon_device *rdev)
5098{
5099 struct radeon_mode_info *mode_info = &rdev->mode_info;
5100 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
5101 union pplib_power_state *power_state;
5102 int i, j, k, non_clock_array_index, clock_array_index;
5103 union pplib_clock_info *clock_info;
5104 struct _StateArray *state_array;
5105 struct _ClockInfoArray *clock_info_array;
5106 struct _NonClockInfoArray *non_clock_info_array;
5107 union power_info *power_info;
5108 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
5109 u16 data_offset;
5110 u8 frev, crev;
5111 u8 *power_state_offset;
5112 struct ci_ps *ps;
5113
5114 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
5115 &frev, &crev, &data_offset))
5116 return -EINVAL;
5117 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
5118
5119 state_array = (struct _StateArray *)
5120 (mode_info->atom_context->bios + data_offset +
5121 le16_to_cpu(power_info->pplib.usStateArrayOffset));
5122 clock_info_array = (struct _ClockInfoArray *)
5123 (mode_info->atom_context->bios + data_offset +
5124 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
5125 non_clock_info_array = (struct _NonClockInfoArray *)
5126 (mode_info->atom_context->bios + data_offset +
5127 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
5128
5129 rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
5130 state_array->ucNumEntries, GFP_KERNEL);
5131 if (!rdev->pm.dpm.ps)
5132 return -ENOMEM;
5133 power_state_offset = (u8 *)state_array->states;
5134 for (i = 0; i < state_array->ucNumEntries; i++) {
5135 u8 *idx;
5136 power_state = (union pplib_power_state *)power_state_offset;
5137 non_clock_array_index = power_state->v2.nonClockInfoIndex;
5138 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
5139 &non_clock_info_array->nonClockInfo[non_clock_array_index];
		if (!rdev->pm.power_state[i].clock_info) {
			/* free the state array as on the -ENOMEM path below */
			kfree(rdev->pm.dpm.ps);
			return -EINVAL;
		}
5142 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
5143 if (ps == NULL) {
5144 kfree(rdev->pm.dpm.ps);
5145 return -ENOMEM;
5146 }
5147 rdev->pm.dpm.ps[i].ps_priv = ps;
5148 ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
5149 non_clock_info,
5150 non_clock_info_array->ucEntrySize);
5151 k = 0;
5152 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
5153 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
5154 clock_array_index = idx[j];
5155 if (clock_array_index >= clock_info_array->ucNumEntries)
5156 continue;
5157 if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
5158 break;
5159 clock_info = (union pplib_clock_info *)
b309ed98
AD
5160 ((u8 *)&clock_info_array->clockInfo[0] +
5161 (clock_array_index * clock_info_array->ucEntrySize));
cc8dbbb4
AD
5162 ci_parse_pplib_clock_info(rdev,
5163 &rdev->pm.dpm.ps[i], k,
5164 clock_info);
5165 k++;
5166 }
5167 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
5168 }
	rdev->pm.dpm.num_ps = state_array->ucNumEntries;

	/* fill in the vce power states */
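	/*
	 * vce_states[i].clk_idx was set when ci_dpm_init() ran
	 * r600_parse_extended_power_table(); here it is resolved to
	 * concrete sclk/mclk values.
	 */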
	for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
		u32 sclk, mclk;

		clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
		clock_info = (union pplib_clock_info *)
			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
		sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
		sclk |= clock_info->ci.ucEngineClockHigh << 16;
		mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
		mclk |= clock_info->ci.ucMemoryClockHigh << 16;
		rdev->pm.dpm.vce_states[i].sclk = sclk;
		rdev->pm.dpm.vce_states[i].mclk = mclk;
	}

	return 0;
}
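
/**
 * ci_get_vbios_boot_values - read the boot-up state from the vbios
 *
 * Pulls the default (boot-up) voltages and engine/memory clocks out of
 * the ATOM FirmwareInfo table, and snapshots the current PCIe speed and
 * lane count, so the driver knows the state the card powered up in.
 */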
static int ci_get_vbios_boot_values(struct radeon_device *rdev,
				    struct ci_vbios_boot_state *boot_state)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
	u8 frev, crev;
	u16 data_offset;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		firmware_info =
			(ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
						    data_offset);
		boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
		boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
		boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
		boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
		boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
		boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
		boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);

		return 0;
	}
	return -EINVAL;
}

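/* undo all of the allocations made by ci_dpm_init() and ci_parse_power_table() */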
void ci_dpm_fini(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->pm.dpm.num_ps; i++)
		kfree(rdev->pm.dpm.ps[i].ps_priv);
	kfree(rdev->pm.dpm.ps);
	kfree(rdev->pm.dpm.priv);
	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
	r600_free_extended_power_table(rdev);
}

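/**
 * ci_dpm_init - one-time dpm setup for CI parts
 *
 * Allocates the driver-private ci_power_info, parses the vbios power
 * tables, selects the voltage control method for each rail and fills
 * in the default thresholds and caps used by the rest of the dpm code.
 */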
int ci_dpm_init(struct radeon_device *rdev)
{
	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
	SMU7_Discrete_DpmTable *dpm_table;
	struct radeon_gpio_rec gpio;
	u16 data_offset, size;
	u8 frev, crev;
	struct ci_power_info *pi;
	int ret;
	u32 mask;

	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	rdev->pm.dpm.priv = pi;

	ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
	if (ret)
		pi->sys_pcie_mask = 0;
	else
		pi->sys_pcie_mask = mask;
	pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;

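	/*
	 * Seed the maxima low (gen1, 0 lanes) and the minima high
	 * (gen3, 16 lanes) on purpose; the per-state min/max tracking in
	 * ci_parse_pplib_clock_info() widens them to the real ranges as
	 * the power states are parsed.
	 */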
	pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
	pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
	pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
	pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;

	pi->pcie_lane_performance.max = 0;
	pi->pcie_lane_performance.min = 16;
	pi->pcie_lane_powersaving.max = 0;
	pi->pcie_lane_powersaving.min = 16;

	ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = r600_get_platform_caps(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = r600_parse_extended_power_table(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	ret = ci_parse_power_table(rdev);
	if (ret) {
		ci_dpm_fini(rdev);
		return ret;
	}

	pi->dll_default_on = false;
	pi->sram_end = SMC_RAM_END;

	pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;

	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;

	pi->sclk_dpm_key_disabled = 0;
	pi->mclk_dpm_key_disabled = 0;
	pi->pcie_dpm_key_disabled = 0;

	/* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
	if ((rdev->pdev->device == 0x6658) &&
	    (rdev->mc_fw->size == (BONAIRE_MC_UCODE_SIZE * 4))) {
		pi->mclk_dpm_key_disabled = 1;
	}

	pi->caps_sclk_ds = true;

	pi->mclk_strobe_mode_threshold = 40000;
	pi->mclk_stutter_mode_threshold = 40000;
	pi->mclk_edc_enable_threshold = 40000;
	pi->mclk_edc_wr_enable_threshold = 40000;

	ci_initialize_powertune_defaults(rdev);

	pi->caps_fps = false;

	pi->caps_sclk_throttle_low_notification = false;

	pi->caps_uvd_dpm = true;
	pi->caps_vce_dpm = true;

	ci_get_leakage_voltages(rdev);
	ci_patch_dependency_tables_with_leakage(rdev);
	ci_set_private_data_variables_based_on_pptable(rdev);

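	/*
	 * Default vddc (mV) vs. dispclk (10 kHz units) table used when
	 * picking a display voltage: 360/540/720 MHz map to 720/810/900 mV.
	 */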
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
		kcalloc(4, sizeof(struct radeon_clock_voltage_dependency_entry),
			GFP_KERNEL);
	if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
		ci_dpm_fini(rdev);
		return -ENOMEM;
	}
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;

	rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
	rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
	rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;

	rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
	rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
	rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
	rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;

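	/* thermal trip points are in units of 0.001 degrees C */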
	if (rdev->family == CHIP_HAWAII) {
		pi->thermal_temp_setting.temperature_low = 94500;
		pi->thermal_temp_setting.temperature_high = 95000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	} else {
		pi->thermal_temp_setting.temperature_low = 99500;
		pi->thermal_temp_setting.temperature_high = 100000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	}

	pi->uvd_enabled = false;

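	/*
	 * Wire up the VRHot, AC/DC switch and PCC GPIOs if the vbios
	 * defines them, and advertise the matching platform caps.
	 */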
	dpm_table = &pi->smc_state_table;

	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_VRHOT_GPIO_PINID);
	if (gpio.valid) {
		dpm_table->VRHotGpio = gpio.shift;
		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
	} else {
		dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
	}

	gpio = radeon_atombios_lookup_gpio(rdev, PP_AC_DC_SWITCH_GPIO_PINID);
	if (gpio.valid) {
		dpm_table->AcDcGpio = gpio.shift;
		rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
	} else {
		dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
		rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
	}

	gpio = radeon_atombios_lookup_gpio(rdev, VDDC_PCC_GPIO_PINID);
	if (gpio.valid) {
		u32 tmp = RREG32_SMC(CNB_PWRMGT_CNTL);

		switch (gpio.shift) {
		case 0:
			tmp &= ~GNB_SLOW_MODE_MASK;
			tmp |= GNB_SLOW_MODE(1);
			break;
		case 1:
			tmp &= ~GNB_SLOW_MODE_MASK;
			tmp |= GNB_SLOW_MODE(2);
			break;
		case 2:
			tmp |= GNB_SLOW;
			break;
		case 3:
			tmp |= FORCE_NB_PS1;
			break;
		case 4:
			tmp |= DPM_ENABLED;
			break;
		default:
			DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
			break;
		}
		WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
	}

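	/*
	 * Work out how each rail is controlled: directly via GPIO LUT,
	 * via SVI2, or not controllable at all.
	 */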
	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
	else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
	}

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
	}

	pi->vddc_phase_shed_control = true;

#if defined(CONFIG_ACPI)
	pi->pcie_performance_request =
		radeon_acpi_is_pcie_performance_request_supported(rdev);
#else
	pi->pcie_performance_request = false;
#endif

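	/* spread spectrum caps are only advertised if the vbios has an
	 * ASIC_InternalSS_Info table
	 */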
	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
		pi->caps_sclk_ss_support = true;
		pi->caps_mclk_ss_support = true;
		pi->dynamic_ss = true;
	} else {
		pi->caps_sclk_ss_support = false;
		pi->caps_mclk_ss_support = false;
		pi->dynamic_ss = true;
	}

	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
		pi->thermal_protection = true;
	else
		pi->thermal_protection = false;

	pi->caps_dynamic_ac_timing = true;

	pi->uvd_power_gated = false;

	/* make sure dc limits are valid */
	if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
	    (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	return 0;
}

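/**
 * ci_dpm_debugfs_print_current_performance_level - dump current dpm state
 *
 * Reports the UVD/VCE enable state and the average sclk/mclk sampled by
 * the SMC to the debugfs file.
 */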
void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
						    struct seq_file *m)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_ps *rps = &pi->current_rps;
	u32 sclk = ci_get_average_sclk_freq(rdev);
	u32 mclk = ci_get_average_mclk_freq(rdev);

	seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis");
	seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
	seq_printf(m, "power level avg sclk: %u mclk: %u\n",
		   sclk, mclk);
}

void ci_dpm_print_power_state(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl;
	int i;

	r600_dpm_print_class_info(rps->class, rps->class2);
	r600_dpm_print_cap_info(rps->caps);
	printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->performance_level_count; i++) {
		pl = &ps->performance_levels[i];
		printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
		       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
	}
	r600_dpm_print_ps_status(rdev, rps);
}

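/*
 * ci_dpm_get_sclk()/ci_dpm_get_mclk() report the lowest or highest
 * clock level of the power state most recently requested by the dpm core.
 */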
u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].sclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}

u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].mclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
}